10-Token与Tokenization详解
引言
Token是大语言模型处理文本的基本单位。理解Token和Tokenization对于高效使用LLM至关重要,它直接影响模型性能、API成本和上下文管理。本章将深入探讨Token的概念、各种Tokenization算法、实际应用和优化技巧。
1. Token是什么
1.1 Token vs 字符 vs 单词
三种文本单位对比:
| 维度 | 字符 | Token | 单词 |
|---|---|---|---|
| 粒度 | 最细 | 中等 | 最粗 |
| 词表大小 | 小(~100) | 中(~50K) | 大(无限) |
| 序列长度 | 长 | 中 | 短 |
| OOV问题 | 无 | 少 | 多 |
| 语义完整性 | 差 | 好 | 最好 |
示例:
text = "unhappiness"
# 字符级别
chars = list(text)
print(f"字符: {chars}")
# 输出: ['u', 'n', 'h', 'a', 'p', 'p', 'i', 'n', 'e', 's', 's']
# 长度: 11
# Token级别(BPE)
tokens = ["un", "happiness"]
print(f"Token: {tokens}")
# 输出: ['un', 'happiness']
# 长度: 2
# 单词级别
words = [text]
print(f"单词: {words}")
# 输出: ['unhappiness']
# 长度: 1
为什么选择Token?
- 平衡粒度:比字符更高效,比单词更灵活
- 固定词表:避免OOV(Out-of-Vocabulary)问题
- 子词结构:能够理解词缀、词根等语言结构
- 跨语言能力:统一处理不同语言
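上面的切分只是示意。作为一个小验证,可以用真实的 BPE 分词器看看 unhappiness 实际会被切成哪些子词(这里以 tiktoken 的 cl100k_base 编码为例;具体切分结果取决于所用的词表,注释中的输出仅供参考):
import tiktoken

enc = tiktoken.get_encoding("cl100k_base")  # GPT-4 / GPT-3.5 所用的编码
token_ids = enc.encode("unhappiness")
print([enc.decode([t]) for t in token_ids])
# 可能得到类似 ['un', 'h', 'appiness'] 的子词序列(具体取决于词表)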
1.2 为什么需要Tokenization
问题1:字符级模型的低效
# 字符级
text = "artificial intelligence"
char_tokens = list(text)
print(f"字符数: {len(char_tokens)}") # 24
# Token级
token_tokens = ["art", "ificial", " intelligence"]
print(f"Token数: {len(token_tokens)}") # 3
# 效率提升: 23 / 3 ≈ 7.7倍
字符级模型需要处理更长的序列,导致:
- 更多的计算量(注意力是O(n²))
- 更长的训练时间
- 更难学习长距离依赖
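用一个很小的算术例子可以感受序列长度的影响(沿用上面 23 个字符 vs 3 个 token 的数字,仅作量级示意):
n_char, n_token = 23, 3
# 自注意力的计算量大致与序列长度的平方成正比
print(f"字符级注意力开销 ∝ {n_char ** 2}")            # 529
print(f"Token级注意力开销 ∝ {n_token ** 2}")           # 9
print(f"两者相差约 {n_char ** 2 / n_token ** 2:.0f} 倍")  # 约59倍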
问题2:单词级模型的OOV
# 训练词表
vocab = {"hello", "world", "AI"}
# 测试
text = "hello AI enthusiasts"
# "enthusiasts"不在词表中!
# 单词级: ["hello", "AI", "<UNK>"]
# Token级: ["hello", " AI", " enth", "usi", "asts"] # 可以处理
单词级模型的问题:
- 无限的词表大小(新词不断出现)
- 无法处理拼写错误
- 无法处理复合词
- 形态丰富的语言(如德语)词表爆炸
问题3:多语言处理
# 英文
text_en = "Hello world"
# 单词: 2个
# 中文
text_zh = "你好世界"
# 字符: 4个
# 单词: 2个(需要分词)
# Token统一处理
# 英文: ["Hello", " world"]
# 中文: ["你", "好", "世", "界"] 或 ["你好", "世界"](取决于训练数据)
Token化提供了统一的多语言处理框架。
1.3 Token和模型的关系
词表大小的影响
def model_params(vocab_size, d_model, n_layers):
"""
估算模型参数量
Args:
vocab_size: 词表大小
d_model: 隐藏维度
n_layers: 层数
"""
# Embedding层
embedding_params = vocab_size * d_model
# Transformer层
transformer_params = n_layers * 12 * d_model ** 2
# 输出层(与embedding共享则不计)
# output_params = vocab_size * d_model
total = embedding_params + transformer_params
return {
'embedding': embedding_params,
'transformer': transformer_params,
'total': total,
}
# 对比不同词表大小
for vocab_size in [32000, 50000, 100000]:
params = model_params(vocab_size, d_model=4096, n_layers=32)
print(f"\n词表大小: {vocab_size}")
print(f" Embedding参数: {params['embedding']/1e6:.1f}M")
print(f" Transformer参数: {params['transformer']/1e6:.1f}M")
print(f" 总参数: {params['total']/1e9:.2f}B")
# 输出:
# 词表32K:  Embedding≈131M, Total≈6.57B
# 词表50K:  Embedding≈205M, Total≈6.65B
# 词表100K: Embedding≈410M, Total≈6.85B
词表大小的权衡:
更小的词表:
- 更少的参数
- 更快的训练
- 但序列更长(需要更多token表示同样的文本)
更大的词表:
- 序列更短
- 更好的语义完整性
- 但参数更多
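下面用一个很小的数值例子感受这个权衡(其中 1000 / 800 这两个 token 数是假设的示意值,embedding 维度沿用上文的 4096):
d_model = 4096
# 假设同一段文本:32K词表下切成1000个token,100K词表下切成约800个token(示意值)
for vocab_size, n_tokens in [(32_000, 1000), (100_000, 800)]:
    embed_params = vocab_size * d_model   # embedding参数随词表大小线性增长
    attn_cost = n_tokens ** 2             # 自注意力计算量大致与序列长度的平方成正比
    print(f"vocab={vocab_size}: embedding≈{embed_params / 1e6:.0f}M 参数, 注意力开销 ∝ {attn_cost:,}")
# 词表变大:embedding参数从约131M增加到约410M;但序列变短,注意力开销从1,000,000降到640,000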
Token效率
def calculate_token_efficiency(text, tokenizer):
"""计算token效率(字符/token比率)"""
tokens = tokenizer.encode(text)
n_chars = len(text)
n_tokens = len(tokens)
efficiency = n_chars / n_tokens
return {
'text': text,
'n_chars': n_chars,
'n_tokens': n_tokens,
'efficiency': efficiency,
}
# 对比不同语言(这里假设 tokenizer 已加载,且带有 encode 方法,例如 tiktoken.get_encoding("cl100k_base"))
texts = {
'English': "The quick brown fox jumps over the lazy dog",
'Chinese': "快速的棕色狐狸跳过懒狗",
'German': "Der schnelle braune Fuchs springt über den faulen Hund",
'Code': "def hello_world():\n print('Hello, World!')",
}
for lang, text in texts.items():
result = calculate_token_efficiency(text, tokenizer)
print(f"{lang}: {result['efficiency']:.2f} chars/token")
# 典型输出:
# English: 4.5 chars/token
# Chinese: 1.8 chars/token (效率较低!)
# German: 4.2 chars/token
# Code: 3.8 chars/token
2. Tokenization算法
2.1 Byte Pair Encoding (BPE)
BPE是最流行的Tokenization算法,被GPT系列采用。
算法原理
核心思想: 从字符开始,迭代合并最频繁的相邻字符对。
训练过程:
初始词表: ['a', 'b', 'c', ..., 'z']
语料: "aaabdaaabac"
迭代1:
统计: aa=4, ab=2, bd=1, da=1, ac=1
合并: aa -> Z
结果: "ZabdZabac"
词表: [..., 'Z'(='aa')]
迭代2:
统计: Za=2, ab=2, bd=1, ...
合并: ab -> Y
结果: "ZYdZYac"
词表: [..., 'Z', 'Y'(='ab')]
...
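在看完整实现之前,可以先用几行代码验证迭代1的统计与合并结果(直接对示例语料逐位置统计相邻字符对):
from collections import Counter

corpus = "aaabdaaabac"
pair_counts = Counter(zip(corpus, corpus[1:]))  # 统计相邻字符对
print(pair_counts.most_common(3))
# [(('a', 'a'), 4), (('a', 'b'), 2), ...],与上面的 aa=4、ab=2 一致
print(corpus.replace("aa", "Z"))
# 'ZabdZabac',对应迭代1合并后的结果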
完整实现
import re
from collections import defaultdict, Counter
class BytePairEncoding:
"""完整的BPE实现"""
def __init__(self, vocab_size=1000):
self.vocab_size = vocab_size
self.merges = {} # (pair) -> merged_token
self.vocab = {} # token -> id
def get_stats(self, words):
"""
统计相邻字符对的频率
Args:
words: {word: frequency}
Returns:
{(char1, char2): frequency}
"""
pairs = defaultdict(int)
for word, freq in words.items():
symbols = word.split()
for i in range(len(symbols) - 1):
pairs[(symbols[i], symbols[i+1])] += freq
return pairs
def merge_pair(self, pair, words):
"""
合并词表中的字符对
Args:
pair: (char1, char2) 要合并的pair
words: {word: frequency}
Returns:
合并后的words
"""
bigram = ' '.join(pair)
replacement = ''.join(pair)
new_words = {}
        # 使用带边界约束的正则进行精确替换,只匹配完整符号,
        # 避免把 "lo w" 中的 "o w" 误当成 ("o", "w") 这个pair
        pattern = re.compile(r'(?<!\S)' + re.escape(bigram) + r'(?!\S)')
        for word, freq in words.items():
            new_word = pattern.sub(replacement, word)
new_words[new_word] = freq
return new_words
def train(self, texts, verbose=True):
"""
训练BPE模型
Args:
texts: 训练文本列表
verbose: 是否打印进度
"""
# 1. 统计词频
word_freqs = Counter()
for text in texts:
words = text.split()
word_freqs.update(words)
print(f"共 {len(word_freqs)} 个不同的词")
print(f"总词频: {sum(word_freqs.values())}")
# 2. 初始化:每个词拆分为字符,加上结束符
words = {}
for word, freq in word_freqs.items():
# 字符之间加空格,便于split
words[' '.join(list(word)) + ' </w>'] = freq
# 3. 初始词表:所有字符
self.vocab = set()
for word in words.keys():
self.vocab.update(word.split())
print(f"初始词表大小: {len(self.vocab)}")
# 4. 迭代合并
num_merges = self.vocab_size - len(self.vocab)
for i in range(num_merges):
# 统计pair频率
pairs = self.get_stats(words)
if not pairs:
print(f"没有更多的pair可以合并")
break
# 选择频率最高的pair
best_pair = max(pairs, key=pairs.get)
best_freq = pairs[best_pair]
if verbose and (i + 1) % 100 == 0:
print(f"合并 {i+1}/{num_merges}: {best_pair} (频率={best_freq})")
# 合并
words = self.merge_pair(best_pair, words)
# 记录合并规则
self.merges[best_pair] = ''.join(best_pair)
# 更新词表
self.vocab.add(self.merges[best_pair])
# 5. 构建最终词表映射
self.vocab = {token: i for i, token in enumerate(sorted(self.vocab))}
print(f"\n训练完成!")
print(f"最终词表大小: {len(self.vocab)}")
print(f"合并规则数: {len(self.merges)}")
def tokenize(self, text):
"""
对文本进行分词
Args:
text: 输入文本
Returns:
token列表
"""
words = text.split()
tokens = []
for word in words:
# 拆分为字符
word_tokens = list(word) + ['</w>']
# 迭代应用合并规则
while len(word_tokens) > 1:
# 找出所有可能的pair
pairs = [
(word_tokens[i], word_tokens[i+1])
for i in range(len(word_tokens) - 1)
]
# 找出在merges中的pair
mergeable = [p for p in pairs if p in self.merges]
if not mergeable:
break
# 选择优先级最高的pair(训练时最早学到的)
best_pair = min(
mergeable,
key=lambda p: list(self.merges.keys()).index(p)
)
# 执行合并
new_tokens = []
i = 0
while i < len(word_tokens):
if i < len(word_tokens) - 1 and \
(word_tokens[i], word_tokens[i+1]) == best_pair:
new_tokens.append(self.merges[best_pair])
i += 2
else:
new_tokens.append(word_tokens[i])
i += 1
word_tokens = new_tokens
tokens.extend(word_tokens)
return tokens
def encode(self, text):
"""编码为token IDs"""
tokens = self.tokenize(text)
return [self.vocab.get(t, self.vocab.get('<unk>', 0)) for t in tokens]
def decode(self, token_ids):
"""解码为文本"""
id_to_token = {i: token for token, i in self.vocab.items()}
tokens = [id_to_token.get(i, '<unk>') for i in token_ids]
text = ''.join(tokens).replace('</w>', ' ').strip()
return text
def save(self, path):
"""保存模型"""
import json
with open(path, 'w', encoding='utf-8') as f:
json.dump({
'vocab': self.vocab,
'merges': [(list(k), v) for k, v in self.merges.items()],
}, f, ensure_ascii=False, indent=2)
def load(self, path):
"""加载模型"""
import json
with open(path, 'r', encoding='utf-8') as f:
data = json.load(f)
self.vocab = data['vocab']
self.merges = {tuple(k): v for k, v in data['merges']}
# 使用示例
if __name__ == "__main__":
# 训练数据
texts = [
"low lower lowest",
"new newer newest",
"wide wider widest",
] * 1000 # 重复增加频率
# 训练
bpe = BytePairEncoding(vocab_size=200)
bpe.train(texts, verbose=True)
# 测试
test_text = "lower newest wider"
print(f"\n测试文本: {test_text}")
tokens = bpe.tokenize(test_text)
print(f"分词结果: {tokens}")
encoded = bpe.encode(test_text)
print(f"编码: {encoded}")
decoded = bpe.decode(encoded)
print(f"解码: {decoded}")
# 保存
bpe.save('bpe_model.json')
BPE变体:Byte-level BPE
GPT-2使用的改进版本,直接在字节级别操作。
class ByteLevelBPE:
"""字节级BPE(GPT-2使用)"""
def __init__(self):
# 字节到Unicode字符的映射(避免控制字符)
self.byte_encoder = self._bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
def _bytes_to_unicode(self):
"""
创建字节到Unicode的映射
GPT-2将256个字节映射到256个可打印Unicode字符
"""
bs = (
list(range(ord("!"), ord("~") + 1)) +
list(range(ord("¡"), ord("¬") + 1)) +
list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def encode_bytes(self, text):
"""将文本转换为字节序列"""
return ''.join(self.byte_encoder[b] for b in text.encode('utf-8'))
def decode_bytes(self, tokens):
"""将字节序列转换回文本"""
text = ''.join(tokens)
return bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace')
# 使用示例
byte_bpe = ByteLevelBPE()
# 可以处理任何Unicode字符,包括emoji
text = "Hello 世界! 😊"
encoded = byte_bpe.encode_bytes(text)
print(f"原文: {text}")
print(f"字节编码: {encoded}")
decoded = byte_bpe.decode_bytes(list(encoded))
print(f"解码: {decoded}")
assert text == decoded
优势:
- 固定的256个字节词表(base vocabulary)
- 可以处理任何语言和字符
- 无需特殊的未知字符处理
2.2 WordPiece(BERT使用)
WordPiece与BPE类似,但合并策略不同。
算法原理
BPE vs WordPiece:
BPE: 选择频率最高的pair合并
WordPiece: 选择能最大化语言模型似然的pair合并
WordPiece得分:
score(pair) = freq(pair) / (freq(first) * freq(second))
这个分数衡量的是pair的"凝聚力"
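下面是一个简单的数值例子(频率都是随意假设的,只用来说明这个分数的含义):
# 假设统计得到:pair ("h", "##e") 共现20次,"h" 出现25次,"##e" 出现100次
freq_pair, freq_first, freq_second = 20, 25, 100
score = freq_pair / (freq_first * freq_second)
print(f"score = {score:.4f}")  # 0.0080
# 若 "##e" 本身非常常见(比如出现1000次),同样的共现次数下分数降为0.0008:
# 说明这个pair"凝聚力"弱,WordPiece不会优先合并它;而BPE只看共现频率,对这两种情况一视同仁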
实现
import re
from collections import defaultdict, Counter
class WordPieceTokenizer:
"""WordPiece分词器"""
def __init__(self, vocab_size=1000, unk_token='[UNK]', continuing_subword_prefix='##'):
self.vocab_size = vocab_size
self.unk_token = unk_token
self.prefix = continuing_subword_prefix
self.vocab = {}
def get_pair_scores(self, words):
"""
计算pair的分数(似然增益)
score = freq(pair) / (freq(first) * freq(second))
"""
pair_freqs = defaultdict(int)
token_freqs = defaultdict(int)
# 统计频率
for word, freq in words.items():
symbols = word.split()
for symbol in symbols:
token_freqs[symbol] += freq
for i in range(len(symbols) - 1):
pair = (symbols[i], symbols[i+1])
pair_freqs[pair] += freq
# 计算分数
scores = {}
for pair, freq in pair_freqs.items():
first, second = pair
scores[pair] = freq / (token_freqs[first] * token_freqs[second])
return scores
def merge_pair(self, pair, words):
"""合并pair"""
bigram = ' '.join(pair)
replacement = ''.join(pair)
new_words = {}
for word, freq in words.items():
new_word = word.replace(bigram, replacement)
new_words[new_word] = freq
return new_words
def train(self, texts, verbose=True):
"""训练WordPiece"""
# 统计词频
word_freqs = Counter()
for text in texts:
words = text.split()
word_freqs.update(words)
# 初始化:第一个字符不加前缀,其他加##
words = {}
for word, freq in word_freqs.items():
if len(word) == 1:
words[word] = freq
else:
tokens = [word[0]] + [f'{self.prefix}{c}' for c in word[1:]]
words[' '.join(tokens)] = freq
# 初始词表
self.vocab = set([self.unk_token])
for word in words.keys():
self.vocab.update(word.split())
print(f"初始词表大小: {len(self.vocab)}")
# 迭代合并
num_merges = self.vocab_size - len(self.vocab)
for i in range(num_merges):
# 计算分数
scores = self.get_pair_scores(words)
if not scores:
break
# 选择分数最高的pair
best_pair = max(scores, key=scores.get)
best_score = scores[best_pair]
if verbose and (i + 1) % 100 == 0:
print(f"合并 {i+1}: {best_pair}, score={best_score:.6f}")
# 合并
words = self.merge_pair(best_pair, words)
self.vocab.add(''.join(best_pair))
# 构建词表映射
self.vocab = {token: i for i, token in enumerate(sorted(self.vocab))}
print(f"训练完成!词表大小: {len(self.vocab)}")
def tokenize(self, text):
"""
对文本进行WordPiece分词
使用贪心最长匹配算法
"""
words = text.split()
tokens = []
for word in words:
# 贪心最长匹配
word_tokens = []
start = 0
while start < len(word):
end = len(word)
found = False
# 从最长子串开始尝试
while start < end:
substr = word[start:end]
# 非首字符加前缀
if start > 0:
substr = f'{self.prefix}{substr}'
# 检查是否在词表中
if substr in self.vocab:
word_tokens.append(substr)
start = end
found = True
break
end -= 1
if not found:
# 未找到,使用[UNK]
word_tokens.append(self.unk_token)
start += 1
tokens.extend(word_tokens)
return tokens
def encode(self, text):
"""编码为IDs"""
tokens = self.tokenize(text)
return [self.vocab.get(t, self.vocab[self.unk_token]) for t in tokens]
def decode(self, token_ids):
"""解码为文本"""
id_to_token = {i: token for token, i in self.vocab.items()}
tokens = [id_to_token.get(i, self.unk_token) for i in token_ids]
        # 词之间以空格连接,再把续接子词的 "##" 前缀拼回前一个token
        text = ' '.join(tokens).replace(' ' + self.prefix, '')
        return text.strip()
# 使用示例
if __name__ == "__main__":
texts = [
"play playing player played",
"run running runner",
] * 500
wordpiece = WordPieceTokenizer(vocab_size=200)
wordpiece.train(texts)
# 测试
test = "playing runner"
tokens = wordpiece.tokenize(test)
print(f"\n文本: {test}")
print(f"Token: {tokens}")
# BERT风格的分词
# "playing" -> ["play", "##ing"]
# "runner" -> ["run", "##ner"]
BERT的完整Tokenizer
from transformers import BertTokenizer
# 加载预训练的BERT tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# 分词
text = "playing with transformers"
tokens = tokenizer.tokenize(text)
print(f"Tokens: {tokens}")
# 输出: ['playing', 'with', 'transform', '##ers']
# 编码(添加特殊token)
encoded = tokenizer.encode(text, add_special_tokens=True)
print(f"Encoded: {encoded}")
# 输出: [101, 2652, 2007, 10938, 2545, 102]
# [CLS] playing with transform ##ers [SEP]
# 解码
decoded = tokenizer.decode(encoded)
print(f"Decoded: {decoded}")
# 输出: [CLS] playing with transformers [SEP]
# 获取词表信息
print(f"词表大小: {len(tokenizer)}") # 30522
print(f"特殊token: {tokenizer.all_special_tokens}")
# ['[UNK]', '[SEP]', '[PAD]', '[CLS]', '[MASK]']
2.3 SentencePiece(LLaMA使用)
SentencePiece是一个语言无关的分词器,直接在原始文本上训练。
特点
- 语言无关:不依赖空格分词
- 可逆:完美还原原文(包括空格)
- 支持多种算法:BPE、Unigram
- 空格处理:空格被编码为特殊字符(▁)
训练SentencePiece
import sentencepiece as spm
# 准备训练数据
corpus = """
This is a sample corpus for training SentencePiece.
SentencePiece is a language-independent tokenizer.
It can handle multiple languages without spaces.
这是一个中文句子,SentencePiece可以处理。
"""
# 保存到文件
with open('corpus.txt', 'w', encoding='utf-8') as f:
f.write(corpus)
# 训练SentencePiece模型
spm.SentencePieceTrainer.train(
input='corpus.txt',
model_prefix='sp_model',
vocab_size=1000,
character_coverage=0.9995, # 字符覆盖率
model_type='bpe', # 或'unigram'
pad_id=0, # [PAD] token ID
unk_id=1, # [UNK] token ID
bos_id=2, # [BOS] (beginning of sentence) token ID
eos_id=3, # [EOS] (end of sentence) token ID
user_defined_symbols=['<|im_start|>', '<|im_end|>'], # 自定义token
)
print("训练完成!生成了 sp_model.model 和 sp_model.vocab")
使用SentencePiece
# 加载模型
sp = spm.SentencePieceProcessor()
sp.load('sp_model.model')
# 编码(多种方式)
text = "Hello, world! 你好世界!"
# 1. 分词(piece)
pieces = sp.encode_as_pieces(text)
print(f"Pieces: {pieces}")
# 输出: ['▁Hello', ',', '▁world', '!', '▁', '你', '好', '世', '界', '!']
# 2. 编码为IDs
ids = sp.encode_as_ids(text)
print(f"IDs: {ids}")
# 输出: [234, 15, 567, 8, 4, 89, 90, 91, 92, 12]
# 3. 解码(完美还原)
decoded = sp.decode_ids(ids)
print(f"Decoded: {decoded}")
# 输出: Hello, world! 你好世界!
assert text == decoded # 可逆!
# 词表信息
print(f"词表大小: {sp.vocab_size()}")
print(f"[PAD] ID: {sp.pad_id()}")
print(f"[UNK] ID: {sp.unk_id()}")
print(f"[BOS] ID: {sp.bos_id()}")
print(f"[EOS] ID: {sp.eos_id()}")
# 查看词表
for i in range(10):
print(f"ID {i}: {sp.id_to_piece(i)}")
Unigram Language Model
SentencePiece支持的另一种算法。
原理:
- 从大词表开始(所有子串)
- 迭代删除对loss影响最小的token
- 直到达到目标词表大小
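分词(推理)时,Unigram 在给定每个子词概率的前提下,用动态规划(Viterbi)选出整体概率最大的切分。下面是一个极简的示意实现,其中词表和对数概率都是随意假设的;真实训练还包括用 EM 估计子词概率并逐步裁剪词表:
import math

# 假设的子词词表及其对数概率(数值仅为示意)
vocab_logp = {"un": -3.0, "happi": -4.0, "ness": -3.0, "happiness": -5.0}

def viterbi_segment(word, vocab_logp):
    """返回对数概率最大的切分(简化版,假设word能被词表完整覆盖)"""
    n = len(word)
    best = [(-math.inf, None)] * (n + 1)  # best[i] = (到位置i的最优得分, 上一个切分点)
    best[0] = (0.0, None)
    for end in range(1, n + 1):
        for start in range(end):
            piece = word[start:end]
            if piece in vocab_logp and best[start][0] > -math.inf:
                score = best[start][0] + vocab_logp[piece]
                if score > best[end][0]:
                    best[end] = (score, start)
    # 回溯得到切分
    pieces, pos = [], n
    while pos > 0:
        start = best[pos][1]
        pieces.append(word[start:pos])
        pos = start
    return pieces[::-1]

print(viterbi_segment("unhappiness", vocab_logp))
# 在上面假设的概率下输出 ['un', 'happiness'];若调低 "happiness" 的概率,结果会变成 ['un', 'happi', 'ness']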
训练:
# 使用Unigram算法
spm.SentencePieceTrainer.train(
input='corpus.txt',
model_prefix='sp_unigram',
vocab_size=1000,
model_type='unigram', # Unigram语言模型
)
# 加载
sp_unigram = spm.SentencePieceProcessor()
sp_unigram.load('sp_unigram.model')
# 测试
text = "playing with sentencepiece"
print(f"BPE: {sp.encode_as_pieces(text)}")
print(f"Unigram: {sp_unigram.encode_as_pieces(text)}")
# Unigram可以输出多个候选分词(n-best)
nbest_pieces = sp_unigram.nbest_encode_as_pieces(text, nbest_size=3)
print(f"N-best: {nbest_pieces}")
BPE vs Unigram:
| 特性 | BPE | Unigram |
|---|---|---|
| 分词结果 | 确定性 | 可以采样多个 |
| 训练速度 | 快 | 慢 |
| 适用场景 | 通用 | 需要分词多样性 |
2.4 算法对比总结
# 对比不同算法
from transformers import (
GPT2Tokenizer, # BPE
BertTokenizer, # WordPiece
LlamaTokenizer, # SentencePiece
)
text = "Hello, world! Playing with transformers. 你好世界!"
tokenizers = {
'GPT-2 (BPE)': GPT2Tokenizer.from_pretrained('gpt2'),
'BERT (WordPiece)': BertTokenizer.from_pretrained('bert-base-uncased'),
'LLaMA (SentencePiece)': LlamaTokenizer.from_pretrained('huggyllama/llama-7b'),
}
print(f"原文: {text}\n")
for name, tokenizer in tokenizers.items():
tokens = tokenizer.tokenize(text)
encoded = tokenizer.encode(text)
print(f"{name}:")
print(f" Token数: {len(tokens)}")
print(f" Tokens: {tokens[:10]}...") # 只显示前10个
print(f" IDs: {encoded[:10]}...")
print()
3. Tokenizer实战
3.1 Hugging Face Tokenizers库
Hugging Face提供了高效的Tokenizers实现。
安装和基本使用
from tokenizers import Tokenizer, models, trainers, pre_tokenizers
# 创建一个BPE tokenizer
tokenizer = Tokenizer(models.BPE())
# 配置预处理(分割规则)
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
# 配置训练器
trainer = trainers.BpeTrainer(
vocab_size=1000,
special_tokens=["<pad>", "<unk>", "<s>", "</s>"],
)
# 训练
files = ["corpus.txt"]
tokenizer.train(files, trainer)
# 保存
tokenizer.save("my_tokenizer.json")
# 加载
tokenizer = Tokenizer.from_file("my_tokenizer.json")
# 使用
output = tokenizer.encode("Hello, world!")
print(f"Tokens: {output.tokens}")
print(f"IDs: {output.ids}")
自定义Tokenizer
from tokenizers import (
Tokenizer,
models,
normalizers,
pre_tokenizers,
decoders,
trainers,
)
from tokenizers.normalizers import NFD, Lowercase, StripAccents
from tokenizers.pre_tokenizers import Whitespace, Digits
from tokenizers.processors import TemplateProcessing
# 1. 创建BPE模型
tokenizer = Tokenizer(models.BPE(unk_token="<unk>"))
# 2. 配置Normalizer(文本规范化)
tokenizer.normalizer = normalizers.Sequence([
NFD(), # Unicode规范化
Lowercase(), # 小写
StripAccents(), # 去除重音符号
])
# 3. 配置Pre-tokenizer(预分词)
tokenizer.pre_tokenizer = pre_tokenizers.Sequence([
Whitespace(), # 按空格分割
Digits(individual_digits=True), # 将数字拆分为单个字符
])
# 4. 配置Decoder(解码)
tokenizer.decoder = decoders.BPEDecoder()
# 5. 配置后处理(添加特殊token)
tokenizer.post_processor = TemplateProcessing(
single="<s> $A </s>",
pair="<s> $A </s> $B:1 </s>:1",
special_tokens=[
("<s>", 1),
("</s>", 2),
],
)
# 6. 训练
trainer = trainers.BpeTrainer(
vocab_size=10000,
min_frequency=2,
special_tokens=["<pad>", "<unk>", "<s>", "</s>"],
show_progress=True,
)
files = ["train.txt"]
tokenizer.train(files, trainer)
# 7. 测试
text = "Hello, World! 123"
encoded = tokenizer.encode(text)
print(f"Tokens: {encoded.tokens}")
print(f"IDs: {encoded.ids}")
# 输出:
# Tokens: ['<s>', 'hello', ',', 'world', '!', '1', '2', '3', '</s>']
批量处理
# 批量编码
texts = [
"Hello, world!",
"Tokenization is important.",
"Batch processing is efficient.",
]
# 启用批量处理
tokenizer.enable_padding(pad_id=0, pad_token="<pad>")
tokenizer.enable_truncation(max_length=20)
# 批量编码
encodings = tokenizer.encode_batch(texts)
for i, encoding in enumerate(encodings):
print(f"\nText {i+1}: {texts[i]}")
print(f"Tokens: {encoding.tokens}")
print(f"IDs: {encoding.ids}")
print(f"Attention mask: {encoding.attention_mask}")
3.2 tiktoken(OpenAI使用)
OpenAI开发的高效tokenizer。
基本使用
import tiktoken
# 加载GPT-4的tokenizer
encoding = tiktoken.encoding_for_model("gpt-4")
# 编码
text = "Hello, world! This is a test of tiktoken."
tokens = encoding.encode(text)
print(f"Token IDs: {tokens}")
print(f"Token count: {len(tokens)}")
# 解码
decoded = encoding.decode(tokens)
print(f"Decoded: {decoded}")
assert text == decoded
# 查看token
for token_id in tokens[:5]:
token_bytes = encoding.decode_single_token_bytes(token_id)
print(f"ID {token_id}: {token_bytes} -> {token_bytes.decode('utf-8')}")
计算token数(用于API计费)
def count_tokens(text, model="gpt-4"):
"""计算文本的token数量"""
encoding = tiktoken.encoding_for_model(model)
return len(encoding.encode(text))
# 示例
texts = [
"Hello",
"Hello, world!",
"The quick brown fox jumps over the lazy dog.",
"这是一个中文句子。",
]
for text in texts:
n_tokens = count_tokens(text)
print(f"{text[:30]:30s} -> {n_tokens} tokens")
# 输出:
# Hello -> 1 tokens
# Hello, world! -> 4 tokens
# The quick brown fox... -> 10 tokens
# 这是一个中文句子。 -> 9 tokens (中文效率较低)
不同模型的编码
# 对比不同模型的编码
text = "Hello, world! 你好世界!"
models = ['gpt-4', 'gpt-3.5-turbo', 'text-davinci-003']
for model in models:
encoding = tiktoken.encoding_for_model(model)
tokens = encoding.encode(text)
print(f"{model}:")
print(f" Token count: {len(tokens)}")
print(f" Tokens: {tokens}")
print()
3.3 词表大小的影响
实验对比
import time
def train_and_evaluate(corpus, vocab_sizes):
"""对比不同词表大小的影响"""
results = []
for vocab_size in vocab_sizes:
# 训练
start_time = time.time()
tokenizer = Tokenizer(models.BPE())
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
trainer = trainers.BpeTrainer(vocab_size=vocab_size)
tokenizer.train_from_iterator([corpus], trainer)
train_time = time.time() - start_time
# 评估
encoded = tokenizer.encode(corpus)
n_tokens = len(encoded.ids)
# 压缩率
compression_ratio = len(corpus) / n_tokens
results.append({
'vocab_size': vocab_size,
'train_time': train_time,
'n_tokens': n_tokens,
'compression_ratio': compression_ratio,
})
print(f"词表大小: {vocab_size}")
print(f" 训练时间: {train_time:.2f}s")
print(f" Token数: {n_tokens}")
print(f" 压缩率: {compression_ratio:.2f}")
print()
return results
# 测试
corpus = open('corpus.txt').read()
vocab_sizes = [500, 1000, 2000, 5000, 10000]
results = train_and_evaluate(corpus, vocab_sizes)
# 可视化
import matplotlib.pyplot as plt
fig, axes = plt.subplots(1, 3, figsize=(15, 4))
vocab_sizes = [r['vocab_size'] for r in results]
# 训练时间
axes[0].plot(vocab_sizes, [r['train_time'] for r in results], 'o-')
axes[0].set_xlabel('Vocab Size')
axes[0].set_ylabel('Training Time (s)')
axes[0].set_title('Training Time vs Vocab Size')
# Token数
axes[1].plot(vocab_sizes, [r['n_tokens'] for r in results], 'o-')
axes[1].set_xlabel('Vocab Size')
axes[1].set_ylabel('Number of Tokens')
axes[1].set_title('Sequence Length vs Vocab Size')
# 压缩率
axes[2].plot(vocab_sizes, [r['compression_ratio'] for r in results], 'o-')
axes[2].set_xlabel('Vocab Size')
axes[2].set_ylabel('Compression Ratio')
axes[2].set_title('Compression Ratio vs Vocab Size')
plt.tight_layout()
plt.show()
结论:
- 词表越大,训练时间越长
- 词表越大,token数越少(序列更短)
- 词表越大,压缩率越高
- 权衡:32K-50K是常见选择
3.4 特殊Token
常见特殊Token
# 特殊token的定义和使用
SPECIAL_TOKENS = {
# 通用
'<pad>': 0, # Padding token
'<unk>': 1, # Unknown token
'<s>': 2, # Start of sequence
'</s>': 3, # End of sequence
# BERT特有
'[CLS]': 101, # Classification token
'[SEP]': 102, # Separator token
'[MASK]': 103, # Mask token (for MLM)
# GPT特有
'<|endoftext|>': 50256, # End of text
# 对话模型
'<|im_start|>': 0, # Instruction start
'<|im_end|>': 1, # Instruction end
'<|user|>': 2,
'<|assistant|>': 3,
'<|system|>': 4,
}
BERT的特殊Token
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# 单句
text = "Hello, world!"
encoded = tokenizer.encode(text, add_special_tokens=True)
tokens = tokenizer.convert_ids_to_tokens(encoded)
print("单句:")
print(f"Tokens: {tokens}")
# 输出: ['[CLS]', 'hello', ',', 'world', '!', '[SEP]']
# 句对(用于NLI等任务)
text_a = "The cat is sleeping."
text_b = "The dog is barking."
encoded = tokenizer.encode(text_a, text_b, add_special_tokens=True)
tokens = tokenizer.convert_ids_to_tokens(encoded)
print("\n句对:")
print(f"Tokens: {tokens}")
# 输出: ['[CLS]', 'the', 'cat', 'is', 'sleeping', '.', '[SEP]', 'the', 'dog', 'is', 'barking', '.', '[SEP]']
# Segment IDs
segment_ids = tokenizer.create_token_type_ids_from_sequences(
tokenizer.encode(text_a, add_special_tokens=False),
tokenizer.encode(text_b, add_special_tokens=False),
)
print(f"Segment IDs: {segment_ids}")
# 输出: [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
# 前7个0对应 [CLS] + 句A + [SEP],后6个1对应 句B + [SEP]
GPT的特殊Token
from transformers import GPT2Tokenizer
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
# GPT-2使用<|endoftext|>作为分隔符
print(f"EOS token: {tokenizer.eos_token}")
print(f"EOS token ID: {tokenizer.eos_token_id}")
# 编码多个文档
docs = [
"This is document 1.",
"This is document 2.",
"This is document 3.",
]
# 用<|endoftext|>连接
text = tokenizer.eos_token.join(docs)
encoded = tokenizer.encode(text)
tokens = tokenizer.convert_ids_to_tokens(encoded)
print(f"\nTokens: {tokens}")
# 每个文档之间有<|endoftext|>分隔
自定义特殊Token
from transformers import AutoTokenizer
# 加载tokenizer
tokenizer = AutoTokenizer.from_pretrained('gpt2')
# 添加自定义特殊token
special_tokens = {
'additional_special_tokens': ['<|user|>', '<|assistant|>', '<|system|>']
}
num_added = tokenizer.add_special_tokens(special_tokens)
print(f"添加了 {num_added} 个特殊token")
# 使用
text = "<|user|>Hello!<|assistant|>Hi there!<|user|>How are you?"
encoded = tokenizer.encode(text)
print(f"Encoded: {encoded}")添加token后需要调整模型的embedding层大小
# model.resize_token_embeddings(len(tokenizer))
4. Token计数和成本
4.1 如何计算Token数量
准确计算
import tiktoken
def count_tokens_accurate(text, model="gpt-4"):
"""
准确计算token数量
Args:
text: 输入文本
model: 模型名称
Returns:
token数量
"""
encoding = tiktoken.encoding_for_model(model)
return len(encoding.encode(text))
# 示例
text = "Hello, how are you today?"
n_tokens = count_tokens_accurate(text)
print(f"'{text}' contains {n_tokens} tokens")
快速估算
def estimate_tokens(text, chars_per_token=4):
"""
快速估算token数量
经验法则:
- 英文:约4个字符/token
- 中文:约1.5-2个字符/token
- 代码:约3-4个字符/token
Args:
text: 输入文本
chars_per_token: 每个token的平均字符数
Returns:
估算的token数量
"""
return len(text) // chars_per_token
# 对比
text = "The quick brown fox jumps over the lazy dog"
accurate = count_tokens_accurate(text)
estimated = estimate_tokens(text)
print(f"文本: {text}")
print(f"准确计数: {accurate} tokens")
print(f"快速估算: {estimated} tokens")
print(f"误差: {abs(accurate - estimated)} tokens")
消息格式的Token计数
def count_tokens_for_messages(messages, model="gpt-4"):
"""
计算对话消息的token数
OpenAI的消息格式会添加额外的token
"""
encoding = tiktoken.encoding_for_model(model)
tokens_per_message = 3 # 每条消息的开销
tokens_per_name = 1 # 如果有name字段
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3 # 助手回复的起始token
return num_tokens
# 示例
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"},
{"role": "assistant", "content": "Hi there! How can I help you?"},
{"role": "user", "content": "What's the weather like?"},
]
n_tokens = count_tokens_for_messages(messages)
print(f"Total tokens: {n_tokens}")
# 逐条显示
for i, msg in enumerate(messages):
msg_tokens = count_tokens_for_messages([msg])
print(f"Message {i+1} ({msg['role']}): {msg_tokens} tokens")
4.2 API计费模型
OpenAI API定价
# OpenAI API价格示例(价格会随模型版本调整,请以官方定价页为准)
PRICING = {
'gpt-4': {
'input': 0.03, # $0.03 per 1K tokens
'output': 0.06, # $0.06 per 1K tokens
},
'gpt-4-32k': {
'input': 0.06,
'output': 0.12,
},
'gpt-3.5-turbo': {
'input': 0.0015,
'output': 0.002,
},
'gpt-3.5-turbo-16k': {
'input': 0.003,
'output': 0.004,
},
}
def calculate_cost(input_tokens, output_tokens, model='gpt-4'):
"""
计算API调用成本
Args:
input_tokens: 输入token数
output_tokens: 输出token数
model: 模型名称
Returns:
成本(美元)
"""
pricing = PRICING[model]
input_cost = (input_tokens / 1000) * pricing['input']
output_cost = (output_tokens / 1000) * pricing['output']
total_cost = input_cost + output_cost
return {
'input_cost': input_cost,
'output_cost': output_cost,
'total_cost': total_cost,
}
# 示例
input_tokens = 1000
output_tokens = 500
for model in ['gpt-4', 'gpt-3.5-turbo']:
cost = calculate_cost(input_tokens, output_tokens, model)
print(f"{model}:")
print(f" 输入成本: ${cost['input_cost']:.4f}")
print(f" 输出成本: ${cost['output_cost']:.4f}")
print(f" 总成本: ${cost['total_cost']:.4f}")
print()
# 输出:
# gpt-4:
# 输入成本: $0.0300
# 输出成本: $0.0300
# 总成本: $0.0600
#
# gpt-3.5-turbo:
# 输入成本: $0.0015
# 输出成本: $0.0010
# 总成本: $0.0025
批量任务成本估算
def estimate_batch_cost(
texts,
avg_output_tokens=100,
model='gpt-4',
):
"""
估算批量任务的成本
Args:
texts: 文本列表
avg_output_tokens: 平均输出token数
model: 模型名称
Returns:
成本统计
"""
total_input_tokens = sum(count_tokens_accurate(text, model) for text in texts)
total_output_tokens = len(texts) * avg_output_tokens
cost = calculate_cost(total_input_tokens, total_output_tokens, model)
return {
'n_requests': len(texts),
'total_input_tokens': total_input_tokens,
'total_output_tokens': total_output_tokens,
'total_cost': cost['total_cost'],
'cost_per_request': cost['total_cost'] / len(texts),
    }

# 示例:批量文本分类
texts = [
"This is a positive review.",
"This is a negative review.",
# ... 更多文本
] * 100 # 200个请求
stats = estimate_batch_cost(texts, avg_output_tokens=10, model='gpt-3.5-turbo')
print(f"批量任务成本估算:")
print(f" 请求数: {stats['n_requests']}")
print(f" 总输入token: {stats['total_input_tokens']}")
print(f" 总输出token: {stats['total_output_tokens']}")
print(f" 总成本: ${stats['total_cost']:.2f}")
print(f" 单次成本: ${stats['cost_per_request']:.4f}")
4.3 Token优化技巧
1. 减少系统提示长度
# 冗长的系统提示
verbose_system = """
You are a helpful, respectful and honest assistant.
Always answer as helpfully as possible, while being safe.
Your answers should not include any harmful, unethical, racist,
sexist, toxic, dangerous, or illegal content.
Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent,
explain why instead of answering something not correct.
If you don't know the answer to a question, please don't share false information.
"""
# 简洁的系统提示
concise_system = "You are a helpful assistant."
print(f"冗长提示: {count_tokens_accurate(verbose_system)} tokens")
print(f"简洁提示: {count_tokens_accurate(concise_system)} tokens")
print(f"节省: {count_tokens_accurate(verbose_system) - count_tokens_accurate(concise_system)} tokens")
2. 使用更短的示例
# Few-shot学习:冗长示例
verbose_examples = """
Input: The movie was absolutely fantastic! I loved every minute of it.
The acting was superb and the story was engaging.
Output: Positive
Input: This is the worst product I have ever purchased. It broke after one day.
I am extremely disappointed and would not recommend it to anyone.
Output: Negative
Input: The restaurant was okay. Nothing special but nothing terrible either.
Output: Neutral
"""
# Few-shot学习:简洁示例
concise_examples = """
"Fantastic movie!" -> Positive
"Worst product ever." -> Negative
"Restaurant was okay." -> Neutral
"""
print(f"冗长示例: {count_tokens_accurate(verbose_examples)} tokens")
print(f"简洁示例: {count_tokens_accurate(concise_examples)} tokens")
print(f"节省: {count_tokens_accurate(verbose_examples) - count_tokens_accurate(concise_examples)} tokens")
3. 截断长文本
def truncate_to_token_limit(text, max_tokens=1000, model='gpt-4'):
"""
截断文本到指定token数
Args:
text: 输入文本
max_tokens: 最大token数
model: 模型名称
Returns:
截断后的文本
"""
encoding = tiktoken.encoding_for_model(model)
tokens = encoding.encode(text)
if len(tokens) <= max_tokens:
return text
# 截断并解码
truncated_tokens = tokens[:max_tokens]
return encoding.decode(truncated_tokens)
# 示例
long_text = "Lorem ipsum " * 500 # 很长的文本
truncated = truncate_to_token_limit(long_text, max_tokens=100)
print(f"原始长度: {count_tokens_accurate(long_text)} tokens")
print(f"截断后: {count_tokens_accurate(truncated)} tokens")
4. 智能摘要
def smart_summarize_for_context(text, max_tokens=500, model='gpt-4'):
"""
智能摘要:如果文本过长,则先摘要再使用
Args:
text: 输入文本
max_tokens: 最大token数
model: 模型名称
Returns:
处理后的文本
"""
current_tokens = count_tokens_accurate(text, model)
if current_tokens <= max_tokens:
return text
# 需要摘要
print(f"文本过长({current_tokens} tokens),正在摘要...")
# 调用API进行摘要(伪代码)
summary_prompt = f"Summarize the following text in {max_tokens} tokens:\n\n{text}"
# summary = call_api(summary_prompt)
# 这里返回截断版本作为示例
return truncate_to_token_limit(text, max_tokens, model)
5. 缓存常用上下文
class ContextCache:
"""缓存常用的上下文,避免重复计算token"""
def __init__(self):
self.cache = {}
def get_or_encode(self, text, model='gpt-4'):
"""获取或编码文本"""
key = (text, model)
if key not in self.cache:
            encoding = tiktoken.encoding_for_model(model)
            tokens = encoding.encode(text)  # 只编码一次,编码结果与长度一并缓存
            self.cache[key] = {
                'tokens': tokens,
                'n_tokens': len(tokens),
            }
return self.cache[key]
def get_token_count(self, text, model='gpt-4'):
"""快速获取token数"""
return self.get_or_encode(text, model)['n_tokens']
# 使用
cache = ContextCache()
# 第一次:需要编码
system_prompt = "You are a helpful assistant."
n_tokens = cache.get_token_count(system_prompt)
print(f"Token count: {n_tokens}")
# 第二次:直接从缓存获取
n_tokens = cache.get_token_count(system_prompt)
print(f"Token count (cached): {n_tokens}")
5. 多语言Tokenization
5.1 中文分词挑战
问题
中文没有明显的词边界(空格),给tokenization带来挑战。
# 英文:天然有空格分隔
text_en = "I love natural language processing"
# 词: ["I", "love", "natural", "language", "processing"]
# 中文:没有空格
text_zh = "我喜欢自然语言处理"
# 字符: ["我", "喜", "欢", "自", "然", "语", "言", "处", "理"]
# 词: ["我", "喜欢", "自然", "语言", "处理"] # 需要分词
中文分词工具
# jieba分词
import jieba
text = "我喜欢自然语言处理和深度学习"
# 精确模式
words = jieba.cut(text, cut_all=False)
print(f"精确模式: {list(words)}")
# 输出: ['我', '喜欢', '自然语言', '处理', '和', '深度', '学习']
# 全模式
words = jieba.cut(text, cut_all=True)
print(f"全模式: {list(words)}")
# 输出: ['我', '喜欢', '自然', '自然语言', '语言', '处理', '和', '深度', '学习']
# 搜索引擎模式
words = jieba.cut_for_search(text)
print(f"搜索模式: {list(words)}")
BPE在中文上的表现
from transformers import BertTokenizer, GPT2Tokenizer
text = "我喜欢自然语言处理"
# BERT (中文优化)
bert_tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
bert_tokens = bert_tokenizer.tokenize(text)
print(f"BERT: {bert_tokens}")
# 输出: ['我', '喜', '欢', '自', '然', '语', '言', '处', '理']
# 字符级别!
# GPT-2 (英文训练)
gpt2_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
gpt2_tokens = gpt2_tokenizer.tokenize(text)
print(f"GPT-2: {gpt2_tokens}")
# 输出: 很多碎片化的token(效率很低)
print(f"BERT token数: {len(bert_tokens)}")
print(f"GPT-2 token数: {len(gpt2_tokens)}")
5.2 Unicode处理
Unicode基础
# Unicode编码
text = "Hello 世界 🌍"
# 查看编码
for char in text:
print(f"{char}: U+{ord(char):04X} ({ord(char)})")
# 输出:
# H: U+0048 (72)
# e: U+0065 (101)
# l: U+006C (108)
# ...
# 世: U+4E16 (19990)
# 界: U+754C (30028)
# 🌍: U+1F30D (127757)
处理特殊字符
def analyze_text_encoding(text):
"""分析文本的编码特征"""
# 字节长度
byte_length = len(text.encode('utf-8'))
# 字符分类
ascii_chars = sum(1 for c in text if ord(c) < 128)
cjk_chars = sum(1 for c in text if 0x4E00 <= ord(c) <= 0x9FFF)
emoji_chars = sum(1 for c in text if ord(c) > 0x1F000)
return {
'text': text,
'char_length': len(text),
'byte_length': byte_length,
'ascii_chars': ascii_chars,
'cjk_chars': cjk_chars,
'emoji_chars': emoji_chars,
}
# 测试
texts = [
"Hello",
"你好",
"Hello 你好",
"Hello 你好 😊",
]
for text in texts:
info = analyze_text_encoding(text)
print(f"'{info['text']}':")
print(f" 字符数: {info['char_length']}")
print(f" 字节数: {info['byte_length']}")
print(f" ASCII: {info['ascii_chars']}, CJK: {info['cjk_chars']}, Emoji: {info['emoji_chars']}")
print()
Byte-level BPE的优势
# Byte-level BPE可以处理任何字符
from transformers import GPT2Tokenizer
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
# 包含各种字符
text = "Hello 世界 🌍 Привет مرحبا"
# 编码
tokens = tokenizer.encode(text)
print(f"Token数: {len(tokens)}")
# 解码(完美还原)
decoded = tokenizer.decode(tokens)
print(f"解码: {decoded}")
assert text == decoded.strip()
# Byte-level BPE将所有字符映射到256个字节
print(f"词表大小: {len(tokenizer)}") # 50257
5.3 不同语言的Token效率
对比实验
from transformers import AutoTokenizer
# 加载不同模型的tokenizer
tokenizers = {
'GPT-2 (英文)': AutoTokenizer.from_pretrained('gpt2'),
    'XLM-R (多语言)': AutoTokenizer.from_pretrained('xlm-roberta-base'),
'BERT (中文)': AutoTokenizer.from_pretrained('bert-base-chinese'),
}
# 测试文本(同样的意思)
texts = {
'English': "The quick brown fox jumps over the lazy dog.",
'Chinese': "敏捷的棕色狐狸跳过懒狗。",
'Spanish': "El rápido zorro marrón salta sobre el perro perezoso.",
'German': "Der schnelle braune Fuchs springt über den faulen Hund.",
'Japanese': "素早い茶色のキツネが怠け者の犬を飛び越える。",
'Arabic': "الثعلب البني السريع يقفز فوق الكلب الكسول.",
}
# 统计
print(f"{'语言':<10} {'字符数':<8} ", end='')
for name in tokenizers.keys():
print(f"{name:<20} ", end='')
print()
print("-" * 80)
for lang, text in texts.items():
print(f"{lang:<10} {len(text):<8} ", end='')
for name, tokenizer in tokenizers.items():
tokens = tokenizer.encode(text)
n_tokens = len(tokens)
efficiency = len(text) / n_tokens
print(f"{n_tokens:>3}t ({efficiency:.2f}c/t) ", end='')
print()
典型结果:
语言       字符数   GPT-2 (英文)      XLM-R (多语言)     BERT (中文)
-------------------------------------------------------------------------------
English 45 11t (4.09c/t) 12t (3.75c/t) 15t (3.00c/t)
Chinese 14 28t (0.50c/t) 10t (1.40c/t) 14t (1.00c/t)
Spanish 54 16t (3.38c/t) 14t (3.86c/t) 20t (2.70c/t)
German 54 17t (3.18c/t) 15t (3.60c/t) 22t (2.45c/t)
Japanese 21 42t (0.50c/t) 15t (1.40c/t) 21t (1.00c/t)
Arabic 42 35t (1.20c/t) 18t (2.33c/t) 25t (1.68c/t)
结论:
- 英文模型对英文最高效(~4 chars/token)
- 中文/日文在英文模型上效率很低(~0.5 chars/token)
- 多语言模型平衡各语言(~2-3 chars/token)
- 专门的中文模型对中文最优
提高中文Token效率
# 方法1:使用中文优化的模型
tokenizers_chinese = {
'BERT中文': AutoTokenizer.from_pretrained('bert-base-chinese'),
'ChatGLM': AutoTokenizer.from_pretrained('THUDM/chatglm-6b', trust_remote_code=True),
'Qwen': AutoTokenizer.from_pretrained('Qwen/Qwen-7B', trust_remote_code=True),
}
text_zh = "我喜欢自然语言处理和深度学习"
for name, tokenizer in tokenizers_chinese.items():
tokens = tokenizer.encode(text_zh)
print(f"{name}: {len(tokens)} tokens")
print(f" Tokens: {tokenizer.convert_ids_to_tokens(tokens)}")
print()
# 方法2:预分词
import jieba
def tokenize_chinese_with_segmentation(text, tokenizer):
"""先分词,再tokenize"""
words = jieba.cut(text)
text_segmented = ' '.join(words)
return tokenizer.encode(text_segmented)
# 对比
tokens_raw = tokenizers_chinese['BERT中文'].encode(text_zh)
tokens_seg = tokenize_chinese_with_segmentation(
text_zh,
tokenizers_chinese['BERT中文']
)
print(f"原始: {len(tokens_raw)} tokens")
print(f"预分词: {len(tokens_seg)} tokens")