Building a tokenizer, block by block

Install the Transformers, Datasets, and Evaluate libraries to run this notebook.

!pip install datasets evaluate transformers[sentencepiece]
from datasets import load_dataset

dataset = load_dataset("wikitext", name="wikitext-2-raw-v1", split="train")


def get_training_corpus():
    for i in range(0, len(dataset), 1000):
        yield dataset[i : i + 1000]["text"]
with open("wikitext-2.txt", "w", encoding="utf-8") as f:
    for i in range(len(dataset)):
        f.write(dataset[i]["text"] + "\n")
from tokenizers import (
    decoders,
    models,
    normalizers,
    pre_tokenizers,
    processors,
    trainers,
    Tokenizer,
)

tokenizer = Tokenizer(models.WordPiece(unk_token="[UNK]"))
tokenizer.normalizer = normalizers.BertNormalizer(lowercase=True)
tokenizer.normalizer = normalizers.Sequence(
    [normalizers.NFD(), normalizers.Lowercase(), normalizers.StripAccents()]
)
print(tokenizer.normalizer.normalize_str("Héllò hôw are ü?"))
hello how are u?
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
tokenizer.pre_tokenizer.pre_tokenize_str("Let's test my pre-tokenizer.")
[('Let', (0, 3)), ("'", (3, 4)), ('s', (4, 5)), ('test', (6, 10)), ('my', (11, 13)), ('pre', (14, 17)), ('-', (17, 18)), ('tokenizer', (18, 27)), ('.', (27, 28))]
pre_tokenizer = pre_tokenizers.WhitespaceSplit()
pre_tokenizer.pre_tokenize_str("Let's test my pre-tokenizer.")
[("Let's", (0, 5)), ('test', (6, 10)), ('my', (11, 13)), ('pre-tokenizer.', (14, 28))]
pre_tokenizer = pre_tokenizers.Sequence(
    [pre_tokenizers.WhitespaceSplit(), pre_tokenizers.Punctuation()]
)
pre_tokenizer.pre_tokenize_str("Let's test my pre-tokenizer.")
[('Let', (0, 3)), ("'", (3, 4)), ('s', (4, 5)), ('test', (6, 10)), ('my', (11, 13)), ('pre', (14, 17)), ('-', (17, 18)), ('tokenizer', (18, 27)), ('.', (27, 28))]
special_tokens = ["[UNK]", "[PAD]", "[CLS]", "[SEP]", "[MASK]"] trainer = trainers.WordPieceTrainer(vocab_size=25000, special_tokens=special_tokens)
tokenizer.train_from_iterator(get_training_corpus(), trainer=trainer)
tokenizer.model = models.WordPiece(unk_token="[UNK]")
tokenizer.train(["wikitext-2.txt"], trainer=trainer)
encoding = tokenizer.encode("Let's test this tokenizer.")
print(encoding.tokens)
['let', "'", 's', 'test', 'this', 'tok', '##eni', '##zer', '.']
cls_token_id = tokenizer.token_to_id("[CLS]")
sep_token_id = tokenizer.token_to_id("[SEP]")
print(cls_token_id, sep_token_id)
2 3
tokenizer.post_processor = processors.TemplateProcessing(
    single="[CLS]:0 $A:0 [SEP]:0",
    pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
    special_tokens=[("[CLS]", cls_token_id), ("[SEP]", sep_token_id)],
)
encoding = tokenizer.encode("Let's test this tokenizer.")
print(encoding.tokens)
['[CLS]', 'let', "'", 's', 'test', 'this', 'tok', '##eni', '##zer', '.', '[SEP]']
encoding = tokenizer.encode("Let's test this tokenizer...", "on a pair of sentences.") print(encoding.tokens) print(encoding.type_ids)
['[CLS]', 'let', "'", 's', 'test', 'this', 'tok', '##eni', '##zer', '...', '[SEP]', 'on', 'a', 'pair', 'of', 'sentences', '.', '[SEP]']
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
tokenizer.decoder = decoders.WordPiece(prefix="##")
tokenizer.decode(encoding.ids)
"let's test this tokenizer... on a pair of sentences."
tokenizer.save("tokenizer.json")
new_tokenizer = Tokenizer.from_file("tokenizer.json")
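
As a quick sanity check (a minimal sketch, using an arbitrary test sentence), the tokenizer reloaded from disk should produce exactly the same encoding as the one we just built:

# Sanity check: the reloaded tokenizer keeps the normalizer, pre-tokenizer,
# post-processor and decoder we configured, so its output should match.
original_encoding = tokenizer.encode("Let's test this tokenizer.")
reloaded_encoding = new_tokenizer.encode("Let's test this tokenizer.")
print(original_encoding.tokens == reloaded_encoding.tokens)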
from transformers import PreTrainedTokenizerFast

wrapped_tokenizer = PreTrainedTokenizerFast(
    tokenizer_object=tokenizer,
    # tokenizer_file="tokenizer.json",  # Alternatively, you can load from the tokenizer file
    unk_token="[UNK]",
    pad_token="[PAD]",
    cls_token="[CLS]",
    sep_token="[SEP]",
    mask_token="[MASK]",
)
from transformers import BertTokenizerFast

wrapped_tokenizer = BertTokenizerFast(tokenizer_object=tokenizer)
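
Either wrapper can then be used like any other fast tokenizer in Transformers; a minimal sketch (the sentence pair is just an example):

# The wrapped tokenizer applies the normalization, pre-tokenization and
# post-processing configured above and returns the usual model inputs.
encoded = wrapped_tokenizer("Let's test this tokenizer...", "on a pair of sentences.")
print(encoded["input_ids"])
print(encoded["token_type_ids"])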
tokenizer = Tokenizer(models.BPE())
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
tokenizer.pre_tokenizer.pre_tokenize_str("Let's test pre-tokenization!")
[('Let', (0, 3)), ("'s", (3, 5)), ('Ġtest', (5, 10)), ('Ġpre', (10, 14)), ('-', (14, 15)), ('tokenization', (15, 27)), ('!', (27, 28))]
trainer = trainers.BpeTrainer(vocab_size=25000, special_tokens=["<|endoftext|>"])
tokenizer.train_from_iterator(get_training_corpus(), trainer=trainer)
tokenizer.model = models.BPE()
tokenizer.train(["wikitext-2.txt"], trainer=trainer)
encoding = tokenizer.encode("Let's test this tokenizer.")
print(encoding.tokens)
['L', 'et', "'", 's', 'Ġtest', 'Ġthis', 'Ġto', 'ken', 'izer', '.']
tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
sentence = "Let's test this tokenizer."
encoding = tokenizer.encode(sentence)
start, end = encoding.offsets[4]
sentence[start:end]
' test'
tokenizer.decoder = decoders.ByteLevel()
tokenizer.decode(encoding.ids)
"Let's test this tokenizer."
from transformers import PreTrainedTokenizerFast

wrapped_tokenizer = PreTrainedTokenizerFast(
    tokenizer_object=tokenizer,
    bos_token="<|endoftext|>",
    eos_token="<|endoftext|>",
)
from transformers import GPT2TokenizerFast

wrapped_tokenizer = GPT2TokenizerFast(tokenizer_object=tokenizer)
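
As a minimal check (with an arbitrary example sentence), encoding and decoding with the wrapped tokenizer should round-trip the text, thanks to the byte-level decoder set above:

# Decoding the token IDs rebuilds the original string.
encoded = wrapped_tokenizer("Let's test this tokenizer.")
print(wrapped_tokenizer.decode(encoded["input_ids"]))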
tokenizer = Tokenizer(models.Unigram())
from tokenizers import Regex

tokenizer.normalizer = normalizers.Sequence(
    [
        normalizers.Replace("``", '"'),
        normalizers.Replace("''", '"'),
        normalizers.NFKD(),
        normalizers.StripAccents(),
        normalizers.Replace(Regex(" {2,}"), " "),
    ]
)
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace()
tokenizer.pre_tokenizer.pre_tokenize_str("Let's test the pre-tokenizer!")
[("▁Let's", (0, 5)), ('▁test', (5, 10)), ('▁the', (10, 14)), ('▁pre-tokenizer!', (14, 29))]
special_tokens = ["<cls>", "<sep>", "<unk>", "<pad>", "<mask>", "<s>", "</s>"] trainer = trainers.UnigramTrainer( vocab_size=25000, special_tokens=special_tokens, unk_token="<unk>" ) tokenizer.train_from_iterator(get_training_corpus(), trainer=trainer)
tokenizer.model = models.Unigram()
tokenizer.train(["wikitext-2.txt"], trainer=trainer)
encoding = tokenizer.encode("Let's test this tokenizer.")
print(encoding.tokens)
['▁Let', "'", 's', '▁test', '▁this', '▁to', 'ken', 'izer', '.']
cls_token_id = tokenizer.token_to_id("<cls>")
sep_token_id = tokenizer.token_to_id("<sep>")
print(cls_token_id, sep_token_id)
0 1
tokenizer.post_processor = processors.TemplateProcessing(
    single="$A:0 <sep>:0 <cls>:2",
    pair="$A:0 <sep>:0 $B:1 <sep>:1 <cls>:2",
    special_tokens=[("<sep>", sep_token_id), ("<cls>", cls_token_id)],
)
encoding = tokenizer.encode("Let's test this tokenizer...", "on a pair of sentences!") print(encoding.tokens) print(encoding.type_ids)
['▁Let', "'", 's', '▁test', '▁this', '▁to', 'ken', 'izer', '.', '.', '.', '<sep>', '▁', 'on', '▁', 'a', '▁pair', '▁of', '▁sentence', 's', '!', '<sep>', '<cls>']
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2]
tokenizer.decoder = decoders.Metaspace()
from transformers import PreTrainedTokenizerFast

wrapped_tokenizer = PreTrainedTokenizerFast(
    tokenizer_object=tokenizer,
    bos_token="<s>",
    eos_token="</s>",
    unk_token="<unk>",
    pad_token="<pad>",
    cls_token="<cls>",
    sep_token="<sep>",
    mask_token="<mask>",
    padding_side="left",
)
from transformers import XLNetTokenizerFast

wrapped_tokenizer = XLNetTokenizerFast(tokenizer_object=tokenizer)
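
A minimal sketch of using the wrapped tokenizer on a sentence pair (the sentences are just examples); the special tokens land at the end of the sequence, as defined in the template above:

# convert_ids_to_tokens shows <sep> and <cls> appended at the end,
# following the XLNet-style template we configured.
encoded = wrapped_tokenizer("Let's test this tokenizer...", "on a pair of sentences!")
print(wrapped_tokenizer.convert_ids_to_tokens(encoded["input_ids"]))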