Path: blob/main/course/fr/chapter6/section6.ipynb
Kernel: Python 3
WordPiece tokenization
No French model uses WordPiece: CamemBERT, which we load here (for its pre-tokenizer), itself relies on SentencePiece.
Install the 🤗 Transformers and 🤗 Datasets libraries to run this notebook.
In [ ]:
!pip install datasets transformers[sentencepiece]
In [ ]:
corpus = [ "C'est le cours d'Hugging Face.", "Ce chapitre traite de la tokenisation.", "Cette section présente plusieurs algorithmes de tokenizer.", "Avec un peu de chance, vous serez en mesure de comprendre comment ils sont entraînés et génèrent des tokens.", ]
In [ ]:
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("camembert-base")
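To double-check the claim above, the fast tokenizer exposes its backend model; a minimal, optional check (the exact class name reported is an assumption about how the SentencePiece model is surfaced in 🤗 Tokenizers):

In [ ]:

# Optional: inspect the backend model of camembert-base.
# It is expected to be a Unigram (SentencePiece-style) model, not WordPiece.
print(type(tokenizer.backend_tokenizer.model).__name__)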
In [ ]:
from collections import defaultdict

word_freqs = defaultdict(int)
for text in corpus:
    words_with_offsets = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(text)
    new_words = [word for word, offset in words_with_offsets]
    for word in new_words:
        word_freqs[word] += 1

word_freqs
In [ ]:
alphabet = []
for word in word_freqs.keys():
    if word[0] not in alphabet:
        alphabet.append(word[0])
    for letter in word[1:]:
        if f"##{letter}" not in alphabet:
            alphabet.append(f"##{letter}")
alphabet.sort()

print(alphabet)
In [ ]:
vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] + alphabet.copy()
In [ ]:
splits = {
    word: [c if i == 0 else f"##{c}" for i, c in enumerate(word)]
    for word in word_freqs.keys()
}
In [ ]:
def compute_pair_scores(splits):
    letter_freqs = defaultdict(int)
    pair_freqs = defaultdict(int)
    # Count the frequency of each token and of each pair of adjacent tokens
    for word, freq in word_freqs.items():
        split = splits[word]
        if len(split) == 1:
            letter_freqs[split[0]] += freq
            continue
        for i in range(len(split) - 1):
            pair = (split[i], split[i + 1])
            letter_freqs[split[i]] += freq
            pair_freqs[pair] += freq
        letter_freqs[split[-1]] += freq

    # WordPiece score: pair frequency divided by the product of the token frequencies
    scores = {
        pair: freq / (letter_freqs[pair[0]] * letter_freqs[pair[1]])
        for pair, freq in pair_freqs.items()
    }
    return scores
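As implemented above, the score of a pair of adjacent tokens $(a, b)$ follows the WordPiece criterion

$$\mathrm{score}(a, b) = \frac{\mathrm{freq}(a, b)}{\mathrm{freq}(a)\,\mathrm{freq}(b)},$$

so pairs whose individual parts are rare in the corpus are merged before pairs made of very common tokens.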
In [ ]:
pair_scores = compute_pair_scores(splits)
for i, key in enumerate(pair_scores.keys()):
    print(f"{key}: {pair_scores[key]}")
    if i >= 5:
        break
In [ ]:
best_pair = "" max_score = None for pair, score in pair_scores.items(): if max_score is None or max_score < score: best_pair = pair max_score = score print(best_pair, max_score)
In [ ]:
vocab.append("ab")
In [ ]:
def merge_pair(a, b, splits):
    for word in word_freqs:
        split = splits[word]
        if len(split) == 1:
            continue
        i = 0
        while i < len(split) - 1:
            if split[i] == a and split[i + 1] == b:
                # Drop the "##" prefix of the second token when merging
                merge = a + b[2:] if b.startswith("##") else a + b
                split = split[:i] + [merge] + split[i + 2 :]
            else:
                i += 1
        splits[word] = split
    return splits
In [ ]:
splits = merge_pair("a", "##b", splits) splits
In [ ]:
vocab_size = 70
while len(vocab) < vocab_size:
    scores = compute_pair_scores(splits)
    # Find the pair with the highest score
    best_pair, max_score = "", None
    for pair, score in scores.items():
        if max_score is None or max_score < score:
            best_pair = pair
            max_score = score
    # Merge it everywhere and add the resulting token to the vocabulary
    splits = merge_pair(*best_pair, splits)
    new_token = (
        best_pair[0] + best_pair[1][2:]
        if best_pair[1].startswith("##")
        else best_pair[0] + best_pair[1]
    )
    vocab.append(new_token)
In [ ]:
print(vocab)
In [ ]:
def encode_word(word):
    tokens = []
    while len(word) > 0:
        # Greedily look for the longest prefix of the word present in the vocabulary
        i = len(word)
        while i > 0 and word[:i] not in vocab:
            i -= 1
        if i == 0:
            return ["[UNK]"]
        tokens.append(word[:i])
        word = word[i:]
        if len(word) > 0:
            word = f"##{word}"
    return tokens
In [ ]:
print(encode_word("Hugging")) print(encode_word("HOgging"))
In [ ]:
def tokenize(text):
    pre_tokenize_result = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(text)
    pre_tokenized_text = [word for word, offset in pre_tokenize_result]
    encoded_words = [encode_word(word) for word in pre_tokenized_text]
    return sum(encoded_words, [])
In [ ]:
tokenize("C'est le cours d'Hugging Face !")