1 instantiation of LlamaTokenizer
Microsoft.ML.Tokenizers (1)
Model\LlamaTokenizer.cs (1)
64: return new LlamaTokenizer(modelProto, addBeginOfSentence, addEndOfSentence, specialTokens);
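The only constructor call sits inside Model\LlamaTokenizer.cs itself, so callers obtain instances through the static LlamaTokenizer.Create factory rather than new. A minimal sketch of that usage, assuming a local "tokenizer.model" SentencePiece file; the file path and the explicit flag values are illustrative, mirroring the Create(remoteStream, addBeginOfSentence: true, addEndOfSentence: false, ...) calls referenced below.

// Minimal sketch: loading a LlamaTokenizer through the static factory.
// The "tokenizer.model" path and the flag values are assumptions for illustration.
using System.Collections.Generic;
using System.IO;
using Microsoft.ML.Tokenizers;

using Stream modelStream = File.OpenRead("tokenizer.model");
LlamaTokenizer tokenizer = LlamaTokenizer.Create(
    modelStream,
    addBeginOfSentence: true,
    addEndOfSentence: false);

IReadOnlyList<int> ids = tokenizer.EncodeToIds("Hello, world!");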
43 references to LlamaTokenizer
Microsoft.ML.GenAI.Core.Tests (1)
CasualLMDatasetTest.cs (1)
26: return LlamaTokenizer.Create(remoteStream);
Microsoft.ML.GenAI.Mistral (4)
MistralTokenizerHelper.cs (4)
37: public static LlamaTokenizer FromPretrained(
91: public static LlamaTokenizer FromPretrained(
100: var llamaTokenizer = LlamaTokenizer.Create(
Microsoft.ML.GenAI.Mistral.Tests (1)
Mistral_7B_Instruct_V0_3Tests.cs (1)
110: var tokenizer = MistralTokenizerHelper.FromPretrained(modelWeightFolder);
Microsoft.ML.GenAI.Phi (3)
Phi3\Phi3TokenizerHelper.cs (3)
23: public static LlamaTokenizer FromPretrained(
37: var llamaTokenizer = LlamaTokenizer.Create(
Microsoft.ML.GenAI.Phi.Tests (1)
Phi3Tests.cs (1)
120: var tokenizer = Phi3TokenizerHelper.FromPretrained(modelPath);
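Both MistralTokenizerHelper.FromPretrained and Phi3TokenizerHelper.FromPretrained return a LlamaTokenizer and, per the references at MistralTokenizerHelper.cs:100 and Phi3TokenizerHelper.cs:37, delegate to LlamaTokenizer.Create. A hedged sketch of that pattern follows; the class name, body, and parameter names here are assumptions, only the modelName default "tokenizer.model" is taken from the sample at Mistral_7B_Instruct.cs:74.

// Hypothetical helper mirroring the FromPretrained pattern the listing suggests:
// open the SentencePiece model under the weight folder and delegate to
// LlamaTokenizer.Create. The body is an assumption, not the actual helper code.
using System.IO;
using Microsoft.ML.Tokenizers;

public static class TokenizerHelperSketch
{
    public static LlamaTokenizer FromPretrained(string weightFolder, string modelName = "tokenizer.model")
    {
        using Stream modelStream = File.OpenRead(Path.Combine(weightFolder, modelName));
        return LlamaTokenizer.Create(modelStream);
    }
}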
Microsoft.ML.GenAI.Samples (14)
MEAI\Phi3.cs (2)
29: var tokenizer = Phi3TokenizerHelper.FromPretrained(tokenizerPath);
31: var pipeline = new CausalLMPipeline<LlamaTokenizer, Phi3ForCasualLM>(tokenizer, model, device);
Mistral\Mistral_7B_Instruct.cs (6)
43: var tokenizer = MistralTokenizerHelper.FromPretrained(originalWeightFolder);
46: var pipeline = new CausalLMPipeline<LlamaTokenizer, MistralForCausalLM>(tokenizer, model, device);
74: var tokenizer = MistralTokenizerHelper.FromPretrained(originalWeightFolder, modelName: "tokenizer.model");
81: var pipeline = new CausalLMPipeline<LlamaTokenizer, MistralModel>(tokenizer, model, device);
128: var tokenizer = MistralTokenizerHelper.FromPretrained(originalWeightFolder);
131: var pipeline = new CausalLMPipeline<LlamaTokenizer, MistralForCausalLM>(tokenizer, model, device);
Phi3Mini\AutoGenSample.cs (2)
31: var tokenizer = Phi3TokenizerHelper.FromPretrained(tokenizerPath);
33: var pipeline = new CausalLMPipeline<LlamaTokenizer, Phi3ForCasualLM>(tokenizer, model, device);
Phi3Mini\SemanticKernelSample.cs (4)
27: var tokenizer = Phi3TokenizerHelper.FromPretrained(tokenizerPath);
29: var pipeline = new CausalLMPipeline<LlamaTokenizer, Phi3ForCasualLM>(tokenizer, model, device);
58: var tokenizer = Phi3TokenizerHelper.FromPretrained(tokenizerPath);
60: var pipeline = new CausalLMPipeline<LlamaTokenizer, Phi3ForCasualLM>(tokenizer, model, device);
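Every sample repeats the same wiring: a LlamaTokenizer from one of the helpers and a causal LM become the two type arguments of CausalLMPipeline. A minimal sketch of that construction, assuming the model and device are prepared elsewhere in the sample; the namespaces, the string-typed device parameter, and the helper method shown here are assumptions, only the CausalLMPipeline<LlamaTokenizer, Phi3ForCasualLM> construction mirrors the references above.

// Sketch of the tokenizer-to-pipeline wiring the samples repeat. Loading the
// Phi3ForCasualLM weights and picking the torch device happen elsewhere; the
// device being a string is an assumption based on the sample calls.
using Microsoft.ML.GenAI.Core;
using Microsoft.ML.GenAI.Phi;
using Microsoft.ML.Tokenizers;

public static class PipelineWiringSketch
{
    public static CausalLMPipeline<LlamaTokenizer, Phi3ForCasualLM> Build(
        string tokenizerPath, Phi3ForCasualLM model, string device)
    {
        var tokenizer = Phi3TokenizerHelper.FromPretrained(tokenizerPath);
        return new CausalLMPipeline<LlamaTokenizer, Phi3ForCasualLM>(tokenizer, model, device);
    }
}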
Microsoft.ML.Tokenizers (1)
Model\LlamaTokenizer.cs (1)
34: public static LlamaTokenizer Create(
Microsoft.ML.Tokenizers.Tests (18)
LlamaTests.cs (18)
33: return LlamaTokenizer.Create(remoteStream);
40: return LlamaTokenizer.Create(remoteStream);
47: LlamaTokenizer tokenizer = LlamaTokenizer.Create(remoteStream, addBeginOfSentence: true, addEndOfSentence: false,
241: LlamaTokenizer bpe = (llamaTokenizer as LlamaTokenizer)!;
298: private void TestDecodingWithSpan(LlamaTokenizer tokenizer, int[] ids, string expectedDecoded)
354: LlamaTokenizer? bpe = llamaTokenizer as LlamaTokenizer;
384: public void TestDecodeSpecialTokenWithSmallId(LlamaTokenizer llamaTokenizer)
433: normalizer = new SentencePieceNormalizer(removeExtraWhiteSpaces: false, addDummyPrefix: true, escapeWhiteSpaces: true, treatWhitespaceAsSuffix: false, specialTokens: (_llamaPhi3Tokenizer as LlamaTokenizer)!.SpecialTokens);
441: normalizer = new SentencePieceNormalizer(removeExtraWhiteSpaces: false, addDummyPrefix: true, escapeWhiteSpaces: true, treatWhitespaceAsSuffix: true, specialTokens: (_llamaPhi3Tokenizer as LlamaTokenizer)!.SpecialTokens);
653: LlamaTokenizer tokenizer = (_llamaPhi3Tokenizer as LlamaTokenizer)!;
834: LlamaTokenizer tokenizer = (_llamaPhi3Tokenizer as LlamaTokenizer)!;
872: LlamaTokenizer tokenizerWithSuffix = (_llamaPhi3TokenizerWithTreatSpaceSuffix as LlamaTokenizer)!;