8 instantiations of LlamaForCausalLM
Microsoft.ML.GenAI.LLaMA (3)
LlamaForCausalLM.cs (3)
105: var model = new LlamaForCausalLM(modelConfig);
138: var model = new LlamaForCausalLM(modelConfig);
156: model = new LlamaForCausalLM(modelConfig);
Microsoft.ML.GenAI.LLaMA.Tests (5)
LLaMA3_1Tests.cs (3)
37: var model = new LlamaForCausalLM(LlamaConfig.Llama3_1_8B_Instruct, "meta");
47: var model = new LlamaForCausalLM(LlamaConfig.Llama3_1_70B_Instruct, "meta");
57: var model = new LlamaForCausalLM(LlamaConfig.Llama3_1_405B_Instruct, "meta");
LLaMA3_2Tests.cs (2)
32: var model = new LlamaForCausalLM(LlamaConfig.Llama3_2_1B_Instruct);
42: var model = new LlamaForCausalLM(LlamaConfig.Llama_3_2_3B_Instruct);
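
The library instantiations above pass a modelConfig built inside FromPretrained, while the tests construct models directly from predefined LlamaConfig values. A minimal sketch of the test-style pattern, assembled only from the constructor calls listed above; the second argument is assumed to be a TorchSharp device name ("meta" in the tests), which the listing itself does not confirm:

// Sketch based on the quoted constructor calls; the meaning of the second
// argument ("meta") is an assumption, not stated in the listing.
using Microsoft.ML.GenAI.LLaMA;

var smallModel = new LlamaForCausalLM(LlamaConfig.Llama3_2_1B_Instruct);         // LLaMA3_2Tests.cs, line 32
var largeModel = new LlamaForCausalLM(LlamaConfig.Llama3_1_8B_Instruct, "meta"); // LLaMA3_1Tests.cs, line 37
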
29 references to LlamaForCausalLM
Microsoft.ML.GenAI.LLaMA (13)
Llama3CausalLMChatClient.cs (2)
12: public class Llama3CausalLMChatClient : CausalLMPipelineChatClient<Tokenizer, LlamaForCausalLM>
17: ICausalLMPipeline<Tokenizer, LlamaForCausalLM> pipeline,
LlamaCausalLMAgent.cs (2)
15: private readonly ICausalLMPipeline<Tokenizer, LlamaForCausalLM> _pipeline;
27: ICausalLMPipeline<Tokenizer, LlamaForCausalLM> pipeline,
LlamaChatCompletionService.cs (2)
15: private readonly ICausalLMPipeline<Tokenizer, LlamaForCausalLM> _pipeline;
24: public LlamaChatCompletionService(ICausalLMPipeline<Tokenizer, LlamaForCausalLM> pipeline, ISemanticKernelChatTemplateBuilder? templateBuilder = null)
LlamaForCausalLM.cs (5)
28: : base(nameof(LlamaForCausalLM))
95: public static LlamaForCausalLM FromPretrained(
105: var model = new LlamaForCausalLM(modelConfig);
118: public static LlamaForCausalLM FromPretrained(
138: var model = new LlamaForCausalLM(modelConfig);
LlamaTextCompletionService.cs (2)
20: private readonly ICausalLMPipeline<Tokenizer, LlamaForCausalLM> _pipeline;
22: public LlamaTextCompletionService(ICausalLMPipeline<Tokenizer, LlamaForCausalLM> pipeline)
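
Every consumer in this project depends on the same ICausalLMPipeline<Tokenizer, LlamaForCausalLM> abstraction. A hedged sketch of wiring one pipeline into the two services whose full constructor signatures appear above (the pipeline itself is assumed to be built elsewhere; see the Microsoft.ML.GenAI.Samples section below):

// Minimal sketch, assuming only the constructor signatures quoted above.
ICausalLMPipeline<Tokenizer, LlamaForCausalLM> pipeline = BuildPipeline(); // hypothetical helper, not part of the listing

var chatService = new LlamaChatCompletionService(pipeline); // templateBuilder is optional per line 24
var textService = new LlamaTextCompletionService(pipeline); // single-parameter constructor per line 22
// Llama3CausalLMChatClient and LlamaCausalLMAgent take the same pipeline type,
// but their full parameter lists are not shown in the snippets above.
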
Microsoft.ML.GenAI.LLaMA.Tests (5)
LLaMA3_1Tests.cs (3)
37: var model = new LlamaForCausalLM(LlamaConfig.Llama3_1_8B_Instruct, "meta");
47: var model = new LlamaForCausalLM(LlamaConfig.Llama3_1_70B_Instruct, "meta");
57: var model = new LlamaForCausalLM(LlamaConfig.Llama3_1_405B_Instruct, "meta");
LLaMA3_2Tests.cs (2)
32: var model = new LlamaForCausalLM(LlamaConfig.Llama3_2_1B_Instruct);
42: var model = new LlamaForCausalLM(LlamaConfig.Llama_3_2_3B_Instruct);
Microsoft.ML.GenAI.Samples (11)
Llama\LlamaSample.cs (3)
37: var model = LlamaForCausalLM.FromPretrained(weightFolder, configName, checkPointName: checkPointName, layersOnTargetDevice: 26, quantizeToInt8: true);
39: var pipeline = new CausalLMPipeline<TiktokenTokenizer, LlamaForCausalLM>(tokenizer, model, device);
Llama\SFT_Llama_3_2_1B.cs (5)
63: if (p is not ICausalLMPipeline<Tokenizer, LlamaForCausalLM> llamaPipeline)
81: public static ICausalLMPipeline<TiktokenTokenizer, LlamaForCausalLM> LoadModel(string weightFolder, string checkPointName = "model.safetensors.index.json")
92: var model = LlamaForCausalLM.FromPretrained(weightFolder, configName, checkPointName: checkPointName, layersOnTargetDevice: -1, quantizeToInt8: false);
94: var pipeline = new CausalLMPipeline<TiktokenTokenizer, LlamaForCausalLM>(tokenizer, model, device);
MEAI\Llama3_1.cs (3)
38: var model = LlamaForCausalLM.FromPretrained(weightFolder, configName, checkPointName: checkPointName, layersOnTargetDevice: 26, quantizeToInt8: true);
40: var pipeline = new CausalLMPipeline<TiktokenTokenizer, LlamaForCausalLM>(tokenizer, model, device);
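
The sample references all follow the same pattern: load weights with LlamaForCausalLM.FromPretrained, then wrap tokenizer, model, and device in a CausalLMPipeline. A sketch assembled only from the calls quoted above; weightFolder, configName, checkPointName, tokenizer, and device are assumed to be prepared by the caller, and tokenizer construction is not part of the quoted snippets:

// Sketch of the sample-project pattern, based on the FromPretrained and
// CausalLMPipeline calls quoted above.
var model = LlamaForCausalLM.FromPretrained(
    weightFolder,
    configName,
    checkPointName: checkPointName,
    layersOnTargetDevice: 26,  // LlamaSample.cs and Llama3_1.cs use 26; SFT_Llama_3_2_1B.cs uses -1
    quantizeToInt8: true);     // the SFT sample passes false here

var pipeline = new CausalLMPipeline<TiktokenTokenizer, LlamaForCausalLM>(tokenizer, model, device);
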