Use BF16 for llama v3 by default.

This commit is contained in:
laurent 2024-04-19 14:22:01 +02:00
parent 9c532aef47
commit 6d6d87f8b3
1 changed file with 4 additions and 1 deletion

View File

@ -120,7 +120,10 @@ fn main() -> Result<()> {
Some("bf16") => DType::BF16,
Some("f32") => DType::F32,
Some(dtype) => bail!("Unsupported dtype {dtype}"),
None => DType::F16,
None => match args.which {
Which::V3 | Which::V3Instruct => DType::BF16,
Which::V1 | Which::V2 | Which::Solar10_7B | Which::TinyLlama1_1BChat => DType::F16,
},
};
let (llama, tokenizer_filename, mut cache, config) = {
let api = Api::new()?;