diff --git a/llama/bridge/LlamaPredictOperation.mm b/llama/bridge/LlamaPredictOperation.mm
index f3955d7..8c67836 100644
--- a/llama/bridge/LlamaPredictOperation.mm
+++ b/llama/bridge/LlamaPredictOperation.mm
@@ -89,8 +89,6 @@ struct llama_model {
 
 // load the model's weights from a file
 bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab & vocab, int n_ctx) {
-    printf("%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str());
-
     auto fin = std::ifstream(fname, std::ios::binary);
     if (!fin) {
         fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str());
@@ -127,17 +125,6 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
 
         n_ff = ((2*(4*hparams.n_embd)/3 + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
         n_parts = LLAMA_N_PARTS.at(hparams.n_embd);
-
-        printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
-        printf("%s: n_ctx   = %d\n", __func__, hparams.n_ctx);
-        printf("%s: n_embd  = %d\n", __func__, hparams.n_embd);
-        printf("%s: n_mult  = %d\n", __func__, hparams.n_mult);
-        printf("%s: n_head  = %d\n", __func__, hparams.n_head);
-        printf("%s: n_layer = %d\n", __func__, hparams.n_layer);
-        printf("%s: n_rot   = %d\n", __func__, hparams.n_rot);
-        printf("%s: f16     = %d\n", __func__, hparams.f16);
-        printf("%s: n_ff    = %d\n", __func__, n_ff);
-        printf("%s: n_parts = %d\n", __func__, n_parts);
     }
 
     // load vocab
@@ -220,8 +207,6 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
         ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_v
 
         ctx_size += (5 + 10*n_layer)*256; // object overhead
-
-        printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
     }
 
     // create the ggml context
@@ -307,8 +292,6 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
         model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
 
         const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
-
-        printf("%s: memory_size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem);
     }
 
     const size_t file_offset = fin.tellg();
@@ -326,8 +309,6 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
             fname_part += "." + std::to_string(i);
         }
 
-        printf("%s: loading model part %d/%d from '%s'\n", __func__, i+1, n_parts, fname_part.c_str());
-
         fin = std::ifstream(fname_part, std::ios::binary);
         fin.seekg(file_offset);
 
@@ -336,8 +317,6 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
             int n_tensors = 0;
             size_t total_size = 0;
 
-            printf("%s: ", __func__);
-
             while (true) {
                 int32_t n_dims;
                 int32_t length;
@@ -436,7 +415,6 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
 
                 if (0) {
                     static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", };
-                    printf("%24s - [%5d, %5d], type = %6s, split = %d\n", name.data(), ne[0], ne[1], ftype_str[ftype], split_type);
                 }
 
                 size_t bpe = 0;
@@ -498,17 +476,7 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
 
                     total_size += ggml_nbytes(tensor)/n_parts;
                 }
-
-                //printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
-                if (++n_tensors % 8 == 0) {
-                    printf(".");
-                    fflush(stdout);
-                }
             }
-
-            printf(" done\n");
-
-            printf("%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size/1024.0/1024.0, n_tensors);
         }
 
         fin.close();
@@ -794,8 +762,6 @@ NSError *makeLlamaError(LlamaErrorCode errorCode, NSString *description)
     ggml_time_init();
     const int64_t t_main_start_us = ggml_time_us();
 
-    printf("%s: seed = %d\n", __func__, _params.seed);
-
    std::mt19937 rng(_params.seed);
    if (_params.prompt.empty()) {
        _params.prompt = gpt_random_prompt(rng);
@@ -841,16 +807,6 @@ NSError *makeLlamaError(LlamaErrorCode errorCode, NSString *description)
     // tokenize the reverse prompt
     std::vector<gpt_vocab::id> antiprompt_inp = ::llama_tokenize(vocab, _params.antiprompt, false);
 
-    printf("\n");
-    printf("%s: prompt: '%s'\n", __func__, _params.prompt.c_str());
-    printf("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
-    for (int i = 0; i < (int) embd_inp.size(); i++) {
-        printf("%6d -> '%s'\n", embd_inp[i], vocab.id_to_token.at(embd_inp[i]).c_str());
-    }
-    printf("\n");
-    printf("sampling parameters: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n", _params.temp, _params.top_k, _params.top_p, _params.repeat_last_n, _params.repeat_penalty);
-    printf("\n\n");
-
     std::vector<gpt_vocab::id> embd;
 
     // determine the required inference memory per token:
@@ -926,28 +882,10 @@ NSError *makeLlamaError(LlamaErrorCode errorCode, NSString *description)
             NSString *token = [[NSString alloc] initWithCString:vocab.id_to_token[id].c_str() encoding:NSUTF8StringEncoding];
             [self postEvent:[_LlamaEvent outputTokenWithToken:token]];
         }
-
-        // end of text token
-        if (embd.back() == 2) {
-            printf(" [end of text]\n");
-            break;
-        }
     }
 
     [self postEvent:[_LlamaEvent completed]];
 
-    // report timing
-    {
-        const int64_t t_main_end_us = ggml_time_us();
-
-        printf("\n\n");
-        printf("%s: mem per token = %8zu bytes\n", __func__, mem_per_token);
-        printf("%s:     load time = %8.2f ms\n", __func__, t_load_us/1000.0f);
-        printf("%s:   sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f);
-        printf("%s:  predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
-        printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
-    }
-
     ggml_free(model.ctx);
 }