
Commit 366e0c8

Fix indent model sizes

1 parent cf8375c

1 file changed: +14 −14

src/llama.cpp

Lines changed: 14 additions & 14 deletions
@@ -6982,13 +6982,13 @@ static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
         // LLAMA_LOG_INFO("%s: model params = %.2f K\n", __func__, ml.n_elements*1e-3);
     }
 
-    LLAMA_LOG_INFO("%s: model size - %.2f Bytes (%.3f BPW) Total \n", __func__, ml.n_bytes/1.0, ml.n_bytes*8.0/ml.n_elements);
-    LLAMA_LOG_INFO("%s: model size - %.2f KB (%.3f BPW) Total \n", __func__, ml.n_bytes/1000.0, ml.n_bytes*8.0/ml.n_elements);
-    LLAMA_LOG_INFO("%s: model size - %.2f KiB (%.3f BPW) Total \n", __func__, ml.n_bytes/1024.0, ml.n_bytes*8.0/ml.n_elements);
-    LLAMA_LOG_INFO("%s: model size - %.2f MB (%.3f BPW) Total \n", __func__, ml.n_bytes/1000.0/1000.0 , ml.n_bytes*8.0/ml.n_elements);
-    LLAMA_LOG_INFO("%s: model size - %.2f MiB (%.3f BPW) Total \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
-    LLAMA_LOG_INFO("%s: model size - %.2f GB (%.3f BPW) Total \n", __func__, ml.n_bytes/1000.0/1000.0/1000.0, ml.n_bytes*8.0/ml.n_elements);
-    LLAMA_LOG_INFO("%s: model size - %.2f GiB (%.3f BPW) Total \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
+    LLAMA_LOG_INFO("%s: model size = %.2f Bytes (%.3f BPW) \n", __func__, ml.n_bytes/1.0, ml.n_bytes*8.0/ml.n_elements);
+    LLAMA_LOG_INFO("%s: model size = %.2f KB (%.3f BPW) \n", __func__, ml.n_bytes/1000.0, ml.n_bytes*8.0/ml.n_elements);
+    LLAMA_LOG_INFO("%s: model size = %.2f KiB (%.3f BPW) \n", __func__, ml.n_bytes/1024.0, ml.n_bytes*8.0/ml.n_elements);
+    LLAMA_LOG_INFO("%s: model size = %.2f MB (%.3f BPW) \n", __func__, ml.n_bytes/1000.0/1000.0, ml.n_bytes*8.0/ml.n_elements);
+    LLAMA_LOG_INFO("%s: model size = %.2f MiB (%.3f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
+    LLAMA_LOG_INFO("%s: model size = %.2f GB (%.3f BPW) \n", __func__, ml.n_bytes/1000.0/1000.0/1000.0, ml.n_bytes*8.0/ml.n_elements);
+    LLAMA_LOG_INFO("%s: model size = %.2f GiB (%.3f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
 
     // if (ml.n_bytes < GiB) {
     //     LLAMA_LOG_INFO("%s: model size = %.3f MiB (%.3f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
@@ -7007,13 +7007,13 @@ static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
         n_bytes -= ggml_nbytes(meta_out);
         n_elements -= ggml_nelements(meta_out);
 
-        LLAMA_LOG_INFO("%s: repeating layers - %.2f Bytes (%.3f BPW) RL \n", __func__, n_bytes/1.0, n_bytes*8.0/n_elements);
-        LLAMA_LOG_INFO("%s: repeating layers - %.2f KB (%.3f BPW) RL \n", __func__, n_bytes/1000.0, n_bytes*8.0/n_elements);
-        LLAMA_LOG_INFO("%s: repeating layers - %.2f KiB (%.3f BPW) RL \n", __func__, n_bytes/1024.0, n_bytes*8.0/n_elements);
-        LLAMA_LOG_INFO("%s: repeating layers - %.2f MB (%.3f BPW) RL \n", __func__, n_bytes/1000.0/1000.0, n_bytes*8.0/n_elements);
-        LLAMA_LOG_INFO("%s: repeating layers - %.2f MiB (%.3f BPW) RL \n", __func__, n_bytes/1024.0/1024.0, n_bytes*8.0/n_elements);
-        LLAMA_LOG_INFO("%s: repeating layers - %.2f GB (%.3f BPW) RL \n", __func__, n_bytes/1000.0/1000.0/1000.0, n_bytes*8.0/n_elements);
-        LLAMA_LOG_INFO("%s: repeating layers - %.2f GiB (%.3f BPW) RL \n", __func__, n_bytes/1024.0/1024.0/1024.0, n_bytes*8.0/n_elements);
+        LLAMA_LOG_INFO("%s: repeating layers = %.2f Bytes (%.3f BPW) \n", __func__, n_bytes/1.0, n_bytes*8.0/n_elements);
+        LLAMA_LOG_INFO("%s: repeating layers = %.2f KB (%.3f BPW) \n", __func__, n_bytes/1000.0, n_bytes*8.0/n_elements);
+        LLAMA_LOG_INFO("%s: repeating layers = %.2f KiB (%.3f BPW) \n", __func__, n_bytes/1024.0, n_bytes*8.0/n_elements);
+        LLAMA_LOG_INFO("%s: repeating layers = %.2f MB (%.3f BPW) \n", __func__, n_bytes/1000.0/1000.0, n_bytes*8.0/n_elements);
+        LLAMA_LOG_INFO("%s: repeating layers = %.2f MiB (%.3f BPW) \n", __func__, n_bytes/1024.0/1024.0, n_bytes*8.0/n_elements);
+        LLAMA_LOG_INFO("%s: repeating layers = %.2f GB (%.3f BPW) \n", __func__, n_bytes/1000.0/1000.0/1000.0, n_bytes*8.0/n_elements);
+        LLAMA_LOG_INFO("%s: repeating layers = %.2f GiB (%.3f BPW) \n", __func__, n_bytes/1024.0/1024.0/1024.0, n_bytes*8.0/n_elements);
 
         // if (n_bytes < GiB) {
         //     LLAMA_LOG_INFO("%s: repeating layers = %.3f MiB (%.3f BPW", __func__, n_bytes/1024.0/1024.0, n_bytes*8.0/n_elements);
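For reference, the BPW (bits per weight) value printed by each of these lines is simply total tensor bytes times 8 divided by total element count, and the decimal units (KB/MB/GB) differ from the binary ones (KiB/MiB/GiB) only in the divisor (powers of 1000 vs. powers of 1024). A minimal standalone sketch of the same arithmetic; the totals below are made-up illustrative values, not figures from this commit:

#include <cstdint>
#include <cstdio>

int main() {
    // Hypothetical totals for illustration only.
    const uint64_t n_elements = 7000000000ULL; // total weights in the model
    const uint64_t n_bytes    = 3900000000ULL; // total tensor bytes

    // Bits per weight: the same expression used in the log lines above.
    const double bpw = n_bytes * 8.0 / n_elements;

    // Decimal (SI) vs. binary (IEC) sizes differ only in the divisor.
    printf("model size = %.2f GB  (%.3f BPW)\n", n_bytes / 1000.0 / 1000.0 / 1000.0, bpw);
    printf("model size = %.2f GiB (%.3f BPW)\n", n_bytes / 1024.0 / 1024.0 / 1024.0, bpw);
    return 0;
}

With these inputs the sketch prints roughly 3.90 GB / 3.63 GiB at about 4.457 BPW, following the same unit pattern as the patched log lines.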
