
Commit 7a04ebf

richardkiss authored and teleprint-me committed
english : use typos to fix comments and logs (ggml-org#4354)
1 parent b99dd03 commit 7a04ebf

17 files changed (+34 -34 lines)

common/log.h (+4 -4)

@@ -61,13 +61,13 @@
 // #define LOG_TARGET stderr
 // #include "log.h"
 //
-// The log target can also be redirected to a diffrent function
+// The log target can also be redirected to a different function
 // like so:
 //
-// #define LOG_TARGET log_handler_diffrent()
+// #define LOG_TARGET log_handler_different()
 // #include "log.h"
 //
-// FILE* log_handler_diffrent()
+// FILE* log_handler_different()
 // {
 //     return stderr;
 // }
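The redirection documented in this comment block compiles once the target function is visible before the macro is used. A minimal sketch of that pattern, assuming common/log.h is on the include path:

```cpp
#include <cstdio>

// any function that returns a FILE* can serve as the log target
static FILE * log_handler_different() {
    return stderr;
}

#define LOG_TARGET log_handler_different()
#include "log.h"

int main() {
    LOG("logging to the redirected target\n");
    return 0;
}
```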
@@ -421,7 +421,7 @@ inline FILE *log_handler2_impl(bool change = false, LogTriState append = LogTriS

 // Disables logs entirely at runtime.
 // Makes LOG() and LOG_TEE() produce no output,
-// untill enabled back.
+// until enabled back.
 #define log_disable() log_disable_impl()

 // INTERNAL, DO NOT USE
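log_disable() pairs with a matching log_enable() in the same header. A short usage sketch; run_noisy_step is a hypothetical stand-in for whatever should be silenced:

```cpp
#include "log.h"  // common/log.h, assumed on the include path

// hypothetical stand-in for work whose logging we want suppressed
static void run_noisy_step() { /* ... */ }

int main() {
    log_disable();   // LOG() and LOG_TEE() produce no output from here on
    run_noisy_step();
    log_enable();    // logging resumes
    LOG("logging is back on\n");
    return 0;
}
```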

convert.py (+2 -2)

@@ -585,7 +585,7 @@ def merge_multifile_models(models_plus: list[ModelPlus]) -> ModelPlus:

     if any("model.embed_tokens.weight" in mp.model for mp in models_plus):
         # Transformers models put different tensors in different files, but
-        # don't split indivdual tensors between files.
+        # don't split individual tensors between files.
         model: LazyModel = {}
         for mp in models_plus:
             model.update(mp.model)
@@ -678,7 +678,7 @@ def rebuild_from_type_v2(func, new_type, args, state):
        return func(*args)

    CLASSES: dict[tuple[str, str], Any] = {
-        # getattr used here as a workaround for mypy not being smart enough to detrmine
+        # getattr used here as a workaround for mypy not being smart enough to determine
        # the staticmethods have a __func__ attribute.
        ('torch._tensor', '_rebuild_from_type_v2'): getattr(rebuild_from_type_v2, '__func__'),
        ('torch._utils', '_rebuild_tensor_v2'): getattr(lazy_rebuild_tensor_v2, '__func__'),

examples/llava/clip.cpp (+1 -1)

@@ -739,7 +739,7 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip
         temp->ny   = longer_side;
         temp->size = 3 * longer_side * longer_side;
         temp->data = new uint8_t[temp->size]();
-        uint8_t bc[3] = {122, 116, 104}; // bakground color in RGB from LLaVA
+        uint8_t bc[3] = {122, 116, 104}; // background color in RGB from LLaVA

         // fill with background color
         for (size_t i = 0; i < temp->size; i++) {
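The fill loop that follows cycles the three background channels across the flat buffer. A standalone sketch of that padding step, assuming a packed RGB layout:

```cpp
#include <cstddef>
#include <cstdint>

// fill a packed RGB buffer with a constant background color by cycling
// through the R, G, B channels; size is the full buffer length (3 * w * h)
static void fill_background(uint8_t * data, size_t size, const uint8_t bc[3]) {
    for (size_t i = 0; i < size; i++) {
        data[i] = bc[i % 3];
    }
}
```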

examples/llava/convert-image-encoder-to-gguf.py (+1 -1)

@@ -51,7 +51,7 @@ def bytes_to_unicode():
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
-    This is a signficant percentage of your normal, say, 32K bpe vocab.
+    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    And avoids mapping to whitespace/control characters the bpe code barfs on.
    """

examples/lookahead/README.md (+1 -1)

@@ -1,6 +1,6 @@
 # llama.cpp/examples/lookahead

-Demonstartion of lookahead decoding technique:
+Demonstration of lookahead decoding technique:

 https://lmsys.org/blog/2023-11-21-lookahead-decoding/

examples/server/json.hpp (+1 -1)

@@ -11227,7 +11227,7 @@ class binary_reader
                 }
                 if (is_ndarray) // ndarray dimensional vector can only contain integers, and can not embed another array
                 {
-                    return sax->parse_error(chars_read, get_token_string(), parse_error::create(113, chars_read, exception_message(input_format, "ndarray dimentional vector is not allowed", "size"), nullptr));
+                    return sax->parse_error(chars_read, get_token_string(), parse_error::create(113, chars_read, exception_message(input_format, "ndarray dimensional vector is not allowed", "size"), nullptr));
                 }
                 std::vector<size_t> dim;
                 if (JSON_HEDLEY_UNLIKELY(!get_ubjson_ndarray_size(dim)))

examples/server/public/completion.js (+1 -1)

@@ -114,7 +114,7 @@ export async function* llama(prompt, params = {}, config = {}) {
   return content;
 }

-// Call llama, return an event target that you can subcribe to
+// Call llama, return an event target that you can subscribe to
 //
 // Example:
 //

examples/server/public/index.html (+3 -3)

@@ -238,7 +238,7 @@
         cache_prompt: true
       })

-      /* START: Support for storing prompt templates and parameters in borwser LocalStorage */
+      /* START: Support for storing prompt templates and parameters in browsers LocalStorage */

       const local_storage_storageKey = "llamacpp_server_local_storage";

@@ -282,7 +282,7 @@
       let importedTemplates = local_storage_getDataAsObject('user_templates')

       if (importedTemplates) {
-        // saved templates were successfuly imported.
+        // saved templates were successfully imported.

         console.log('Processing saved templates and updating default template')
         params.value = { ...params.value, image_data: [] };

@@ -303,7 +303,7 @@
       }

       function userTemplateResetToDefault() {
-        console.log('Reseting themplate to default')
+        console.log('Resetting template to default')
         selectedUserTemplate.value.name = 'default';
         selectedUserTemplate.value.data = savedUserTemplates.value['default'];
       }

examples/speculative/README.md (+1 -1)

@@ -1,6 +1,6 @@
 # llama.cpp/examples/speculative

-Demonstartion of speculative decoding and tree-based speculative decoding techniques
+Demonstration of speculative decoding and tree-based speculative decoding techniques

 More info:

examples/speculative/speculative.cpp (+1 -1)

@@ -428,7 +428,7 @@ int main(int argc, char ** argv) {
             ++n_past_tgt;
         }

-        // the first token is always proposed by the traget model before the speculation loop so we erase it here
+        // the first token is always proposed by the target model before the speculation loop so we erase it here
         for (int s = 0; s < n_seq_dft; ++s) {
             if (!drafts[s].active) {
                 continue;

ggml-alloc.h (+1 -1)

@@ -43,7 +43,7 @@ GGML_API size_t ggml_allocr_alloc_graph(ggml_allocr_t alloc, struct ggml_cgraph
 // ggml-backend v2 API
 //

-// Seperate tensor and graph allocator objects
+// Separate tensor and graph allocator objects
 // This is necessary for multi-backend allocation because the graph allocator needs to use multiple tensor allocators
 // The original API is kept as a wrapper around the new API


ggml-quants.c (+2 -2)

@@ -3114,7 +3114,7 @@ void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restri

     size_t vl = __riscv_vsetvl_e8m1(qk/2);

-    // These tempory registers are for masking and shift operations
+    // These temporary registers are for masking and shift operations
     vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
     vuint32m2_t vt_2 = __riscv_vsll_vv_u32m2(__riscv_vmv_v_x_u32m2(1, vl), vt_1, vl);

@@ -4757,7 +4757,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri

     vl = 16;

-    // retreive lane to multiply with scale
+    // retrieve lane to multiply with scale
     vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl);
     vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl);
     vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl);

ggml.c (+6 -6)

@@ -1,4 +1,4 @@
-#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnigns on Windows
+#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows
 #define _USE_MATH_DEFINES // For M_PI on MSVC

 #include "ggml-impl.h"

@@ -33,7 +33,7 @@
 // we should just be careful :)
 #pragma warning(disable: 4244 4267)

-// disable POSIX deprecation warnigns
+// disable POSIX deprecation warnings
 // these functions are never going away, anyway
 #pragma warning(disable: 4996)
 #endif

@@ -1762,7 +1762,7 @@ static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size
 static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");

 // WARN:
-// Mis-confguration can lead to problem that's hard to reason about:
+// Mis-configuration can lead to problem that's hard to reason about:
 // * At best it crash or talks nosense.
 // * At worst it talks slightly difference but hard to perceive.
 //

@@ -7524,7 +7524,7 @@ static void ggml_compute_forward_acc_f32(
     GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));

     // view src0 and dst with these strides and data offset inbytes during acc
-    // nb0 is implicitely element_size because src0 and dst are contiguous
+    // nb0 is implicitly element_size because src0 and dst are contiguous
     size_t nb1 = ((int32_t *) dst->op_params)[0];
     size_t nb2 = ((int32_t *) dst->op_params)[1];
     size_t nb3 = ((int32_t *) dst->op_params)[2];

@@ -10165,7 +10165,7 @@ static void ggml_compute_forward_set_f32(
     GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));

     // view src0 and dst with these strides and data offset inbytes during set
-    // nb0 is implicitely element_size because src0 and dst are contiguous
+    // nb0 is implicitly element_size because src0 and dst are contiguous
     size_t nb1 = ((int32_t *) dst->op_params)[0];
     size_t nb2 = ((int32_t *) dst->op_params)[1];
     size_t nb3 = ((int32_t *) dst->op_params)[2];
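The nb0..nb3 comment in both hunks refers to ggml's byte-stride convention: for a contiguous tensor nb0 is the element size and each higher stride is the previous stride times the previous dimension. A minimal sketch of the offset arithmetic those strides imply:

```cpp
#include <cstddef>

// byte offset of element (i0, i1, i2, i3) given per-dimension byte strides;
// for contiguous tensors nb0 is the element size, nb1 = nb0 * ne0, and so on
static size_t tensor_byte_offset(size_t i0, size_t i1, size_t i2, size_t i3,
                                 size_t nb0, size_t nb1, size_t nb2, size_t nb3) {
    return i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3;
}
```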
@@ -14491,7 +14491,7 @@ void ggml_build_backward_gradient_checkpointing(
                 // insert new tensors recomputing src, reusing already made replacements,
                 // remember replacements: remember new tensors with mapping from corresponding gf nodes
                 // recurse for input tensors,
-                // unless (i.e. terminating when) input tensors are replacments (like checkpoints)
+                // unless (i.e. terminating when) input tensors are replacements (like checkpoints)
                 node->src[k] = ggml_recompute_graph_node(ctx, gf, replacements, node->src[k]);
             }
             // insert rewritten backward node with replacements made into resulting backward graph gb

gguf-py/README.md (+1 -1)

@@ -61,7 +61,7 @@ If you want to publish the package manually for any reason, you need to have `tw
 pip install build twine
 ```

-Then, folow these steps to release a new version:
+Then, follow these steps to release a new version:

 1. Bump the version in `pyproject.toml`.
 2. Build the package:

llama.cpp (+5 -5)

@@ -2791,7 +2791,7 @@ static void llm_load_vocab(
     // The assumption is, since special tokens aren't meant to be exposed to end user, they are designed
     // to be unmatchable by the tokenizer, therefore tokens from the vocab, which are unmatchable by the tokenizer
     // are special tokens.
-    // From testing, this appears to corelate 1:1 with special tokens.
+    // From testing, this appears to correlate 1:1 with special tokens.
     //

     // Counting special tokens and verifying in only one direction
@@ -5888,7 +5888,7 @@ static int llama_decode_internal(
     const int64_t n_embd  = hparams.n_embd;
     const int64_t n_vocab = hparams.n_vocab;

-    // helpers for smoother batch API transistion
+    // helpers for smoother batch API transition
     // after deprecating the llama_eval calls, these will be removed
     std::vector<llama_pos> pos;
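The transition helpers mentioned here synthesize per-token data that old llama_eval-style callers never provided. A hedged sketch of what the position helper amounts to (names are illustrative, not the literal implementation): when the batch carries no explicit positions, consecutive ones are generated after the cached context.

```cpp
#include <cstdint>
#include <vector>

typedef int32_t llama_pos;

// if the caller supplied no explicit positions, synthesize consecutive ones:
// token i of the batch sits right after the n_past tokens already in the cache
static std::vector<llama_pos> make_positions(int n_tokens, llama_pos n_past) {
    std::vector<llama_pos> pos(n_tokens);
    for (int i = 0; i < n_tokens; i++) {
        pos[i] = n_past + i;
    }
    return pos;
}
```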

@@ -6691,12 +6691,12 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<

         // loop over the text
         while (true) {
-            // find the first occurence of a given special token in this fragment
+            // find the first occurrence of a given special token in this fragment
             // passing offset argument only limit the "search area" but match coordinates
             // are still relative to the source full raw_text
             auto match = raw_text->find(special_token, raw_text_base_offset);

-            // no occurences found, stop processing this fragment for a given special token
+            // no occurrences found, stop processing this fragment for a given special token
             if (match == std::string::npos) break;

             // check if match is within bounds of offset <-> length
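The loop in this hunk repeatedly calls std::string::find with a moving offset. A self-contained sketch of that find-all pattern, simplified from the fragment-partitioning logic above (names are illustrative):

```cpp
#include <string>
#include <vector>

// collect the start position of every occurrence of token in text; the offset
// only narrows the search area, matches stay relative to the full string
static std::vector<size_t> find_all(const std::string & text, const std::string & token) {
    std::vector<size_t> matches;
    if (token.empty()) {
        return matches;  // guard: an empty token would match everywhere
    }
    size_t offset = 0;
    while (true) {
        const size_t match = text.find(token, offset);
        if (match == std::string::npos) break;  // no occurrences left
        matches.push_back(match);
        offset = match + token.size();          // continue past this match
    }
    return matches;
}
```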
@@ -7895,7 +7895,7 @@ struct llama_beam_search_data {
     }

     // Min-heaps are used to efficiently collect the top-k elements (k=n_beams).
-    // The repetative patterns below reflect the 2 stages of heaps:
+    // The repetitive patterns below reflect the 2 stages of heaps:
     // * Gather elements until the vector is full, then call std::make_heap() on it.
     // * If the heap is full and a new element is found that should be included, pop the
     //   least element to the back(), replace it with the new, then push it into the heap.
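The two heap stages the comment lists map directly onto the standard library heap primitives. A minimal, self-contained sketch of top-k collection with a min-heap (illustrative only, not the beam-search code itself):

```cpp
#include <algorithm>
#include <functional>
#include <vector>

// keep the k largest values seen so far; heap.front() is always the smallest
// of the kept elements, so it is the one to evict when a better value arrives
static std::vector<float> top_k(const std::vector<float> & xs, size_t k) {
    std::vector<float> heap;
    for (const float x : xs) {
        if (heap.size() < k) {
            heap.push_back(x);  // stage 1: gather until the vector is full
            if (heap.size() == k) {
                std::make_heap(heap.begin(), heap.end(), std::greater<float>());
            }
        } else if (!heap.empty() && x > heap.front()) {
            // stage 2: pop the least element to the back(), replace, push
            std::pop_heap(heap.begin(), heap.end(), std::greater<float>());
            heap.back() = x;
            std::push_heap(heap.begin(), heap.end(), std::greater<float>());
        }
    }
    return heap;
}
```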

tests/test-grad0.cpp (+1 -1)

@@ -1,4 +1,4 @@
-#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnigns on Windows
+#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows
 #include "ggml.h"

 #include <cmath>

tests/test-quantize-perf.cpp (+2 -2)

@@ -117,7 +117,7 @@ static void usage(char * argv[]) {
     printf("  --size SIZE  set test size, divisible by 32 (L1_SIZE:%d)\n", L1_SIZE);
     printf("  -3           use size as L1, L2, L3 sizes (L1:%d L2:%d L3:%d)\n", L1_SIZE, L2_SIZE, L3_SIZE);
     printf("  -4           use size as L1, L2, L3, MEM sizes (L1:%d L2:%d L3:%d MEM:%d)\n", L1_SIZE, L2_SIZE, L3_SIZE, MEM_SIZE);
-    printf("  --op OP      set test opration as quantize_row_q_reference, quantize_row_q, dequantize_row_q,\n");
+    printf("  --op OP      set test operation as quantize_row_q_reference, quantize_row_q, dequantize_row_q,\n");
     printf("               quantize_row_q_dot, vec_dot_q (all)\n");
     printf("  --type TYPE  set test type as");
     for (int i = 0; i < GGML_TYPE_COUNT; i++) {

@@ -202,7 +202,7 @@ int main(int argc, char * argv[]) {
             }
             int alignment = std::stoi(argv[i]);
             if (alignment < 0 || alignment > MAX_ALIGNMENT) {
-                fprintf(stderr, "error: aligment-offset must be less than %d\n", MAX_ALIGNMENT);
+                fprintf(stderr, "error: alignment-offset must be less than %d\n", MAX_ALIGNMENT);
                 invalid_param = true;
                 break;
             }
