
Commit 72cb1ed

Committed Dec 7, 2023
Use `typos` to fix comments and logs.
1 parent 05cd6e5 · commit 72cb1ed

19 files changed: +44 −44 lines
 

common/log.h (+4 −4)

@@ -61,13 +61,13 @@
  // #define LOG_TARGET stderr
  // #include "log.h"
  //
- // The log target can also be redirected to a diffrent function
+ // The log target can also be redirected to a different function
  // like so:
  //
- // #define LOG_TARGET log_handler_diffrent()
+ // #define LOG_TARGET log_handler_different()
  // #include "log.h"
  //
- // FILE* log_handler_diffrent()
+ // FILE* log_handler_different()
  // {
  //     return stderr;
  // }
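
The comment above is nearly a complete program already; a minimal compilable sketch, assuming a simplified one-argument LOG macro rather than the real log.h machinery:

    #include <cstdio>

    FILE * log_handler_different() {
        return stderr; // route all log output to stderr
    }

    #define LOG_TARGET log_handler_different()
    #define LOG(...)   fprintf(LOG_TARGET, __VA_ARGS__)

    int main() {
        LOG("hello via %s\n", "LOG_TARGET"); // lands on stderr through the handler
        return 0;
    }
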
@@ -421,7 +421,7 @@ inline FILE *log_handler2_impl(bool change = false, LogTriState append = LogTriS
 
  // Disables logs entirely at runtime.
  // Makes LOG() and LOG_TEE() produce no output,
- // untill enabled back.
+ // until enabled back.
  #define log_disable() log_disable_impl()
 
  // INTERNAL, DO NOT USE
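
One plausible shape for such a runtime toggle, sketched here as a flag-guarded macro (an assumption for illustration; log.h's real implementation differs):

    #include <cstdio>

    static bool g_log_enabled = true;

    #define LOG(...) do { if (g_log_enabled) fprintf(stderr, __VA_ARGS__); } while (0)

    inline void log_disable_impl() { g_log_enabled = false; } // LOG() becomes a no-op
    inline void log_enable_impl()  { g_log_enabled = true;  } // output resumes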

common/stb_image.h (+9 −9)

@@ -2191,7 +2191,7 @@ stbi_inline static int stbi__extend_receive(stbi__jpeg * j, int n) {
      if (j->code_bits < n)
          stbi__grow_buffer_unsafe(j);
      if (j->code_bits < n)
-         return 0; // ran out of bits from stream, return 0s intead of continuing
+         return 0; // ran out of bits from stream, return 0s instead of continuing
 
      sgn = j->code_buffer >> 31; // sign bit always in MSB; 0 if MSB clear (positive), 1 if MSB set (negative)
      k = stbi_lrot(j->code_buffer, n);
@@ -2207,7 +2207,7 @@ stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg * j, int n) {
      if (j->code_bits < n)
          stbi__grow_buffer_unsafe(j);
      if (j->code_bits < n)
-         return 0; // ran out of bits from stream, return 0s intead of continuing
+         return 0; // ran out of bits from stream, return 0s instead of continuing
      k = stbi_lrot(j->code_buffer, n);
      j->code_buffer = k & ~stbi__bmask[n];
      k &= stbi__bmask[n];
@@ -2220,7 +2220,7 @@ stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg * j) {
      if (j->code_bits < 1)
          stbi__grow_buffer_unsafe(j);
      if (j->code_bits < 1)
-         return 0; // ran out of bits from stream, return 0s intead of continuing
+         return 0; // ran out of bits from stream, return 0s instead of continuing
      k = j->code_buffer;
      j->code_buffer <<= 1;
      --j->code_bits;
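
All three of these hunks touch the same defensive pattern: when the entropy-coded stream runs dry, the decoder substitutes zero bits rather than aborting, so a truncated JPEG produces garbage pixels instead of failing deep inside a scan. A hypothetical stripped-down bit reader illustrating the idea (not stb_image's actual state machine):

    #include <cstddef>

    struct bit_reader {
        const unsigned char * data;
        size_t len, pos;
        unsigned buf; // bit accumulator; newest byte in the low bits
        int bits;     // number of valid bits in buf
    };

    // read n bits (n <= 24); on exhaustion, return 0s instead of continuing
    static int get_bits(bit_reader * br, int n) {
        while (br->bits < n && br->pos < br->len) {
            br->buf = (br->buf << 8) | br->data[br->pos++];
            br->bits += 8;
        }
        if (br->bits < n) {
            return 0; // ran out of bits from the stream
        }
        br->bits -= n;
        return (int) ((br->buf >> br->bits) & ((1u << n) - 1));
    }
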
@@ -5015,13 +5015,13 @@ static int stbi__create_png_image_raw(stbi__png * a, stbi_uc * raw, stbi__uint32
 
      // we make a separate pass to expand bits to pixels; for performance,
      // this could run two scanlines behind the above code, so it won't
-     // intefere with filtering but will still be in the cache.
+     // interfere with filtering but will still be in the cache.
      if (depth < 8) {
          for (j = 0; j < y; ++j) {
              stbi_uc * cur = a->out + stride * j;
              stbi_uc * in = a->out + stride * j + x * out_n - img_width_bytes;
              // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for
-             // 1/2/4-bit png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that
+             // 1/2/4-bit png guarantee byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that
              // will be skipped in the later loop
              stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range
 
@@ -5215,7 +5215,7 @@ static int stbi__expand_png_palette(stbi__png * a, stbi_uc * palette, int len, i
      if (p == NULL)
          return stbi__err("outofmem", "Out of memory");
 
-     // between here and free(out) below, exitting would leak
+     // between here and free(out) below, exiting would leak
      temp_out = p;
 
      if (pal_img_n == 3) {
@@ -6281,7 +6281,7 @@ static void * stbi__tga_load(stbi__context * s, int * x, int * y, int * comp, in
      if (tga_width > STBI_MAX_DIMENSIONS)
          return stbi__errpuc("too large", "Very large image (corrupt?)");
 
-     // do a tiny bit of precessing
+     // do a tiny bit of processing
      if (tga_image_type >= 8) {
          tga_image_type -= 8;
          tga_is_RLE = 1;
@@ -7213,7 +7213,7 @@ static stbi_uc * stbi__gif_load_next(stbi__context * s, stbi__gif * g, int * com
          // 0: not specified.
      }
 
-     // background is what out is after the undoing of the previou frame;
+     // background is what out is after the undoing of the previous frame;
      memcpy(g->background, g->out, 4 * g->w * g->h);
  }
 
@@ -8277,7 +8277,7 @@ STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const * c, void * us
      1.31  (2011-06-20)
            a few more leak fixes, bug in PNG handling (SpartanJ)
      1.30  (2011-06-11)
-           added ability to load files via callbacks to accomidate custom input streams (Ben Wenger)
+           added ability to load files via callbacks to accommodate custom input streams (Ben Wenger)
            removed deprecated format-specific test/load functions
            removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks
            anyway error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) fix inefficiency in

convert.py (+2 −2)

@@ -585,7 +585,7 @@ def merge_multifile_models(models_plus: list[ModelPlus]) -> ModelPlus:
 
      if any("model.embed_tokens.weight" in mp.model for mp in models_plus):
          # Transformers models put different tensors in different files, but
-         # don't split indivdual tensors between files.
+         # don't split individual tensors between files.
          model: LazyModel = {}
          for mp in models_plus:
              model.update(mp.model)
@@ -678,7 +678,7 @@ def rebuild_from_type_v2(func, new_type, args, state):
          return func(*args)
 
      CLASSES: dict[tuple[str, str], Any] = {
-         # getattr used here as a workaround for mypy not being smart enough to detrmine
+         # getattr used here as a workaround for mypy not being smart enough to determine
          # the staticmethods have a __func__ attribute.
          ('torch._tensor', '_rebuild_from_type_v2'): getattr(rebuild_from_type_v2, '__func__'),
          ('torch._utils', '_rebuild_tensor_v2'): getattr(lazy_rebuild_tensor_v2, '__func__'),

examples/llava/clip.cpp (+1 −1)

@@ -739,7 +739,7 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip
      temp->ny = longer_side;
      temp->size = 3 * longer_side * longer_side;
      temp->data = new uint8_t[temp->size]();
-     uint8_t bc[3] = {122, 116, 104}; // bakground color in RGB from LLaVA
+     uint8_t bc[3] = {122, 116, 104}; // background color in RGB from LLaVA
 
      // fill with background color
      for (size_t i = 0; i < temp->size; i++) {

examples/llava/convert-image-encoder-to-gguf.py (+1 −1)

@@ -51,7 +51,7 @@ def bytes_to_unicode():
      The reversible bpe codes work on unicode strings.
      This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
      When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
-     This is a signficant percentage of your normal, say, 32K bpe vocab.
+     This is a significant percentage of your normal, say, 32K bpe vocab.
      To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
      And avoids mapping to whitespace/control characters the bpe code barfs on.
      """

examples/lookahead/README.md (+1 −1)

@@ -1,6 +1,6 @@
  # llama.cpp/examples/lookahead
 
- Demonstartion of lookahead decoding technique:
+ Demonstration of lookahead decoding technique:
 
  https://lmsys.org/blog/2023-11-21-lookahead-decoding/
 
examples/server/json.hpp (+1 −1)

@@ -11227,7 +11227,7 @@ class binary_reader
      }
      if (is_ndarray) // ndarray dimensional vector can only contain integers, and can not embed another array
      {
-         return sax->parse_error(chars_read, get_token_string(), parse_error::create(113, chars_read, exception_message(input_format, "ndarray dimentional vector is not allowed", "size"), nullptr));
+         return sax->parse_error(chars_read, get_token_string(), parse_error::create(113, chars_read, exception_message(input_format, "ndarray dimensional vector is not allowed", "size"), nullptr));
      }
      std::vector<size_t> dim;
      if (JSON_HEDLEY_UNLIKELY(!get_ubjson_ndarray_size(dim)))

examples/server/public/completion.js (+1 −1)

@@ -114,7 +114,7 @@ export async function* llama(prompt, params = {}, config = {}) {
      return content;
  }
 
- // Call llama, return an event target that you can subcribe to
+ // Call llama, return an event target that you can subscribe to
  //
  // Example:
  //

examples/server/public/index.html (+3 −3)

@@ -238,7 +238,7 @@
      cache_prompt: true
  })
 
- /* START: Support for storing prompt templates and parameters in borwser LocalStorage */
+ /* START: Support for storing prompt templates and parameters in browsers LocalStorage */
 
  const local_storage_storageKey = "llamacpp_server_local_storage";
 
@@ -282,7 +282,7 @@
  let importedTemplates = local_storage_getDataAsObject('user_templates')
 
  if (importedTemplates) {
-     // saved templates were successfuly imported.
+     // saved templates were successfully imported.
 
      console.log('Processing saved templates and updating default template')
      params.value = { ...params.value, image_data: [] };
@@ -303,7 +303,7 @@
  }
 
  function userTemplateResetToDefault() {
-     console.log('Reseting themplate to default')
+     console.log('Resetting template to default')
      selectedUserTemplate.value.name = 'default';
      selectedUserTemplate.value.data = savedUserTemplates.value['default'];
  }

examples/speculative/README.md (+1 −1)

@@ -1,6 +1,6 @@
  # llama.cpp/examples/speculative
 
- Demonstartion of speculative decoding and tree-based speculative decoding techniques
+ Demonstration of speculative decoding and tree-based speculative decoding techniques
 
  More info:
 
examples/speculative/speculative.cpp (+1 −1)

@@ -428,7 +428,7 @@ int main(int argc, char ** argv) {
          ++n_past_tgt;
      }
 
-     // the first token is always proposed by the traget model before the speculation loop so we erase it here
+     // the first token is always proposed by the target model before the speculation loop so we erase it here
      for (int s = 0; s < n_seq_dft; ++s) {
          if (!drafts[s].active) {
              continue;

ggml-alloc.h (+1 −1)

@@ -42,7 +42,7 @@ GGML_API size_t ggml_allocr_alloc_graph(ggml_allocr_t alloc, struct ggml_cgraph
  // ggml-backend v2 API
  //
 
- // Seperate tensor and graph allocator objects
+ // Separate tensor and graph allocator objects
  // This is necessary for multi-backend allocation because the graph allocator needs to use multiple tensor allocators
  // The original API is kept as a wrapper around the new API
 
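
A hypothetical sketch of the split described here (names illustrative, not the actual ggml-backend v2 API): the graph allocator owns one tensor allocator per backend buffer and dispatches each node to the right one:

    #include <vector>

    struct tensor_allocator {
        // places tensors inside a single backend buffer
    };

    struct graph_allocator {
        std::vector<tensor_allocator> per_buffer; // one tensor allocator per backend buffer

        tensor_allocator & allocator_for(int buffer_id) {
            return per_buffer[buffer_id]; // each graph node allocates via its backend's allocator
        }
    };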

ggml-backend-impl.h (+1 −1)

@@ -60,7 +60,7 @@ extern "C" {
      void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
      void (*synchronize)     (ggml_backend_t backend);
 
-     // (optional) copy tensor between different backends, allow for single-copy tranfers
+     // (optional) copy tensor between different backends, allow for single-copy transfers
      void (*cpy_tensor_from)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
      void (*cpy_tensor_to)  (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
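
A hypothetical sketch of how an optional interface slot like this is typically consumed; the two-copy fallback path is an assumption for illustration, not the actual ggml-backend dispatch code:

    // simplified stand-in for the function-pointer interface above
    struct backend_iface {
        void (*cpy_tensor_from)(void * src, void * dst); // may be null: the entry is optional
    };

    void copy_tensor(backend_iface * iface, void * src, void * dst) {
        if (iface->cpy_tensor_from) {
            iface->cpy_tensor_from(src, dst); // single-copy, backend-to-backend transfer
        } else {
            // fallback: read src into host memory, then write it into dst (two copies)
        }
    }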

ggml-quants.c (+2 −2)

@@ -3114,7 +3114,7 @@ void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restri
 
      size_t vl = __riscv_vsetvl_e8m1(qk/2);
 
-     // These tempory registers are for masking and shift operations
+     // These temporary registers are for masking and shift operations
      vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
      vuint32m2_t vt_2 = __riscv_vsll_vv_u32m2(__riscv_vmv_v_x_u32m2(1, vl), vt_1, vl);
 
@@ -4757,7 +4757,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri
 
      vl = 16;
 
-     // retreive lane to multiply with scale
+     // retrieve lane to multiply with scale
      vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl);
      vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl);
      vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl);

ggml.c (+6 −6)

@@ -1,4 +1,4 @@
- #define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnigns on Windows
+ #define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows
  #define _USE_MATH_DEFINES // For M_PI on MSVC
 
  #include "ggml-impl.h"
@@ -33,7 +33,7 @@
  // we should just be careful :)
  #pragma warning(disable: 4244 4267)
 
- // disable POSIX deprecation warnigns
+ // disable POSIX deprecation warnings
  // these functions are never going away, anyway
  #pragma warning(disable: 4996)
  #endif
@@ -1756,7 +1756,7 @@ static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size
  static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");
 
  // WARN:
- // Mis-confguration can lead to problem that's hard to reason about:
+ // Mis-configuration can lead to problem that's hard to reason about:
  // * At best it crash or talks nosense.
  // * At worst it talks slightly difference but hard to perceive.
  //
@@ -7421,7 +7421,7 @@ static void ggml_compute_forward_acc_f32(
      GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
 
      // view src0 and dst with these strides and data offset inbytes during acc
-     // nb0 is implicitely element_size because src0 and dst are contiguous
+     // nb0 is implicitly element_size because src0 and dst are contiguous
      size_t nb1 = ((int32_t *) dst->op_params)[0];
      size_t nb2 = ((int32_t *) dst->op_params)[1];
      size_t nb3 = ((int32_t *) dst->op_params)[2];
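
The reason nb0 can stay implicit: for contiguous tensors the innermost stride is exactly the element size, so op_params only needs to carry nb1/nb2/nb3 plus a byte offset. A small illustrative helper (not from ggml.c) showing the addressing rule:

    #include <cstddef>

    // byte offset of element (i0, i1, i2, i3) in a strided 4-D view;
    // when src0/dst are contiguous, nb0 == element size, hence "implicit"
    static size_t view_offset(size_t offset, size_t nb0, size_t nb1, size_t nb2, size_t nb3,
                              size_t i0, size_t i1, size_t i2, size_t i3) {
        return offset + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3;
    }
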
@@ -10027,7 +10027,7 @@ static void ggml_compute_forward_set_f32(
      GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
 
      // view src0 and dst with these strides and data offset inbytes during set
-     // nb0 is implicitely element_size because src0 and dst are contiguous
+     // nb0 is implicitly element_size because src0 and dst are contiguous
      size_t nb1 = ((int32_t *) dst->op_params)[0];
      size_t nb2 = ((int32_t *) dst->op_params)[1];
      size_t nb3 = ((int32_t *) dst->op_params)[2];
@@ -14272,7 +14272,7 @@ void ggml_build_backward_gradient_checkpointing(
          // insert new tensors recomputing src, reusing already made replacements,
          // remember replacements: remember new tensors with mapping from corresponding gf nodes
          // recurse for input tensors,
-         // unless (i.e. terminating when) input tensors are replacments (like checkpoints)
+         // unless (i.e. terminating when) input tensors are replacements (like checkpoints)
          node->src[k] = ggml_recompute_graph_node(ctx, gf, replacements, node->src[k]);
      }
      // insert rewritten backward node with replacements made into resulting backward graph gb
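
The recursion described above amounts to a memoized graph rewrite. A hypothetical skeleton of that shape (illustrative only; the real code operates on ggml_tensor nodes and records each new tensor in the replacements map):

    #include <unordered_map>

    struct node { node * src[2] = {nullptr, nullptr}; };

    node * recompute(std::unordered_map<node *, node *> & replacements, node * n) {
        auto it = replacements.find(n);
        if (it != replacements.end()) {
            return it->second; // already a replacement (e.g. a checkpoint): stop recursing
        }
        for (node *& s : n->src) {
            if (s) s = recompute(replacements, s); // rewrite inputs first
        }
        // ...build a fresh copy of n here and record it in replacements...
        return n;
    }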

gguf-py/README.md (+1 −1)

@@ -61,7 +61,7 @@ If you want to publish the package manually for any reason, you need to have `tw
  pip install build twine
  ```
 
- Then, folow these steps to release a new version:
+ Then, follow these steps to release a new version:
 
  1. Bump the version in `pyproject.toml`.
  2. Build the package:

llama.cpp (+5 −5)

@@ -2746,7 +2746,7 @@ static void llm_load_vocab(
      // The assumption is, since special tokens aren't meant to be exposed to end user, they are designed
      // to be unmatchable by the tokenizer, therefore tokens from the vocab, which are unmatchable by the tokenizer
      // are special tokens.
-     // From testing, this appears to corelate 1:1 with special tokens.
+     // From testing, this appears to correlate 1:1 with special tokens.
      //
 
      // Counting special tokens and verifying in only one direction
@@ -5896,7 +5896,7 @@ static int llama_decode_internal(
      const int64_t n_embd  = hparams.n_embd;
      const int64_t n_vocab = hparams.n_vocab;
 
-     // helpers for smoother batch API transistion
+     // helpers for smoother batch API transition
      // after deprecating the llama_eval calls, these will be removed
      std::vector<llama_pos> pos;
 
@@ -6674,12 +6674,12 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
 
          // loop over the text
          while (true) {
-             // find the first occurence of a given special token in this fragment
+             // find the first occurrence of a given special token in this fragment
              // passing offset argument only limit the "search area" but match coordinates
              // are still relative to the source full raw_text
              auto match = raw_text->find(special_token, raw_text_base_offset);
 
-             // no occurences found, stop processing this fragment for a given special token
+             // no occurrences found, stop processing this fragment for a given special token
              if (match == std::string::npos) break;
 
              // check if match is within bounds of offset <-> length
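
The subtlety these comments flag is worth pinning down: std::string::find's second argument narrows where the search starts, but the returned position is still relative to the beginning of the string. A minimal standalone sketch of the same loop shape:

    #include <string>

    void for_each_match(const std::string & raw_text, const std::string & special_token) {
        size_t base_offset = 0;
        while (true) {
            // search starts at base_offset, but match is absolute within raw_text
            size_t match = raw_text.find(special_token, base_offset);
            if (match == std::string::npos) break; // no more occurrences
            // ...split out the fragment [match, match + special_token.size())...
            base_offset = match + special_token.size();
        }
    }
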
@@ -7878,7 +7878,7 @@ struct llama_beam_search_data {
      }
 
      // Min-heaps are used to efficiently collect the top-k elements (k=n_beams).
-     // The repetative patterns below reflect the 2 stages of heaps:
+     // The repetitive patterns below reflect the 2 stages of heaps:
      // * Gather elements until the vector is full, then call std::make_heap() on it.
      // * If the heap is full and a new element is found that should be included, pop the
      //   least element to the back(), replace it with the new, then push it into the heap.
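
The two stages listed in this comment are the standard bounded top-k idiom. A minimal sketch over plain floats (the actual code heapifies beam candidates with a custom comparator):

    #include <algorithm>
    #include <functional>
    #include <vector>

    // keep the k largest values seen so far; heap.front() is the smallest kept
    void topk_push(std::vector<float> & heap, size_t k, float x) {
        if (heap.size() < k) {
            heap.push_back(x); // stage 1: gather until the vector is full...
            if (heap.size() == k) {
                std::make_heap(heap.begin(), heap.end(), std::greater<float>()); // ...then heapify
            }
        } else if (x > heap.front()) {
            // stage 2: pop the least element to the back, replace it, push the new one
            std::pop_heap(heap.begin(), heap.end(), std::greater<float>());
            heap.back() = x;
            std::push_heap(heap.begin(), heap.end(), std::greater<float>());
        }
    }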

tests/test-grad0.cpp (+1 −1)

@@ -1,4 +1,4 @@
- #define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnigns on Windows
+ #define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows
  #include "ggml.h"
 
  #include <cmath>

tests/test-quantize-perf.cpp (+2 −2)

@@ -117,7 +117,7 @@ static void usage(char * argv[]) {
      printf("  --size SIZE       set test size, divisible by 32 (L1_SIZE:%d)\n", L1_SIZE);
      printf("  -3                use size as L1, L2, L3 sizes (L1:%d L2:%d L3:%d)\n", L1_SIZE, L2_SIZE, L3_SIZE);
      printf("  -4                use size as L1, L2, L3, MEM sizes (L1:%d L2:%d L3:%d MEM:%d)\n", L1_SIZE, L2_SIZE, L3_SIZE, MEM_SIZE);
-     printf("  --op OP           set test opration as quantize_row_q_reference, quantize_row_q, dequantize_row_q,\n");
+     printf("  --op OP           set test operation as quantize_row_q_reference, quantize_row_q, dequantize_row_q,\n");
      printf("                    quantize_row_q_dot, vec_dot_q (all)\n");
      printf("  --type TYPE       set test type as");
      for (int i = 0; i < GGML_TYPE_COUNT; i++) {
@@ -202,7 +202,7 @@ int main(int argc, char * argv[]) {
      }
      int alignment = std::stoi(argv[i]);
      if (alignment < 0 || alignment > MAX_ALIGNMENT) {
-         fprintf(stderr, "error: aligment-offset must be less than %d\n", MAX_ALIGNMENT);
+         fprintf(stderr, "error: alignment-offset must be less than %d\n", MAX_ALIGNMENT);
          invalid_param = true;
          break;
      }
