Windows fixes #31

Merged · 5 commits · Mar 12, 2023
ggml.c (20 changes: 10 additions & 10 deletions)

@@ -407,8 +407,8 @@ void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) {
     const int nb = k / QK;
     const size_t bs = sizeof(float) + QK/2;
 
-    uint8_t * restrict pd = (uint8_t *) (y + 0*bs);
-    uint8_t * restrict pb = (uint8_t *) (y + 0*bs + sizeof(float));
+    uint8_t * restrict pd = ((uint8_t *)y + 0*bs);
+    uint8_t * restrict pb = ((uint8_t *)y + 0*bs + sizeof(float));
 
     uint8_t pp[QK/2];
 
@@ -654,8 +654,8 @@ void dequantize_row_q4_0(const void * restrict x, float * restrict y, int k) {
     const int nb = k / QK;
     const size_t bs = sizeof(float) + QK/2;
 
-    const uint8_t * restrict pd = (const uint8_t *) (x + 0*bs);
-    const uint8_t * restrict pb = (const uint8_t *) (x + 0*bs + sizeof(float));
+    const uint8_t * restrict pd = ((const uint8_t *)x + 0*bs);
+    const uint8_t * restrict pb = ((const uint8_t *)x + 0*bs + sizeof(float));
 
     // scalar
     for (int i = 0; i < nb; i++) {
@@ -1301,11 +1301,11 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void
 
     const size_t bs = sizeof(float) + QK/2;
 
-    const uint8_t * restrict pd0 = (const uint8_t *) (x + 0*bs);
-    const uint8_t * restrict pd1 = (const uint8_t *) (y + 0*bs);
+    const uint8_t * restrict pd0 = ((const uint8_t *)x + 0*bs);
+    const uint8_t * restrict pd1 = ((const uint8_t *)y + 0*bs);
 
-    const uint8_t * restrict pb0 = (const uint8_t *) (x + 0*bs + sizeof(float));
-    const uint8_t * restrict pb1 = (const uint8_t *) (y + 0*bs + sizeof(float));
+    const uint8_t * restrict pb0 = ((const uint8_t *)x + 0*bs + sizeof(float));
+    const uint8_t * restrict pb1 = ((const uint8_t *)y + 0*bs + sizeof(float));
 
     float sumf = 0.0;
 
@@ -1731,8 +1731,8 @@ inline static void ggml_vec_mad_q4_0(const int n, float * restrict y, void * res
     const int nb = n / QK;
     const size_t bs = sizeof(float) + QK/2;
 
-    const uint8_t * restrict pd = (const uint8_t *) (x + 0*bs);
-    const uint8_t * restrict pb = (const uint8_t *) (x + 0*bs + sizeof(float));
+    const uint8_t * restrict pd = ((const uint8_t *)x + 0*bs);
+    const uint8_t * restrict pb = ((const uint8_t *)x + 0*bs + sizeof(float));
 
 #if __ARM_NEON
 #if QK == 32
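All four ggml.c hunks fix the same portability problem: the old code did pointer arithmetic on a void *, which is a GCC/Clang extension (sizeof(void) is treated as 1) and a hard error under MSVC (C2036) as well as in standard C. Casting to uint8_t * before adding the byte offset computes the same address on the compilers that accepted the old form and is portable everywhere. A minimal standalone sketch of the difference (not code from the PR):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Return a pointer to byte `off` inside an untyped buffer.
    static uint8_t * byte_at(void * buf, size_t off) {
        // return (uint8_t *)(buf + off);  // GCC extension; MSVC: error C2036
        return (uint8_t *)buf + off;       // standard: cast first, then offset
    }

    int main() {
        uint8_t data[16] = {0};
        *byte_at(data, 4) = 0xff;
        printf("%u\n", data[4]); // prints 255
        return 0;
    }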
main.cpp (12 changes: 7 additions & 5 deletions)

@@ -209,8 +209,8 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
     // create the ggml context
     {
         struct ggml_init_params params = {
-            .mem_size   = ctx_size,
-            .mem_buffer = NULL,
+            /*.mem_size   =*/ ctx_size,
+            /*.mem_buffer =*/ NULL,
         };
 
         model.ctx = ggml_init(params);
@@ -546,12 +546,13 @@ bool llama_eval(
     }
 
     struct ggml_init_params params = {
-        .mem_size   = buf_size,
-        .mem_buffer = buf,
+        /*.mem_size   =*/ buf_size,
+        /*.mem_buffer =*/ buf,
     };
 
     struct ggml_context * ctx0 = ggml_init(params);
-    struct ggml_cgraph gf = { .n_threads = n_threads };
+    ggml_cgraph gf = {};
+    gf.n_threads = n_threads;
 
     struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
     memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));
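Both main.cpp hunks work around the same MSVC limitation: C99-style designated initializers (.mem_size = ...) are not valid C++ before C++20. The fix initializes the fields positionally, keeping the names as comments for readability, and value-initializes the graph struct before assigning the one field that matters. A compilable sketch with stand-in types (not the real ggml structs):

    #include <cstddef>

    struct init_params {   // stand-in for ggml_init_params
        size_t mem_size;
        void * mem_buffer;
    };

    struct cgraph {        // stand-in for ggml_cgraph (the real one has many members)
        int n_nodes;
        int n_threads;
    };

    int main() {
        // init_params p = { .mem_size = 1024, .mem_buffer = nullptr }; // C++20 only
        init_params p = {
            /*.mem_size   =*/ 1024,    // positional, field names kept as comments
            /*.mem_buffer =*/ nullptr,
        };

        cgraph gf = {};      // value-initialize every member to zero
        gf.n_threads = 4;    // then set just the field we need
        return (p.mem_size == 1024 && gf.n_threads == 4) ? 0 : 1;
    }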
@@ -733,6 +734,7 @@ bool llama_eval(
 }
 
 int main(int argc, char ** argv) {
+    ggml_time_init();
     const int64_t t_main_start_us = ggml_time_us();
 
     gpt_params params;
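main.cpp and quantize.cpp now call ggml_time_init() before the first ggml_time_us(). On Windows the high-resolution timer's tick frequency must be queried once before tick counts can be converted to microseconds; on POSIX systems clock_gettime needs no setup, so the call can be a no-op there. A hedged sketch of what such an init pair might look like (the real implementation lives in ggml.c and may differ):

    #include <cstdint>

    #if defined(_WIN32)
    #include <windows.h>
    static int64_t timer_freq; // ticks per second, set once at startup

    void ggml_time_init(void) {
        LARGE_INTEGER f;
        QueryPerformanceFrequency(&f); // must run before any ggml_time_us() call
        timer_freq = f.QuadPart;
    }

    int64_t ggml_time_us(void) {
        LARGE_INTEGER t;
        QueryPerformanceCounter(&t);
        return (t.QuadPart * 1000000) / timer_freq;
    }
    #else
    #include <time.h>

    void ggml_time_init(void) {} // no-op: clock_gettime needs no initialization

    int64_t ggml_time_us(void) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
    }
    #endif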
quantize.cpp (1 change: 1 addition & 0 deletions)

@@ -289,6 +289,7 @@ bool llama_model_quantize(const std::string & fname_inp, const std::string & fna
 // ./llama-quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin type
 //
 int main(int argc, char ** argv) {
+    ggml_time_init();
     if (argc != 4) {
         fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
         fprintf(stderr, "  type = 2 - q4_0\n");
utils.cpp (16 changes: 12 additions & 4 deletions)

@@ -5,6 +5,12 @@
 #include <fstream>
 #include <regex>
 
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#include <malloc.h> // using malloc.h with MSC/MINGW
+#elif !defined(__FreeBSD__)
+#include <alloca.h>
+#endif
+
 bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
     for (int i = 1; i < argc; i++) {
         std::string arg = argv[i];
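The three-way guard is needed because alloca has no portable header: MSVC and MinGW declare it in <malloc.h>, most Unix-likes in <alloca.h>, and FreeBSD provides it through <stdlib.h>, so including <alloca.h> there would fail. A reusable form of the same guard (alloca_compat.h is a hypothetical name, not a file in this PR):

    // alloca_compat.h (hypothetical) -- include before any alloca() call
    #pragma once

    #if defined(_MSC_VER) || defined(__MINGW32__)
    #include <malloc.h>   // MSVC/MinGW declare alloca() here
    #elif defined(__FreeBSD__)
    #include <stdlib.h>   // FreeBSD declares alloca() in stdlib.h
    #else
    #include <alloca.h>   // glibc, musl, macOS, ...
    #endif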
@@ -453,7 +459,8 @@ size_t ggml_quantize_q4_0(float * src, void * dst, int n, int k, int qk, int64_t
 
     assert(k % qk == 0);
 
-    uint8_t pp[qk/2];
+    const size_t pp_size = qk / 2;
+    uint8_t *pp = static_cast<uint8_t*>(alloca(pp_size));
 
     char * pdst = (char *) dst;
 
@@ -492,7 +499,7 @@ size_t ggml_quantize_q4_0(float * src, void * dst, int n, int k, int qk, int64_t
                 pp[l/2] = vi0 | (vi1 << 4);
             }
 
-            memcpy(pb, pp, sizeof(pp));
+            memcpy(pb, pp, pp_size);
             pb += bs;
         }
     }
@@ -507,7 +514,8 @@ size_t ggml_quantize_q4_1(float * src, void * dst, int n, int k, int qk, int64_t
 
     assert(k % qk == 0);
 
-    uint8_t pp[qk/2];
+    const size_t pp_size = qk / 2;
+    uint8_t *pp = static_cast<uint8_t*>(alloca(pp_size));
 
     char * pdst = (char *) dst;
 
@@ -551,7 +559,7 @@ size_t ggml_quantize_q4_1(float * src, void * dst, int n, int k, int qk, int64_t
                 pp[l/2] = vi0 | (vi1 << 4);
             }
 
-            memcpy(pb + i*qk/2, pp, sizeof(pp));
+            memcpy(pb + i*qk/2, pp, pp_size);
         }
     }
 }
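The pattern in all four utils.cpp hunks: uint8_t pp[qk/2] is a C99 variable-length array, which MSVC supports in neither C nor C++, so the buffer moves to alloca. That in turn forces the size into an explicit pp_size variable, because sizeof(pp) on the new pointer would yield sizeof(uint8_t *) rather than the buffer length; the memcpy calls change accordingly. A compilable sketch of the before/after (pack_nibbles is an illustrative helper, not code from the PR):

    #include <cstdint>
    #include <cstring>
    #if defined(_MSC_VER) || defined(__MINGW32__)
    #include <malloc.h>
    #elif !defined(__FreeBSD__)
    #include <alloca.h>
    #endif

    // Pack qk 4-bit values (one per input byte) into qk/2 output bytes.
    void pack_nibbles(const uint8_t * vals, uint8_t * out, int qk) {
        // Before (VLA, rejected by MSVC):
        //     uint8_t pp[qk/2];
        //     ...
        //     memcpy(out, pp, sizeof(pp)); // fine: pp is an array here

        // After (stack allocation MSVC accepts):
        const size_t pp_size = qk / 2;
        uint8_t * pp = static_cast<uint8_t*>(alloca(pp_size));
        for (int l = 0; l < qk; l += 2) {
            pp[l/2] = (vals[l] & 0x0f) | (vals[l+1] << 4); // two nibbles per byte
        }
        memcpy(out, pp, pp_size); // sizeof(pp) would be sizeof(uint8_t *) now
    }

One caveat of the swap: alloca storage is released when the enclosing function returns, same as the VLA, but unlike a VLA it performs no bounds or type checking at all, so the explicit size variable is the only record of the buffer's length.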