Skip to content

Commit 148b286

Browse files
committed
examples : update to ggml-opt and ggml-backend changes (#0)
ggml-ci
1 parent e219fb3 commit 148b286

File tree

6 files changed

+20
-20
lines changed

6 files changed

+20
-20
lines changed

examples/gpt-2/main-sched.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -947,7 +947,7 @@ int main(int argc, char ** argv) {
947947
ggml_backend_sched_t sched;
948948
{
949949
// initialize the scheduler
950-
sched = ggml_backend_sched_new(model.backends.data(), NULL, model.backends.size(), GPT2_MAX_NODES, false);
950+
sched = ggml_backend_sched_new(model.backends.data(), NULL, model.backends.size(), GPT2_MAX_NODES, false, true);
951951

952952
// create the worst case graph for memory usage estimation
953953
int n_tokens = std::min(model.hparams.n_ctx, params.n_batch);

examples/mnist/mnist-common.cpp

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -310,10 +310,10 @@ mnist_model mnist_model_init_random(const std::string & arch, const std::string
310310

311311
void mnist_model_build(mnist_model & model) {
312312
if (model.arch == "mnist-fc") {
313-
ggml_set_param(model.ctx_compute, model.fc1_weight);
314-
ggml_set_param(model.ctx_compute, model.fc1_bias);
315-
ggml_set_param(model.ctx_compute, model.fc2_weight);
316-
ggml_set_param(model.ctx_compute, model.fc2_bias);
313+
ggml_set_param(model.fc1_weight);
314+
ggml_set_param(model.fc1_bias);
315+
ggml_set_param(model.fc2_weight);
316+
ggml_set_param(model.fc2_bias);
317317

318318
ggml_tensor * fc1 = ggml_relu(model.ctx_compute, ggml_add(model.ctx_compute,
319319
ggml_mul_mat(model.ctx_compute, model.fc1_weight, model.images),
@@ -322,12 +322,12 @@ void mnist_model_build(mnist_model & model) {
322322
ggml_mul_mat(model.ctx_compute, model.fc2_weight, fc1),
323323
model.fc2_bias);
324324
} else if (model.arch == "mnist-cnn") {
325-
ggml_set_param(model.ctx_compute, model.conv1_kernel);
326-
ggml_set_param(model.ctx_compute, model.conv1_bias);
327-
ggml_set_param(model.ctx_compute, model.conv2_kernel);
328-
ggml_set_param(model.ctx_compute, model.conv2_bias);
329-
ggml_set_param(model.ctx_compute, model.dense_weight);
330-
ggml_set_param(model.ctx_compute, model.dense_bias);
325+
ggml_set_param(model.conv1_kernel);
326+
ggml_set_param(model.conv1_bias);
327+
ggml_set_param(model.conv2_kernel);
328+
ggml_set_param(model.conv2_bias);
329+
ggml_set_param(model.dense_weight);
330+
ggml_set_param(model.dense_bias);
331331

332332
struct ggml_tensor * images_2D = ggml_reshape_4d(model.ctx_compute, model.images, MNIST_HW, MNIST_HW, 1, model.images->ne[1]);
333333

@@ -384,7 +384,7 @@ void mnist_model_build(mnist_model & model) {
384384
ggml_opt_result_t mnist_model_eval(mnist_model & model, ggml_opt_dataset_t dataset) {
385385
ggml_opt_result_t result = ggml_opt_result_init();
386386

387-
ggml_opt_params params = ggml_opt_default_params(model.backend_sched, model.ctx_compute, model.images, model.logits, GGML_OPT_LOSS_TYPE_CROSS_ENTROPY);
387+
ggml_opt_params params = ggml_opt_default_params(model.backend_sched, GGML_OPT_LOSS_TYPE_CROSS_ENTROPY);
388388
params.build_type = GGML_OPT_BUILD_TYPE_FORWARD;
389389
ggml_opt_context_t opt_ctx = ggml_opt_init(params);
390390

@@ -453,7 +453,7 @@ extern "C" {
453453
int wasm_eval(uint8_t * digitPtr) {
454454
std::vector<float> digit(digitPtr, digitPtr + MNIST_NINPUT);
455455

456-
ggml_opt_dataset_t dataset = ggml_opt_dataset_init(MNIST_NINPUT, MNIST_NCLASSES, 1, 1);
456+
ggml_opt_dataset_t dataset = ggml_opt_dataset_init(GGML_TYPE_F32, GGML_TYPE_F32, MNIST_NINPUT, MNIST_NCLASSES, 1, 1);
457457
struct ggml_tensor * data = ggml_opt_dataset_data(dataset);
458458
memcpy(data->data, digitPtr, ggml_nbytes(data));
459459
ggml_set_zero(ggml_opt_dataset_labels(dataset)); // The labels are not needed.

examples/mnist/mnist-common.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -108,7 +108,7 @@ struct mnist_model {
108108
}
109109

110110
// The order of the backends passed to ggml_backend_sched_new determines which backend is given priority.
111-
backend_sched = ggml_backend_sched_new(backends.data(), nullptr, backends.size(), GGML_DEFAULT_GRAPH_SIZE, false);
111+
backend_sched = ggml_backend_sched_new(backends.data(), nullptr, backends.size(), GGML_DEFAULT_GRAPH_SIZE, false, true);
112112
fprintf(stderr, "%s: using %s (%s) as primary backend\n",
113113
__func__, ggml_backend_name(backends[0]), ggml_backend_dev_description(devices[0]));
114114
if (backends.size() >= 2) {

examples/mnist/mnist-eval.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ int main(int argc, char ** argv) {
2525
exit(1);
2626
}
2727

28-
ggml_opt_dataset_t dataset = ggml_opt_dataset_init(MNIST_NINPUT, MNIST_NCLASSES, MNIST_NTEST, MNIST_NBATCH_PHYSICAL);
28+
ggml_opt_dataset_t dataset = ggml_opt_dataset_init(GGML_TYPE_F32, GGML_TYPE_F32, MNIST_NINPUT, MNIST_NCLASSES, MNIST_NTEST, MNIST_NBATCH_PHYSICAL);
2929

3030
if (!mnist_image_load(argv[2], dataset)) {
3131
return 1;

examples/mnist/mnist-train.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ int main(int argc, char ** argv) {
2020
// The MNIST model is so small that the overhead from data shuffling is non-negligible, especially with CUDA.
2121
// With a shard size of 10 this overhead is greatly reduced at the cost of less shuffling (does not seem to have a significant impact).
2222
// A batch of 500 images then consists of 50 random shards of size 10 instead of 500 random shards of size 1.
23-
ggml_opt_dataset_t dataset = ggml_opt_dataset_init(MNIST_NINPUT, MNIST_NCLASSES, MNIST_NTRAIN, /*ndata_shard =*/ 10);
23+
ggml_opt_dataset_t dataset = ggml_opt_dataset_init(GGML_TYPE_F32, GGML_TYPE_F32, MNIST_NINPUT, MNIST_NCLASSES, MNIST_NTRAIN, /*ndata_shard =*/ 10);
2424

2525
if (!mnist_image_load(argv[3], dataset)) {
2626
return 1;

tests/test-mul-mat0.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -101,8 +101,8 @@ bool check_gradient(
101101

102102
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, GGML_DEFAULT_GRAPH_SIZE, true);
103103
ggml_build_forward_expand(gf, f);
104-
struct ggml_cgraph * gb = ggml_graph_dup(ctx0, gf);
105-
ggml_build_backward_expand(ctx0, ctx0, gb, false);
104+
struct ggml_cgraph * gb = ggml_graph_dup(ctx0, gf, false);
105+
ggml_build_backward_expand(ctx0, gb, false);
106106

107107
ggml_graph_compute_with_ctx(ctx0, gf, n_threads);
108108
ggml_graph_reset(gb);
@@ -266,7 +266,7 @@ int main(int argc, const char ** argv) {
266266
ne[1] = rand()%4 + 1;
267267
x[1] = get_random_tensor(ctx0, ndims, ne, -1.0f, 1.0f);
268268

269-
ggml_set_param(ctx0, x[0]);
269+
ggml_set_param(x[0]);
270270

271271
struct ggml_tensor * m = ggml_mul_mat(ctx0, x[1], x[0]);
272272
struct ggml_tensor * f = ggml_sum(ctx0, m);
@@ -303,7 +303,7 @@ int main(int argc, const char ** argv) {
303303
ne[0] = rand()%4 + 1;
304304
x[1] = ggml_cont(ctx0, ggml_transpose(ctx0, get_random_tensor(ctx0, ndims, ne, -1.0f, 1.0f)));
305305

306-
ggml_set_param(ctx0, x[0]);
306+
ggml_set_param(x[0]);
307307

308308
struct ggml_tensor * m = ggml_mul_mat(ctx0, x[1], x[0]);
309309
struct ggml_tensor * f = ggml_sum(ctx0, m);

0 commit comments

Comments (0)