@@ -365,6 +365,108 @@ static void common_params_print_usage(common_params_context & ctx_arg) {
     print_options(specific_options);
 }
 
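+// Print a bash completion script for the llama.cpp tools to stdout: options are
+// bucketed into common, sampling, and example-specific groups, then emitted as
+// one flat word list for compgen.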
+static void common_params_print_completion(common_params_context & ctx_arg) {
+    std::vector<common_arg *> common_options;
+    std::vector<common_arg *> sparam_options;
+    std::vector<common_arg *> specific_options;
+
+    for (auto & opt : ctx_arg.options) {
+        if (opt.is_sparam) {
+            sparam_options.push_back(&opt);
+        } else if (opt.in_example(ctx_arg.ex)) {
+            specific_options.push_back(&opt);
+        } else {
+            common_options.push_back(&opt);
+        }
+    }
+
+    printf("_llama_completions() {\n");
+    printf("    local cur prev opts\n");
+    printf("    COMPREPLY=()\n");
+    printf("    cur=\"${COMP_WORDS[COMP_CWORD]}\"\n");
+    printf("    prev=\"${COMP_WORDS[COMP_CWORD-1]}\"\n\n");
+
+    printf("    opts=\"");
+    auto print_options = [](const std::vector<common_arg *> & options) {
+        for (const common_arg * opt : options) {
+            for (const char * arg : opt->args) {
+                printf("%s ", arg);
+            }
+        }
+    };
+
+    print_options(common_options);
+    print_options(sparam_options);
+    print_options(specific_options);
+    printf("\"\n\n");
+    printf("    case \"$prev\" in\n");
+    printf("        --model)\n");
+    printf("            COMPREPLY=( $(compgen -f -X '!*.gguf' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
+    printf("            return 0\n");
+    printf("            ;;\n");
+    printf("        --grammar-file)\n");
+    printf("            COMPREPLY=( $(compgen -f -X '!*.gbnf' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
+    printf("            return 0\n");
+    printf("            ;;\n");
+    printf("        *)\n");
+    printf("            COMPREPLY=( $(compgen -W \"${opts}\" -- \"$cur\") )\n");
+    printf("            return 0\n");
+    printf("            ;;\n");
+    printf("    esac\n");
+    printf("}\n\n");
+    std::set<std::string> executables = {
+        "llama-batched",
+        "llama-batched-bench",
+        "llama-bench",
+        "llama-cli",
+        "llama-convert-llama2c-to-ggml",
+        "llama-cvector-generator",
+        "llama-embedding",
+        "llama-eval-callback",
+        "llama-export-lora",
+        "llama-gbnf-validator",
+        "llama-gen-docs",
+        "llama-gguf",
+        "llama-gguf-hash",
+        "llama-gguf-split",
+        "llama-gritlm",
+        "llama-imatrix",
+        "llama-infill",
+        "llama-llava-cli",
+        "llama-llava-clip-quantize-cli",
+        "llama-lookahead",
+        "llama-lookup",
+        "llama-lookup-create",
+        "llama-lookup-merge",
+        "llama-lookup-stats",
+        "llama-minicpmv-cli",
+        "llama-parallel",
+        "llama-passkey",
+        "llama-perplexity",
+        "llama-q8dot",
+        "llama-quantize",
+        "llama-quantize-stats",
+        "llama-qwen2vl-cli",
+        "llama-retrieval",
+        "llama-run",
+        "llama-save-load-state",
+        "llama-server",
+        "llama-simple",
+        "llama-simple-chat",
+        "llama-speculative",
+        "llama-speculative-simple",
+        "llama-tokenize",
+        "llama-tts",
+        "llama-vdot"
+    };
+
+    for (const auto & exe : executables) {
+        printf("complete -F _llama_completions %s\n", exe.c_str());
+    }
+}
+
 static std::vector<ggml_backend_dev_t> parse_device_list(const std::string & value) {
     std::vector<ggml_backend_dev_t> devices;
     auto dev_names = string_split<std::string>(value, ',');
@@ -426,6 +528,10 @@ bool common_params_parse(int argc, char ** argv, common_params & params, llama_e
             }
             exit(0);
         }
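+        // mirror the usage path above: print the completion script and exit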
+        if (ctx_arg.params.completion) {
+            common_params_print_completion(ctx_arg);
+            exit(0);
+        }
     } catch (const std::invalid_argument & ex) {
         fprintf(stderr, "%s\n", ex.what());
         ctx_arg.params = params_org;
@@ -494,6 +600,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             exit(0);
         }
     ));
+    add_opt(common_arg(
+        {"--completion-bash"},
+        "print source-able bash completion script for llama.cpp",
+        [](common_params & params) {
+            params.completion = true;
+        }
+    ));
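+    // Example use after building (assumed shell invocation, works with any of
+    // the registered binaries):  source <(llama-cli --completion-bash)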
     add_opt(common_arg(
         {"--verbose-prompt"},
         string_format("print a verbose prompt before generation (default: %s)", params.verbose_prompt ? "true" : "false"),