
 import PackageDescription

-var sources = [
-    "src/llama.cpp",
-    "src/llama-vocab.cpp",
-    "src/llama-grammar.cpp",
-    "src/llama-sampling.cpp",
-    "src/unicode.cpp",
-    "src/unicode-data.cpp",
-    "ggml/src/ggml.c",
-    "ggml/src/ggml-alloc.c",
-    "ggml/src/ggml-backend.cpp",
-    "ggml/src/ggml-backend-reg.cpp",
-    "ggml/src/ggml-cpu/ggml-cpu.c",
-    "ggml/src/ggml-cpu/ggml-cpu.cpp",
-    "ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp",
-    "ggml/src/ggml-cpu/ggml-cpu-hbm.cpp",
-    "ggml/src/ggml-cpu/ggml-cpu-quants.c",
-    "ggml/src/ggml-cpu/ggml-cpu-traits.cpp",
-    "ggml/src/ggml-threading.cpp",
-    "ggml/src/ggml-quants.c",
-]
-
-var resources: [Resource] = []
-var linkerSettings: [LinkerSetting] = []
-var cSettings: [CSetting] = [
-    .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
-    .unsafeFlags(["-fno-objc-arc"]),
-    .headerSearchPath("ggml/src"),
-    .headerSearchPath("ggml/src/ggml-cpu"),
-    // NOTE: NEW_LAPACK will require iOS 16.4+
-    // We should consider adding this in the future when we drop support for iOS 14
-    // (ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
-    // .define("ACCELERATE_NEW_LAPACK"),
-    // .define("ACCELERATE_LAPACK_ILP64")
-    .define("GGML_USE_CPU"),
-]
-
-
-#if canImport(Darwin)
-sources.append("ggml/src/ggml-common.h")
-sources.append("ggml/src/ggml-metal/ggml-metal.m")
-resources.append(.process("ggml/src/ggml-metal/ggml-metal.metal"))
-linkerSettings.append(.linkedFramework("Accelerate"))
-cSettings.append(
-    contentsOf: [
-        .define("GGML_USE_ACCELERATE"),
-        .define("GGML_USE_METAL"),
-    ]
-)
-#endif
-
-#if os(Linux)
-    cSettings.append(.define("_GNU_SOURCE"))
-#endif
-
 let package = Package(
     name: "llama",
     platforms: [
@@ -68,26 +14,6 @@ let package = Package(
         .library(name: "llama", targets: ["llama"]),
     ],
     targets: [
-        .target(
-            name: "llama",
-            path: ".",
-            exclude: [
-                "build",
-                "cmake",
-                "examples",
-                "scripts",
-                "models",
-                "tests",
-                "CMakeLists.txt",
-                "Makefile",
-                "ggml/src/ggml-metal-embed.metal"
-            ],
-            sources: sources,
-            resources: resources,
-            publicHeadersPath: "spm-headers",
-            cSettings: cSettings,
-            linkerSettings: linkerSettings
-        )
-    ],
-    cxxLanguageStandard: .cxx17
+        .systemLibrary(name: "llama", pkgConfig: "llama"),
+    ]
 )
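
Note: with this change SwiftPM no longer compiles llama.cpp from source; the `llama` product is now backed by a system-library target resolved through `pkg-config llama`, so llama.cpp must be built and installed separately (e.g. via CMake, assuming its install step provides a llama.pc file) before a Swift package can link against it. A client manifest might then look like the sketch below; the repository URL, branch, and client target name are illustrative placeholders, not part of this commit.

// swift-tools-version:5.5
// Package.swift of a hypothetical client package (sketch only; URL, branch, and client names are assumptions).
import PackageDescription

let package = Package(
    name: "llama-client",                     // placeholder client name
    dependencies: [
        // llama.cpp now vends the "llama" product via a .systemLibrary target (URL/branch assumed).
        .package(url: "https://github.com/ggml-org/llama.cpp.git", branch: "master"),
    ],
    targets: [
        .executableTarget(
            name: "llama-client",
            // The llama headers and library are located through pkg-config at build time.
            dependencies: [.product(name: "llama", package: "llama.cpp")]
        ),
    ]
)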