diff --git a/gallery/index.yaml b/gallery/index.yaml
index 77c5c107594f..1f52fec8791c 100644
--- a/gallery/index.yaml
+++ b/gallery/index.yaml
@@ -66,6 +66,24 @@
 - filename: Qwen2.5-14B_Uncencored-Q4_K_M.gguf
   sha256: 066b9341b67e0fd0956de3576a3b7988574a5b9a0028aef2b9c8edeadd6dbbd1
   uri: huggingface://bartowski/Qwen2.5-14B_Uncencored-GGUF/Qwen2.5-14B_Uncencored-Q4_K_M.gguf
+- !!merge <<: *qwen25
+  name: "qwen2.5-coder-7b-instruct"
+  urls:
+    - https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct
+    - https://huggingface.co/bartowski/Qwen2.5-Coder-7B-Instruct-GGUF
+  description: |
+    Qwen2.5-Coder is the latest series of code-specific Qwen large language models (formerly known as CodeQwen). For Qwen2.5-Coder, we release base and instruction-tuned language models at 1.5, 7, and 32 (coming soon) billion parameters. Qwen2.5-Coder brings the following improvements over CodeQwen1.5:
+
+    Significant improvements in code generation, code reasoning, and code fixing. Building on the strong Qwen2.5, we scale the training data up to 5.5 trillion tokens, including source code, text-code grounding, synthetic data, etc.
+    A more comprehensive foundation for real-world applications such as code agents, enhancing coding capabilities while maintaining strengths in mathematics and general competencies.
+    Long-context support of up to 128K tokens.
+  overrides:
+    parameters:
+      model: Qwen2.5-Coder-7B-Instruct-Q4_K_M.gguf
+  files:
+    - filename: Qwen2.5-Coder-7B-Instruct-Q4_K_M.gguf
+      sha256: 1664fccab734674a50763490a8c6931b70e3f2f8ec10031b54806d30e5f956b6
+      uri: huggingface://bartowski/Qwen2.5-Coder-7B-Instruct-GGUF/Qwen2.5-Coder-7B-Instruct-Q4_K_M.gguf
 ## SmolLM
 - &smollm
   url: "github:mudler/LocalAI/gallery/chatml.yaml@master"
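
As a minimal sketch (not part of this diff) of how the new gallery entry would be exercised: once the entry is merged and the model is installed from the gallery, it can be queried through LocalAI's OpenAI-compatible chat completions endpoint. The host, port, and prompt below are assumptions for illustration; only the model name comes from this entry.

```python
import requests

# Assumes a LocalAI instance running on localhost:8080 with the
# "qwen2.5-coder-7b-instruct" model already installed from the gallery.
response = requests.post(
    "http://localhost:8080/v1/chat/completions",
    json={
        "model": "qwen2.5-coder-7b-instruct",  # name defined in this gallery entry
        "messages": [
            {"role": "user", "content": "Write a Python function that reverses a string."}
        ],
        "temperature": 0.2,
    },
    timeout=300,
)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])
```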