Source: llama.cpp
Section: science
Priority: optional
Maintainer: Mathieu Baudier <mbaudier@argeo.org>
Standards-Version: 4.7.2
Vcs-Browser: https://git.djapps.eu/?p=pkg/ggml/sources/llama.cpp;a=summary
Vcs-Git: https://git.djapps.eu/pkg/ggml/sources/llama.cpp
Homepage: https://github.com/ggml-org/llama.cpp/
Build-Depends: dh-sequence-bash-completion,
               cmake,
               debhelper-compat (= 13),
               libcurl4-openssl-dev,
               libggml-dev,
               pkgconf,
Rules-Requires-Root: no

Package: libllama0
Section: libs
Architecture: any
Multi-Arch: same
Pre-Depends: ${misc:Pre-Depends}
Depends: libggml0,
         libggml-backend-cpu,
         ${misc:Depends},
         ${shlibs:Depends},
# Explicitly conflict with Debian official
Conflicts: llama.cpp
Description: Inference of large language models in pure C/C++ (shared library)
 llama.cpp leverages the ggml tensor library in order to run
 large language models (LLMs) provided in the GGUF file format.

Package: libmtmd0
Section: libs
Architecture: any
Multi-Arch: same
Pre-Depends: ${misc:Pre-Depends}
Depends: libllama0,
         ${misc:Depends},
         ${shlibs:Depends},
# Explicitly conflict with Debian official
Conflicts: llama.cpp
Description: Inference of large language models in pure C/C++ (multimodal library)
 mtmd provides multimodal inference.

# We only distribute a few useful tools, with stable CLI options
Package: llama.cpp-tools
Architecture: any
Depends: libllama0 (= ${binary:Version}),
         curl,
         ${misc:Depends},
         ${shlibs:Depends},
Description: Inference of large language models in pure C/C++ (tools)
 llama-cli: versatile tool wrapping most features provided by libllama.
 It typically allows one to run one-shot prompts or to "chat"
 with a large language model.
 .
 llama-quantize: utility to "quantize" a large language model
 GGUF file. Quantizing is the process of reducing the precision of
 the underlying neural network at a minimal cost to its accuracy.
 .
 llama-bench: benchmarking of large language models or
 ggml backends.

Package: llama.cpp-tools-multimodal
Architecture: any
Depends: libmtmd0 (= ${binary:Version}),
         curl,
         ${misc:Depends},
         ${shlibs:Depends},
Description: Inference of large language models in pure C/C++ (multimodal tools)
 llama-mtmd-cli: multimodal support.

Package: libllama-dev
Section: libdevel
Architecture: any
Multi-Arch: same
Depends: libllama0 (= ${binary:Version}),
         libggml-dev,
         ${misc:Depends},
Description: Inference of large language models in pure C/C++ (development files)
 Development files required for building software based on the
 stable and documented llama.cpp API.

Package: libmtmd-dev
Section: libdevel
Architecture: any
Multi-Arch: same
Depends: libmtmd0 (= ${binary:Version}),
         libllama-dev (= ${binary:Version}),
         ${misc:Depends},
Description: Inference of large language models in pure C/C++ (multimodal development files)
 Development files required for building software based on the
 multimodal llama.cpp API.
