#
# spec file for package llama.cpp
#
# Copyright (c) 2024 SUSE LLC
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.

# Please submit bugfixes or comments via https://bugs.opensuse.org/
#


Name:           llama.cpp
Version:        0
Release:        0
Summary:        LLM inference in C/C++
License:        MIT
URL:            https://github.com/ggerganov/llama.cpp
Source:         _service
Patch0:         0001-dl-load-path.patch
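# 0001-dl-load-path.patch presumably adjusts the dlopen() search path for the
# dynamically loaded GGML backends enabled via GGML_BACKEND_DL below.
# Backend build dependencies: Vulkan (VulkanLoader, glslang, shaderc),
# OpenCL (OpenCL headers, clBLAS) and OpenBLAS for the BLAS backend.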
BuildRequires:  cmake-full
BuildRequires:  curl-devel
BuildRequires:  gcc-c++
BuildRequires:  libopenblas_openmp-devel
BuildRequires:  openssl-devel
BuildRequires:  python3
BuildRequires:  shaderc
BuildRequires:  cmake(VulkanLoader)
BuildRequires:  cmake(clBLAS)
BuildRequires:  cmake(glslang)
BuildRequires:  pkgconfig(OpenCL)
BuildRequires:  pkgconfig(shaderc)

%description
Inference of Meta's LLaMA model (and others) in pure C/C++

%package        devel
License:        MIT
Summary:        Development files for llama.cpp
Requires:       libllama = %{version}
%description    devel
Development files (headers, CMake and pkg-config files) for llama.cpp, an LLM
inference library in C/C++.

%package -n     ggml-devel
License:        MIT
Summary:        Development files for the GGML tensor library
Requires:       ggml = %{version}
Requires:       libggml = %{version}
%description -n ggml-devel
Development files (headers, CMake and pkg-config files) for GGML, the tensor
library for machine learning used by llama.cpp.

%package -n     ggml
License:        MIT
Summary:        Tensor library for machine learning
%description -n ggml
GGML, the tensor library for machine learning used by llama.cpp.

%package -n     libllama
License:        MIT
Summary:        Shared library for llama.cpp
%description -n libllama
Shared library implementing llama.cpp's LLM inference in C/C++.

%package -n     libmtmd
License:        MIT
Summary:        Multimodal support library for llama.cpp
%description -n libmtmd
Shared library implementing multimodal (mtmd) model support for llama.cpp.

%package -n     libggml
License:        MIT
Summary:        Shared libraries for the GGML tensor library
%description -n libggml
Shared libraries and dynamically loadable backends for GGML, the tensor
library for machine learning used by llama.cpp.

%package        doc
License:        MIT
Summary:        Documentation for llama.cpp
%description    doc
Documentation for llama.cpp, together with the llama-gen-docs tool and the
convert_hf_to_gguf.py model conversion script.

%package        mtmd
License:        MIT
Summary:        Multimodal tools for llama.cpp
%description    mtmd
Multimodal (mtmd) command line tools for llama.cpp, such as llama-mtmd-cli.

%package        batched
License:        MIT
Summary:        LLM inference in C/C++
%description    batched
The llama-batched example, demonstrating batched decoding with llama.cpp.

%package        batched-bench
License:        MIT
Summary:        LLM inference in C/C++
%description    batched-bench
The llama-batched-bench tool for benchmarking batched decoding throughput.

%package        bench
License:        MIT
Summary:        LLM inference in C/C++
%description    bench
The llama-bench tool for benchmarking model performance with llama.cpp.

%package        diffusion-cli
License:        MIT
Summary:        LLM inference in C/C++
%description    diffusion-cli
The llama-diffusion-cli tool for running diffusion-based language models.

%package        embedding
License:        MIT
Summary:        LLM inference in C/C++
%description    embedding
The llama-embedding tool for generating text embeddings.

%package        eval-callback
License:        MIT
Summary:        LLM inference in C/C++
%description    eval-callback
The llama-eval-callback example, demonstrating a callback that inspects
intermediate tensor values during evaluation.

%package        finetune
License:        MIT
Summary:        LLM inference in C/C++
%description    finetune
The llama-finetune tool for fine-tuning GGUF models.

%package        gguf
License:        MIT
Summary:        LLM inference in C/C++
%description    gguf
The llama-gguf example for reading and writing GGUF files.

%package        gguf-hash
License:        MIT
Summary:        LLM inference in C/C++
%description    gguf-hash
The llama-gguf-hash tool for computing checksums of GGUF model files.

%package        gguf-split
License:        MIT
Summary:        LLM inference in C/C++
%description    gguf-split
The llama-gguf-split tool for splitting and merging GGUF model files.

%package        gritlm
License:        MIT
Summary:        LLM inference in C/C++
%description    gritlm
The llama-gritlm example, demonstrating GritLM models that handle both text
generation and embeddings.

%package        imatrix
License:        MIT
Summary:        LLM inference in C/C++
%description    imatrix
The llama-imatrix tool for computing importance matrices used to improve
quantization quality.

%package        lookahead
License:        MIT
Summary:        LLM inference in C/C++
%description    lookahead
The llama-lookahead example, demonstrating lookahead decoding.

%package        lookup
License:        MIT
Summary:        LLM inference in C/C++
%description    lookup
The llama-lookup example, demonstrating prompt-lookup decoding.

%package        lookup-create
License:        MIT
Summary:        LLM inference in C/C++
%description    lookup-create
The llama-lookup-create tool for creating n-gram caches used by prompt-lookup
decoding.

%package        lookup-merge
License:        MIT
Summary:        LLM inference in C/C++
%description    lookup-merge
The llama-lookup-merge tool for merging n-gram caches used by prompt-lookup
decoding.

%package        lookup-stats
License:        MIT
Summary:        LLM inference in C/C++
%description    lookup-stats
The llama-lookup-stats tool for reporting statistics about n-gram caches used
by prompt-lookup decoding.

%package        parallel
License:        MIT
Summary:        LLM inference in C/C++
%description    parallel
The llama-parallel example, demonstrating parallel decoding of multiple
sequences.

%package        passkey
License:        MIT
Summary:        LLM inference in C/C++
%description    passkey
The llama-passkey example, a long-context passkey retrieval test.

%package        perplexity
License:        MIT
Summary:        LLM inference in C/C++
%description    perplexity
The llama-perplexity tool for measuring model perplexity over a text corpus.

%package        quantize
License:        MIT
Summary:        LLM inference in C/C++
%description    quantize
The llama-quantize tool for quantizing GGUF models.

%package        retrieval
License:        MIT
Summary:        LLM inference in C/C++
%description    retrieval
The llama-retrieval example, demonstrating embedding-based document retrieval.

%package        run
License:        MIT
Summary:        LLM inference in C/C++
%description    run
The llama-run tool for quickly running models from the command line.

%package        save-load-state
License:        MIT
Summary:        LLM inference in C/C++
%description    save-load-state
The llama-save-load-state example, demonstrating how to save and restore
inference state.

%package        simple
License:        MIT
Summary:        LLM inference in C/C++
%description    simple
The llama-simple example, a minimal demonstration of the llama.cpp API.

%package        simple-chat
License:        MIT
Summary:        LLM inference in C/C++
%description    simple-chat
The llama-simple-chat example, a minimal chat program built on the llama.cpp
API.

%package        speculative
License:        MIT
Summary:        LLM inference in C/C++
%description    speculative
The llama-speculative example, demonstrating speculative decoding with a draft
model.

%package        speculative-simple
License:        MIT
Summary:        LLM inference in C/C++
%description    speculative-simple
The llama-speculative-simple example, a minimal demonstration of speculative
decoding.

%package        tokenize
License:        MIT
Summary:        LLM inference in C/C++
%description    tokenize
The llama-tokenize tool for tokenizing text with a model's tokenizer.

%package        tts
License:        MIT
Summary:        LLM inference in C/C++
%description    tts
The llama-tts tool for text-to-speech generation.


%prep
%setup -q -n %_sourcedir/%name-%version -T -D
%__mkdir -p %_builddir/%_sourcedir
%__ln -rs %_sourcedir/%name-%version %_builddir/%_sourcedir
%patch 0
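# Force llama.pc to use the arch-specific libdir (the third line of the
# upstream template is presumably its libdir= entry).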
sed -i '3 c libdir=%_libdir' cmake/llama.pc.in
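# ggml only configures and installs its pkg-config file in standalone builds;
# provide a template here and flip the GGML_STANDALONE guard below so it is
# shipped from this build as well.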
echo 'prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=${prefix}
includedir=${prefix}/include
libdir=%{_libdir}

Name: ggml
Description: The GGML Tensor Library for Machine Learning
Version: %{version}
Cflags: -I${includedir}
Libs: -L${libdir} -lggml -lggml-base
' > ggml/ggml.pc.in
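# Install pkg-config files into %_libdir/pkgconfig and make the shared
# libraries land in the arch libdir instead of the binary directory.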
%__sed -i -e 's|lib/pkgconfig|%{_libdir}/pkgconfig|g' \
-e 's|set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)|set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/%_lib)|g' \
CMakeLists.txt
%__sed -i -e 's|LIBRARY_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}|LIBRARY_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}|g' \
-e 's|LIBRARY DESTINATION ${CMAKE_INSTALL_BINDIR}|LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}|g' \
ggml/src/CMakeLists.txt
%__sed -i -e 's|share/pkgconfig|%{_libdir}/pkgconfig|g' \
-e 's|if (GGML_STANDALONE)|if (LLAMA_STANDALONE)|g' \
-e 's|git git.exe REQUIRED|true|g' ggml/CMakeLists.txt

%build
%ifarch riscv64
%define _lto_cflags %{nil}
%endif
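# Build the backends as dynamically loadable modules (GGML_BACKEND_DL) with
# all CPU variants, so a single binary package can pick the best
# implementation at runtime: CPU, BLAS/OpenBLAS, Vulkan, OpenCL and RPC.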
%cmake -DCMAKE_SKIP_RPATH=ON \
%ifarch riscv64
-DGGML_LTO=OFF \
%else
-DGGML_LTO=ON \
%endif
-DGGML_NATIVE=OFF \
-DGGML_BUILD_NUMBER=%{version} \
-DLLAMA_CURL=1 \
-DLLAMA_BUILD_TESTS=OFF \
-DGGML_OPENMP=1 \
-DGGML_BACKEND_DL=ON \
-DGGML_CPU_ALL_VARIANTS=ON \
-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DLLAMA_CLBLAST=ON  \
-DGGML_MAX_NAME=128 \
-DGGML_VULKAN=1 \
-DLLAMA_SERVER_VERBOSE=ON -DLLAMA_BUILD_SERVER=ON -DLLAMA_SERVER_SSL=ON \
-DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=OFF \
-DGGML_RPC=ON  -DGGML_WEBGPU=OFF
%cmake_build

%install
%cmake_install
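# ggml-opencl.h is apparently not installed by the upstream install target;
# copy it into the include directory by hand.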
cp -p ggml/include/ggml-opencl.h %{buildroot}%{_includedir}/ggml-opencl.h

%files
%license LICENSE

%_bindir/llama-cli
%_bindir/llama-server

%files doc
%doc  README.md SECURITY.md docs/*
%_bindir/llama-gen-docs
%_bindir/convert_hf_to_gguf.py

%files mtmd
%_bindir/llama-mtmd-*

%files -n libmtmd
%_libdir/libmtmd*.so

%files -n libllama
%_libdir/libllama.so

%files -n libggml
%_libdir/libggml.so
%_libdir/libggml-*.so

%files -n ggml
%license LICENSE

%files -n ggml-devel
%{_includedir}/ggml*
%{_includedir}/gguf*
%dir %_libdir/cmake
%_libdir/cmake/ggml/
%{_libdir}/pkgconfig/ggml.pc

%files devel
%{_includedir}/llama*
%{_includedir}/mtmd*.h
%dir %_libdir/cmake
%_libdir/cmake/llama/
%{_libdir}/pkgconfig/llama.pc

%files batched
%_bindir/llama-batched
%files batched-bench
%_bindir/llama-batched-bench
%files bench
%_bindir/llama-bench
%files diffusion-cli
%_bindir/llama-diffusion-cli
%files embedding
%_bindir/llama-embedding
%files eval-callback
%_bindir/llama-eval-callback
%files finetune
%_bindir/llama-finetune
%files gguf
%_bindir/llama-gguf
%files gguf-hash
%_bindir/llama-gguf-hash
%files gguf-split
%_bindir/llama-gguf-split
%files gritlm
%_bindir/llama-gritlm
%files imatrix
%_bindir/llama-imatrix
%files lookahead
%_bindir/llama-lookahead
%files lookup
%_bindir/llama-lookup
%files lookup-create
%_bindir/llama-lookup-create
%files lookup-merge
%_bindir/llama-lookup-merge
%files lookup-stats
%_bindir/llama-lookup-stats
%files parallel
%_bindir/llama-parallel
%files passkey
%_bindir/llama-passkey
%files perplexity
%_bindir/llama-perplexity
%files quantize
%_bindir/llama-quantize
%files retrieval
%_bindir/llama-retrieval
%files run
%_bindir/llama-run
%files save-load-state
%_bindir/llama-save-load-state
%files simple
%_bindir/llama-simple
%files simple-chat
%_bindir/llama-simple-chat
%files speculative
%_bindir/llama-speculative
%files speculative-simple
%_bindir/llama-speculative-simple
%files tokenize
%_bindir/llama-tokenize
%files tts
%_bindir/llama-tts


%changelog