# File llamacpp-acap.spec of Package llamacpp-acap

#
# spec file for package llamacpp-acap
#
# Copyright (c) 2026 SUSE LLC and contributors
# Copyright (c) 2025 Eyad Issa <eyadlorenzo@gmail.com>
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.

# Please submit bugfixes or comments via https://bugs.opensuse.org/
#

# Fork of llama.cpp with activation capping (--acap) for Karma Electric.
# Based on: https://github.com/anicka-net/llama.cpp branch activation-capping
# Upstream: https://github.com/ggml-org/llama.cpp tag b8064

# Vulkan/OpenCL need <filesystem> (C++17 stdlib), not available on SLE 15
%if 0%{?suse_version} > 1599
%bcond_without gpu_backends
%else
%bcond_with gpu_backends
%endif

# Directory for dlopen()ed ggml backend plugins (cpu/vulkan/opencl); matches
# the GGML_BACKEND_DIR cmake option passed in the build section below
%global backend_dir %{_libdir}/ggml

# Shared-object versions and the numeric suffix used in subpackage names.
# NOTE(review): Version: is defined later in the preamble; this works because
# rpm leaves an undefined macro unexpanded inside a %%global body and resolves
# it lazily at first use — confirm this stays true if macros are reordered.
%global llama_sover        0.0.%{version}
%global llama_sover_suffix 0

%global mtmd_sover         0.0.%{version}
%global mtmd_sover_suffix  0

%global ggml_sover         0.9.5
%global ggml_sover_suffix  0

Name:           llamacpp-acap
# Numeric part of the upstream tag b8064
Version:        8064
Release:        0
Summary:        llama.cpp with activation capping (Karma Electric fork)
License:        MIT
URL:            https://github.com/anicka-net/llama.cpp
Source:         %{name}-%{version}.tar.gz
Source1:        %{name}-rpmlintrc
BuildRequires:  cmake >= 3.14
# Older products lack a C++17-capable default compiler; pull in GCC 12 there
%if 0%{?suse_version} > 1599
BuildRequires:  gcc-c++
%else
BuildRequires:  gcc12-c++
%endif
BuildRequires:  git
BuildRequires:  ninja
BuildRequires:  pkgconfig
BuildRequires:  pkgconfig(libcurl)
%if %{with gpu_backends}
# shaderc ships glslc, needed to compile the Vulkan compute shaders
BuildRequires:  shaderc
BuildRequires:  pkgconfig(OpenCL)
BuildRequires:  pkgconfig(vulkan)
%endif
# The CLI tools need the runtime libraries built from this same spec
Requires:       libllama-acap%{llama_sover_suffix}
Requires:       libggml-acap%{ggml_sover_suffix}
Requires:       libggml-acap-base%{ggml_sover_suffix}
Requires:       libmtmd-acap%{mtmd_sover_suffix}
# This fork installs the same binaries as stock llamacpp; never co-install
Conflicts:      llamacpp
# 32bit seems not to be supported anymore
ExcludeArch:    %{ix86} %{arm}

%description
Fork of llama.cpp with activation capping (--acap) support for
inference-time persona stabilization via hidden state clamping.

Based on upstream llama.cpp b8064 with two additional patches:
- Activation capping infrastructure (project-clamp-subtract on per-layer axis)
- One-sided clamping matching PyTorch behavior

Additional CLI flags: --acap, --acap-threshold, --acap-layer-range

See: https://github.com/anicka-net/karma-electric-project/blob/main/docs/activation-capping.md

# Each subpackage Conflicts: with its stock-llamacpp counterpart because both
# install identically named files into the same paths.
%package devel
Summary:        Development files for llama.cpp (acap fork)
Conflicts:      llamacpp-devel

%description devel
Development files for the activation capping fork of llama.cpp.

%package -n libllama-acap%{llama_sover_suffix}
Summary:        A C++ interface for running inference with large language models (acap fork)
Conflicts:      libllama%{llama_sover_suffix}

%description -n libllama-acap%{llama_sover_suffix}
The llama.cpp library provides a C++ interface for running inference
with large language models (LLMs). This is the Karma Electric fork
with activation capping support.

# Umbrella package: hard-requires base + cpu backend, only recommends the
# GPU backends so headless installs stay slim.
%package -n libggml-acap%{ggml_sover_suffix}
Summary:        A tensor library for C++ (acap fork)
Requires:       libggml-acap-base%{ggml_sover_suffix}
Requires:       libggml-acap-cpu
%if %{with gpu_backends}
Recommends:     libggml-acap-opencl
Recommends:     libggml-acap-vulkan
%endif
Conflicts:      libggml%{ggml_sover_suffix}

%description -n libggml-acap%{ggml_sover_suffix}
A tensor library for C++. Karma Electric fork with activation capping.

%package -n libggml-acap-base%{ggml_sover_suffix}
Summary:        A tensor library for C++ (base, acap fork)
Conflicts:      libggml-base%{ggml_sover_suffix}

%description -n libggml-acap-base%{ggml_sover_suffix}
A tensor library for C++ (base). Karma Electric fork.

# Backend plugin packages are unversioned: the plugins live in the private
# backend dir and are loaded via dlopen, not the linker.
%package -n libggml-acap-cpu
Summary:        A tensor library for C++ (CPU backend, acap fork)
Conflicts:      libggml-cpu

%description -n libggml-acap-cpu
CPU backend for ggml. Karma Electric fork.

%if %{with gpu_backends}
%package -n libggml-acap-vulkan
Summary:        A tensor library for C++ (Vulkan backend, acap fork)
Conflicts:      libggml-vulkan

%description -n libggml-acap-vulkan
Vulkan backend for ggml. Karma Electric fork.

%package -n libggml-acap-opencl
Summary:        A tensor library for C++ (OpenCL backend, acap fork)
Conflicts:      libggml-opencl

%description -n libggml-acap-opencl
OpenCL backend for ggml. Karma Electric fork.
%endif

%package -n ggml-acap-devel
Summary:        Development files for ggml (acap fork)
Conflicts:      ggml-devel

%description -n ggml-acap-devel
Development files for ggml. Karma Electric fork.

%package -n libmtmd-acap%{mtmd_sover_suffix}
Summary:        Library to run multimodal inference models (acap fork)
Conflicts:      libmtmd%{mtmd_sover_suffix}

%description -n libmtmd-acap%{mtmd_sover_suffix}
Multimodal inference library. Karma Electric fork.

# NOTE(review): no %%files section exists for libllava-acap, so rpmbuild will
# silently never generate this subpackage. Confirm whether upstream b8064
# still produces a libllava shared object and either add a %%files section
# for it or drop this subpackage declaration.
%package -n libllava-acap
Summary:        Library to run multimodal inference models (acap fork)
Conflicts:      libllava

%description -n libllava-acap
Library to handle multimodal inputs. Karma Electric fork.

# Refresh the dynamic linker cache for the versioned runtime libraries.
# Backend plugins in the private ggml dir are dlopen()ed (GGML_BACKEND_DL=ON
# below) and therefore need no ldconfig scriptlets.
%ldconfig_scriptlets -n libllama-acap%{llama_sover_suffix}
%ldconfig_scriptlets -n libggml-acap%{ggml_sover_suffix}
%ldconfig_scriptlets -n libggml-acap-base%{ggml_sover_suffix}
%ldconfig_scriptlets -n libmtmd-acap%{mtmd_sover_suffix}

%prep
# Tarball unpacks into llama.cpp-acap-b<version>; patches applied with -p1
%autosetup -p1 -n llama.cpp-acap-b%{version}

%build

# NOTE(review): LTO is disabled here but the reason is not recorded —
# presumably symbol-visibility or mixed-language issues in ggml. Confirm
# before re-enabling.
%define _lto_cflags %{nil}
%define __builder ninja

%if 0%{?suse_version} <= 1599
# Default compiler on these products lacks full C++17; use GCC 12 explicitly
# (matches the conditional gcc12-c++ BuildRequires above)
export CC=gcc-12
export CXX=g++-12
%endif

# GGML_NATIVE=OFF: no -march=native, binaries must run on every supported CPU
# GGML_BACKEND_DL=ON + GGML_BACKEND_DIR: backends are built as dlopen plugins
# installed into the private backend directory instead of being linked in
%cmake \
    -DCMAKE_SKIP_RPATH=ON \
    -DLLAMA_BUILD_TESTS=OFF \
    -DLLAMA_BUILD_EXAMPLES=OFF \
    -DLLAMA_BUILD_TOOLS=ON \
    -DLLAMA_CURL=ON \
    -DGGML_NATIVE=OFF \
    -DGGML_CPU=ON \
%if %{with gpu_backends}
    -DGGML_VULKAN=ON \
    -DGGML_OPENCL=ON \
    -DGGML_OPENCL_USE_ADRENO_KERNELS=OFF \
%else
    -DGGML_VULKAN=OFF \
    -DGGML_OPENCL=OFF \
%endif
    -DGGML_BACKEND_DL=ON \
    -DGGML_BACKEND_DIR="%{backend_dir}" \
    -DLLAMA_BUILD_NUMBER=%{version} \
    -DLLAMA_VERSION="0.0.%{version}" \
    %{nil}

%cmake_build

%install
%cmake_install

# dev scripts
# NOTE(review): the HF-to-GGUF converter is dropped deliberately — it needs a
# Python stack this package does not require; confirm it should stay removed
rm %{buildroot}%{_bindir}/convert_hf_to_gguf.py

%files
%doc README.md
%license LICENSE
# all CLI tools installed by LLAMA_BUILD_TOOLS (llama-cli, llama-server, ...)
%{_bindir}/llama-*

%files devel
%license LICENSE
%{_includedir}/llama*
%{_includedir}/mtmd*
%{_libdir}/cmake/llama
%{_libdir}/pkgconfig/llama.pc
# unversioned linker symlinks (for -l flags at compile time)
%{_libdir}/libllama.so
%{_libdir}/libmtmd.so

# Runtime library packages carry both the soname link and the fully
# versioned shared object.
%files -n libllama-acap%{llama_sover_suffix}
%license LICENSE
%{_libdir}/libllama.so.0
%{_libdir}/libllama.so.%{llama_sover}

%files -n libggml-acap%{ggml_sover_suffix}
%license LICENSE
%{_libdir}/libggml.so.0
%{_libdir}/libggml.so.%{ggml_sover}

%files -n libggml-acap-base%{ggml_sover_suffix}
%license LICENSE
%{_libdir}/libggml-base.so.0
%{_libdir}/libggml-base.so.%{ggml_sover}

# Each backend package co-owns the plugin directory so that removing the
# last backend also removes the (then empty) directory.
%files -n libggml-acap-cpu
%license LICENSE
%dir %{backend_dir}
%{backend_dir}/libggml-cpu.so

# GPU backend plugins only exist when gpu_backends is enabled (see bcond at
# the top of the file)
%if %{with gpu_backends}
%files -n libggml-acap-vulkan
%license LICENSE
%dir %{backend_dir}
%{backend_dir}/libggml-vulkan.so

%files -n libggml-acap-opencl
%license LICENSE
%dir %{backend_dir}
%{backend_dir}/libggml-opencl.so
%endif

%files -n ggml-acap-devel
%license LICENSE
%{_includedir}/ggml*.h
%{_includedir}/gguf.h
%{_libdir}/cmake/ggml
# unversioned linker symlinks
%{_libdir}/libggml.so
%{_libdir}/libggml-base.so

%files -n libmtmd-acap%{mtmd_sover_suffix}
%license LICENSE
%{_libdir}/libmtmd.so.0
%{_libdir}/libmtmd.so.%{mtmd_sover}

# Changelog is maintained in the accompanying .changes file (OBS convention);
# the stray web-page footer text that followed this section was removed —
# rpmbuild expects changelog entries to start with '*'.
%changelog