File ollama.spec of Package ollama
#
# spec file for package ollama
#
# Copyright (c) 2025 SUSE LLC and contributors
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.
# Please submit bugfixes or comments via https://bugs.opensuse.org/
#
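# This package is built through the OBS multibuild mechanism: the
# placeholder @BUILD_FLAVOR@ is substituted per flavor ("" for the
# plain CPU build, "nvidia" or "rocm" for the GPU builds) and selects
# the matching bcond settings below.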
%global flavor @BUILD_FLAVOR@%{nil}
%if "%{?flavor}" == "nvidia"
%{bcond_without nvidia}
%{bcond_with rocm}
%global _flavor -nvidia
%else
%if "%{?flavor}" == "rocm"
%{bcond_without rocm}
%{bcond_with nvidia}
%global _flavor -rocm
%else
%{bcond_with nvidia}
%{bcond_with rocm}
# end of rocm conditional
%endif
# end of nvidia conditional
%endif
%if 0%{?sle_version} && 0%{?sle_version} >= 150600
%global force_gcc_version 12
%endif
%if 0%{?suse_version} >= 1699
%global force_gcc_version 14
%endif
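# A specific GCC is pinned where the distribution default does not fit:
# nvcc only accepts a bounded range of host compiler versions (GCC 14 is
# presumably the newest one CUDA 12.9 supports), while the C++ sources
# need a newer compiler than the SLE/Leap 15 default provides.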
%define cuda_version_major 12
%define cuda_version_minor 9
%define cuda_version %{cuda_version_major}-%{cuda_version_minor}
%define pname ollama
Name: %{pname}%{?_flavor}
Version: 0.11.4
Release: 0
Summary: Tool for running AI models on-premises
License: MIT
URL: https://ollama.com
Source: https://github.com/ollama/ollama/archive/v%{version}/%{pname}-%{version}.tar.gz
Source1: vendor.tar.zstd
Source2: %{pname}.service
Source3: %{pname}-user.conf
Source4: sysconfig.%{pname}
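# Source1 holds the vendored Go modules: OBS builds run without network
# access, so "go build -mod=vendor" has to find every dependency in the
# tarball. It can be regenerated from an unpacked source tree with e.g.:
#   go mod vendor && tar --zstd -cf vendor.tar.zstd vendor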
BuildRequires: cmake >= 3.24
BuildRequires: git-core
BuildRequires: ninja
BuildRequires: sysuser-tools
BuildRequires: zstd
BuildRequires: golang(API) >= 1.24
%if %{with nvidia}
BuildRequires: cuda-cccl-%{cuda_version}
BuildRequires: cuda-crt-%{cuda_version}
BuildRequires: cuda-cudart-%{cuda_version}
BuildRequires: cuda-cudart-devel-%{cuda_version}
BuildRequires: cuda-driver-devel-%{cuda_version}
BuildRequires: cuda-nvcc-%{cuda_version}
BuildRequires: cuda-nvvm-%{cuda_version}
BuildRequires: cuda-toolkit-%{cuda_version}-config-common
BuildRequires: cuda-toolkit-%{cuda_version_major}-config-common
BuildRequires: cuda-toolkit-config-common
BuildRequires: libcublas-%{cuda_version}
BuildRequires: libcublas-devel-%{cuda_version}
Requires: cuda-crt-%{cuda_version}
Requires: cuda-cudart-%{cuda_version}
Requires: cuda-driver-devel-%{cuda_version}
Requires: cuda-nvvm-%{cuda_version}
Requires: cuda-toolkit-%{cuda_version}-config-common
Requires: cuda-toolkit-%{cuda_version_major}-config-common
Requires: cuda-toolkit-config-common
Requires: libcublas-%{cuda_version}
%endif
Requires(post): %fillup_prereq
# 32-bit architectures are no longer supported upstream
ExcludeArch: %{ix86} %{arm}
%sysusers_requires
%if 0%{?force_gcc_version}
BuildRequires: gcc%{?force_gcc_version}-c++
%else
BuildRequires: gcc-c++ >= 11.4.0
%endif
%description
Ollama is a tool for running AI models on one's own hardware.
It offers a command-line interface and a RESTful API.
New models can be created or existing ones modified in the
Ollama library using the Modelfile syntax.
Source model weights found on Hugging Face and similar sites
can be imported.
%prep
%autosetup -a1 -p1 -n ollama-%{version}
%build
%define __builder ninja
%sysusers_generate_pre %{SOURCE3} %{pname} %{pname}-user.conf
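# %%sysusers_generate_pre turns the sysusers.d snippet (Source3) into the
# scriptlet file %%{pname}.pre, which "%%pre -f" below injects so the
# ollama user and group exist before the package files are laid down.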
export GOFLAGS="-buildmode=pie -mod=vendor"
%if 0%{?force_gcc_version}
export CXX="g++-%{?force_gcc_version}"
export CC="gcc-%{?force_gcc_version}"
# pie doesn't work with gcc12 on leap
%else
:
%endif
export GOFLAGS="-mod=vendor -v"
%if %{with nvidia}
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/lib64
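# The build chroot has no real NVIDIA driver, so let the linker resolve
# libcuda from the stub libraries shipped with the CUDA toolkit.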
export LIBRARY_PATH=/usr/local/cuda/lib64/stubs
export PATH=/usr/local/cuda-%{cuda_version_major}.%{cuda_version_minor}/bin:$PATH
export LDFLAGS=-s
export GOFLAGS="${GOFLAGS} '-ldflags=-w -s \"-X=github.com/ollama/ollama/versio.Version=%{version}\" \"-X=github.com/ollama/ollama/server.mode=release\"'"
export CGO_ENABLED=1
# NOTE: NVIDIA compiler (nvcc) will look for the default g++ so we
# have to set it to g++12 explicitly
export CUDAHOSTCXX=/usr/bin/g++%{?force_gcc_version:-%{force_gcc_version}}
%endif
%cmake -UOLLAMA_INSTALL_DIR -DOLLAMA_INSTALL_DIR=%{_libdir}/ollama
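# The -U above drops any cached OLLAMA_INSTALL_DIR before -D sets it,
# so the GGML backends are installed under the packaged libdir path.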
%cmake_build
cd ..
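# Second build stage: %%cmake_build produced the native GGML/llama.cpp
# backend libraries; the plain "go build" below compiles the ollama
# binary itself from the vendored Go sources.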
go build -trimpath -o %{pname} .
%install
%cmake_install
install -D -m 0755 %{pname} %{buildroot}%{_bindir}/%{pname}
install -D -m 0644 %{SOURCE2} %{buildroot}%{_unitdir}/%{pname}.service
install -D -m 0644 %{SOURCE3} %{buildroot}%{_sysusersdir}/%{pname}-user.conf
install -D -m 0644 %{SOURCE4} %{buildroot}%{_fillupdir}/sysconfig.%{pname}
install -d %{buildroot}%{_localstatedir}/lib/%{pname}
mkdir -p "%{buildroot}%{_docdir}/%{pname}"
cp -Ra docs/* "%{buildroot}%{_docdir}/%{pname}"
%check
%if 0%{?force_gcc_version}
export CXX="g++-%{?force_gcc_version}"
export CC="gcc-%{?force_gcc_version}"
# PIE does not work with GCC 12 on Leap, so GOFLAGS stays without -buildmode=pie
export GOFLAGS="-mod=vendor"
%endif
go test -v ./...
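# The scriptlets below follow the usual SUSE pattern: %%service_add_pre
# and %%service_add_post handle systemd preset registration for the unit,
# %%fillup_only merges the sysconfig template into /etc/sysconfig/ollama
# on installation, and the %%service_del_* counterparts clean up on removal.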
%pre -f %{pname}.pre
%service_add_pre %{pname}.service
%post
%service_add_post %{pname}.service
%fillup_only -n %{pname}
%preun
%service_del_preun %{pname}.service
%postun
%service_del_postun %{pname}.service
%files
%doc README.md
%license LICENSE
%{_docdir}/%{pname}
%{_bindir}/%{pname}
%{_unitdir}/%{pname}.service
%{_sysusersdir}/%{pname}-user.conf
%{_libdir}/ollama
%{_fillupdir}/sysconfig.%{pname}
%attr(-, ollama, ollama) %{_localstatedir}/lib/%{pname}
%changelog