File gpt4all.spec of Package gpt4all
#
# spec file for package gpt4all
#
# Copyright (c) 2025 SUSE LLC
# Copyright (c) 2025 Boian Berberov
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.
# Please submit bugfixes or comments via https://bugs.opensuse.org/
#
# NOTE(review): git_branch is not referenced anywhere in this spec;
# presumably it is consumed by the tar_scm _service configuration —
# confirm before removing.
%global git_branch linux-build
Name: gpt4all
Version: 3.10.0
Release: 0%{?dist}
Summary: Run Local LLMs
License: MIT
URL: https://nomic.ai/gpt4all
Source0: %{name}-%{version}.tar.gz
# BUG: Cannot use upstream git archive because the repo contains submodules
# Have to use the tar_scm _service configuration
# See https://github.com/dear-github/dear-github/issues/214
# Local embedding model; copied into the chat sources in %%prep
Source1: https://gpt4all.io/models/gguf/nomic-embed-text-v1.5.f16.gguf
# NOTE For tests, 700MB+
# Source2: https://huggingface.co/bartowski/Llama-3.2-1B-Instruct-GGUF/resolve/main/Llama-3.2-1B-Instruct-Q4_0.gguf
# PATCH-FIX-OPENSUSE vk301.patch
# https://github.com/KhronosGroup/Vulkan-Samples/issues/1269
# https://github.com/nomic-ai/gpt4all/issues/3468
Patch1: vk301.patch
# PATCH-FIX-UPSTREAM fix-aarch64.patch
# https://github.com/nomic-ai/gpt4all/issues/3536
Patch2: fix-aarch64.patch
# cmake >= 3.25, gpt4all-chat/CMakeLists.txt
BuildRequires: cmake >= 3.25
BuildRequires: gcc-c++
# GUI
BuildRequires: cmake(Qt6Core) >= 6.8
BuildRequires: cmake(Qt6HttpServer) >= 6.8
BuildRequires: cmake(Qt6LinguistTools) >= 6.8
BuildRequires: cmake(Qt6Pdf) >= 6.8
BuildRequires: cmake(Qt6Quick) >= 6.8
BuildRequires: cmake(Qt6QuickDialogs2) >= 6.8
BuildRequires: cmake(Qt6Sql) >= 6.8
BuildRequires: cmake(Qt6Svg) >= 6.8
# For QXlsx (bundled; build artifacts are removed again in %%install)
%if 0%{?fedora} && 0%{?fedora} < 42
BuildRequires: qt6-qtbase-private-devel >= 6.8
%else
BuildRequires: cmake(Qt6Gui) >= 6.8
BuildRequires: cmake(Qt6GuiPrivate) >= 6.8
%endif
# For Kompute (system fmt + Vulkan headers, see %%build options)
BuildRequires: cmake(fmt) >= 11
BuildRequires: cmake(glslang)
# glslc is shipped in the shaderc package on openSUSE
%if 0%{?is_opensuse}
BuildRequires: shaderc
%else
BuildRequires: glslc
%endif
BuildRequires: cmake(VulkanHeaders)
%description
GPT4All is a desktop application for running large language models (LLMs)
locally on your own hardware. This package provides the Qt 6 based chat
interface together with the llama.cpp-based backend libraries (libllmodel
and the Kompute/Vulkan model loaders) used for inference.
%prep
%setup -q
# Pre-seed the local embedding model (Source1) into the chat sources so
# it is available at build/packaging time.
%__cp %{SOURCE1} %{_builddir}/%{name}-%{version}/gpt4all-chat/
%patch -P1 -p1
%patch -P2 -p1
cd %{_builddir}/%{name}-%{version}/gpt4all-backend
# sed patch: delete the 'extern long int strtol/ftell' declarations from
# the bundled Kompute xxd.c — presumably they conflict with the libc
# prototypes on this toolchain; see the linked issues.
# https://github.com/nomic-ai/gpt4all/issues/3468
# https://github.com/KomputeProject/kompute/issues/410
%__sed -i \
-e '/^extern long int strtol/d; /^extern long int ftell/d' \
deps/llama.cpp-mainline/ggml/src/kompute/external/bin/xxd.c
%build
# Build from the chat subproject, which pulls in gpt4all-backend.
cd %{_builddir}/%{name}-%{version}/gpt4all-chat
# Tests are disabled; QtPdf support is enabled.
# Backends: only the Kompute (Vulkan compute) llmodel backend is built;
# CUDA, ROCm and the native Vulkan backends are switched off.
# Kompute uses the system fmt and Vulkan headers instead of its bundled
# copies, and its Vulkan version check is disabled
# (https://github.com/nomic-ai/gpt4all/issues/3468).
# NOTE: no comment lines may be placed inside the continued invocation
# below — a '#' line would break the backslash continuation.
%cmake \
-DGPT4ALL_TEST=OFF \
-DGPT4ALL_USE_QTPDF=ON \
\
-DLLMODEL_CUDA=OFF \
-DLLMODEL_KOMPUTE=ON \
-DLLMODEL_ROCM=OFF \
-DLLMODEL_VULKAN=OFF \
\
-DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON \
-DKOMPUTE_OPT_USE_BUILT_IN_FMT=OFF \
-DKOMPUTE_OPT_USE_BUILT_IN_VULKAN_HEADER=OFF
%cmake_build
%install
cd %{_builddir}/%{name}-%{version}/gpt4all-chat
%cmake_install
# Remove all bundled libraries
# Drop the bundled QXlsx development artifacts (headers, CMake config,
# static archive) installed by the build; they must not be packaged.
%__rm -rf \
%{buildroot}%{_includedir}/QXlsxQt6 \
%{buildroot}%{_libdir}/cmake/QXlsxQt6 \
%{buildroot}%{_libdir}/libQXlsxQt6.a
# Refresh the runtime linker cache for the shared libraries in %%files.
%post -p /sbin/ldconfig
%postun -p /sbin/ldconfig
%files
%license LICENSE.txt
# %%doc add-docs-here
%dir %{_datadir}/%{name}
%{_bindir}/%{name}
%{_datadir}/%{name}/*
# CPU-only backend variants are not produced by this build configuration
# (only LLMODEL_KOMPUTE=ON in %%build); kept here for reference.
# %%{_libdir}/libllamamodel-mainline-cpu-avxonly.so
# %%{_libdir}/libllamamodel-mainline-cpu.so
%{_libdir}/libllamamodel-mainline-kompute-avxonly.so
%{_libdir}/libllamamodel-mainline-kompute.so
# NOTE(review): the unversioned libllmodel.so symlink in the main package
# will likely trigger rpmlint's devel-file-in-non-devel-package check —
# consider moving it to a -devel subpackage, or confirm it is needed for
# runtime dlopen.
%{_libdir}/libllmodel.so
%{_libdir}/libllmodel.so.0
%{_libdir}/libllmodel.so.0.5.0
%changelog
* Tue Jul 15 2025 Boian Berberov
- Added changelog for openSUSE Leap 16