{
  lib,
  python3Packages,
  fetchFromGitHub,
  appstream,
  meson,
  ninja,
  pkg-config,
  gobject-introspection,
  wrapGAppsHook4,
  desktop-file-utils,
  libadwaita,
  gtksourceview5,
  xdg-utils,
  ollama,
  vte-gtk4,
  libspelling,
  nix-update-script,
}:

python3Packages.buildPythonApplication rec {
  pname = "alpaca";
  version = "6.1.7";
  pyproject = false; # Built with meson

  src = fetchFromGitHub {
    owner = "Jeffser";
    repo = "Alpaca";
    tag = version;
    hash = "sha256-9UXaJpkz9F2D490bMKU/xv+rgfrxstm1DuDwpMmydI0=";
  };

  nativeBuildInputs = [
    appstream
    meson
    ninja
    pkg-config
    gobject-introspection
    wrapGAppsHook4
    desktop-file-utils
  ];

  buildInputs = [
    libadwaita
    gtksourceview5
    vte-gtk4
    libspelling
  ];

  dependencies = with python3Packages; [
    pygobject3
    requests
    pillow
    html2text
    youtube-transcript-api
    pydbus
    odfpy
    pyicu
    matplotlib
    openai
    markitdown
  ];

  optional-dependencies = {
    speech-to-text = [ python3Packages.openai-whisper ];
  };

  dontWrapGApps = true;

  makeWrapperArgs = [
    "\${gappsWrapperArgs[@]}"
    "--prefix PATH : ${
      lib.makeBinPath [
        xdg-utils
        ollama
      ]
    }"
    # Declared but not used in src/window.py, for later reference
    # https://github.com/flatpak/flatpak/issues/3229
    "--set FLATPAK_DEST ${placeholder "out"}"
  ];

  passthru.updateScript = nix-update-script { };

  meta = {
    description = "Ollama client made with GTK4 and Adwaita";
    longDescription = ''
      To run Alpaca with GPU acceleration enabled, simply override it:

      ```nix
      pkgs.alpaca.override { ollama = pkgs.ollama-cuda; }
      ```

      Or use `pkgs.ollama-rocm` for AMD GPUs.
    '';
    homepage = "https://jeffser.com/alpaca";
    license = lib.licenses.gpl3Plus;
    mainProgram = "alpaca";
    maintainers = with lib.maintainers; [
      aleksana
      Gliczy
    ];
    platforms = lib.platforms.unix;
  };
}
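# A minimal usage sketch, not part of the package above: how one might enable the
# optional speech-to-text support from an overlay. This assumes the standard
# `overridePythonAttrs` attribute exposed by buildPythonApplication; `final`/`prev`
# are the usual overlay arguments.
#
#   final: prev: {
#     alpaca = prev.alpaca.overridePythonAttrs (old: {
#       dependencies = old.dependencies ++ old.optional-dependencies.speech-to-text;
#     });
#   }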