mirror of https://git.FreeBSD.org/ports.git (synced 2024-12-29 05:38:00 +00:00)
misc/py-llama-cpp-python: New port: Python bindings for the llama.cpp library
parent a36a5394da
commit cd0465f5d2
misc/Makefile
@@ -445,6 +445,7 @@
     SUBDIR += py-lightgbm
     SUBDIR += py-lightning-utilities
     SUBDIR += py-litellm
+    SUBDIR += py-llama-cpp-python
     SUBDIR += py-llm
     SUBDIR += py-llm-claude-3
     SUBDIR += py-log_symbols
misc/py-llama-cpp-python/Makefile (new file, 38 lines)
@@ -0,0 +1,38 @@
PORTNAME=	llama-cpp-python
DISTVERSIONPREFIX=	v
DISTVERSION=	0.2.84
CATEGORIES=	misc # machine-learning
PKGNAMEPREFIX=	${PYTHON_PKGNAMEPREFIX}

MAINTAINER=	yuri@FreeBSD.org
COMMENT=	Python bindings for the llama.cpp library
WWW=		https://llama-cpp-python.readthedocs.io/en/latest/

LICENSE=	MIT
LICENSE_FILE=	${WRKSRC}/LICENSE.md

BUILD_DEPENDS=	${PYTHON_PKGNAMEPREFIX}scikit-build-core>0:devel/py-scikit-build-core@${PY_FLAVOR} \
		cmake:devel/cmake-core
RUN_DEPENDS=	${PYTHON_PKGNAMEPREFIX}diskcache>=5.6.1:devel/py-diskcache@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}Jinja2>=2.11.3:devel/py-Jinja2@${PY_FLAVOR} \
		${PYNUMPY} \
		${PYTHON_PKGNAMEPREFIX}typing-extensions>=4.5.0:devel/py-typing-extensions@${PY_FLAVOR}
RUN_DEPENDS+=	${PYTHON_PKGNAMEPREFIX}fastapi>=0.100.0:www/py-fastapi@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}pydantic-settings>=2.0.1:devel/py-pydantic-settings@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}sse-starlette>=1.6.1:www/py-sse-starlette@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}starlette-context>=0.3.6:www/py-starlette-context@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}uvicorn>=0.22.0:www/py-uvicorn@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}pyyaml>=5.1:devel/py-pyyaml@${PY_FLAVOR}
TEST_DEPENDS=	${PYTHON_PKGNAMEPREFIX}httpx>=0.24.1:www/py-httpx@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}scipy>=1.10:science/py-scipy@${PY_FLAVOR}

USES=		python shebangfix
USE_PYTHON=	pep517 autoplist pytest

USE_GITHUB=	yes
GH_ACCOUNT=	abetlen
GH_TUPLE=	ggerganov:llama.cpp:4730fac:cpp/vendor/llama.cpp

SHEBANG_GLOB=	*.py

.include <bsd.port.mk>
misc/py-llama-cpp-python/distinfo (new file, 5 lines)
@@ -0,0 +1,5 @@
TIMESTAMP = 1722209930
SHA256 (abetlen-llama-cpp-python-v0.2.84_GH0.tar.gz) = 5b030c3ee8aeefa2f3b49c0788f34b7da3fef2502dd856c68d9ede6b93dd8e53
SIZE (abetlen-llama-cpp-python-v0.2.84_GH0.tar.gz) = 274756
SHA256 (ggerganov-llama.cpp-4730fac_GH0.tar.gz) = 8b54c2f6c0560e48cf2af91840c33d81f688edfecbd11043d4bf7c098a125497
SIZE (ggerganov-llama.cpp-4730fac_GH0.tar.gz) = 19005559
misc/py-llama-cpp-python/files/patch-llama__cpp_llama__cpp.py (new file, 11 lines)
@@ -0,0 +1,11 @@
--- llama_cpp/llama_cpp.py.orig	2024-07-28 03:36:26 UTC
+++ llama_cpp/llama_cpp.py
@@ -28,7 +28,7 @@ def _load_shared_library(lib_base_name: str):
     # for llamacpp) and "llama" (default name for this repo)
     _lib_paths: List[pathlib.Path] = []
     # Determine the file extension based on the platform
-    if sys.platform.startswith("linux"):
+    if sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
         _lib_paths += [
             _base_path / f"lib{lib_base_name}.so",
         ]
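The patch widens upstream's `_load_shared_library()` platform check so that FreeBSD, which ships ELF shared objects just like Linux, also looks for `libllama.so`. A minimal sketch of that lookup logic follows; the function name `_find_llama_lib`, its parameters, and the error message are illustrative stand-ins, not the upstream code verbatim, which carries additional search paths and error handling:

    import pathlib
    import sys
    from typing import List

    def _find_llama_lib(base_path: pathlib.Path, lib_base_name: str) -> pathlib.Path:
        """Locate the vendored llama.cpp shared library for the current platform."""
        lib_paths: List[pathlib.Path] = []
        # FreeBSD uses ELF .so objects, so it takes the same branch as Linux.
        if sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
            lib_paths.append(base_path / f"lib{lib_base_name}.so")
        elif sys.platform == "darwin":
            lib_paths.append(base_path / f"lib{lib_base_name}.dylib")
        elif sys.platform == "win32":
            lib_paths.append(base_path / f"{lib_base_name}.dll")
        for path in lib_paths:
            if path.exists():
                return path
        raise FileNotFoundError(f"lib{lib_base_name} not found in {base_path}")

Without the added `freebsd` branch, none of the platform cases match on FreeBSD and the loader fails even though a perfectly usable `libllama.so` sits next to the module.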
misc/py-llama-cpp-python/pkg-descr (new file, 5 lines)
@@ -0,0 +1,5 @@
llama-cpp-python provides the Python bindings for the llama.cpp library.

The main goal of llama.cpp is to enable LLM inference with minimal setup and
state-of-the-art performance on a wide variety of hardware - locally and in
the cloud.
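For context, a minimal usage sketch of the binding this port installs; the model path is a placeholder for any local GGUF model file:

    from llama_cpp import Llama

    # Load a local model; the path is a placeholder, not shipped by the port.
    llm = Llama(model_path="/path/to/model.gguf")

    # High-level completion call; returns an OpenAI-style response dict.
    output = llm("Q: Name the planets in the solar system. A: ", max_tokens=48)
    print(output["choices"][0]["text"])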