1
0
mirror of https://git.FreeBSD.org/ports.git synced 2025-01-16 07:58:04 +00:00

misc/py-pytorch: update 2.3.1 → 2.4.0

This commit is contained in:
Yuri Victorovich 2024-08-27 12:43:40 -07:00
parent 387b43b09c
commit 6a05558353
11 changed files with 74 additions and 58 deletions

View File

@@ -1,7 +1,6 @@
PORTNAME= pytorch
DISTVERSIONPREFIX= v
DISTVERSION= 2.3.1
PORTREVISION= 5
DISTVERSION= 2.4.0
CATEGORIES= misc # machine-learning
MASTER_SITES= https://github.com/pytorch/pytorch/releases/download/v${DISTVERSION}/
PKGNAMEPREFIX= ${PYTHON_PKGNAMEPREFIX}
@@ -52,6 +51,7 @@ MAKE_ENV+= BUILD_TEST=0 # ninja breaks for some reason
MAKE_ENV+= USE_MKLDNN=0 # disable MKLDNN that doesn't exist, see https://github.com/pytorch/pytorch/issues/100957
MAKE_ENV+= USE_CUDNN=0
MAKE_ENV+= USE_LAPACK=1 # needed on FreeBSD to run w/out GPU
MAKE_ENV+= USE_QNNPACK=0
LDFLAGS+= -lexecinfo
BINARY_ALIAS= make=${GMAKE}

View File

@@ -1,5 +1,5 @@
TIMESTAMP = 1718402293
SHA256 (pytorch/pytorch-v2.3.1.tar.gz) = 6c66b59345091907cd62a693b647cee224558e7f15a9b04f4f322f4f6ffeb75b
SIZE (pytorch/pytorch-v2.3.1.tar.gz) = 277997681
TIMESTAMP = 1724635082
SHA256 (pytorch/pytorch-v2.4.0.tar.gz) = a890d4342149adbc6c8b116a9afe437fe347527a9ecc0650086cdec82ecdcfb7
SIZE (pytorch/pytorch-v2.4.0.tar.gz) = 296908090
SHA256 (pytorch/pytorch-cpuinfo-3a3b76bc8845d0f4a2ea3108e256313458c5de03_GH0.tar.gz) = 658acaf67573484abc7a093066d3766f9a3b94aff5b77e0f4e9c72cee5ebaa4a
SIZE (pytorch/pytorch-cpuinfo-3a3b76bc8845d0f4a2ea3108e256313458c5de03_GH0.tar.gz) = 3534893

View File

@ -1,6 +1,6 @@
--- CMakeLists.txt.orig 2024-06-05 19:17:56 UTC
--- CMakeLists.txt.orig 2024-07-24 18:41:35 UTC
+++ CMakeLists.txt
@@ -148,7 +148,7 @@ set(CPU_INTEL OFF)
@@ -181,7 +181,7 @@ set(CPU_INTEL OFF)
set(CPU_AARCH64 OFF)
set(CPU_INTEL OFF)
@@ -9,25 +9,25 @@
set(CPU_INTEL ON)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64)")
set(CPU_AARCH64 ON)
@@ -173,7 +173,7 @@ option(BUILD_DOCS "Build Caffe2 documentation" OFF)
option(ATEN_NO_TEST "Do not build ATen test binaries" OFF)
@@ -210,7 +210,7 @@ option(BUILD_CUSTOM_PROTOBUF
option(BUILD_BINARY "Build C++ binaries" OFF)
option(BUILD_DOCS "Build Caffe2 documentation" OFF)
-option(BUILD_CUSTOM_PROTOBUF "Build and use Caffe2's own protobuf under third_party" ON)
+option(BUILD_CUSTOM_PROTOBUF "Build and use Caffe2's own protobuf under third_party" OFF)
option(BUILD_CUSTOM_PROTOBUF
- "Build and use Caffe2's own protobuf under third_party" ON)
+ "Build and use Caffe2's own protobuf under third_party" OFF)
option(BUILD_PYTHON "Build Python binaries" ON)
option(BUILD_CAFFE2 "Master flag to build Caffe2" OFF)
option(BUILD_LITE_INTERPRETER "Master flag to build Lite Interpreter" OFF)
@@ -226,7 +226,7 @@ option(USE_LEVELDB "Use LEVELDB" OFF)
option(BUILD_SHARED_LIBS "Build libcaffe2.so" ON)
@@ -258,7 +258,7 @@ option(USE_GLOG "Use GLOG" OFF)
option(USE_FAKELOWP "Use FakeLowp operators" OFF)
option(USE_GFLAGS "Use GFLAGS" OFF)
option(USE_GLOG "Use GLOG" OFF)
option(USE_LEVELDB "Use LEVELDB" OFF)
-option(USE_LITE_PROTO "Use lite protobuf instead of full." OFF)
+option(USE_LITE_PROTO "Use lite protobuf instead of full." ON) # use lite protobuf to workaround for protobuf-related failure described here: https://github.com/onnx/optimizer/issues/38
option(USE_LMDB "Use LMDB" OFF)
+option(USE_LITE_PROTO "Use lite protobuf instead of full." ON)
option(USE_MAGMA "Use MAGMA" ON)
option(USE_METAL "Use Metal for Caffe2 iOS build" ON)
@@ -409,15 +409,15 @@ option(USE_SYSTEM_CPUINFO "Use system-provided cpuinfo
option(USE_PYTORCH_METAL "Use Metal for PyTorch iOS build" OFF)
option(USE_PYTORCH_METAL_EXPORT "Export Metal models on MacOSX desktop" OFF)
@@ -451,15 +451,15 @@ option(USE_SYSTEM_CPUINFO "Use system-provided cpuinfo
# USE_SYSTEM_LIBS being "OFF".
option(USE_SYSTEM_LIBS "Use all available system-provided libraries." OFF)
option(USE_SYSTEM_CPUINFO "Use system-provided cpuinfo." OFF)
@@ -44,5 +44,5 @@
-option(USE_SYSTEM_ONNX "Use system-provided onnx." OFF)
+option(USE_SYSTEM_ONNX "Use system-provided onnx." ON)
option(USE_SYSTEM_XNNPACK "Use system-provided xnnpack." OFF)
option(USE_SYSTEM_ZSTD "Use system-provided zstd." OFF)
option(USE_GOLD_LINKER "Use ld.gold to link" OFF)
if(USE_SYSTEM_LIBS)

View File

@@ -1,11 +0,0 @@
--- aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h.orig 2023-10-12 12:49:42 UTC
+++ aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h
@@ -266,7 +266,7 @@ static_assert( (public)
}
return b;
}
- Vectorized<T> map(const __m256 (*const vop)(__m256)) const {
+ Vectorized<T> map(__m256 (*const vop)(__m256)) const {
__m256 lo, hi;
cvt_to_fp32<T>(values, lo, hi);
const auto o1 = vop(lo);

View File

@@ -1,11 +0,0 @@
--- aten/src/ATen/cpu/vec/vec512/vec512_bfloat16.h.orig 2023-10-12 12:54:40 UTC
+++ aten/src/ATen/cpu/vec/vec512/vec512_bfloat16.h
@@ -345,7 +345,7 @@ static_assert( (public)
}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wignored-qualifiers"
- Vectorized<T> map(const __m512 (*const vop)(__m512)) const {
+ Vectorized<T> map(__m512 (*const vop)(__m512)) const {
__m512 lo, hi;
cvt_to_fp32<T>(values, lo, hi);
const auto o1 = vop(lo);

View File

@@ -1,6 +1,6 @@
--- aten/src/ATen/native/sparse/ValidateCompressedIndicesCommon.h.orig 2023-12-15 02:03:27 UTC
--- aten/src/ATen/native/sparse/ValidateCompressedIndicesCommon.h.orig 2024-07-24 18:41:35 UTC
+++ aten/src/ATen/native/sparse/ValidateCompressedIndicesCommon.h
@@ -40,7 +40,7 @@ INVARIANT_CHECK_FUNC_API
@@ -39,7 +39,7 @@ INVARIANT_CHECK_FUNC_API
// use `cidx/idx` to refer to `compressed_indices/plain_indices` respectively.
INVARIANT_CHECK_FUNC_API
@@ -9,7 +9,7 @@
#ifdef GPUCC
CUDA_KERNEL_ASSERT(cond && message);
#else
@@ -58,9 +58,9 @@ INVARIANT_CHECK_FUNC_API _check_first_cidx_is_zero(
@@ -57,9 +57,9 @@ INVARIANT_CHECK_FUNC_API _check_first_cidx_is_zero(
const index_t& zero) {
const bool invariant = cidx == zero;
if (cdim_name == CDimName::CRow) {
@@ -21,7 +21,7 @@
}
}
@@ -72,9 +72,9 @@ INVARIANT_CHECK_FUNC_API _check_last_cidx_is_nnz(
@@ -71,9 +71,9 @@ INVARIANT_CHECK_FUNC_API _check_last_cidx_is_nnz(
const index_t& nnz) {
const bool invariant = cidx == nnz;
if (cdim_name == CDimName::CRow) {
@@ -33,7 +33,7 @@
}
}
@@ -89,11 +89,11 @@ INVARIANT_CHECK_FUNC_API _check_cidx_nondecreasing_loc
@@ -88,11 +88,11 @@ INVARIANT_CHECK_FUNC_API _check_cidx_nondecreasing_loc
const auto s_cidx = cidx_next - cidx;
const bool invariant = zero <= s_cidx && s_cidx <= dim;
if (cdim_name == CDimName::CRow) {
@@ -47,7 +47,7 @@
invariant,
"`0 <= ccol_indices[..., 1:] - ccol_indices[..., :-1] <= nrows` is not satisfied.");
}
@@ -108,9 +108,9 @@ INVARIANT_CHECK_FUNC_API _check_idx_bounds(
@@ -107,9 +107,9 @@ INVARIANT_CHECK_FUNC_API _check_idx_bounds(
const index_t& dim) {
const bool invariant = zero <= idx && idx < dim;
if (cdim_name == CDimName::CRow) {
@@ -59,9 +59,9 @@
}
}
@@ -129,14 +129,14 @@ INVARIANT_CHECK_FUNC_API _check_idx_sorted_distinct_va
for (auto* RESTRICT curr = slice_begin + 1; curr < slice_end; ++curr) {
const auto invariant = *(curr - 1) < *curr;
@@ -128,14 +128,14 @@ INVARIANT_CHECK_FUNC_API _check_idx_sorted_distinct_va
for (auto* RESTRICT curr = slice_begin; (slice_begin < slice_end) && (curr + 1 < slice_end); ++curr) {
const auto invariant = *curr < *(curr + 1);
if (cdim_name == CDimName::CRow) {
- _assert(
+ __assert(

View File

@@ -2,7 +2,7 @@
- /usr/local/lib/python3.9/site-packages/torch/include/c10/core/DynamicCast.h:112:22: error: use of undeclared identifier '__assert_fail'
- see https://github.com/pytorch/pytorch/issues/113941
--- c10/core/DynamicCast.h.orig 2023-10-06 19:49:01 UTC
--- c10/core/DynamicCast.h.orig 2024-07-24 18:41:35 UTC
+++ c10/core/DynamicCast.h
@@ -54,7 +54,7 @@ namespace c10 {
//
@@ -13,7 +13,7 @@
#else
#define ERROR_UNSUPPORTED_CAST TORCH_CHECK(false, "Unexpected scalar type");
#endif
@@ -99,13 +99,13 @@ C10_HOST_DEVICE inline void cast_and_store(
@@ -105,13 +105,13 @@ C10_HOST_DEVICE inline void cast_and_store(
template <> \
C10_HOST_DEVICE inline T fetch_and_cast<T>( \
const ScalarType src_type, const void* ptr) { \

View File

@@ -0,0 +1,28 @@
- fix regression in python install path in 2.4.0
--- caffe2/CMakeLists.txt.orig 2024-07-24 18:41:35 UTC
+++ caffe2/CMakeLists.txt
@@ -1897,8 +1897,22 @@ if(BUILD_PYTHON)
# only rerun when needed.
if(BUILD_PYTHON)
+ # Python site-packages
+ # Get canonical directory for python site packages (relative to install
+ # location). It varies from system to system.
+ # We should pin the path separator to the forward slash on Windows.
+ # More details can be seen at
+ # https://github.com/pytorch/pytorch/tree/main/tools/build_pytorch_libs.bat#note-backslash-munging-on-windows
+ pycmd(PYTHON_SITE_PACKAGES "
+ import os
+ import sysconfig
+ relative_site_packages = sysconfig.get_path('purelib').replace(sysconfig.get_path('data'), '').lstrip(os.path.sep)
+ print(relative_site_packages)
+ ")
+ file(TO_CMAKE_PATH ${PYTHON_SITE_PACKAGES} PYTHON_SITE_PACKAGES)
+ set(PYTHON_SITE_PACKAGES ${PYTHON_SITE_PACKAGES} PARENT_SCOPE) # for Summary
# ---[ Options.
- set(PYTHON_LIB_REL_PATH "${Python_SITELIB}" CACHE STRING "Python installation path (relative to CMake installation prefix)")
+ set(PYTHON_LIB_REL_PATH "${PYTHON_SITE_PACKAGES}" CACHE STRING "Python installation path (relative to CMake installation prefix)")
message(STATUS "Using ${PYTHON_LIB_REL_PATH} as python relative installation path")

View File

@@ -1,6 +1,6 @@
--- cmake/Dependencies.cmake.orig 2023-12-15 02:03:27 UTC
--- cmake/Dependencies.cmake.orig 2024-07-24 18:41:35 UTC
+++ cmake/Dependencies.cmake
@@ -340,7 +340,7 @@ if(USE_NNPACK OR USE_QNNPACK OR USE_PYTORCH_QNNPACK OR
@@ -304,7 +304,7 @@ if(USE_NNPACK OR USE_PYTORCH_QNNPACK OR USE_XNNPACK)
set(DISABLE_NNPACK_AND_FAMILY ON)
endif()
else()

View File

@@ -0,0 +1,10 @@
--- setup.py.orig 2024-08-26 02:27:07 UTC
+++ setup.py
@@ -366,7 +366,6 @@ def get_submodule_folders():
"cpuinfo",
"onnx",
"foxi",
- "QNNPACK",
"fbgemm",
"cutlass",
]

View File

@@ -1,8 +1,8 @@
--- third_party/kineto/libkineto/src/ThreadUtil.cpp.orig 2023-04-03 19:46:02 UTC
--- third_party/kineto/libkineto/src/ThreadUtil.cpp.orig 2024-07-24 18:41:37 UTC
+++ third_party/kineto/libkineto/src/ThreadUtil.cpp
@@ -57,7 +57,7 @@ int32_t systemThreadId() {
#elif defined _MSC_VER
_sysTid = (int32_t)GetCurrentThreadId();
@@ -59,7 +59,7 @@ int32_t systemThreadId() {
#elif defined __FreeBSD__
syscall(SYS_thr_self, &_sysTid);
#else
- _sysTid = (int32_t)syscall(SYS_gettid);
+ _sysTid = (int32_t)syscall(SYS_getpid);