#!/usr/bin/make -f
include /usr/share/dpkg/pkg-info.mk
export DEB_BUILD_MAINT_OPTIONS = hardening=+all
export LC_ALL=C.UTF-8
export PYBUILD_DISABLE_python2 = 1
export PYBUILD_NAME = torch
# Upstream version with the Debian "+dfsg..." suffix stripped.
# ':=' makes the shell pipeline run once at parse time instead of on every
# re-expansion of the exported variable, and '$$' ensures a literal '$'
# (end-of-line anchor) reaches sed -- a single '$' would be consumed by
# make as the (empty) variable '$(/)'.
export PYTORCH_BUILD_VERSION := $(shell echo "$(DEB_VERSION_UPSTREAM)" | sed -e 's/+dfsg.*$$//')
# Same value under the second name upstream's build scripts look for.
export PYTORCH_VERSION := $(PYTORCH_BUILD_VERSION)
export SOVERSION = 2.4
export PATCH = 1

# --[[ maintainer notes ]]--
# How to dump the flags for the upstream built instance:
# $ python3 -c 'import torch as th; print(th.__version__, th.__config__.show())'

# Enable the CUDA configuration only when nvcc is available on the builder.
ifneq (,$(shell command -v nvcc))
export CUDA_HOME=/usr
export CC=cuda-gcc
export CXX=cuda-g++
# Workaround to linker overflow (split the CUDA library).  The remark is
# kept on its own line: an inline "# ..." after the value would export
# "ON  " (with trailing spaces), confusing downstream boolean parsing.
export BUILD_SPLIT_CUDA=ON
# Refer the following webpage for CUDA capacity of different GPUs:
#   https://developer.nvidia.com/cuda-gpus
# Note, cuda compute architecture is backward compatible. Thus, even if we
# compile the package in the config 2 ("all-major"), it still runs on the
# GPUs produced as of 2015. Note, we may encounter linker overflow because
# the binary is too large, containing binary code for way too many archs.
# -- config 1: cover popular archs (default) --
export TORCH_CUDA_ARCH_LIST = 6.1;7.5;8.6
# -- config 2: cover only major archs. --
#export TORCH_CUDA_ARCH_LIST = 6.0;7.0;8.0
# -- config 3: cover as more arch as possible, and PTX --
#export TORCH_CUDA_ARCH_LIST = 6.0;6.1;7.0;7.5;8.0;8.6+PTX
endif

# [special blas configurations]
# We use a run-time switching mechanism for BLAS/LAPACK.
# https://wiki.debian.org/DebianScience/LinearAlgebraLibraries
# Even if the torch package is built against the generic library, the backend
# libblas.so.3 is in fact automatically switched to the fastest implementation.
# "Generic" here selects the reference BLAS at build time; the actual
# implementation is chosen at run time via the libblas.so.3 alternative.
export BLAS = Generic
export GENERIC_BLAS_LIBRARIES = blas

# [misc configurations]
# GCC's -Wdangling-reference is suppressed for this codebase.
export _WNOERROR_ = -Wno-dangling-reference
# Pick the fastest available linker: mold > lld > classic ld.
ifneq (,$(shell command -v mold))
export LD=mold
export __LD=-fuse-ld=mold
else
ifneq (,$(shell command -v lld))
export LD=lld
export __LD=-fuse-ld=lld
else
export LD=ld
export __LD=
endif
endif
# ':=' so the four dpkg-buildflags invocations run once at parse time
# rather than on every expansion of these exported variables.  _WNOERROR_
# and __LD are both assigned above, so immediate expansion is safe.
# NOTE(review): "-I/usr" looks unusual (typically -I/usr/include) -- confirm.
export CFLAGS := $(shell dpkg-buildflags --get CPPFLAGS) \
                $(shell dpkg-buildflags --get CFLAGS) \
                -gsplit-dwarf $(_WNOERROR_) $(__LD) -I/usr
export CXXFLAGS := $(shell dpkg-buildflags --get CPPFLAGS) \
                  $(shell dpkg-buildflags --get CXXFLAGS) \
                  -gsplit-dwarf $(_WNOERROR_) $(__LD) -I/usr
export ONNX_NAMESPACE = onnx
export REL_WITH_DEB_INFO = ON
export USE_FFMPEG = ON
export USE_GFLAGS = OFF
export USE_GLOG = OFF
export USE_LEVELDB = ON
export USE_LMDB = ON
export USE_OPENCV = ON
export USE_REDIS = OFF
export USE_SYSTEM_LIBS = ON
export USE_ZMQ = ON
export USE_ZSTD = ON
export USE_KINETO = ON
export USE_ITT = OFF
export BUILD_CAFFE2 = 0
export BUILD_CAFFE2_OPS = 0

# [device specific -- Vulkan backend support]
# See https://pytorch.org/tutorials/prototype/vulkan_workflow.html
#export USE_VULKAN=1
#export USE_VULKAN_SHADERC_RUNTIME=1
#export USE_VULKAN_WRAPPER=0

# [device specific -- CPU/CUDA/ROCm configurations]
# Exactly one of the three branches applies, chosen by which compiler
# driver (nvcc / hipcc / neither) is present at parse time.
ifneq (,$(shell command -v nvcc))
# CUDA version
export NO_CUDA = 0
export USE_CUDA = ON
export USE_CUDNN = ON
export USE_SYSTEM_NCCL = ON
export USE_ROCM = OFF
export USE_MIOPEN = OFF
export USE_MAGMA = OFF
# ':=' so llvm-config is invoked once at parse time, not on every
# re-expansion of the exported (otherwise recursive) variable.
export USE_LLVM := $(shell llvm-config --prefix)
else ifneq (,$(shell command -v hipcc))
# ROCm version
# NOTE(review): this branch leaves USE_CUDNN, USE_SYSTEM_NCCL, USE_MAGMA
# and USE_LLVM unset, unlike the other two branches -- confirm intended.
export NO_CUDA = 1
export USE_CUDA = OFF
export USE_ROCM = ON
export USE_MIOPEN = ON
else
# CPU version
export NO_CUDA = 1
export USE_CUDA = OFF
export USE_CUDNN = OFF
export USE_SYSTEM_NCCL = OFF
export USE_ROCM = OFF
export USE_MIOPEN = OFF
# $(CURDIR) is make's built-in absolute cwd; avoids a shell fork for pwd.
export DEB_OVERRIDE_CAFFE2_PY_PATH := $(CURDIR)/debian/python3-torch/
export USE_LLVM = OFF
endif

# [distributed/communication configurations]
# Distributed support uses the gloo and tensorpipe backends; MPI is off.
export USE_DISTRIBUTED = ON
export USE_GLOO = ON
export USE_MPI = OFF
export USE_TENSORPIPE = ON

# [CPU training and inference]
# All remarks are kept on their own lines: an inline "# ..." after a value
# would be exported as part of the value, with trailing whitespace.
# XXX: forget this; No need to enable.
export USE_FBGEMM = OFF
ifneq (,$(filter $(DEB_HOST_ARCH),amd64 arm64 ppc64el))
# Requires ideep and onednn.
export USE_MKLDNN = ON
else
export USE_MKLDNN = OFF
endif
ifneq (,$(filter $(DEB_HOST_ARCH),amd64))
export USE_PYTORCH_QNNPACK = ON
else
export USE_PYTORCH_QNNPACK = OFF
endif
# XXX: forget this. No need to enable.
export USE_NNPACK = OFF
# XXX: Deprecated upstream. No need to enable.
export USE_QNNPACK = OFF
ifneq (,$(filter $(DEB_HOST_ARCH),amd64 arm64 riscv64))
export USE_XNNPACK = ON
else
export USE_XNNPACK = OFF
endif

# [number of jobs]
# Honour "parallel=N" from DEB_BUILD_OPTIONS; the build reads MAX_JOBS
# from the environment.
ifneq (,$(filter parallel=%,$(DEB_BUILD_OPTIONS)))
export MAX_JOBS := $(patsubst parallel=%,%,$(filter parallel=%,$(DEB_BUILD_OPTIONS)))
endif

# Customization options (You may want to rebuild this package locally)
# Set NATIVE to any non-empty value (e.g. "make NATIVE=1 ...") to compile
# with -march=native for the local machine.
# NOTE(review): this redefinition drops $(_WNOERROR_) and $(__LD) from the
# flags set earlier -- confirm that is intended for local rebuilds.
NATIVE :=
ifneq (,$(NATIVE))
export CFLAGS = $(shell dpkg-buildflags --get CPPFLAGS) $(shell dpkg-buildflags --get CFLAGS) -gsplit-dwarf -march=native
export CXXFLAGS = $(shell dpkg-buildflags --get CPPFLAGS) $(shell dpkg-buildflags --get CXXFLAGS) -gsplit-dwarf -march=native
endif


# Debhelper catch-all: every target not overridden below is delegated to
# dh, using the pybuild buildsystem (-S) with the python3 addon.
%:
	dh $@ -Spybuild --with python3

# NOTE(review): override_dh_auto_clean is defined a second time near the
# end of this file; GNU make uses the LAST definition of a target, so this
# recipe (dh_auto_clean + the __pycache__ sweep) is silently shadowed and
# never runs -- the two definitions should be merged.
override_dh_auto_clean:
	dh_auto_clean
	-find . -type d -name __pycache__ -exec rm -rf '{}' +
	-$(RM) -rf third_party/googletest

override_dh_auto_configure:
# Replace the (empty) googletest submodule stub with the system copy.
# Commands are ';'-chained deliberately: on a re-run rmdir may fail
# (already replaced) and the cp should still proceed.
	cd third_party/; rmdir googletest; cp -rv /usr/src/googletest .
	# fix python and shell shebang -- no /usr/bin/env
	find . -type f -name '*.py' -exec sed -i -e 's@#!/usr/bin/env python.*@#!/usr/bin/python3@' '{}' \;
	find . -type f -name '*.sh' -exec sed -i -e 's@#!/usr/bin/env @#!/usr/bin/@g' '{}' \;
	# regenerate flatbuffers code. See tools/gen_flatbuffers.sh
	cd torch/csrc/jit/serialization/; flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs 

# Skip the separate build step: override_dh_auto_install below runs
# "setup.py install", which builds as a side effect.
override_dh_auto_build:
	true # In order to avoid building everything twice.

# The test suite is not run during the package build.
override_dh_auto_test:
	true

# [maintainer note] The build results in many shared objects ...
# but not all of them are important. You may check the contents of the
# upstream release of libtorch binary tarball in https://pytorch.org/
# for the list of most important shared objects.
# Alternatively, you may browse the conda-meta/pytorch-*.json files
# under an anaconda instance.
override_dh_auto_install:
	# [build and install]
# The ifneq/else-ifneq/else chains below select the CUDA, ROCm or CPU
# packaging variant; the same compiler-detection test as at the top.
ifneq (,$(shell command -v nvcc))
    # --- CUDA variant
# "$$PY" is '$$' so the shell, not make, expands the loop variable.
	for PY in $(shell py3versions -d); do\
        $$PY setup.py install --install-layout=deb \
		--root=$(shell pwd)/debian/python3-torch-cuda/ ;\
        done
else ifneq (,$(shell command -v hipcc))
    # --- ROCM variant
	false # not impl
else
    # --- CPU variant
	for PY in $(shell py3versions -d); do \
        $$PY setup.py install --install-layout=deb \
		--root=$(shell pwd)/debian/python3-torch/ ;\
        done
endif
	# [list shared objects] use `ls build/lib/*.so` to get a list of all compiled shared objects.
# Purely informational output in the build log.
	echo {begin listing shared objects}
	find build/lib -type f | sort
	echo {end listing shared objects}
	# [python3-torch::headers -> libtorch-dev] move the headers out of the python package
# Headers move to /usr/include in the -dev package; symlinks are left in
# the python package's include dir (presumably so in-tree consumers such
# as torch.utils.cpp_extension keep working -- TODO confirm).
ifneq (,$(shell command -v nvcc))
    # --- CUDA variant
	mkdir -pv debian/libtorch-cuda-dev/usr/include
	mv -v debian/python3-torch-cuda/usr/lib/python3*/dist-packages/torch/include/* \
		debian/libtorch-cuda-dev/usr/include/
	cd debian/python3-torch-cuda/usr/lib/python3*/dist-packages/torch/include/; \
		ln -sv /usr/include/ATen . ;\
		ln -sv /usr/include/c10 . ;\
		ln -sv /usr/include/caffe2 . ;\
		ln -sv /usr/include/torch . ;\
        true
else ifneq (,$(shell command -v hipcc))
    # --- ROCM variant
	false
else
    # --- CPU variant
	mkdir -pv debian/libtorch-dev/usr/include
	mv -v debian/python3-torch/usr/lib/python3*/dist-packages/torch/include/* \
		debian/libtorch-dev/usr/include/
	cd debian/python3-torch/usr/lib/python3*/dist-packages/torch/include/; \
		ln -sv /usr/include/ATen . ;\
		ln -sv /usr/include/c10 . ;\
		ln -sv /usr/include/caffe2 . ;\
		ln -sv /usr/include/torch . ;\
        true
endif
	# [python3-torch::shlibs -> libtorch$(SOVERSION)] move the (public) shared libs out of the python package
# The versioned .so.* files move into the runtime library package, then
# the lib*.so.$(SOVERSION) -> lib*.so.$(SOVERSION).$(PATCH) links are
# recreated there (mv does not carry them over usefully).
ifneq (,$(shell command -v nvcc))
    # --- CUDA variant
	mkdir -pv debian/libtorch-cuda-$(SOVERSION)/usr/lib/$(DEB_HOST_MULTIARCH)/
	mv -v debian/python3-torch-cuda/usr/lib/python3*/dist-packages/torch/lib/*.so.* \
		debian/libtorch-cuda-$(SOVERSION)/usr/lib/$(DEB_HOST_MULTIARCH)/
	cd debian/libtorch-cuda-$(SOVERSION)/usr/lib/$(DEB_HOST_MULTIARCH)/ ;\
		ln -sfv libbackend_with_compiler.so.$(SOVERSION).$(PATCH) libbackend_with_compiler.so.$(SOVERSION) ;\
		ln -sfv libc10.so.$(SOVERSION).$(PATCH) libc10.so.$(SOVERSION) ;\
		ln -sfv libc10_cuda.so.$(SOVERSION).$(PATCH) libc10_cuda.so.$(SOVERSION) ;\
		ln -sfv libc10d_cuda_test.so.$(SOVERSION).$(PATCH) libc10d_cuda_test.so.$(SOVERSION) ;\
		ln -sfv libcaffe2_nvrtc.so.$(SOVERSION).$(PATCH) libcaffe2_nvrtc.so.$(SOVERSION) ;\
		ln -sfv libjitbackend_test.so.$(SOVERSION).$(PATCH) libjitbackend_test.so.$(SOVERSION) ;\
		ln -sfv libshm.so.$(SOVERSION).$(PATCH) libshm.so.$(SOVERSION) ;\
		ln -sfv libtorch.so.$(SOVERSION).$(PATCH) libtorch.so.$(SOVERSION) ;\
		ln -sfv libtorch_cpu.so.$(SOVERSION).$(PATCH) libtorch_cpu.so.$(SOVERSION) ;\
		ln -sfv libtorch_cuda.so.$(SOVERSION).$(PATCH) libtorch_cuda.so.$(SOVERSION) ;\
		ln -sfv libtorch_cuda_linalg.so.$(SOVERSION).$(PATCH) libtorch_cuda_linalg.so.$(SOVERSION) ;\
		ln -sfv libtorch_global_deps.so.$(SOVERSION).$(PATCH) libtorch_global_deps.so.$(SOVERSION) ;\
		ln -sfv libtorch_python.so.$(SOVERSION).$(PATCH) libtorch_python.so.$(SOVERSION) ;\
		ln -sfv libtorchbind_test.so.$(SOVERSION).$(PATCH) libtorchbind_test.so.$(SOVERSION) ;\
        true
else ifneq (,$(shell command -v hipcc))
    # --- ROCM variant
	false
else
    # --- CPU variant
	mkdir -pv debian/libtorch$(SOVERSION)/usr/lib/$(DEB_HOST_MULTIARCH)/
	mv -v debian/python3-torch/usr/lib/python3*/dist-packages/torch/lib/*.so.* \
		debian/libtorch$(SOVERSION)/usr/lib/$(DEB_HOST_MULTIARCH)/
	cd debian/libtorch$(SOVERSION)/usr/lib/$(DEB_HOST_MULTIARCH)/; \
		ln -sfv libbackend_with_compiler.so.$(SOVERSION).$(PATCH) libbackend_with_compiler.so.$(SOVERSION) ;\
		ln -sfv libc10.so.$(SOVERSION).$(PATCH) libc10.so.$(SOVERSION) ;\
		ln -sfv libjitbackend_test.so.$(SOVERSION).$(PATCH) libjitbackend_test.so.$(SOVERSION) ;\
                ln -sfv libnnapi_backend.so.$(SOVERSION).$(PATCH) libnnapi_backend.so.$(SOVERSION) ;\
		ln -sfv libshm.so.$(SOVERSION).$(PATCH) libshm.so.$(SOVERSION) ;\
		ln -sfv libtorch.so.$(SOVERSION).$(PATCH) libtorch.so.$(SOVERSION) ;\
		ln -sfv libtorch_cpu.so.$(SOVERSION).$(PATCH) libtorch_cpu.so.$(SOVERSION) ;\
		ln -sfv libtorch_global_deps.so.$(SOVERSION).$(PATCH) libtorch_global_deps.so.$(SOVERSION) ;\
		ln -sfv libtorch_python.so.$(SOVERSION).$(PATCH) libtorch_python.so.$(SOVERSION) ;\
		ln -sfv libtorchbind_test.so.$(SOVERSION).$(PATCH) libtorchbind_test.so.$(SOVERSION) ;\
		true
endif
	# [python3-torch::symlinks -> libtorch-dev] move the links to shared libs out of the python package
# First: inside the python package's torch/lib, delete the unversioned
# *.so links and recreate them as relative links into the multiarch dir.
# Second: in the -dev package, create the unversioned development links.
ifneq (,$(shell command -v nvcc))
    # --- CUDA variant
# NOTE(review): unlike the dev-package list further below, this list has
# no libtorch_cuda.so entry -- confirm the omission is intentional.
	mkdir -pv debian/libtorch-cuda-dev/usr/lib/$(DEB_HOST_MULTIARCH)/
	cd debian/python3-torch-cuda/usr/lib/python3*/dist-packages/torch/lib ; $(RM) -v *.so;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libbackend_with_compiler.so.$(SOVERSION) libbackend_with_compiler.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libc10.so.$(SOVERSION) libc10.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libc10_cuda.so.$(SOVERSION) libc10_cuda.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libc10d_cuda_test.so.$(SOVERSION) libc10d_cuda_test.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libcaffe2_nvrtc.so.$(SOVERSION) libcaffe2_nvrtc.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libjitbackend_test.so.$(SOVERSION) libjitbackend_test.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libshm.so.$(SOVERSION) libshm.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libtorch.so.$(SOVERSION) libtorch.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libtorch_cpu.so.$(SOVERSION) libtorch_cpu.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libtorch_cuda_linalg.so.$(SOVERSION) libtorch_cuda_linalg.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libtorch_global_deps.so.$(SOVERSION) libtorch_global_deps.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libtorch_python.so.$(SOVERSION) libtorch_python.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libtorchbind_test.so.$(SOVERSION) libtorchbind_test.so ;\
		true
	cd debian/libtorch-cuda-dev/usr/lib/$(DEB_HOST_MULTIARCH)/; \
		ln -sfv libbackend_with_compiler.so.$(SOVERSION) libbackend_with_compiler.so ;\
		ln -sfv libc10.so.$(SOVERSION) libc10.so ;\
		ln -sfv libc10_cuda.so.$(SOVERSION) libc10_cuda.so ;\
		ln -sfv libc10d_cuda_test.so.$(SOVERSION) libc10d_cuda_test.so ;\
		ln -sfv libcaffe2_nvrtc.so.$(SOVERSION) libcaffe2_nvrtc.so ;\
		ln -sfv libjitbackend_test.so.$(SOVERSION) libjitbackend_test.so ;\
		ln -sfv libshm.so.$(SOVERSION) libshm.so ;\
		ln -sfv libtorch.so.$(SOVERSION) libtorch.so ;\
		ln -sfv libtorch_cpu.so.$(SOVERSION) libtorch_cpu.so ;\
		ln -sfv libtorch_cuda.so.$(SOVERSION) libtorch_cuda.so ;\
		ln -sfv libtorch_cuda_linalg.so.$(SOVERSION) libtorch_cuda_linalg.so ;\
		ln -sfv libtorch_global_deps.so.$(SOVERSION) libtorch_global_deps.so ;\
		ln -sfv libtorch_python.so.$(SOVERSION) libtorch_python.so ;\
		ln -sfv libtorchbind_test.so.$(SOVERSION) libtorchbind_test.so;\
		true
else ifneq (,$(shell command -v hipcc))
    # --- ROCM variant
	false
else
    # --- CPU variant
	mkdir -pv debian/libtorch-dev/usr/lib/$(DEB_HOST_MULTIARCH)/
	cd debian/python3-torch/usr/lib/python3*/dist-packages/torch/lib ; $(RM) -v *.so;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libbackend_with_compiler.so.$(SOVERSION) libbackend_with_compiler.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libc10.so.$(SOVERSION) libc10.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libjitbackend_test.so.$(SOVERSION) libjitbackend_test.so ;\
                ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libnnapi_backend.so.$(SOVERSION) libnnapi_backend.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libshm.so.$(SOVERSION) libshm.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libtorch.so.$(SOVERSION) libtorch.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libtorch_cpu.so.$(SOVERSION) libtorch_cpu.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libtorch_global_deps.so.$(SOVERSION) libtorch_global_deps.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libtorch_python.so.$(SOVERSION) libtorch_python.so ;\
		ln -sv  ../../../../$(DEB_HOST_MULTIARCH)/libtorchbind_test.so.$(SOVERSION) libtorchbind_test.so ;\
		true
	cd debian/libtorch-dev/usr/lib/$(DEB_HOST_MULTIARCH)/; \
		ln -sfv libbackend_with_compiler.so.$(SOVERSION) libbackend_with_compiler.so ;\
		ln -sfv libc10.so.$(SOVERSION) libc10.so ;\
		ln -sfv libjitbackend_test.so.$(SOVERSION) libjitbackend_test.so ;\
                ln -sfv libnnapi_backend.so.$(SOVERSION) libnnapi_backend.so ;\
		ln -sfv libshm.so.$(SOVERSION) libshm.so ;\
		ln -sfv libtorch.so.$(SOVERSION) libtorch.so ;\
		ln -sfv libtorch_cpu.so.$(SOVERSION) libtorch_cpu.so ;\
		ln -sfv libtorch_global_deps.so.$(SOVERSION) libtorch_global_deps.so ;\
		ln -sfv libtorch_python.so.$(SOVERSION) libtorch_python.so ;\
		ln -sfv libtorchbind_test.so.$(SOVERSION) libtorchbind_test.so ;\
		true
endif
	# [python3-torch::cmake -> libtorch-dev] move the cmake files out of the python package, and fixup the cmake files accordingly
# The sed passes rewrite _IMPORT_PREFIX / CMAKE_CURRENT_LIST_DIR relative
# paths so the exported cmake config matches the Debian multiarch layout
# (libs under /usr/lib/<triplet>, headers under /usr/include).
ifneq (,$(shell command -v nvcc))
	mkdir -pv debian/libtorch-cuda-dev/usr/lib/$(DEB_HOST_MULTIARCH)/
	mv -v debian/python3-torch-cuda/usr/lib/python3*/dist-packages/torch/share/cmake \
		debian/libtorch-cuda-dev/usr/lib/$(DEB_HOST_MULTIARCH)/
	find debian/libtorch-cuda-dev/usr/lib/$(DEB_HOST_MULTIARCH) -type f -name '*.cmake' \
		-exec sed -i -e "s@\\(_IMPORT_PREFIX.\\)/lib/lib@\\1/$(DEB_HOST_MULTIARCH)/lib@g" '{}' \;
	find debian/libtorch-cuda-dev/usr/lib/$(DEB_HOST_MULTIARCH) -type f -name '*.cmake' \
		-exec sed -i -e "s@\\(_IMPORT_PREFIX.\\)/include@\\1/../include@g" '{}' \;
	find debian/libtorch-cuda-dev/usr/lib/$(DEB_HOST_MULTIARCH) -type f -name '*.cmake' \
	        -exec sed -i -e "s@\\(CMAKE_CURRENT_LIST_DIR.\\)/../../../@\\1/../../../../@g" '{}' \;
	find debian/libtorch-cuda-dev/usr/lib/$(DEB_HOST_MULTIARCH)/cmake/ATen -type f -name '*.cmake' \
		-exec sed -i -e "s@/build/pytorch-.*/torch/include@/usr/include/ATen@g" '{}' \;
else ifneq (,$(shell command -v hipcc))
	false
else
	mkdir -pv debian/libtorch-dev/usr/lib/$(DEB_HOST_MULTIARCH)/
	mv -v debian/python3-torch/usr/lib/python3*/dist-packages/torch/share/cmake \
		debian/libtorch-dev/usr/lib/$(DEB_HOST_MULTIARCH)/
	find debian/libtorch-dev/usr/lib/$(DEB_HOST_MULTIARCH) -type f -name '*.cmake' \
		-exec sed -i -e "s@\\(_IMPORT_PREFIX.\\)/lib/lib@\\1/$(DEB_HOST_MULTIARCH)/lib@g" '{}' \;
	find debian/libtorch-dev/usr/lib/$(DEB_HOST_MULTIARCH) -type f -name '*.cmake' \
		-exec sed -i -e "s@\\(_IMPORT_PREFIX.\\)/include@\\1/../include@g" '{}' \;
	find debian/libtorch-dev/usr/lib/$(DEB_HOST_MULTIARCH) -type f -name '*.cmake' \
	        -exec sed -i -e "s@\\(CMAKE_CURRENT_LIST_DIR.\\)/../../../@\\1/../../../../@g" '{}' \;
	find debian/libtorch-dev/usr/lib/$(DEB_HOST_MULTIARCH)/cmake/ATen -type f -name '*.cmake' \
		-exec sed -i -e "s@/build/pytorch-.*/torch/include@/usr/include/ATen@g" '{}' \;
endif
	# [python3-torch::testbin -> libtorch-test] move the test binaries out of the python package
# Test binaries go to a separate -test package; the python package keeps a
# "test" symlink pointing at the installed location.
ifneq (,$(shell command -v nvcc))
	mkdir -pv debian/libtorch-cuda-test/usr/lib/libtorch-cuda-test/
	mv -v debian/python3-torch-cuda/usr/lib/python3*/dist-packages/torch/test/* \
		debian/libtorch-cuda-test/usr/lib/libtorch-cuda-test/
	rmdir -v debian/python3-torch-cuda/usr/lib/python3*/dist-packages/torch/test
	cd debian/python3-torch-cuda/usr/lib/python3*/dist-packages/torch/ ;\
		ln -s /usr/lib/libtorch-cuda-test test
else ifneq (,$(shell command -v hipcc))
	false
else
	mkdir -pv debian/libtorch-test/usr/lib/libtorch-test/
	mv -v debian/python3-torch/usr/lib/python3*/dist-packages/torch/test/* \
		debian/libtorch-test/usr/lib/libtorch-test/
	rmdir -v debian/python3-torch/usr/lib/python3*/dist-packages/torch/test
	cd debian/python3-torch/usr/lib/python3*/dist-packages/torch/ ;\
		ln -s /usr/lib/libtorch-test test
endif
    # [python3-torch :: Debhelper Sequence files]
# The block below (shipping a dh_pytorch sequence) is currently disabled.
#ifneq (,$(shell command -v nvcc))
#    mkdir -pv debian/python3-torch-cuda/usr/share/perl5/Debian/Debhelper/Sequence
#    cp debian/pytorch.pm \
#        debian/python3-torch-cuda/usr/share/perl5/Debian/Debhelper/Sequence/
#    mkdir -pv debian/python3-torch-cuda/usr/bin
#    cp debian/dh_pytorch \
#        debian/python3-torch-cuda/usr/bin/
#else ifneq (,$(shell command -v hipcc))
#    false
#else
#    mkdir -pv debian/python3-torch/usr/share/perl5/Debian/Debhelper/Sequence
#    cp debian/pytorch.pm \
#        debian/python3-torch/usr/share/perl5/Debian/Debhelper/Sequence/
#    mkdir -pv debian/python3-torch/usr/bin
#    cp debian/dh_pytorch \
#        debian/python3-torch/usr/bin/
#endif

# Generate python dependencies from requirements.txt; keep extension names
# unmodified (--no-ext-rename) and add the numpy ABI dependency.
override_dh_python3:
	dh_python3 --requires=requirements.txt --no-ext-rename
	dh_numpy3

# Merged clean recipe.  This file defines override_dh_auto_clean twice and
# GNU make uses the LAST definition, so this one must carry every clean
# action -- the earlier recipe (dh_auto_clean + __pycache__ sweep) was
# otherwise silently shadowed and never ran.
override_dh_auto_clean:
	dh_auto_clean
	-find . -type d -name __pycache__ -exec rm -rf '{}' +
	-$(RM) -r build/
# googletest is a directory (copied from /usr/src/googletest during
# configure), so it needs a recursive remove; a plain -f cannot delete it.
	-$(RM) -rf third_party/googletest

# dwz fails on these binaries (build would FTBFS), so the step is a no-op.
override_dh_dwz:
	: # FTBFS
