作者:清明如月_213 | 来源:互联网 | 2023-08-07 08:55
libtorch 下载：libtorch-shared-with-deps-1.7.1+cu101.zip；例子下载：https://github.com/pytorch/examples/tree/master/cpp/autograd
libtorch 下载
libtorch-shared-with-deps-1.7.1+cu101.zip
例子下载
https://github.com/pytorch/examples/tree/master/cpp/autograd
CMakeLists.txt
# Build script for the libtorch autograd example.
# Configure with: cmake -DCMAKE_PREFIX_PATH=/path/to/libtorch ..
cmake_minimum_required(VERSION 3.5)
project(autograd CXX)

# libtorch 1.7 requires C++14; make the requirement strict so CMake
# errors out instead of silently falling back to an older standard.
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# TorchConfig.cmake is located through CMAKE_PREFIX_PATH (see the configure
# command above); it defines TORCH_LIBRARIES and TORCH_INSTALL_PREFIX.
find_package(Torch REQUIRED)

add_executable(${PROJECT_NAME} autograd.cpp)
# Explicit visibility keyword instead of the legacy keyword-less signature.
target_link_libraries(${PROJECT_NAME} PRIVATE "${TORCH_LIBRARIES}")

# The following block is suggested to be used on Windows.
# According to https://github.com/pytorch/pytorch/issues/25457,
# the Torch DLLs need to be copied next to the executable to avoid memory errors.
if(MSVC)
  file(GLOB TORCH_DLLS "${TORCH_INSTALL_PREFIX}/lib/*.dll")
  add_custom_command(TARGET ${PROJECT_NAME}
                     POST_BUILD
                     COMMAND ${CMAKE_COMMAND} -E copy_if_different
                             ${TORCH_DLLS}
                             # Destination was garbled to a bare "$" in the
                             # original paste; the upstream example copies into
                             # the directory of the built executable:
                             $<TARGET_FILE_DIR:${PROJECT_NAME}>
                     VERBATIM)
endif()
autograd.cpp
#include
#include using namespace torch::autograd;void basic_autograd_operations_example() {std::cout <<"&#61;&#61;&#61;&#61;&#61;&#61; Running: \"Basic autograd operations\" &#61;&#61;&#61;&#61;&#61;&#61;" <name() <name() <name() <name() <() <1000) {y &#61; y * 2;}std::cout <name() <() <}void compute_higher_order_gradients_example() {std::cout <<"&#61;&#61;&#61;&#61;&#61;&#61; Running \"Computing higher-order gradients in C&#43;&#43;\" &#61;&#61;&#61;&#61;&#61;&#61;" <}// Inherit from Function
class LinearFunction : public Function {public:// Note that both forward and backward are static functions// bias is an optional argumentstatic torch::Tensor forward(AutogradContext *ctx, torch::Tensor input, torch::Tensor weight, torch::Tensor bias &#61; torch::Tensor()) {ctx->save_for_backward({input, weight, bias});auto output &#61; input.mm(weight.t());if (bias.defined()) {output &#43;&#61; bias.unsqueeze(0).expand_as(output);}return output;}static tensor_list backward(AutogradContext *ctx, tensor_list grad_outputs) {auto saved &#61; ctx->get_saved_variables();auto input &#61; saved[0];auto weight &#61; saved[1];auto bias &#61; saved[2];auto grad_output &#61; grad_outputs[0];auto grad_input &#61; grad_output.mm(weight);auto grad_weight &#61; grad_output.t().mm(input);auto grad_bias &#61; torch::Tensor();if (bias.defined()) {grad_bias &#61; grad_output.sum(0);}return {grad_input, grad_weight, grad_bias};}
};class MulConstant : public Function {public:static torch::Tensor forward(AutogradContext *ctx, torch::Tensor tensor, double constant) {// ctx is a context object that can be used to stash information// for backward computationctx->saved_data["constant"] &#61; constant;return tensor * constant;}static tensor_list backward(AutogradContext *ctx, tensor_list grad_outputs) {// We return as many input gradients as there were arguments.// Gradients of non-tensor arguments to forward must be &#96;torch::Tensor()&#96;.return {grad_outputs[0] * ctx->saved_data["constant"].toDouble(), torch::Tensor()};}
};void custom_autograd_function_example() {std::cout <<"&#61;&#61;&#61;&#61;&#61;&#61; Running \"Using custom autograd function in C&#43;&#43;\" &#61;&#61;&#61;&#61;&#61;&#61;" <}int main() {std::cout <}
cuda 版本
hlx@W240F1:~/下载$ cat /usr/local/cuda/version.txt
CUDA Version 10.0.130
CUDA Patch Version 10.0.130.1
hlx@W240F1:~/下载$
hlx@W240F1:~/libtorch_tut$ mkdir build
hlx@W240F1:~/libtorch_tut$ cd build
hlx@W240F1:~/libtorch_tut/build$ cmake -DCMAKE_PREFIX_PATH=/home/hlx/libtorch ..
-- The C compiler identification is GNU 5.4.0
-- The CXX compiler identification is GNU 5.4.0
-- Check for working C compiler: /usr/bin/cc
-- Check for working C compiler: /usr/bin/cc -- works
-- Detecting C compiler ABI info
-- Detecting C compiler ABI info - done
-- Detecting C compile features
-- Detecting C compile features - done
-- Check for working CXX compiler: /usr/bin/c++
-- Check for working CXX compiler: /usr/bin/c++ -- works
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Looking for pthread.h
-- Looking for pthread.h - found
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Failed
-- Looking for pthread_create in pthreads
-- Looking for pthread_create in pthreads - not found
-- Looking for pthread_create in pthread
-- Looking for pthread_create in pthread - found
-- Found Threads: TRUE
-- Found CUDA: /usr/local/cuda (found version "10.0")
-- Caffe2: CUDA detected: 10.0
-- Caffe2: CUDA nvcc is: /usr/local/cuda/bin/nvcc
-- Caffe2: CUDA toolkit directory: /usr/local/cuda
-- Caffe2: Header version is: 10.0
-- Found CUDNN: /usr/local/cuda/lib64/libcudnn.so
-- Found cuDNN: v7.6.3 (include: /usr/local/cuda/include, library: /usr/local/cuda/lib64/libcudnn.so)
-- Autodetected CUDA architecture(s): 6.1 6.1
-- Added CUDA NVCC flags for: -gencode;arch=compute_61,code=sm_61
-- Found Torch: /home/hlx/libtorch/lib/libtorch.so
-- Configuring done
-- Generating done
-- Build files have been written to: /home/hlx/libtorch_tut/build
hlx@W240F1:~/libtorch_tut/build$ make
Scanning dependencies of target autograd
[ 50%] Building CXX object CMakeFiles/autograd.dir/autograd.cpp.o
[100%] Linking CXX executable autograd
[100%] Built target autograd
hlx@W240F1:~/libtorch_tut/build$