Path: blob/master/fastai/text/models/forget_mult_cuda.cpp
#include <torch/torch.h>

#include <vector>

// CUDA forward declarations
at::Tensor forget_mult_cuda_forward(at::Tensor x, at::Tensor f, at::Tensor output, bool batch_first);

// C++ interface

#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

at::Tensor forget_mult_forward(at::Tensor x, at::Tensor f, at::Tensor output, bool batch_first) {
  CHECK_INPUT(x); CHECK_INPUT(f); CHECK_INPUT(output);
  return forget_mult_cuda_forward(x, f, output, batch_first);
}

std::vector<at::Tensor> forget_mult_cuda_backward(at::Tensor x, at::Tensor f, at::Tensor output,
                                                  at::Tensor grad_output, bool batch_first);

std::vector<at::Tensor> forget_mult_backward(at::Tensor x, at::Tensor f, at::Tensor output,
                                             at::Tensor grad_output, bool batch_first) {
  CHECK_INPUT(x); CHECK_INPUT(f); CHECK_INPUT(output);
  return forget_mult_cuda_backward(x, f, output, grad_output, batch_first);
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &forget_mult_forward, "ForgetMult forward (CUDA)");
  m.def("backward", &forget_mult_backward, "ForgetMult backward (CUDA)");
}
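
For context: ForgetMult is the elementwise recurrence from the QRNN paper (Bradbury et al., "Quasi-Recurrent Neural Networks"), h_t = f_t * x_t + (1 - f_t) * h_{t-1}. The real kernel lives in the companion forget_mult_cuda_kernel.cu, which is not shown here. The following is a minimal sketch of what the forward kernel could look like. Everything in it is an assumption rather than code from this file: the names forget_mult_forward_kernel_sketch and forget_mult_forward_sketch are hypothetical, it only handles the batch_first == false case with a (seq_len, batch, hidden) layout, and it assumes output is preallocated with seq_len + 1 time steps whose first slice holds the initial hidden state.

// forget_mult_forward_sketch.cu -- illustrative only; layout and names are
// assumptions, not taken from fastai's forget_mult_cuda_kernel.cu.
#include <torch/torch.h>

// One thread per (batch, hidden) coordinate; the time loop stays sequential
// because h_t depends on h_{t-1}.
template <typename scalar_t>
__global__ void forget_mult_forward_kernel_sketch(
    const scalar_t* __restrict__ x,    // (seq_len, batch, hidden)
    const scalar_t* __restrict__ f,    // (seq_len, batch, hidden)
    scalar_t* __restrict__ output,     // (seq_len + 1, batch, hidden); output[0] = h_init (assumed)
    int seq_len, int n) {              // n = batch * hidden
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n) return;
  for (int t = 0; t < seq_len; t++) {
    scalar_t ft = f[t * n + idx];
    scalar_t h_prev = output[t * n + idx];                  // h_{t-1}
    output[(t + 1) * n + idx] =
        ft * x[t * n + idx] + (scalar_t(1) - ft) * h_prev;  // h_t
  }
}

// Hypothetical host-side launcher for the batch_first == false case.
at::Tensor forget_mult_forward_sketch(at::Tensor x, at::Tensor f, at::Tensor output) {
  const int seq_len = x.size(0);
  const int n = x.size(1) * x.size(2);
  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;
  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "forget_mult_forward_sketch", [&] {
    forget_mult_forward_kernel_sketch<scalar_t><<<blocks, threads>>>(
        x.data_ptr<scalar_t>(), f.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
        seq_len, n);
  });
  return output;
}

The sketch also shows why the CHECK_INPUT guards in the C++ interface matter: the kernel indexes raw data pointers directly, so a CPU or non-contiguous tensor would be read incorrectly, hence the asserts before dispatch. As an aside, at::Tensor::type() is deprecated in newer PyTorch releases, where these guards are usually written as TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor").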
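The backward pass runs the same recurrence in reverse. Differentiating h_t = f_t * x_t + (1 - f_t) * h_{t-1} gives dL/dx_t = f_t * dL/dh_t, dL/df_t = (x_t - h_{t-1}) * dL/dh_t, and dL/dh_{t-1} = (1 - f_t) * dL/dh_t, accumulated from the last time step back to the first. Below is a sketch under the same assumed layout; the kernel name and the exact set of gradient tensors the real forget_mult_cuda_backward returns are assumptions, though the std::vector<at::Tensor> return type above suggests several gradients (plausibly for x, f, and the initial hidden state).

// Backward of the ForgetMult recurrence, sketched under the same assumed
// layout as the forward sketch; a launcher would mirror the one above.
template <typename scalar_t>
__global__ void forget_mult_backward_kernel_sketch(
    const scalar_t* __restrict__ x,
    const scalar_t* __restrict__ f,
    const scalar_t* __restrict__ output,       // (seq_len + 1, batch, hidden); output[t] = h_{t-1}
    const scalar_t* __restrict__ grad_output,  // (seq_len, batch, hidden) = dL/dh_t
    scalar_t* __restrict__ grad_x,
    scalar_t* __restrict__ grad_f,
    scalar_t* __restrict__ grad_h_init,        // dL/d(initial hidden state)
    int seq_len, int n) {                      // n = batch * hidden
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n) return;
  scalar_t grad_h = 0;  // gradient flowing back from time steps > t
  for (int t = seq_len - 1; t >= 0; t--) {
    scalar_t ft = f[t * n + idx];
    scalar_t gh = grad_output[t * n + idx] + grad_h;  // total dL/dh_t
    grad_x[t * n + idx] = gh * ft;                                      // dh_t/dx_t = f_t
    grad_f[t * n + idx] = gh * (x[t * n + idx] - output[t * n + idx]);  // x_t - h_{t-1}
    grad_h = gh * (scalar_t(1) - ft);                 // dL/dh_{t-1}
  }
  grad_h_init[idx] = grad_h;
}

Note the design constraint both sketches share: the recurrence is sequential in time, so the CUDA parallelism comes entirely from treating each of the batch * hidden positions independently, which is exactly what makes ForgetMult cheaper than an LSTM on long sequences.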