Path: blob/master/src/utils/style_ops/bias_act.cpp
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.

#include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include "bias_act.h"
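
// Note: bias_act_kernel_params (and the kernel selector used further down)
// come from bias_act.h. Inferred from the assignments in bias_act() below,
// the struct presumably looks roughly like this (a sketch, not the
// authoritative header):
//
//     struct bias_act_kernel_params
//     {
//         const void* x;      // input tensor, sizeX elements
//         const void* b;      // bias vector (sizeB elements), or NULL
//         const void* xref;   // reference input for gradients, or NULL
//         const void* yref;   // reference output for gradients, or NULL
//         const void* dy;     // incoming gradient, or NULL
//         void*       y;      // output tensor, sizeX elements
//         int         grad;   // 0 = forward pass, >0 = gradient computation
//         int         act;    // activation function index (defined by the kernel)
//         float       alpha, gain, clamp;
//         int         sizeX, sizeB, stepB, loopX;
//     };
//
// bias_act.h presumably also declares something like
//     template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p);
// which returns NULL when no kernel matches the requested configuration.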

//------------------------------------------------------------------------

static bool has_same_layout(torch::Tensor x, torch::Tensor y)
{
    if (x.dim() != y.dim())
        return false;
    for (int64_t i = 0; i < x.dim(); i++)
    {
        if (x.size(i) != y.size(i))
            return false;
        if (x.size(i) >= 2 && x.stride(i) != y.stride(i))
            return false;
    }
    return true;
}

//------------------------------------------------------------------------

static torch::Tensor bias_act(torch::Tensor x, torch::Tensor b, torch::Tensor xref, torch::Tensor yref, torch::Tensor dy, int grad, int dim, int act, float alpha, float gain, float clamp)
{
    // Validate arguments.
    TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
    TORCH_CHECK(b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x");
    TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x");
    TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x");
    TORCH_CHECK(dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same shape, dtype, and device as x");
    TORCH_CHECK(x.numel() <= INT_MAX, "x is too large");
    TORCH_CHECK(b.dim() == 1, "b must have rank 1");
    TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds");
    TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements");
    TORCH_CHECK(grad >= 0, "grad must be non-negative");

    // Validate layout.
    TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense");
    TORCH_CHECK(b.is_contiguous(), "b must be contiguous");
    TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x");
    TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x");
    TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x");

    // Create output tensor.
    const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
    torch::Tensor y = torch::empty_like(x);
    TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x");

    // Initialize CUDA kernel parameters.
    bias_act_kernel_params p;
    p.x     = x.data_ptr();
    p.b     = (b.numel()) ? b.data_ptr() : NULL;
    p.xref  = (xref.numel()) ? xref.data_ptr() : NULL;
    p.yref  = (yref.numel()) ? yref.data_ptr() : NULL;
    p.dy    = (dy.numel()) ? dy.data_ptr() : NULL;
    p.y     = y.data_ptr();
    p.grad  = grad;
    p.act   = act;
    p.alpha = alpha;
    p.gain  = gain;
    p.clamp = clamp;
    p.sizeX = (int)x.numel();
    p.sizeB = (int)b.numel();
    p.stepB = (b.numel()) ? (int)x.stride(dim) : 1;

    // Choose CUDA kernel.
    void* kernel;
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "bias_act_cuda", [&]
    {
        kernel = choose_bias_act_kernel<scalar_t>(p);
    });
    TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func");

    // Launch CUDA kernel.
    p.loopX = 4;
    int blockSize = 4 * 32;
    int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1;
    void* args[] = {&p};
    AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream()));
    return y;
}

//------------------------------------------------------------------------

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("bias_act", &bias_act);
}

//------------------------------------------------------------------------
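
// Usage note: this file is presumably compiled together with the companion
// CUDA source (bias_act.cu) as a PyTorch C++/CUDA extension, e.g. via
// torch.utils.cpp_extension.load(), after which the bound op is called from
// Python roughly as
//
//     y = plugin.bias_act(x, b, xref, yref, dy, grad, dim, act, alpha, gain, clamp)
//
// where `plugin` stands for the loaded extension module and empty tensors are
// passed for any of b/xref/yref/dy that are unused, matching the
// numel() == 0 checks above. The integer `act` codes and the exact meaning of
// `grad` are defined on the kernel side and are not spelled out in this file.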