Path: blob/master/src/utils/style_ops/bias_act.cu
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.

#include <c10/util/Half.h>
#include "bias_act.h"

//------------------------------------------------------------------------
// Helpers.

template <class T> struct InternalType;
template <> struct InternalType<double>    { typedef double scalar_t; };
template <> struct InternalType<float>     { typedef float  scalar_t; };
template <> struct InternalType<c10::Half> { typedef float  scalar_t; };

//------------------------------------------------------------------------
// CUDA kernel.

template <class T, int A>
__global__ void bias_act_kernel(bias_act_kernel_params p)
{
    typedef typename InternalType<T>::scalar_t scalar_t;
    int G                 = p.grad;
    scalar_t alpha        = (scalar_t)p.alpha;
    scalar_t gain         = (scalar_t)p.gain;
    scalar_t clamp        = (scalar_t)p.clamp;
    scalar_t one          = (scalar_t)1;
    scalar_t two          = (scalar_t)2;
    scalar_t expRange     = (scalar_t)80;
    scalar_t halfExpRange = (scalar_t)40;
    scalar_t seluScale    = (scalar_t)1.0507009873554804934193349852946;
    scalar_t seluAlpha    = (scalar_t)1.6732632423543772848170429916717;

    // Loop over elements.
    int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x;
    for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x)
    {
        // Load.
        scalar_t x    = (scalar_t)((const T*)p.x)[xi];
        scalar_t b    = (p.b) ? (scalar_t)((const T*)p.b)[(xi / p.stepB) % p.sizeB] : 0;
        scalar_t xref = (p.xref) ? (scalar_t)((const T*)p.xref)[xi] : 0;
        scalar_t yref = (p.yref) ? (scalar_t)((const T*)p.yref)[xi] : 0;
        scalar_t dy   = (p.dy) ? (scalar_t)((const T*)p.dy)[xi] : one;
        scalar_t yy   = (gain != 0) ? yref / gain : 0;
        scalar_t y    = 0;

        // Apply bias.
        ((G == 0) ? x : xref) += b;

        // linear
        if (A == 1)
        {
            if (G == 0) y = x;
            if (G == 1) y = x;
        }

        // relu
        if (A == 2)
        {
            if (G == 0) y = (x > 0) ? x : 0;
            if (G == 1) y = (yy > 0) ? x : 0;
        }

        // lrelu
        if (A == 3)
        {
            if (G == 0) y = (x > 0) ? x : x * alpha;
            if (G == 1) y = (yy > 0) ? x : x * alpha;
        }

        // tanh
        if (A == 4)
        {
            if (G == 0) { scalar_t c = exp(x); scalar_t d = one / c; y = (x < -expRange) ? -one : (x > expRange) ? one : (c - d) / (c + d); }
            if (G == 1) y = x * (one - yy * yy);
            if (G == 2) y = x * (one - yy * yy) * (-two * yy);
        }

        // sigmoid
        if (A == 5)
        {
            if (G == 0) y = (x < -expRange) ? 0 : one / (exp(-x) + one);
            if (G == 1) y = x * yy * (one - yy);
            if (G == 2) y = x * yy * (one - yy) * (one - two * yy);
        }

        // elu
        if (A == 6)
        {
            if (G == 0) y = (x >= 0) ? x : exp(x) - one;
            if (G == 1) y = (yy >= 0) ? x : x * (yy + one);
            if (G == 2) y = (yy >= 0) ? 0 : x * (yy + one);
        }

        // selu
        if (A == 7)
        {
            if (G == 0) y = (x >= 0) ? seluScale * x : (seluScale * seluAlpha) * (exp(x) - one);
            if (G == 1) y = (yy >= 0) ? x * seluScale : x * (yy + seluScale * seluAlpha);
            if (G == 2) y = (yy >= 0) ? 0 : x * (yy + seluScale * seluAlpha);
        }

        // softplus
        if (A == 8)
        {
            if (G == 0) y = (x > expRange) ? x : log(exp(x) + one);
            if (G == 1) y = x * (one - exp(-yy));
            if (G == 2) { scalar_t c = exp(-yy); y = x * c * (one - c); }
        }

        // swish
        if (A == 9)
        {
            if (G == 0)
                y = (x < -expRange) ? 0 : x / (exp(-x) + one);
            else
            {
                scalar_t c = exp(xref);
                scalar_t d = c + one;
                if (G == 1)
                    y = (xref > halfExpRange) ? x : x * c * (xref + d) / (d * d);
                else
                    y = (xref > halfExpRange) ? 0 : x * c * (xref * (two - d) + two * d) / (d * d * d);
                yref = (xref < -expRange) ? 0 : xref / (exp(-xref) + one) * gain;
            }
        }

        // Apply gain.
        y *= gain * dy;

        // Clamp.
        if (clamp >= 0)
        {
            if (G == 0)
                y = (y > -clamp & y < clamp) ? y : (y >= 0) ? clamp : -clamp;
            else
                y = (yref > -clamp & yref < clamp) ? y : 0;
        }

        // Store.
        ((T*)p.y)[xi] = (T)y;
    }
}

//------------------------------------------------------------------------
// CUDA kernel selection.

template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p)
{
    if (p.act == 1) return (void*)bias_act_kernel<T, 1>;
    if (p.act == 2) return (void*)bias_act_kernel<T, 2>;
    if (p.act == 3) return (void*)bias_act_kernel<T, 3>;
    if (p.act == 4) return (void*)bias_act_kernel<T, 4>;
    if (p.act == 5) return (void*)bias_act_kernel<T, 5>;
    if (p.act == 6) return (void*)bias_act_kernel<T, 6>;
    if (p.act == 7) return (void*)bias_act_kernel<T, 7>;
    if (p.act == 8) return (void*)bias_act_kernel<T, 8>;
    if (p.act == 9) return (void*)bias_act_kernel<T, 9>;
    return NULL;
}

//------------------------------------------------------------------------
// Template specializations.

template void* choose_bias_act_kernel<double>    (const bias_act_kernel_params& p);
template void* choose_bias_act_kernel<float>     (const bias_act_kernel_params& p);
template void* choose_bias_act_kernel<c10::Half> (const bias_act_kernel_params& p);

//------------------------------------------------------------------------
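//------------------------------------------------------------------------
// Host-side usage sketch (an illustrative addition, not part of the
// original NVIDIA source). It shows how the type-erased kernel pointer
// returned by choose_bias_act_kernel() could be launched with
// cudaLaunchKernel(). The block size, the loopX-based grid size, and the
// helper name launch_bias_act_sketch are assumptions; the fields of
// bias_act_kernel_params (declared in "bias_act.h") are taken from how the
// kernel above reads them. Guarded by a macro so it does not affect normal
// builds of this file.

#ifdef BIAS_ACT_USAGE_SKETCH
#include <cuda_runtime.h>

static cudaError_t launch_bias_act_sketch(bias_act_kernel_params p, cudaStream_t stream)
{
    // Pick the <float, A> instantiation matching p.act; NULL means the
    // requested activation is unsupported.
    void* kernel = choose_bias_act_kernel<float>(p);
    if (!kernel)
        return cudaErrorInvalidValue;

    // Each thread strides through up to p.loopX elements (see the loop in
    // bias_act_kernel), so the grid only needs to cover
    // sizeX / (loopX * blockDim) blocks, rounded up.
    int blockSize = 128; // assumed; any reasonable multiple of the warp size
    int gridSize  = (p.sizeX - 1) / (p.loopX * blockSize) + 1;

    // The kernel takes its params struct by value as a single argument.
    void* args[] = { &p };
    return cudaLaunchKernel(kernel, dim3(gridSize), dim3(blockSize), args, 0, stream);
}
#endif // BIAS_ACT_USAGE_SKETCH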