Real-time collaboration for Jupyter Notebooks, Linux Terminals, LaTeX, VS Code, R IDE, and more,
all in one place. Commercial Alternative to JupyterHub.
Path: blob/master/DOTA_devkit/poly_nms_gpu/setup.py
Views: 475
"""setup.py for the DOTA_devkit GPU polygon-NMS extensions.

Builds two Cython/CUDA extensions (``poly_nms`` and ``poly_overlaps``) by
injecting a custom ``_compile`` hook into distutils so that ``.cu`` sources
are dispatched to nvcc while ordinary sources still go through gcc.
"""
import os
from os.path import join as pjoin
from setuptools import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import subprocess
import numpy as np


def find_in_path(name, path):
    """Return the absolute path of *name* found in *path*, or None.

    *path* is an ``os.pathsep``-separated list of directories (PATH-style).
    """
    # Adapted from
    # http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    # NOTE: loop variable renamed from "dir" to avoid shadowing the builtin.
    for directory in path.split(os.pathsep):
        binpath = pjoin(directory, name)
        if os.path.exists(binpath):
            return os.path.abspath(binpath)
    return None


def locate_cuda():
    """Locate the CUDA environment on the system.

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    Starts by looking for the CUDAHOME env variable. If not found, everything
    is based on finding 'nvcc' in the PATH.

    Raises:
        EnvironmentError: if nvcc cannot be found, or if any of the expected
            CUDA directories does not exist.
    """
    # first check if the CUDAHOME env variable is in use
    if 'CUDAHOME' in os.environ:
        home = os.environ['CUDAHOME']
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        # otherwise, search the PATH for nvcc (with the conventional default
        # install location appended as a fallback)
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be '
                                   'located in your $PATH. Either add it to your path, or set $CUDAHOME')
        home = os.path.dirname(os.path.dirname(nvcc))

    cudaconfig = {'home': home, 'nvcc': nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib64')}
    # BUGFIX: the original called dict.iteritems() (Python 2 only) and papered
    # over the resulting AttributeError with a bare "except:" that re-ran the
    # loop using .items(). Use .items() directly — it works on both Python 2
    # and 3 and no longer swallows unrelated exceptions.
    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
    return cudaconfig


CUDA = locate_cuda()


# Obtain the numpy include directory. This logic works across numpy versions
# (get_numpy_include was the pre-1.0 name of get_include).
try:
    numpy_include = np.get_include()
except AttributeError:
    numpy_include = np.get_numpy_include()


def customize_compiler_for_nvcc(self):
    """inject deep into distutils to customize how the dispatch
    to gcc/nvcc works.

    If you subclass UnixCCompiler, it's not trivial to get your subclass
    injected in, and still have the right customizations (i.e.
    distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kindof like a wierd functional
    subclassing going on.
    """
    # tell the compiler it can process .cu sources
    self.src_extensions.append('.cu')

    # save references to the default compiler_so and _compile methods
    # NOTE: the saved method was originally bound to a name "super", which
    # shadowed the builtin; renamed for clarity.
    default_compiler_so = self.compiler_so
    default_compile = self._compile

    # now redefine the _compile method. This gets executed for each
    # object but distutils doesn't have the ability to change compilers
    # based on source extension: we add it.
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        if os.path.splitext(src)[1] == '.cu':
            # use nvcc for .cu files
            self.set_executable('compiler_so', CUDA['nvcc'])
            # use only a subset of the extra_postargs, which are 1-1 translated
            # from the extra_compile_args dict in the Extension class
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']

        default_compile(obj, src, ext, cc_args, postargs, pp_opts)
        # reset the default compiler_so, which we might have changed for cuda
        self.compiler_so = default_compiler_so

    # inject our redefined _compile method into the class
    self._compile = _compile


class custom_build_ext(build_ext):
    """build_ext subclass that patches the compiler for nvcc dispatch."""

    def build_extensions(self):
        # run the compiler customization before the normal build
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)


ext_modules = [
    Extension('poly_nms',
              ['poly_nms_kernel.cu', 'poly_nms.pyx'],
              library_dirs=[CUDA['lib64']],
              libraries=['cudart'],
              language='c++',
              runtime_library_dirs=[CUDA['lib64']],
              # this syntax is specific to this build system
              # we're only going to use certain compiler args with nvcc and not with
              # gcc; the implementation of this trick is in customize_compiler_for_nvcc()
              extra_compile_args={'gcc': ["-Wno-unused-function"],
                                  'nvcc': ['-arch=sm_35',
                                           '--ptxas-options=-v',
                                           '-c',
                                           '--compiler-options',
                                           "'-fPIC'"]},
              include_dirs=[numpy_include, CUDA['include']]
              ),
    Extension('poly_overlaps',
              ['poly_overlaps_kernel.cu', 'poly_overlaps.pyx'],
              library_dirs=[CUDA['lib64']],
              libraries=['cudart'],
              language='c++',
              runtime_library_dirs=[CUDA['lib64']],
              # this syntax is specific to this build system
              # we're only going to use certain compiler args with nvcc and not with
              # gcc; the implementation of this trick is in customize_compiler_for_nvcc()
              extra_compile_args={'gcc': ["-Wno-unused-function"],
                                  'nvcc': ['-arch=sm_35',
                                           '--ptxas-options=-v',
                                           '-c',
                                           '--compiler-options',
                                           "'-fPIC'"]},
              include_dirs=[numpy_include, CUDA['include']]
              ),
]

setup(
    name='rotation',
    ext_modules=ext_modules,
    # inject our custom build_ext so .cu files are compiled with nvcc
    cmdclass={'build_ext': custom_build_ext},
)