From 32bcc6317da27c6551c0d5017f92b6a2c068ba91 Mon Sep 17 00:00:00 2001 From: emortensen Date: Wed, 31 Jul 2024 23:22:06 +0000 Subject: [PATCH 1/2] feat: adding code for release v0.10.0 --- .gitattributes | 2 +- .github/workflows/codeql.yml | 10 + .gitignore | 2 +- CMakeLists.txt | 2 +- LICENSE.md | 22 +- README.md | 6 +- bench/BenchAdaptiveThreshold.cpp | 2 +- bench/BenchAverageBlur.cpp | 2 +- bench/BenchBilateralFilter.cpp | 2 +- bench/BenchBrightnessContrast.cpp | 2 +- bench/BenchColorTwist.cpp | 2 +- bench/BenchComposite.cpp | 2 +- bench/BenchCopyMakeBorder.cpp | 2 +- bench/BenchCvtColor.cpp | 159 +- bench/BenchErase.cpp | 2 +- bench/BenchFlip.cpp | 2 +- bench/BenchGaussian.cpp | 2 +- bench/BenchGaussianNoise.cpp | 2 +- bench/BenchHistogramEq.cpp | 2 +- bench/BenchInpaint.cpp | 2 +- bench/BenchJointBilateralFilter.cpp | 2 +- bench/BenchLabel.cpp | 2 +- bench/BenchLaplacian.cpp | 2 +- bench/BenchMedianBlur.cpp | 2 +- bench/BenchMinMaxLoc.cpp | 2 +- bench/BenchMorphology.cpp | 2 +- bench/BenchNormalize.cpp | 2 +- bench/BenchPillowResize.cpp | 2 +- bench/BenchRandomResizedCrop.cpp | 2 +- bench/BenchRemap.cpp | 2 +- bench/BenchResize.cpp | 2 +- bench/BenchResizeCropConvertReformat.cpp | 2 +- bench/BenchRotate.cpp | 2 +- bench/BenchThreshold.cpp | 2 +- bench/BenchUtils.hpp | 8 +- bench/BenchWarpAffine.cpp | 2 +- bench/BenchWarpPerspective.cpp | 2 +- bench/CMakeLists.txt | 2 +- bench/python/all_ops/op_adaptivethreshold.py | 2 +- bench/python/all_ops/op_averageblur.py | 2 +- bench/python/all_ops/op_blurbox.py | 2 +- bench/python/all_ops/op_boundingbox.py | 2 +- bench/python/all_ops/op_brightnesscontrast.py | 2 +- bench/python/all_ops/op_centercrop.py | 2 +- bench/python/all_ops/op_composite.py | 2 +- bench/python/all_ops/op_convertto.py | 2 +- bench/python/all_ops/op_copymakeborder.py | 2 +- bench/python/all_ops/op_customcrop.py | 2 +- bench/python/all_ops/op_cvtcolor.py | 2 +- bench/python/all_ops/op_flip.py | 2 +- bench/python/all_ops/op_gaussianblur.py | 2 +- bench/python/all_ops/op_hqresize.py | 2 +- bench/python/all_ops/op_inpaint.py | 2 +- bench/python/all_ops/op_jointbilateral.py | 2 +- bench/python/all_ops/op_laplacian.py | 2 +- bench/python/all_ops/op_morphology.py | 2 +- bench/python/all_ops/op_nms.py | 2 +- bench/python/all_ops/op_normalize.py | 2 +- bench/python/all_ops/op_randomresizedcrop.py | 2 +- bench/python/all_ops/op_reformat.py | 2 +- bench/python/all_ops/op_remap.py | 2 +- bench/python/all_ops/op_reshape.py | 2 +- bench/python/all_ops/op_resize.py | 2 +- .../op_resize_crop_convert_reformat.py | 2 +- bench/python/all_ops/op_rotate.py | 2 +- bench/python/all_ops/op_sift.py | 2 +- bench/python/all_ops/op_threshold.py | 2 +- bench/python/all_ops/op_warpaffine.py | 2 +- bench/python/all_ops/op_warpperspective.py | 2 +- bench/python/assets/NOTICE.md | 2 +- bench/python/bench_utils.py | 2 +- bench/python/run_bench.py | 2 +- ci/build.sh | 8 +- ci/build_docs.sh | 2 +- ci/build_samples.sh | 2 +- cmake/ConfigCPack.cmake | 2 +- cmake/ConfigCUDA.cmake | 5 +- cmake/ConfigCompiler.cmake | 2 +- cmake/ConfigVersion.cmake | 2 +- cmake/GetGitRevisionDescription.cmake | 2 +- cmake/InstallTests.cmake | 2 +- docker/build20.04/Dockerfile | 2 +- docker/build20.04/ccache.conf | 2 +- .../deadsnakes-ubuntu-ppa-focal.list | 2 +- docker/build22.04/Dockerfile | 2 +- docker/build22.04/ccache.conf | 2 +- .../deadsnakes-ubuntu-ppa-jammy.list | 2 +- docker/config | 2 +- docker/devel20.04/Dockerfile | 2 +- docker/devel20.04/gdbinit | 2 +- docker/devel20.04/vimrc | 2 +- docker/devel22.04/Dockerfile | 2 +- 
docker/devel22.04/gdbinit | 2 +- docker/devel22.04/vimrc | 2 +- docker/env_devel_linux.sh | 2 +- docker/samples/Dockerfile | 2 +- docker/test20.04/Dockerfile | 2 +- .../deadsnakes-ubuntu-ppa-focal.list | 2 +- docker/test22.04/Dockerfile | 2 +- .../deadsnakes-ubuntu-ppa-jammy.list | 2 +- docker/update_build_image.sh | 2 +- docker/update_devel_image.sh | 2 +- docker/update_samples_image.sh | 2 +- docker/update_test_image.sh | 2 +- docs/Doxyfile.in | 2 +- docs/sphinx/_python_api/nvcv/cache.rst | 4 +- docs/sphinx/_python_api/nvcv/colorspec.rst | 2 +- docs/sphinx/_python_api/nvcv/format.rst | 2 +- docs/sphinx/_python_api/nvcv/image.rst | 2 +- docs/sphinx/_python_api/nvcv/imagebatch.rst | 2 +- docs/sphinx/_python_api/nvcv/recti.rst | 2 +- docs/sphinx/_python_api/nvcv/resource.rst | 22 + docs/sphinx/_python_api/nvcv/tensor.rst | 2 +- docs/sphinx/_python_api/nvcv/tensorbatch.rst | 2 +- docs/sphinx/_python_api/template.rst | 2 +- docs/sphinx/bestpractices.rst | 28 + docs/sphinx/gen_py_doc_rsts.py | 14 +- docs/sphinx/generate_groups.py | 2 +- docs/sphinx/getting_started.rst | 5 +- docs/sphinx/index.rst | 1 + docs/sphinx/modules/c_algos.rst | 2 +- docs/sphinx/modules/c_core.rst | 2 +- docs/sphinx/modules/c_status.rst | 2 +- docs/sphinx/modules/c_utils.rst | 2 +- docs/sphinx/modules/cpp_algos.rst | 2 +- docs/sphinx/modules/cpp_core.rst | 2 +- docs/sphinx/modules/cpp_cudatools.rst | 2 +- docs/sphinx/modules/cpp_modules.rst | 2 +- docs/sphinx/modules/cpp_utils.rst | 2 +- docs/sphinx/modules/python_algos.rst | 2 +- docs/sphinx/modules/python_core.rst | 3 +- docs/sphinx/modules/python_modules.rst | 2 +- docs/sphinx/nvcvobjectcache.rst | 136 ++ docs/sphinx/prerequisites.rst | 2 +- docs/sphinx/python.rst | 28 + docs/sphinx/relnotes/v0.1.0-prealpha.rst | 2 +- docs/sphinx/relnotes/v0.10.0-beta.rst | 70 + docs/sphinx/relnotes/v0.2.0-alpha.rst | 2 +- docs/sphinx/relnotes/v0.2.1-alpha.rst | 2 +- docs/sphinx/relnotes/v0.3.0-beta.rst | 2 +- docs/sphinx/relnotes/v0.3.1-beta.rst | 2 +- docs/sphinx/relnotes/v0.4.0-beta.rst | 2 +- docs/sphinx/relnotes/v0.5.0-beta.rst | 2 +- .../sphinx/samples/cpp_samples/cropresize.rst | 2 +- .../classification/classification_pytorch.rst | 2 +- .../classification_tensorrt.rst | 2 +- .../classification/postprocessor_cvcuda.rst | 2 +- .../classification/preprocessor_cvcuda.rst | 2 +- .../objectdetection_tensorflow.rst | 2 +- .../object_detection/postprocessor_cvcuda.rst | 2 +- .../object_detection/preprocessor_cvcuda.rst | 2 +- .../segmentation/postprocessor_cvcuda.rst | 2 +- .../segmentation/preprocessor_cvcuda.rst | 2 +- .../segmentation/segmentation_pytorch.rst | 2 +- .../segmentation/segmentation_tensorrt.rst | 2 +- .../python_samples/segmentation_triton.rst | 2 +- lint/commitlint.config.js | 23 - lint/copyright_check.sh | 168 -- lint/lfs_check.sh | 40 - python/CMakeLists.txt | 3 +- python/common/CMakeLists.txt | 17 +- python/common/Hash.hpp | 6 +- python/common/PyUtil.hpp | 4 +- python/mod_cvcuda/CMakeLists.txt | 3 +- python/mod_cvcuda/ChannelManipType.cpp | 2 +- python/mod_cvcuda/ChannelManipType.hpp | 2 +- python/mod_cvcuda/CvtColorUtil.cpp | 63 +- python/mod_cvcuda/CvtColorUtil.hpp | 8 +- python/mod_cvcuda/InterpolationType.cpp | 2 +- python/mod_cvcuda/Main.cpp | 2 +- python/mod_cvcuda/OpAdaptiveThreshold.cpp | 78 +- python/mod_cvcuda/OpAdvCvtColor.cpp | 74 +- python/mod_cvcuda/OpAverageBlur.cpp | 64 +- python/mod_cvcuda/OpBilateralFilter.cpp | 56 +- python/mod_cvcuda/OpBndBox.cpp | 18 +- python/mod_cvcuda/OpBoxBlur.cpp | 18 +- python/mod_cvcuda/OpBrightnessContrast.cpp | 127 +- 
python/mod_cvcuda/OpCenterCrop.cpp | 20 +- python/mod_cvcuda/OpChannelReorder.cpp | 24 +- python/mod_cvcuda/OpColorTwist.cpp | 98 +- python/mod_cvcuda/OpComposite.cpp | 54 +- python/mod_cvcuda/OpConv2D.cpp | 24 +- python/mod_cvcuda/OpConvertTo.cpp | 22 +- python/mod_cvcuda/OpCopyMakeBorder.cpp | 100 +- .../OpCropFlipNormalizeReformat.cpp | 58 +- python/mod_cvcuda/OpCustomCrop.cpp | 18 +- python/mod_cvcuda/OpCvtColor.cpp | 72 +- python/mod_cvcuda/OpErase.cpp | 80 +- python/mod_cvcuda/OpFindHomography.cpp | 38 +- python/mod_cvcuda/OpFlip.cpp | 40 +- python/mod_cvcuda/OpGammaContrast.cpp | 20 +- python/mod_cvcuda/OpGaussian.cpp | 60 +- python/mod_cvcuda/OpGaussianNoise.cpp | 42 +- python/mod_cvcuda/OpHQResize.cpp | 96 +- python/mod_cvcuda/OpHistogram.cpp | 18 +- python/mod_cvcuda/OpHistogramEq.cpp | 29 +- python/mod_cvcuda/OpInpaint.cpp | 50 +- python/mod_cvcuda/OpJointBilateralFilter.cpp | 60 +- python/mod_cvcuda/OpLabel.cpp | 40 +- python/mod_cvcuda/OpLaplacian.cpp | 36 +- python/mod_cvcuda/OpMedianBlur.cpp | 44 +- python/mod_cvcuda/OpMinAreaRect.cpp | 24 +- python/mod_cvcuda/OpMinMaxLoc.cpp | 26 +- python/mod_cvcuda/OpMorphology.cpp | 80 +- python/mod_cvcuda/OpNonMaximumSuppression.cpp | 18 +- python/mod_cvcuda/OpNormalize.cpp | 98 +- python/mod_cvcuda/OpOSD.cpp | 18 +- python/mod_cvcuda/OpPadAndStack.cpp | 26 +- python/mod_cvcuda/OpPairwiseMatcher.cpp | 32 +- python/mod_cvcuda/OpPillowResize.cpp | 55 +- python/mod_cvcuda/OpRandomResizedCrop.cpp | 42 +- python/mod_cvcuda/OpReformat.cpp | 16 +- python/mod_cvcuda/OpRemap.cpp | 181 ++- python/mod_cvcuda/OpResize.cpp | 42 +- .../OpResizeCropConvertReformat.cpp | 20 +- python/mod_cvcuda/OpRotate.cpp | 62 +- python/mod_cvcuda/OpSIFT.cpp | 22 +- python/mod_cvcuda/OpStack.cpp | 14 +- python/mod_cvcuda/OpThreshold.cpp | 64 +- python/mod_cvcuda/OpWarpAffine.cpp | 86 +- python/mod_cvcuda/OpWarpPerspective.cpp | 88 +- python/mod_cvcuda/Operators.hpp | 4 +- python/mod_cvcuda/OsdElement.cpp | 2 +- python/mod_cvcuda/WorkspaceCache.hpp | 10 +- python/mod_nvcv/Array.cpp | 19 +- python/mod_nvcv/Array.hpp | 10 +- python/mod_nvcv/CAPI.cpp | 2 +- python/mod_nvcv/Cache.cpp | 113 +- python/mod_nvcv/Cache.hpp | 34 +- python/mod_nvcv/Container.cpp | 4 +- python/mod_nvcv/Container.hpp | 21 +- python/mod_nvcv/DataType.cpp | 4 +- python/mod_nvcv/Definitions.hpp | 27 + python/mod_nvcv/Image.cpp | 39 +- python/mod_nvcv/Image.hpp | 7 +- python/mod_nvcv/ImageBatch.cpp | 19 +- python/mod_nvcv/ImageBatch.hpp | 8 +- python/mod_nvcv/Main.cpp | 28 +- python/mod_nvcv/Rect.cpp | 2 +- python/mod_nvcv/Resource.cpp | 4 +- python/mod_nvcv/Resource.hpp | 2 +- python/mod_nvcv/Stream.cpp | 128 +- python/mod_nvcv/Stream.hpp | 29 +- python/mod_nvcv/Tensor.cpp | 21 +- python/mod_nvcv/Tensor.hpp | 10 +- python/mod_nvcv/TensorBatch.cpp | 19 +- python/mod_nvcv/TensorBatch.hpp | 8 +- python/mod_nvcv/include/nvcv/python/Array.hpp | 2 +- python/mod_nvcv/include/nvcv/python/CAPI.hpp | 2 +- python/mod_nvcv/include/nvcv/python/Cache.hpp | 2 +- .../include/nvcv/python/Container.hpp | 2 +- .../mod_nvcv/include/nvcv/python/DataType.hpp | 2 +- python/mod_nvcv/include/nvcv/python/Image.hpp | 2 +- .../nvcv/python/ImageBatchVarShape.hpp | 2 +- .../include/nvcv/python/ImageFormat.hpp | 2 +- .../mod_nvcv/include/nvcv/python/LockMode.hpp | 2 +- .../include/nvcv/python/ResourceGuard.hpp | 2 +- .../mod_nvcv/include/nvcv/python/Stream.hpp | 2 +- .../mod_nvcv/include/nvcv/python/Tensor.hpp | 2 +- .../include/nvcv/python/TensorBatch.hpp | 2 +- samples/classification/CMakeLists.txt | 2 +- 
.../classification/ClassificationUtils.hpp | 8 +- samples/classification/Main.cpp | 4 +- samples/classification/python/main.py | 2 +- samples/common/CMakeLists.txt | 2 +- samples/common/python/perf_utils.py | 2 +- samples/common/python/vpf_utils.py | 2 +- samples/cropandresize/CMakeLists.txt | 2 +- samples/cropandresize/Main.cpp | 4 +- samples/label/python/main.py | 2 +- samples/object_detection/python/main.py | 2 +- samples/scripts/benchmark.py | 2 +- samples/scripts/benchmark_samples.sh | 2 +- samples/scripts/build_samples.sh | 2 +- samples/scripts/requirements.txt | 10 + samples/segmentation/python/main.py | 2 +- .../segmentation/python/model_inference.py | 2 +- samples/segmentation/python/triton_client.py | 2 +- src/CMakeLists.txt | 5 +- src/cvcuda/CMakeLists.txt | 4 +- src/cvcuda/OpAdaptiveThreshold.cpp | 4 +- src/cvcuda/OpAdvCvtColor.cpp | 4 +- src/cvcuda/OpAverageBlur.cpp | 4 +- src/cvcuda/OpBilateralFilter.cpp | 4 +- src/cvcuda/OpBndBox.cpp | 4 +- src/cvcuda/OpBoxBlur.cpp | 4 +- src/cvcuda/OpBrightnessContrast.cpp | 4 +- src/cvcuda/OpCenterCrop.cpp | 4 +- src/cvcuda/OpChannelReorder.cpp | 4 +- src/cvcuda/OpColorTwist.cpp | 4 +- src/cvcuda/OpComposite.cpp | 4 +- src/cvcuda/OpConv2D.cpp | 4 +- src/cvcuda/OpConvertTo.cpp | 4 +- src/cvcuda/OpCopyMakeBorder.cpp | 4 +- src/cvcuda/OpCropFlipNormalizeReformat.cpp | 4 +- src/cvcuda/OpCustomCrop.cpp | 4 +- src/cvcuda/OpCvtColor.cpp | 4 +- src/cvcuda/OpErase.cpp | 4 +- src/cvcuda/OpFindHomography.cpp | 4 +- src/cvcuda/OpFlip.cpp | 4 +- src/cvcuda/OpGammaContrast.cpp | 4 +- src/cvcuda/OpGaussian.cpp | 4 +- src/cvcuda/OpGaussianNoise.cpp | 4 +- src/cvcuda/OpHQResize.cpp | 2 +- src/cvcuda/OpHistogram.cpp | 4 +- src/cvcuda/OpHistogramEq.cpp | 4 +- src/cvcuda/OpInpaint.cpp | 4 +- src/cvcuda/OpJointBilateralFilter.cpp | 4 +- src/cvcuda/OpLabel.cpp | 4 +- src/cvcuda/OpLaplacian.cpp | 4 +- src/cvcuda/OpMedianBlur.cpp | 4 +- src/cvcuda/OpMinAreaRect.cpp | 4 +- src/cvcuda/OpMinMaxLoc.cpp | 4 +- src/cvcuda/OpMorphology.cpp | 4 +- src/cvcuda/OpNonMaximumSuppression.cpp | 4 +- src/cvcuda/OpNormalize.cpp | 4 +- src/cvcuda/OpOSD.cpp | 4 +- src/cvcuda/OpPadAndStack.cpp | 4 +- src/cvcuda/OpPairwiseMatcher.cpp | 4 +- src/cvcuda/OpPillowResize.cpp | 4 +- src/cvcuda/OpRandomResizedCrop.cpp | 4 +- src/cvcuda/OpReformat.cpp | 4 +- src/cvcuda/OpRemap.cpp | 4 +- src/cvcuda/OpResize.cpp | 4 +- src/cvcuda/OpResizeCropConvertReformat.cpp | 14 +- src/cvcuda/OpRotate.cpp | 4 +- src/cvcuda/OpSIFT.cpp | 4 +- src/cvcuda/OpStack.cpp | 4 +- src/cvcuda/OpThreshold.cpp | 4 +- src/cvcuda/OpWarpAffine.cpp | 4 +- src/cvcuda/OpWarpPerspective.cpp | 4 +- .../cvcuda/OpCropFlipNormalizeReformat.h | 2 +- src/cvcuda/include/cvcuda/OpErase.h | 2 +- src/cvcuda/include/cvcuda/OpFindHomography.h | 2 +- src/cvcuda/include/cvcuda/OpLabel.h | 2 +- src/cvcuda/include/cvcuda/OpLabel.hpp | 2 +- src/cvcuda/include/cvcuda/OpRemap.h | 6 +- .../cvcuda/OpResizeCropConvertReformat.h | 20 +- src/cvcuda/include/cvcuda/OpSIFT.h | 2 +- src/cvcuda/include/cvcuda/Types.h | 2 +- src/cvcuda/include/cvcuda/Workspace.hpp | 2 +- .../include/cvcuda/cuda_tools}/ArrayWrap.hpp | 0 .../include/cvcuda/cuda_tools}/Atomics.hpp | 2 +- .../cvcuda/cuda_tools}/BorderVarShapeWrap.hpp | 2 +- .../include/cvcuda/cuda_tools}/BorderWrap.hpp | 0 .../include/cvcuda/cuda_tools}/DropCast.hpp | 2 +- .../cvcuda/cuda_tools}/FullTensorWrap.hpp | 2 +- .../cuda_tools}/ImageBatchVarShapeWrap.hpp | 2 +- .../cuda_tools}/InterpolationVarShapeWrap.hpp | 2 +- .../cvcuda/cuda_tools}/InterpolationWrap.hpp | 2 +- 
.../include/cvcuda/cuda_tools}/MathOps.hpp | 2 +- .../cvcuda/cuda_tools}/MathWrappers.hpp | 2 +- .../include/cvcuda/cuda_tools}/Printer.hpp | 0 .../include/cvcuda/cuda_tools}/RangeCast.hpp | 2 +- .../cvcuda/cuda_tools}/SaturateCast.hpp | 2 +- .../include/cvcuda/cuda_tools}/StaticCast.hpp | 2 +- .../cvcuda/cuda_tools}/TensorBatchWrap.hpp | 7 +- .../include/cvcuda/cuda_tools}/TensorWrap.hpp | 0 .../include/cvcuda/cuda_tools}/TypeTraits.hpp | 0 .../cuda_tools}/detail/MathWrappersImpl.hpp | 2 +- .../cuda_tools}/detail/Metaprogramming.hpp | 2 +- .../cuda_tools}/detail/RangeCastImpl.hpp | 2 +- .../cuda_tools}/detail/SaturateCastImpl.hpp | 2 +- .../cvcuda/cuda_tools}/math/LinAlg.hpp | 4 +- src/cvcuda/priv/CMakeLists.txt | 2 +- src/cvcuda/priv/OpAdaptiveThreshold.cpp | 4 +- src/cvcuda/priv/OpAdvCvtColor.cu | 6 +- src/cvcuda/priv/OpAverageBlur.cpp | 4 +- src/cvcuda/priv/OpBilateralFilter.cpp | 4 +- src/cvcuda/priv/OpBndBox.cpp | 4 +- src/cvcuda/priv/OpBoxBlur.cpp | 4 +- src/cvcuda/priv/OpBrightnessContrast.cu | 20 +- src/cvcuda/priv/OpCenterCrop.cpp | 4 +- src/cvcuda/priv/OpChannelReorder.cpp | 4 +- src/cvcuda/priv/OpColorTwist.cu | 22 +- src/cvcuda/priv/OpComposite.cpp | 4 +- src/cvcuda/priv/OpConv2D.cpp | 4 +- src/cvcuda/priv/OpConvertTo.cpp | 4 +- src/cvcuda/priv/OpCopyMakeBorder.cpp | 4 +- .../priv/OpCropFlipNormalizeReformat.cu | 18 +- src/cvcuda/priv/OpCustomCrop.cpp | 4 +- src/cvcuda/priv/OpCvtColor.cpp | 4 +- src/cvcuda/priv/OpErase.cpp | 4 +- src/cvcuda/priv/OpFindHomography.cu | 22 +- src/cvcuda/priv/OpFlip.cpp | 4 +- src/cvcuda/priv/OpGammaContrast.cpp | 4 +- src/cvcuda/priv/OpGaussian.cpp | 4 +- src/cvcuda/priv/OpGaussianNoise.cpp | 4 +- src/cvcuda/priv/OpHQResize.cu | 22 +- src/cvcuda/priv/OpHQResizeBatchWrap.cuh | 12 +- src/cvcuda/priv/OpHQResizeFilter.cuh | 6 +- src/cvcuda/priv/OpHistogram.cpp | 4 +- src/cvcuda/priv/OpHistogramEq.cpp | 4 +- src/cvcuda/priv/OpInpaint.cpp | 4 +- src/cvcuda/priv/OpJointBilateralFilter.cpp | 4 +- src/cvcuda/priv/OpLabel.cu | 18 +- src/cvcuda/priv/OpLabel.hpp | 2 +- src/cvcuda/priv/OpLaplacian.cpp | 4 +- src/cvcuda/priv/OpMedianBlur.cpp | 4 +- src/cvcuda/priv/OpMinAreaRect.cpp | 4 +- src/cvcuda/priv/OpMinMaxLoc.cu | 14 +- src/cvcuda/priv/OpMorphology.cpp | 4 +- src/cvcuda/priv/OpNonMaximumSuppression.cu | 16 +- src/cvcuda/priv/OpNormalize.cpp | 4 +- src/cvcuda/priv/OpOSD.cpp | 4 +- src/cvcuda/priv/OpPadAndStack.cpp | 4 +- src/cvcuda/priv/OpPairwiseMatcher.cu | 12 +- src/cvcuda/priv/OpPillowResize.cpp | 4 +- src/cvcuda/priv/OpRandomResizedCrop.cpp | 4 +- src/cvcuda/priv/OpReformat.cpp | 4 +- src/cvcuda/priv/OpRemap.cu | 16 +- src/cvcuda/priv/OpResize.cpp | 4 +- src/cvcuda/priv/OpResize.cu | 66 +- src/cvcuda/priv/OpResize.hpp | 2 +- .../priv/OpResizeCropConvertReformat.cu | 22 +- src/cvcuda/priv/OpRotate.cpp | 4 +- src/cvcuda/priv/OpSIFT.cu | 18 +- src/cvcuda/priv/OpStack.cpp | 4 +- src/cvcuda/priv/OpThreshold.cpp | 4 +- src/cvcuda/priv/OpWarpAffine.cpp | 4 +- src/cvcuda/priv/OpWarpPerspective.cpp | 4 +- src/cvcuda/priv/SymbolVersioning.hpp | 4 +- src/cvcuda/priv/Version.hpp | 4 +- src/cvcuda/priv/legacy/CMakeLists.txt | 2 +- src/cvcuda/priv/legacy/CvCudaLegacy.h | 2 +- src/cvcuda/priv/legacy/CvCudaUtils.cuh | 32 +- src/cvcuda/priv/legacy/adaptive_threshold.cu | 2 +- .../legacy/adaptive_threshold_var_shape.cu | 2 +- src/cvcuda/priv/legacy/bilateral_filter.cu | 4 +- src/cvcuda/priv/legacy/box_blur.cu | 2 +- src/cvcuda/priv/legacy/calc_hist.cu | 6 +- src/cvcuda/priv/legacy/center_crop.cu | 2 +- .../priv/legacy/channel_reorder_var_shape.cu | 4 +- 
src/cvcuda/priv/legacy/composite.cu | 2 +- src/cvcuda/priv/legacy/composite_var_shape.cu | 2 +- src/cvcuda/priv/legacy/convert_to.cu | 4 +- src/cvcuda/priv/legacy/copy_make_border.cu | 2 +- .../priv/legacy/copy_make_border_var_shape.cu | 2 +- src/cvcuda/priv/legacy/custom_crop.cu | 2 +- src/cvcuda/priv/legacy/cvt_color.cu | 1439 +++++++++-------- src/cvcuda/priv/legacy/cvt_color_var_shape.cu | 2 +- src/cvcuda/priv/legacy/erase.cu | 2 +- src/cvcuda/priv/legacy/erase_var_shape.cu | 2 +- src/cvcuda/priv/legacy/filter.cu | 4 +- src/cvcuda/priv/legacy/filter_utils.cu | 12 +- src/cvcuda/priv/legacy/filter_utils.cuh | 4 +- src/cvcuda/priv/legacy/filter_var_shape.cu | 2 +- src/cvcuda/priv/legacy/flip.cu | 2 +- .../priv/legacy/flip_or_copy_var_shape.cu | 2 +- src/cvcuda/priv/legacy/gaussian_noise.cu | 2 +- .../priv/legacy/gaussian_noise_var_shape.cu | 2 +- src/cvcuda/priv/legacy/histogram_eq.cu | 2 +- .../priv/legacy/histogram_eq_var_shape.cu | 4 +- src/cvcuda/priv/legacy/inpaint.cu | 4 +- src/cvcuda/priv/legacy/inpaint_var_shape.cu | 2 +- .../priv/legacy/joint_bilateral_filter.cu | 2 +- src/cvcuda/priv/legacy/median_blur.cu | 2 +- .../priv/legacy/median_blur_var_shape.cu | 2 +- src/cvcuda/priv/legacy/morphology.cu | 6 +- .../priv/legacy/morphology_var_shape.cu | 6 +- src/cvcuda/priv/legacy/normalize.cu | 6 +- src/cvcuda/priv/legacy/normalize_var_shape.cu | 4 +- src/cvcuda/priv/legacy/osd.cu | 2 +- src/cvcuda/priv/legacy/pad_and_stack.cu | 2 +- src/cvcuda/priv/legacy/pillow_resize.cu | 2 +- src/cvcuda/priv/legacy/random_resized_crop.cu | 9 +- .../legacy/random_resized_crop_var_shape.cu | 9 +- src/cvcuda/priv/legacy/reformat.cu | 2 +- src/cvcuda/priv/legacy/resize.cu | 367 ----- src/cvcuda/priv/legacy/resize_var_shape.cu | 11 +- src/cvcuda/priv/legacy/rotate.cu | 2 +- src/cvcuda/priv/legacy/rotate_var_shape.cu | 2 +- src/cvcuda/priv/legacy/threshold.cu | 2 +- src/cvcuda/priv/legacy/threshold_util.cu | 2 +- src/cvcuda/priv/legacy/threshold_util.cuh | 2 +- src/cvcuda/priv/legacy/threshold_var_shape.cu | 2 +- src/cvcuda/priv/legacy/warp.cu | 2 +- src/cvcuda/priv/legacy/warp_var_shape.cu | 2 +- src/cvcuda/util/CMakeLists.txt | 29 + src/{ => cvcuda}/util/Event.cpp | 5 +- src/{ => cvcuda}/util/Event.hpp | 2 +- src/{ => cvcuda}/util/PerStreamCache.hpp | 5 +- src/{ => cvcuda}/util/PerStreamCacheImpl.hpp | 2 +- src/{ => cvcuda}/util/SimpleCache.hpp | 2 +- src/{ => cvcuda}/util/Stream.cpp | 5 +- src/{ => cvcuda}/util/Stream.hpp | 2 +- src/{ => cvcuda}/util/StreamId.cpp | 2 +- src/{ => cvcuda}/util/StreamId.hpp | 2 +- src/{ => cvcuda}/util/UniqueHandle.hpp | 2 +- src/nvcv/CMakeLists.txt | 40 + src/nvcv/cmake/ConfigBuildTree.cmake | 100 ++ src/nvcv/cmake/ConfigVersion.cmake | 109 ++ src/nvcv/cmake/CreateExportsFile.cmake | 86 + src/nvcv/cmake/VersionDef.h.in | 200 +++ src/nvcv/cmake/VersionUtils.h.in | 24 + src/{nvcv_types => nvcv/src}/Allocator.cpp | 6 +- src/{nvcv_types => nvcv/src}/Array.cpp | 2 +- src/{nvcv_types => nvcv/src}/CMakeLists.txt | 48 +- src/{nvcv_types => nvcv/src}/ColorSpec.cpp | 4 +- src/{nvcv_types => nvcv/src}/Config.cpp | 2 +- src/{nvcv_types => nvcv/src}/DataLayout.cpp | 4 +- src/{nvcv_types => nvcv/src}/DataType.cpp | 6 +- src/{nvcv_types => nvcv/src}/Image.cpp | 2 +- src/{nvcv_types => nvcv/src}/ImageBatch.cpp | 2 +- src/{nvcv_types => nvcv/src}/ImageFormat.cpp | 4 +- src/{nvcv_types => nvcv/src}/Requirements.cpp | 2 +- src/{nvcv_types => nvcv/src}/Status.cpp | 4 +- src/{nvcv_types => nvcv/src}/Tensor.cpp | 2 +- src/{nvcv_types => nvcv/src}/TensorBatch.cpp | 2 +- src/{nvcv_types => 
nvcv/src}/TensorLayout.cpp | 2 +- src/{nvcv_types => nvcv/src}/TensorShape.cpp | 2 +- src/{nvcv_types => nvcv/src}/Version.cpp | 2 +- .../src}/include/nvcv/Array.h | 2 +- .../src}/include/nvcv/Array.hpp | 2 +- .../src}/include/nvcv/ArrayData.h | 2 +- .../src}/include/nvcv/ArrayData.hpp | 2 +- .../src}/include/nvcv/ArrayDataAccess.hpp | 2 +- .../src}/include/nvcv/BorderType.h | 2 +- .../src}/include/nvcv/Casts.hpp | 2 +- .../src}/include/nvcv/ColorSpec.h | 2 +- .../src}/include/nvcv/ColorSpec.hpp | 2 +- .../src}/include/nvcv/Config.h | 2 +- .../src}/include/nvcv/Config.hpp | 2 +- .../src}/include/nvcv/CoreResource.hpp | 2 +- .../src}/include/nvcv/DataLayout.h | 2 +- .../src}/include/nvcv/DataLayout.hpp | 2 +- .../src}/include/nvcv/DataType.h | 4 +- .../src}/include/nvcv/DataType.hpp | 2 +- .../src}/include/nvcv/Exception.hpp | 2 +- .../src}/include/nvcv/Export.h | 2 +- .../src}/include/nvcv/Fwd.h | 2 +- .../src}/include/nvcv/Fwd.hpp | 2 +- .../src}/include/nvcv/HandleWrapper.hpp | 2 +- .../src}/include/nvcv/Image.h | 2 +- .../src}/include/nvcv/Image.hpp | 4 +- .../src}/include/nvcv/ImageBatch.h | 2 +- .../src}/include/nvcv/ImageBatch.hpp | 2 +- .../src}/include/nvcv/ImageBatchData.h | 2 +- .../src}/include/nvcv/ImageBatchData.hpp | 2 +- .../src}/include/nvcv/ImageData.h | 2 +- .../src}/include/nvcv/ImageData.hpp | 2 +- .../src}/include/nvcv/ImageFormat.h | 2 +- .../src}/include/nvcv/ImageFormat.hpp | 2 +- .../src}/include/nvcv/Optional.hpp | 2 +- .../src}/include/nvcv/Rect.h | 2 +- .../src}/include/nvcv/Shape.hpp | 2 +- .../src}/include/nvcv/Size.h | 2 +- .../src}/include/nvcv/Size.hpp | 2 +- .../src}/include/nvcv/Status.h | 18 +- .../src}/include/nvcv/Status.hpp | 2 +- .../src}/include/nvcv/Tensor.h | 2 +- .../src}/include/nvcv/Tensor.hpp | 2 +- .../src}/include/nvcv/TensorBatch.h | 2 +- .../src}/include/nvcv/TensorBatch.hpp | 2 +- .../src}/include/nvcv/TensorBatchData.h | 2 +- .../src}/include/nvcv/TensorBatchData.hpp | 2 +- .../src}/include/nvcv/TensorData.h | 2 +- .../src}/include/nvcv/TensorData.hpp | 2 +- .../src}/include/nvcv/TensorDataAccess.hpp | 2 +- .../src}/include/nvcv/TensorLayout.h | 2 +- .../src}/include/nvcv/TensorLayout.hpp | 2 +- .../src}/include/nvcv/TensorLayoutDef.inc | 2 +- .../src}/include/nvcv/TensorLayoutInfo.hpp | 2 +- .../src}/include/nvcv/TensorShape.h | 2 +- .../src}/include/nvcv/TensorShape.hpp | 2 +- .../src}/include/nvcv/TensorShapeInfo.hpp | 2 +- .../src}/include/nvcv/Version.h | 2 +- .../src}/include/nvcv/alloc/Allocator.h | 2 +- .../src}/include/nvcv/alloc/Allocator.hpp | 2 +- .../src}/include/nvcv/alloc/AllocatorImpl.hpp | 2 +- .../src}/include/nvcv/alloc/Fwd.h | 2 +- .../src}/include/nvcv/alloc/Fwd.hpp | 2 +- .../src}/include/nvcv/alloc/Requirements.h | 2 +- .../src}/include/nvcv/alloc/Requirements.hpp | 2 +- .../src}/include/nvcv/detail/Align.hpp | 2 +- .../include/nvcv/detail/ArrayDataImpl.hpp | 2 +- .../src}/include/nvcv/detail/ArrayImpl.hpp | 2 +- .../include/nvcv/detail/BaseFromMember.hpp | 2 +- .../src}/include/nvcv/detail/Callback.hpp | 2 +- .../src}/include/nvcv/detail/CastsImpl.hpp | 2 +- .../src}/include/nvcv/detail/CheckError.hpp | 2 +- .../src}/include/nvcv/detail/CompilerUtils.h | 2 +- .../src}/include/nvcv/detail/Concepts.hpp | 2 +- .../src}/include/nvcv/detail/CudaFwd.h | 2 +- .../src}/include/nvcv/detail/FormatUtils.h | 2 +- .../nvcv/detail/ImageBatchDataImpl.hpp | 2 +- .../include/nvcv/detail/ImageBatchImpl.hpp | 2 +- .../include/nvcv/detail/ImageDataImpl.hpp | 2 +- .../src}/include/nvcv/detail/ImageImpl.hpp | 2 +- 
.../src}/include/nvcv/detail/InPlace.hpp | 2 +- .../include/nvcv/detail/IndexSequence.hpp | 2 +- .../include/nvcv/detail/TensorBatchImpl.hpp | 2 +- .../include/nvcv/detail/TensorDataImpl.hpp | 2 +- .../src}/include/nvcv/detail/TensorImpl.hpp | 2 +- .../src}/include/nvcv/detail/TypeTraits.hpp | 2 +- .../src}/priv/AllocatorManager.hpp | 2 +- src/{nvcv_types => nvcv/src}/priv/Array.cpp | 8 +- src/{nvcv_types => nvcv/src}/priv/Array.hpp | 2 +- .../src}/priv/ArrayManager.hpp | 2 +- .../src}/priv/ArrayWrapData.cpp | 6 +- .../src}/priv/ArrayWrapData.hpp | 2 +- .../src}/priv/Bitfield.hpp | 2 +- .../src}/priv/CMakeLists.txt | 6 +- .../src}/priv/ColorFormat.cpp | 2 +- .../src}/priv/ColorFormat.hpp | 2 +- .../src}/priv/ColorSpec.cpp | 4 +- .../src}/priv/ColorSpec.hpp | 2 +- src/{nvcv_types => nvcv/src}/priv/Context.cpp | 4 +- src/{nvcv_types => nvcv/src}/priv/Context.hpp | 2 +- .../src}/priv/CustomAllocator.cpp | 4 +- .../src}/priv/CustomAllocator.hpp | 2 +- .../src}/priv/DataLayout.cpp | 8 +- .../src}/priv/DataLayout.hpp | 2 +- .../src}/priv/DataType.cpp | 6 +- .../src}/priv/DataType.hpp | 2 +- .../src}/priv/DefaultAllocator.cpp | 4 +- .../src}/priv/DefaultAllocator.hpp | 2 +- .../src}/priv/Exception.cpp | 4 +- .../src}/priv/Exception.hpp | 2 +- .../src}/priv/HandleManager.hpp | 6 +- .../src}/priv/HandleManagerImpl.hpp | 2 +- .../src}/priv/HandleTraits.hpp | 2 +- .../src}/priv/IAllocator.cpp | 4 +- .../src}/priv/IAllocator.hpp | 2 +- src/{nvcv_types => nvcv/src}/priv/IArray.hpp | 2 +- .../src}/priv/IContext.hpp | 2 +- .../src}/priv/ICoreObject.hpp | 2 +- src/{nvcv_types => nvcv/src}/priv/IImage.hpp | 2 +- .../src}/priv/IImageBatch.hpp | 2 +- src/{nvcv_types => nvcv/src}/priv/ITensor.hpp | 2 +- .../src}/priv/ITensorBatch.hpp | 2 +- src/{nvcv_types => nvcv/src}/priv/Image.cpp | 6 +- src/{nvcv_types => nvcv/src}/priv/Image.hpp | 2 +- .../src}/priv/ImageBatchManager.hpp | 2 +- .../src}/priv/ImageBatchVarShape.cpp | 6 +- .../src}/priv/ImageBatchVarShape.hpp | 2 +- .../src}/priv/ImageFormat.cpp | 4 +- .../src}/priv/ImageFormat.hpp | 4 +- .../src}/priv/ImageManager.hpp | 2 +- .../src}/priv/LockFreeStack.hpp | 2 +- .../src}/priv/Requirements.cpp | 4 +- .../src}/priv/Requirements.hpp | 2 +- .../src}/priv/SharedCoreObj.hpp | 2 +- src/{nvcv_types => nvcv/src}/priv/Size.hpp | 4 +- src/{nvcv_types => nvcv/src}/priv/Status.cpp | 4 +- src/{nvcv_types => nvcv/src}/priv/Status.hpp | 2 +- .../src}/priv/SymbolVersioning.hpp | 4 +- src/{nvcv_types => nvcv/src}/priv/TLS.cpp | 2 +- src/{nvcv_types => nvcv/src}/priv/TLS.hpp | 2 +- src/{nvcv_types => nvcv/src}/priv/Tensor.cpp | 8 +- src/{nvcv_types => nvcv/src}/priv/Tensor.hpp | 2 +- .../src}/priv/TensorBatch.cpp | 6 +- .../src}/priv/TensorBatch.hpp | 2 +- .../src}/priv/TensorBatchManager.hpp | 2 +- .../src}/priv/TensorData.cpp | 2 +- .../src}/priv/TensorData.hpp | 2 +- .../src}/priv/TensorLayout.cpp | 4 +- .../src}/priv/TensorLayout.hpp | 2 +- .../src}/priv/TensorManager.hpp | 2 +- .../src}/priv/TensorShape.cpp | 2 +- .../src}/priv/TensorShape.hpp | 4 +- .../src}/priv/TensorWrapDataStrided.cpp | 6 +- .../src}/priv/TensorWrapDataStrided.hpp | 2 +- src/{nvcv_types => nvcv/src}/priv/Version.hpp | 4 +- src/{ => nvcv}/util/Algorithm.hpp | 2 +- src/{ => nvcv}/util/Assert.cpp | 2 +- src/{ => nvcv}/util/Assert.h | 2 +- src/{ => nvcv}/util/CMakeLists.txt | 20 +- src/{ => nvcv}/util/CheckError.cpp | 2 +- src/{ => nvcv}/util/CheckError.hpp | 4 +- src/{ => nvcv}/util/Compat.c.in | 4 +- src/{ => nvcv}/util/Compat.cpp | 2 +- src/{ => nvcv}/util/Compat.h | 2 +- src/{ => 
nvcv}/util/Compiler.hpp | 2 +- src/{ => nvcv}/util/Math.hpp | 2 +- src/{ => nvcv}/util/Metaprogramming.hpp | 2 +- src/{ => nvcv}/util/Ranges.hpp | 2 +- src/{ => nvcv}/util/SanitizerOptions.c | 2 +- src/{ => nvcv}/util/Size.hpp | 2 +- src/{ => nvcv}/util/StaticVector.hpp | 2 +- src/{ => nvcv}/util/String.cpp | 2 +- src/{ => nvcv}/util/String.hpp | 2 +- src/{ => nvcv}/util/SymbolVersioning.hpp | 6 +- src/{ => nvcv}/util/Version.cpp | 2 +- src/{ => nvcv}/util/Version.hpp | 2 +- src/{ => nvcv}/util/compat_symbols.txt | 2 +- src/{ => nvcv}/util/stubs/.gitattributes | 2 +- src/{ => nvcv}/util/stubs/libdl-2.17_stub.so | 0 .../util/stubs/libpthread-2.17_stub.so | 0 src/{ => nvcv}/util/stubs/librt-2.17_stub.so | 0 tests/CMakeLists.txt | 10 +- tests/common/CMakeLists.txt | 6 +- tests/common/CheckStatus.hpp | 4 +- tests/common/HashMD5.cpp | 4 +- tests/common/HashMD5.hpp | 4 +- tests/common/InterpUtils.hpp | 12 +- tests/common/Printers.cpp | 4 +- tests/common/Printers.hpp | 8 +- .../util => tests/common}/TensorDataUtils.cpp | 2 +- .../util => tests/common}/TensorDataUtils.hpp | 8 +- tests/common/TypeList.hpp | 4 +- tests/cvcuda/python/cvcuda_test_python.in | 2 +- tests/cvcuda/python/cvcuda_util.py | 4 +- tests/cvcuda/python/test_cache.py | 95 ++ tests/cvcuda/python/test_multi_stream.py | 2 +- tests/cvcuda/python/test_opcvtcolor.py | 13 +- tests/cvcuda/python/test_opfindhomography.py | 2 +- tests/cvcuda/python/test_oplabel.py | 2 +- tests/cvcuda/python/test_opresize.py | 39 +- tests/cvcuda/python/test_stream.py | 36 + tests/cvcuda/system/CMakeLists.txt | 4 +- tests/cvcuda/system/ConvUtils.cpp | 14 +- tests/cvcuda/system/FlipUtils.cpp | 12 +- tests/cvcuda/system/ResizeUtils.cpp | 18 +- tests/cvcuda/system/ResizeUtils.hpp | 2 +- .../cvcuda/system/TestOpAdaptiveThreshold.cpp | 10 +- tests/cvcuda/system/TestOpAdvCvtColor.cpp | 8 +- tests/cvcuda/system/TestOpAverageBlur.cpp | 6 +- tests/cvcuda/system/TestOpBilateralFilter.cpp | 10 +- tests/cvcuda/system/TestOpBndBox.cpp | 4 +- tests/cvcuda/system/TestOpBoxBlur.cpp | 4 +- .../system/TestOpBrightnessContrast.cpp | 10 +- tests/cvcuda/system/TestOpCenterCrop.cpp | 4 +- tests/cvcuda/system/TestOpChannelReorder.cpp | 6 +- tests/cvcuda/system/TestOpColorTwist.cpp | 12 +- tests/cvcuda/system/TestOpConv2D.cpp | 4 +- tests/cvcuda/system/TestOpConvertTo.cpp | 6 +- .../TestOpCropFlipNormalizeReformat.cpp | 6 +- tests/cvcuda/system/TestOpCustomCrop.cpp | 4 +- tests/cvcuda/system/TestOpCvtColor.cpp | 6 +- tests/cvcuda/system/TestOpErase.cpp | 4 +- tests/cvcuda/system/TestOpFindHomography.cpp | 16 +- tests/cvcuda/system/TestOpFlip.cpp | 6 +- tests/cvcuda/system/TestOpGammaContrast.cpp | 6 +- tests/cvcuda/system/TestOpGaussian.cpp | 6 +- tests/cvcuda/system/TestOpGaussianNoise.cpp | 8 +- tests/cvcuda/system/TestOpHQResize.cpp | 10 +- tests/cvcuda/system/TestOpHistogram.cpp | 4 +- tests/cvcuda/system/TestOpHistogramEq.cpp | 6 +- tests/cvcuda/system/TestOpInpaint.cpp | 4 +- .../system/TestOpJointBilateralFilter.cpp | 13 +- tests/cvcuda/system/TestOpLabel.cpp | 10 +- tests/cvcuda/system/TestOpLaplacian.cpp | 6 +- tests/cvcuda/system/TestOpMedianBlur.cpp | 2 +- tests/cvcuda/system/TestOpMinAreaRect.cpp | 4 +- tests/cvcuda/system/TestOpMinMaxLoc.cpp | 8 +- tests/cvcuda/system/TestOpMorphology.cpp | 6 +- .../system/TestOpNonMaximumSuppression.cpp | 12 +- tests/cvcuda/system/TestOpNormalize.cpp | 4 +- tests/cvcuda/system/TestOpOSD.cpp | 4 +- tests/cvcuda/system/TestOpPairwiseMatcher.cpp | 6 +- tests/cvcuda/system/TestOpPillowResize.cpp | 4 +- 
.../cvcuda/system/TestOpRandomResizedCrop.cpp | 8 +- tests/cvcuda/system/TestOpReformat.cpp | 6 +- tests/cvcuda/system/TestOpRemap.cpp | 10 +- tests/cvcuda/system/TestOpResize.cpp | 8 +- .../TestOpResizeCropConvertReformat.cpp | 2 +- tests/cvcuda/system/TestOpRotate.cpp | 6 +- tests/cvcuda/system/TestOpSIFT.cpp | 10 +- tests/cvcuda/system/TestOpStack.cpp | 4 +- tests/cvcuda/system/TestOpThreshold.cpp | 4 +- tests/cvcuda/system/TestOpWarpAffine.cpp | 4 +- tests/cvcuda/system/TestOpWarpPerspective.cpp | 6 +- tests/cvcuda/unit/CMakeLists.txt | 7 +- .../unit/TestPerStreamCache.cpp | 8 +- .../unit/TestSimpleCache.cpp | 4 +- .../unit/TestStreamId.cpp | 4 +- tests/nvcv_types/CMakeLists.txt | 37 +- .../cudatools_system/CMakeLists.txt | 2 +- .../cudatools_system/DeviceAtomics.cu | 6 +- .../DeviceBorderVarShapeWrap.cu | 12 +- .../cudatools_system/DeviceBorderWrap.cu | 14 +- .../cudatools_system/DeviceFullTensorWrap.cu | 12 +- .../cudatools_system/DeviceFullTensorWrap.hpp | 6 +- .../DeviceImageBatchVarShapeWrap.cu | 8 +- .../DeviceImageBatchVarShapeWrap.hpp | 6 +- .../DeviceInterpolationVarShapeWrap.cu | 10 +- .../DeviceInterpolationWrap.cu | 12 +- .../cudatools_system/DeviceMathWrappers.hpp | 4 +- .../cudatools_system/DeviceSaturateCast.cu | 6 +- .../cudatools_system/DeviceTensorBatchWrap.cu | 10 +- .../DeviceTensorBatchWrap.hpp | 4 +- .../cudatools_system/DeviceTensorWrap.cu | 12 +- .../cudatools_system/DeviceTensorWrap.hpp | 8 +- .../cudatools_system/TestArrayWrap.cpp | 6 +- .../cudatools_system/TestAtomics.cpp | 8 +- .../TestBorderVarShapeWrap.cpp | 18 +- .../cudatools_system/TestBorderWrap.cpp | 20 +- .../cudatools_system/TestDropCast.cpp | 6 +- .../cudatools_system/TestFullTensorWrap.cpp | 14 +- .../TestImageBatchVarShapeWrap.cpp | 10 +- .../TestInterpolationVarShapeWrap.cpp | 20 +- .../TestInterpolationWrap.cpp | 16 +- .../cudatools_system/TestLinAlg.cpp | 6 +- .../cudatools_system/TestMathOps.cpp | 6 +- .../cudatools_system/TestMathWrappers.cpp | 8 +- .../cudatools_system/TestMetaprogramming.cpp | 6 +- .../cudatools_system/TestRangeCast.cpp | 8 +- .../cudatools_system/TestSaturateCast.cpp | 8 +- .../cudatools_system/TestStaticCast.cpp | 6 +- .../cudatools_system/TestTensorBatchWrap.cpp | 12 +- .../cudatools_system/TestTensorWrap.cpp | 14 +- .../cudatools_system/TestTypeTraits.cpp | 10 +- .../python/nvcv_test_types_python.in | 2 +- tests/nvcv_types/python/test_cache.py | 118 ++ tests/nvcv_types/python/test_image.py | 26 +- .../python/test_imgbatchvarshape.py | 14 +- tests/nvcv_types/python/test_stream.py | 10 +- tests/nvcv_types/python/test_tensor.py | 27 +- tests/nvcv_types/python/test_tensor_batch.py | 14 +- tests/nvcv_types/standalone/CMakeLists.txt | 40 + .../standalone/TestNVCVStandalone.cpp | 33 + tests/nvcv_types/system/CMakeLists.txt | 4 +- tests/nvcv_types/system/TestAllocatorC.cpp | 2 +- tests/nvcv_types/system/TestArray.cpp | 2 +- tests/nvcv_types/system/TestColorSpec.cpp | 6 +- tests/nvcv_types/system/TestConfig.cpp | 2 +- tests/nvcv_types/system/TestDataLayout.cpp | 4 +- tests/nvcv_types/system/TestImage.cpp | 2 +- tests/nvcv_types/system/TestImageBatch.cpp | 2 +- tests/nvcv_types/system/TestImageFormat.cpp | 8 +- tests/nvcv_types/system/TestRequirements.cpp | 4 +- tests/nvcv_types/system/TestTensor.cpp | 2 +- tests/nvcv_types/system/TestTensorBatch.cpp | 2 +- .../nvcv_types/system/TestTensorDataUtils.cpp | 4 +- tests/nvcv_types/system/TestTensorLayout.cpp | 2 +- tests/nvcv_types/unit/CMakeLists.txt | 5 +- tests/nvcv_types/unit/TestAlgorithm.cpp | 4 +- 
tests/nvcv_types/unit/TestCheckError.cpp | 4 +- tests/nvcv_types/unit/TestCompat.cpp | 4 +- tests/nvcv_types/unit/TestHandleManager.cpp | 8 +- tests/nvcv_types/unit/TestHandleWrapper.cpp | 8 +- tests/nvcv_types/unit/TestLockFreeStack.cpp | 4 +- tests/nvcv_types/unit/TestMath.cpp | 4 +- tests/nvcv_types/unit/TestMetaprogramming.cpp | 4 +- tests/nvcv_types/unit/TestRange.cpp | 4 +- tests/nvcv_types/unit/TestSharedCoreObj.cpp | 14 +- tests/nvcv_types/unit/TestStaticVector.cpp | 4 +- tests/nvcv_types/unit/TestString.cpp | 6 +- tests/nvcv_types/unit/TestVersion.cpp | 4 +- tests/run_tests.sh.in | 2 +- tools/mkop/CppTest.cpp | 4 +- tools/mkop/PythonWrap.cpp | 2 +- 843 files changed, 5880 insertions(+), 3931 deletions(-) create mode 100644 docs/sphinx/_python_api/nvcv/resource.rst create mode 100644 docs/sphinx/bestpractices.rst create mode 100644 docs/sphinx/nvcvobjectcache.rst create mode 100644 docs/sphinx/python.rst create mode 100644 docs/sphinx/relnotes/v0.10.0-beta.rst delete mode 100644 lint/commitlint.config.js delete mode 100755 lint/copyright_check.sh delete mode 100755 lint/lfs_check.sh create mode 100644 python/mod_nvcv/Definitions.hpp rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/ArrayWrap.hpp (100%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/Atomics.hpp (96%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/BorderVarShapeWrap.hpp (99%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/BorderWrap.hpp (100%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/DropCast.hpp (97%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/FullTensorWrap.hpp (98%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/ImageBatchVarShapeWrap.hpp (99%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/InterpolationVarShapeWrap.hpp (99%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/InterpolationWrap.hpp (99%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/MathOps.hpp (99%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/MathWrappers.hpp (99%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/Printer.hpp (100%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/RangeCast.hpp (98%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/SaturateCast.hpp (97%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/StaticCast.hpp (97%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/TensorBatchWrap.hpp (98%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/TensorWrap.hpp (100%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/TypeTraits.hpp (100%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/detail/MathWrappersImpl.hpp (99%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/detail/Metaprogramming.hpp (98%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/detail/RangeCastImpl.hpp (96%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/detail/SaturateCastImpl.hpp (98%) rename src/{nvcv_types/include/nvcv/cuda => cvcuda/include/cvcuda/cuda_tools}/math/LinAlg.hpp (99%) delete mode 100644 
src/cvcuda/priv/legacy/resize.cu create mode 100644 src/cvcuda/util/CMakeLists.txt rename src/{ => cvcuda}/util/Event.cpp (93%) rename src/{ => cvcuda}/util/Event.hpp (96%) rename src/{ => cvcuda}/util/PerStreamCache.hpp (97%) rename src/{ => cvcuda}/util/PerStreamCacheImpl.hpp (98%) rename src/{ => cvcuda}/util/SimpleCache.hpp (97%) rename src/{ => cvcuda}/util/Stream.cpp (95%) rename src/{ => cvcuda}/util/Stream.hpp (96%) rename src/{ => cvcuda}/util/StreamId.cpp (97%) rename src/{ => cvcuda}/util/StreamId.hpp (93%) rename src/{ => cvcuda}/util/UniqueHandle.hpp (98%) create mode 100644 src/nvcv/CMakeLists.txt create mode 100644 src/nvcv/cmake/ConfigBuildTree.cmake create mode 100644 src/nvcv/cmake/ConfigVersion.cmake create mode 100644 src/nvcv/cmake/CreateExportsFile.cmake create mode 100644 src/nvcv/cmake/VersionDef.h.in create mode 100644 src/nvcv/cmake/VersionUtils.h.in rename src/{nvcv_types => nvcv/src}/Allocator.cpp (98%) rename src/{nvcv_types => nvcv/src}/Array.cpp (99%) rename src/{nvcv_types => nvcv/src}/CMakeLists.txt (62%) rename src/{nvcv_types => nvcv/src}/ColorSpec.cpp (99%) rename src/{nvcv_types => nvcv/src}/Config.cpp (97%) rename src/{nvcv_types => nvcv/src}/DataLayout.cpp (98%) rename src/{nvcv_types => nvcv/src}/DataType.cpp (97%) rename src/{nvcv_types => nvcv/src}/Image.cpp (99%) rename src/{nvcv_types => nvcv/src}/ImageBatch.cpp (99%) rename src/{nvcv_types => nvcv/src}/ImageFormat.cpp (99%) rename src/{nvcv_types => nvcv/src}/Requirements.cpp (96%) rename src/{nvcv_types => nvcv/src}/Status.cpp (94%) rename src/{nvcv_types => nvcv/src}/Tensor.cpp (99%) rename src/{nvcv_types => nvcv/src}/TensorBatch.cpp (99%) rename src/{nvcv_types => nvcv/src}/TensorLayout.cpp (96%) rename src/{nvcv_types => nvcv/src}/TensorShape.cpp (90%) rename src/{nvcv_types => nvcv/src}/Version.cpp (87%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Array.h (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Array.hpp (97%) rename src/{nvcv_types => nvcv/src}/include/nvcv/ArrayData.h (97%) rename src/{nvcv_types => nvcv/src}/include/nvcv/ArrayData.hpp (96%) rename src/{nvcv_types => nvcv/src}/include/nvcv/ArrayDataAccess.hpp (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/BorderType.h (94%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Casts.hpp (97%) rename src/{nvcv_types => nvcv/src}/include/nvcv/ColorSpec.h (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/ColorSpec.hpp (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Config.h (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Config.hpp (97%) rename src/{nvcv_types => nvcv/src}/include/nvcv/CoreResource.hpp (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/DataLayout.h (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/DataLayout.hpp (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/DataType.h (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/DataType.hpp (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Exception.hpp (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Export.h (91%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Fwd.h (94%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Fwd.hpp (95%) rename src/{nvcv_types => nvcv/src}/include/nvcv/HandleWrapper.hpp (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Image.h (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Image.hpp (97%) rename src/{nvcv_types => nvcv/src}/include/nvcv/ImageBatch.h (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/ImageBatch.hpp (99%) rename src/{nvcv_types => 
nvcv/src}/include/nvcv/ImageBatchData.h (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/ImageBatchData.hpp (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/ImageData.h (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/ImageData.hpp (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/ImageFormat.h (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/ImageFormat.hpp (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Optional.hpp (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Rect.h (90%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Shape.hpp (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Size.h (89%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Size.hpp (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Status.h (87%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Status.hpp (97%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Tensor.h (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Tensor.hpp (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/TensorBatch.h (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/TensorBatch.hpp (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/TensorBatchData.h (93%) rename src/{nvcv_types => nvcv/src}/include/nvcv/TensorBatchData.hpp (97%) rename src/{nvcv_types => nvcv/src}/include/nvcv/TensorData.h (95%) rename src/{nvcv_types => nvcv/src}/include/nvcv/TensorData.hpp (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/TensorDataAccess.hpp (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/TensorLayout.h (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/TensorLayout.hpp (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/TensorLayoutDef.inc (93%) rename src/{nvcv_types => nvcv/src}/include/nvcv/TensorLayoutInfo.hpp (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/TensorShape.h (94%) rename src/{nvcv_types => nvcv/src}/include/nvcv/TensorShape.hpp (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/TensorShapeInfo.hpp (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/Version.h (92%) rename src/{nvcv_types => nvcv/src}/include/nvcv/alloc/Allocator.h (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/alloc/Allocator.hpp (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/alloc/AllocatorImpl.hpp (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/alloc/Fwd.h (93%) rename src/{nvcv_types => nvcv/src}/include/nvcv/alloc/Fwd.hpp (88%) rename src/{nvcv_types => nvcv/src}/include/nvcv/alloc/Requirements.h (97%) rename src/{nvcv_types => nvcv/src}/include/nvcv/alloc/Requirements.hpp (97%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/Align.hpp (97%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/ArrayDataImpl.hpp (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/ArrayImpl.hpp (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/BaseFromMember.hpp (95%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/Callback.hpp (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/CastsImpl.hpp (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/CheckError.hpp (93%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/CompilerUtils.h (94%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/Concepts.hpp (90%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/CudaFwd.h (85%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/FormatUtils.h (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/ImageBatchDataImpl.hpp (98%) rename src/{nvcv_types => 
nvcv/src}/include/nvcv/detail/ImageBatchImpl.hpp (99%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/ImageDataImpl.hpp (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/ImageImpl.hpp (97%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/InPlace.hpp (88%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/IndexSequence.hpp (92%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/TensorBatchImpl.hpp (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/TensorDataImpl.hpp (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/TensorImpl.hpp (98%) rename src/{nvcv_types => nvcv/src}/include/nvcv/detail/TypeTraits.hpp (97%) rename src/{nvcv_types => nvcv/src}/priv/AllocatorManager.hpp (94%) rename src/{nvcv_types => nvcv/src}/priv/Array.cpp (97%) rename src/{nvcv_types => nvcv/src}/priv/Array.hpp (96%) rename src/{nvcv_types => nvcv/src}/priv/ArrayManager.hpp (94%) rename src/{nvcv_types => nvcv/src}/priv/ArrayWrapData.cpp (95%) rename src/{nvcv_types => nvcv/src}/priv/ArrayWrapData.hpp (96%) rename src/{nvcv_types => nvcv/src}/priv/Bitfield.hpp (91%) rename src/{nvcv_types => nvcv/src}/priv/CMakeLists.txt (92%) rename src/{nvcv_types => nvcv/src}/priv/ColorFormat.cpp (91%) rename src/{nvcv_types => nvcv/src}/priv/ColorFormat.hpp (91%) rename src/{nvcv_types => nvcv/src}/priv/ColorSpec.cpp (99%) rename src/{nvcv_types => nvcv/src}/priv/ColorSpec.hpp (97%) rename src/{nvcv_types => nvcv/src}/priv/Context.cpp (94%) rename src/{nvcv_types => nvcv/src}/priv/Context.hpp (96%) rename src/{nvcv_types => nvcv/src}/priv/CustomAllocator.cpp (98%) rename src/{nvcv_types => nvcv/src}/priv/CustomAllocator.hpp (96%) rename src/{nvcv_types => nvcv/src}/priv/DataLayout.cpp (99%) rename src/{nvcv_types => nvcv/src}/priv/DataLayout.hpp (98%) rename src/{nvcv_types => nvcv/src}/priv/DataType.cpp (97%) rename src/{nvcv_types => nvcv/src}/priv/DataType.hpp (95%) rename src/{nvcv_types => nvcv/src}/priv/DefaultAllocator.cpp (97%) rename src/{nvcv_types => nvcv/src}/priv/DefaultAllocator.hpp (95%) rename src/{nvcv_types => nvcv/src}/priv/Exception.cpp (93%) rename src/{nvcv_types => nvcv/src}/priv/Exception.hpp (96%) rename src/{nvcv_types => nvcv/src}/priv/HandleManager.hpp (97%) rename src/{nvcv_types => nvcv/src}/priv/HandleManagerImpl.hpp (99%) rename src/{nvcv_types => nvcv/src}/priv/HandleTraits.hpp (93%) rename src/{nvcv_types => nvcv/src}/priv/IAllocator.cpp (97%) rename src/{nvcv_types => nvcv/src}/priv/IAllocator.hpp (97%) rename src/{nvcv_types => nvcv/src}/priv/IArray.hpp (96%) rename src/{nvcv_types => nvcv/src}/priv/IContext.hpp (96%) rename src/{nvcv_types => nvcv/src}/priv/ICoreObject.hpp (98%) rename src/{nvcv_types => nvcv/src}/priv/IImage.hpp (95%) rename src/{nvcv_types => nvcv/src}/priv/IImageBatch.hpp (96%) rename src/{nvcv_types => nvcv/src}/priv/ITensor.hpp (95%) rename src/{nvcv_types => nvcv/src}/priv/ITensorBatch.hpp (96%) rename src/{nvcv_types => nvcv/src}/priv/Image.cpp (98%) rename src/{nvcv_types => nvcv/src}/priv/Image.hpp (97%) rename src/{nvcv_types => nvcv/src}/priv/ImageBatchManager.hpp (94%) rename src/{nvcv_types => nvcv/src}/priv/ImageBatchVarShape.cpp (99%) rename src/{nvcv_types => nvcv/src}/priv/ImageBatchVarShape.hpp (97%) rename src/{nvcv_types => nvcv/src}/priv/ImageFormat.cpp (99%) rename src/{nvcv_types => nvcv/src}/priv/ImageFormat.hpp (98%) rename src/{nvcv_types => nvcv/src}/priv/ImageManager.hpp (94%) rename src/{nvcv_types => nvcv/src}/priv/LockFreeStack.hpp (98%) rename src/{nvcv_types => 
nvcv/src}/priv/Requirements.cpp (96%) rename src/{nvcv_types => nvcv/src}/priv/Requirements.hpp (91%) rename src/{nvcv_types => nvcv/src}/priv/SharedCoreObj.hpp (97%) rename src/{nvcv_types => nvcv/src}/priv/Size.hpp (85%) rename src/{nvcv_types => nvcv/src}/priv/Status.cpp (96%) rename src/{nvcv_types => nvcv/src}/priv/Status.hpp (93%) rename src/{nvcv_types => nvcv/src}/priv/SymbolVersioning.hpp (85%) rename src/{nvcv_types => nvcv/src}/priv/TLS.cpp (87%) rename src/{nvcv_types => nvcv/src}/priv/TLS.hpp (96%) rename src/{nvcv_types => nvcv/src}/priv/Tensor.cpp (98%) rename src/{nvcv_types => nvcv/src}/priv/Tensor.hpp (96%) rename src/{nvcv_types => nvcv/src}/priv/TensorBatch.cpp (98%) rename src/{nvcv_types => nvcv/src}/priv/TensorBatch.hpp (98%) rename src/{nvcv_types => nvcv/src}/priv/TensorBatchManager.hpp (94%) rename src/{nvcv_types => nvcv/src}/priv/TensorData.cpp (99%) rename src/{nvcv_types => nvcv/src}/priv/TensorData.hpp (94%) rename src/{nvcv_types => nvcv/src}/priv/TensorLayout.cpp (96%) rename src/{nvcv_types => nvcv/src}/priv/TensorLayout.hpp (93%) rename src/{nvcv_types => nvcv/src}/priv/TensorManager.hpp (94%) rename src/{nvcv_types => nvcv/src}/priv/TensorShape.cpp (93%) rename src/{nvcv_types => nvcv/src}/priv/TensorShape.hpp (86%) rename src/{nvcv_types => nvcv/src}/priv/TensorWrapDataStrided.cpp (96%) rename src/{nvcv_types => nvcv/src}/priv/TensorWrapDataStrided.hpp (95%) rename src/{nvcv_types => nvcv/src}/priv/Version.hpp (86%) rename src/{ => nvcv}/util/Algorithm.hpp (90%) rename src/{ => nvcv}/util/Assert.cpp (90%) rename src/{ => nvcv}/util/Assert.h (96%) rename src/{ => nvcv}/util/CMakeLists.txt (95%) rename src/{ => nvcv}/util/CheckError.cpp (97%) rename src/{ => nvcv}/util/CheckError.hpp (98%) rename src/{ => nvcv}/util/Compat.c.in (99%) rename src/{ => nvcv}/util/Compat.cpp (98%) rename src/{ => nvcv}/util/Compat.h (93%) rename src/{ => nvcv}/util/Compiler.hpp (95%) rename src/{ => nvcv}/util/Math.hpp (97%) rename src/{ => nvcv}/util/Metaprogramming.hpp (88%) rename src/{ => nvcv}/util/Ranges.hpp (95%) rename src/{ => nvcv}/util/SanitizerOptions.c (97%) rename src/{ => nvcv}/util/Size.hpp (90%) rename src/{ => nvcv}/util/StaticVector.hpp (99%) rename src/{ => nvcv}/util/String.cpp (98%) rename src/{ => nvcv}/util/String.hpp (94%) rename src/{ => nvcv}/util/SymbolVersioning.hpp (93%) rename src/{ => nvcv}/util/Version.cpp (89%) rename src/{ => nvcv}/util/Version.hpp (97%) rename src/{ => nvcv}/util/compat_symbols.txt (97%) rename src/{ => nvcv}/util/stubs/.gitattributes (85%) rename src/{ => nvcv}/util/stubs/libdl-2.17_stub.so (100%) rename src/{ => nvcv}/util/stubs/libpthread-2.17_stub.so (100%) rename src/{ => nvcv}/util/stubs/librt-2.17_stub.so (100%) rename {src/util => tests/common}/TensorDataUtils.cpp (99%) rename {src/util => tests/common}/TensorDataUtils.hpp (99%) create mode 100644 tests/cvcuda/python/test_cache.py create mode 100644 tests/cvcuda/python/test_stream.py rename tests/{nvcv_types => cvcuda}/unit/TestPerStreamCache.cpp (98%) rename tests/{nvcv_types => cvcuda}/unit/TestSimpleCache.cpp (93%) rename tests/{nvcv_types => cvcuda}/unit/TestStreamId.cpp (96%) create mode 100644 tests/nvcv_types/python/test_cache.py create mode 100644 tests/nvcv_types/standalone/CMakeLists.txt create mode 100644 tests/nvcv_types/standalone/TestNVCVStandalone.cpp diff --git a/.gitattributes b/.gitattributes index 892050179..a04837f39 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index ae8f44bda..4d1de0382 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -1,3 +1,13 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + name: "CodeQL" on: diff --git a/.gitignore b/.gitignore index 528b9a558..6bcf4789b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/CMakeLists.txt b/CMakeLists.txt index 198a070f4..94260676e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -23,7 +23,7 @@ endif() project(cvcuda LANGUAGES C CXX - VERSION 0.9.0 + VERSION 0.10.0 DESCRIPTION "CUDA-accelerated Computer Vision algorithms" ) diff --git a/LICENSE.md b/LICENSE.md index 0701ae6cf..f6ac8a5f0 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -87,18 +87,18 @@ END OF TERMS AND CONDITIONS # APPENDIX: How to apply the Apache License to your work -To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[ ]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + Copyright (c) [yyyy] [name of copyright owner] - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md index fe954e9b7..7a93e25dd 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ [![License](https://img.shields.io/badge/License-Apache_2.0-yellogreen.svg)](https://opensource.org/licenses/Apache-2.0) -![Version](https://img.shields.io/badge/Version-v0.9.0--beta-blue) +![Version](https://img.shields.io/badge/Version-v0.10.0--beta-blue) ![Platform](https://img.shields.io/badge/Platform-linux--64_%7C_win--64_wsl2%7C_aarch64-gray) @@ -61,7 +61,9 @@ To get a local copy up and running follow these steps. - Only one CUDA version (CUDA 11.x or CUDA 12.x) of CV-CUDA packages (Debian packages, tarballs, Python Wheels) can be installed at a time. Please uninstall all packages from a given CUDA version before installing packages from a different version. - Documentation built on Ubuntu 20.04 needs an up-to-date version of sphinx (`pip install --upgrade sphinx`) as well as explicitly parsing the system's default python version ` ./ci/build_docs path/to/build -DPYTHON_VERSIONS=""`. - Python bindings installed via Debian packages and Python tests fail with Numpy 2.0. We recommend using an older version of Numpy (e.g. 1.26) until we have implemented a fix. -- The Resize and RandomResizedCrop operators incorrectly interpolate pixel values near the boundary of an image or tensor when using linear and cubic interpolation. This will be fixed in an upcoming release. +- The Resize and RandomResizedCrop operators incorrectly interpolate pixel values near the boundary of an image or tensor when using cubic interpolation. This will be fixed in an upcoming release. +- Cache/resource management introduced in v0.10 adds microsecond-level overhead to Python operator calls. Based on the performance analysis of our Python samples, we expect the production- and pipeline-level impact to be negligible. CUDA kernel and C++ call performance is not affected. We aim to investigate and reduce this overhead further in a future release. +- Sporadic Pybind11 deallocation crashes have been reported in long-lasting multi-threaded Python pipelines with externally allocated memory (e.g., wrapped PyTorch buffers). We are evaluating an upgrade of Pybind11 (currently 2.10) as a potential fix in an upcoming release. ### Installation diff --git a/bench/BenchAdaptiveThreshold.cpp b/bench/BenchAdaptiveThreshold.cpp index 10fe8570f..98fcac3a6 100644 --- a/bench/BenchAdaptiveThreshold.cpp +++ b/bench/BenchAdaptiveThreshold.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchAverageBlur.cpp b/bench/BenchAverageBlur.cpp index 0736ccd47..4b6cb83b1 100644 --- a/bench/BenchAverageBlur.cpp +++ b/bench/BenchAverageBlur.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchBilateralFilter.cpp b/bench/BenchBilateralFilter.cpp index ff41b9494..9695abb08 100644 --- a/bench/BenchBilateralFilter.cpp +++ b/bench/BenchBilateralFilter.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchBrightnessContrast.cpp b/bench/BenchBrightnessContrast.cpp index ea79f5a13..69367649b 100644 --- a/bench/BenchBrightnessContrast.cpp +++ b/bench/BenchBrightnessContrast.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchColorTwist.cpp b/bench/BenchColorTwist.cpp index 1ade029f4..8ce4d139a 100644 --- a/bench/BenchColorTwist.cpp +++ b/bench/BenchColorTwist.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchComposite.cpp b/bench/BenchComposite.cpp index f29f26acf..21852e085 100644 --- a/bench/BenchComposite.cpp +++ b/bench/BenchComposite.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchCopyMakeBorder.cpp b/bench/BenchCopyMakeBorder.cpp index 8d26487a7..1a855975c 100644 --- a/bench/BenchCopyMakeBorder.cpp +++ b/bench/BenchCopyMakeBorder.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchCvtColor.cpp b/bench/BenchCvtColor.cpp index abe1951ea..1058290b3 100644 --- a/bench/BenchCvtColor.cpp +++ b/bench/BenchCvtColor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,51 +21,146 @@ #include -template -inline void CvtColor(nvbench::state &state, nvbench::type_list) +#include +#include +#include + +inline static std::tuple StringToFormats( + const std::string &str) +{ + // clang-format off + static const std::map> codeMap{ + { "RGB2BGR", {NVCV_COLOR_RGB2BGR, NVCV_IMAGE_FORMAT_RGB8, NVCV_IMAGE_FORMAT_BGR8 }}, + { "RGB2RGBA", {NVCV_COLOR_RGB2RGBA, NVCV_IMAGE_FORMAT_RGB8, NVCV_IMAGE_FORMAT_RGBA8}}, + { "RGBA2RGB", {NVCV_COLOR_RGBA2RGB, NVCV_IMAGE_FORMAT_RGBA8, NVCV_IMAGE_FORMAT_RGB8 }}, + { "RGB2GRAY", {NVCV_COLOR_RGB2GRAY, NVCV_IMAGE_FORMAT_RGB8, NVCV_IMAGE_FORMAT_Y8 }}, + { "GRAY2RGB", {NVCV_COLOR_GRAY2RGB, NVCV_IMAGE_FORMAT_Y8, NVCV_IMAGE_FORMAT_RGB8 }}, + { "RGB2HSV", {NVCV_COLOR_RGB2HSV, NVCV_IMAGE_FORMAT_RGB8, NVCV_IMAGE_FORMAT_HSV8 }}, + { "HSV2RGB", {NVCV_COLOR_HSV2RGB, NVCV_IMAGE_FORMAT_HSV8, NVCV_IMAGE_FORMAT_RGB8 }}, + { "RGB2YUV", {NVCV_COLOR_RGB2YUV, NVCV_IMAGE_FORMAT_RGB8, NVCV_IMAGE_FORMAT_YUV8 }}, + { "YUV2RGB", {NVCV_COLOR_YUV2RGB, NVCV_IMAGE_FORMAT_YUV8, NVCV_IMAGE_FORMAT_RGB8 }}, + {"RGB2YUV_NV12", {NVCV_COLOR_RGB2YUV_NV12, NVCV_IMAGE_FORMAT_RGB8, NVCV_IMAGE_FORMAT_NV12 }}, + {"YUV2RGB_NV12", {NVCV_COLOR_YUV2RGB_NV12, NVCV_IMAGE_FORMAT_NV12, NVCV_IMAGE_FORMAT_RGB8 }}, + }; + // clang-format on + + if (auto it = codeMap.find(str); it != codeMap.end()) + { + return it->second; + } + else + { + throw std::invalid_argument("Unrecognized color code"); + } +} + +template +inline float BytesPerPixel(NVCVImageFormat imgFormat) +{ + switch (imgFormat) + { +#define CVCUDA_BYTES_PER_PIXEL_CASE(FORMAT, BYTES) \ + case FORMAT: \ + return BYTES * sizeof(BT) + CVCUDA_BYTES_PER_PIXEL_CASE(NVCV_IMAGE_FORMAT_RGB8, 3); + CVCUDA_BYTES_PER_PIXEL_CASE(NVCV_IMAGE_FORMAT_BGR8, 3); + CVCUDA_BYTES_PER_PIXEL_CASE(NVCV_IMAGE_FORMAT_HSV8, 3); + CVCUDA_BYTES_PER_PIXEL_CASE(NVCV_IMAGE_FORMAT_RGBA8, 4); + CVCUDA_BYTES_PER_PIXEL_CASE(NVCV_IMAGE_FORMAT_YUV8, 3); + CVCUDA_BYTES_PER_PIXEL_CASE(NVCV_IMAGE_FORMAT_NV12, 1.5f); + CVCUDA_BYTES_PER_PIXEL_CASE(NVCV_IMAGE_FORMAT_Y8, 1); +#undef CVCUDA_BYTES_PER_PIXEL_CASE + default: + throw std::invalid_argument("Unrecognized format"); + } +} + +// Adapted from src/util/TensorDataUtils.hpp +inline static nvcv::Tensor CreateTensor(int numImages, int imgWidth, int imgHeight, const nvcv::ImageFormat &imgFormat) +{ + if (imgFormat == NVCV_IMAGE_FORMAT_NV12 || imgFormat == NVCV_IMAGE_FORMAT_NV12_ER + || imgFormat == NVCV_IMAGE_FORMAT_NV21 || imgFormat == NVCV_IMAGE_FORMAT_NV21_ER) + { + int height420 = (imgHeight * 3) / 2; + if (height420 % 3 != 0 || imgWidth % 2 != 0) + { + throw std::invalid_argument("Invalid height"); + } + + return nvcv::Tensor(numImages, {imgWidth, height420}, nvcv::ImageFormat(NVCV_IMAGE_FORMAT_Y8)); + } + else + { + return nvcv::Tensor(numImages, {imgWidth, imgHeight}, imgFormat); + } +} + +template +inline void CvtColor(nvbench::state &state, nvbench::type_list) try { long3 shape = benchutils::GetShape<3>(state.get_string("shape")); long varShape = state.get_int64("varShape"); + std::tuple formats + = StringToFormats(state.get_string("code")); - using BT = typename nvcv::cuda::BaseType; - - int ch = nvcv::cuda::NumElements; + NVCVColorConversionCode code = std::get<0>(formats); + nvcv::ImageFormat inFormat{std::get<1>(formats)}; + nvcv::ImageFormat outFormat{std::get<2>(formats)}; - NVCVColorConversionCode code = ch == 3 ? 
NVCV_COLOR_BGR2RGB : NVCV_COLOR_BGRA2RGBA; - - state.add_global_memory_reads(shape.x * shape.y * shape.z * sizeof(T)); - state.add_global_memory_writes(shape.x * shape.y * shape.z * sizeof(T)); + state.add_global_memory_reads(shape.x * shape.y * shape.z * BytesPerPixel(inFormat)); + state.add_global_memory_writes(shape.x * shape.y * shape.z * BytesPerPixel(outFormat)); cvcuda::CvtColor op; - // clang-format off - if (varShape < 0) // negative var shape means use Tensor { - nvcv::Tensor src({{shape.x, shape.y, shape.z, ch}, "NHWC"}, benchutils::GetDataType()); - nvcv::Tensor dst({{shape.x, shape.y, shape.z, ch}, "NHWC"}, benchutils::GetDataType()); + nvcv::Tensor src = CreateTensor(shape.x, shape.z, shape.y, inFormat); + nvcv::Tensor dst = CreateTensor(shape.x, shape.z, shape.y, outFormat); benchutils::FillTensor(src, benchutils::RandomValues()); - state.exec(nvbench::exec_tag::sync, [&op, &src, &dst, &code](nvbench::launch &launch) - { - op(launch.get_stream(), src, dst, code); - }); + state.exec(nvbench::exec_tag::sync, + [&op, &src, &dst, &code](nvbench::launch &launch) { op(launch.get_stream(), src, dst, code); }); } else // zero and positive var shape means use ImageBatchVarShape { - nvcv::ImageBatchVarShape src(shape.x); - nvcv::ImageBatchVarShape dst(shape.x); + if (inFormat.chromaSubsampling() != nvcv::ChromaSubsampling::CSS_444 + || outFormat.chromaSubsampling() != nvcv::ChromaSubsampling::CSS_444) + { + state.skip("Skipping formats that have subsampled planes for the varshape benchmark"); + } + + std::vector imgSrc; + std::vector imgDst; + nvcv::ImageBatchVarShape src(shape.x); + nvcv::ImageBatchVarShape dst(shape.x); + std::vector> srcVec(shape.x); - benchutils::FillImageBatch(src, long2{shape.z, shape.y}, long2{varShape, varShape}, - benchutils::RandomValues()); - dst.pushBack(src.begin(), src.end()); + auto randomValuesU8 = benchutils::RandomValues(); - state.exec(nvbench::exec_tag::sync, [&op, &src, &dst, &code](nvbench::launch &launch) + for (int i = 0; i < shape.x; i++) { - op(launch.get_stream(), src, dst, code); - }); + imgSrc.emplace_back(nvcv::Size2D{(int)shape.z, (int)shape.y}, inFormat); + imgDst.emplace_back(nvcv::Size2D{(int)shape.z, (int)shape.y}, outFormat); + + int srcRowStride = imgSrc[i].size().w * inFormat.planePixelStrideBytes(0); + int srcBufSize = imgSrc[i].size().h * srcRowStride; + srcVec[i].resize(srcBufSize); + for (int idx = 0; idx < srcBufSize; idx++) + { + srcVec[i][idx] = randomValuesU8(); + } + + auto imgData = imgSrc[i].exportData(); + CUDA_CHECK_ERROR(cudaMemcpy2D(imgData->plane(0).basePtr, imgData->plane(0).rowStride, srcVec[i].data(), + srcRowStride, srcRowStride, imgSrc[i].size().h, cudaMemcpyHostToDevice)); + } + src.pushBack(imgSrc.begin(), imgSrc.end()); + dst.pushBack(imgDst.begin(), imgDst.end()); + + state.exec(nvbench::exec_tag::sync, + [&op, &src, &dst, &code](nvbench::launch &launch) { op(launch.get_stream(), src, dst, code); }); } } catch (const std::exception &err) @@ -73,11 +168,11 @@ catch (const std::exception &err) state.skip(err.what()); } -// clang-format on - -using CvtColorTypes = nvbench::type_list; +using BaseTypes = nvbench::type_list; -NVBENCH_BENCH_TYPES(CvtColor, NVBENCH_TYPE_AXES(CvtColorTypes)) - .set_type_axes_names({"InOutDataType"}) - .add_string_axis("shape", {"1x1080x1920"}) +NVBENCH_BENCH_TYPES(CvtColor, NVBENCH_TYPE_AXES(BaseTypes)) + .set_type_axes_names({"BaseType"}) + .add_string_axis("shape", {"1x1080x1920", "64x720x1280"}) + .add_string_axis("code", {"RGB2BGR", "RGB2RGBA", "RGBA2RGB", "RGB2GRAY", 
"GRAY2RGB", "RGB2HSV", "HSV2RGB", + "RGB2YUV", "YUV2RGB", "RGB2YUV_NV12", "YUV2RGB_NV12"}) .add_int64_axis("varShape", {-1, 0}); diff --git a/bench/BenchErase.cpp b/bench/BenchErase.cpp index 2bb504d2b..8a0843d7e 100644 --- a/bench/BenchErase.cpp +++ b/bench/BenchErase.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchFlip.cpp b/bench/BenchFlip.cpp index 9c052f62a..11ca80f90 100644 --- a/bench/BenchFlip.cpp +++ b/bench/BenchFlip.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchGaussian.cpp b/bench/BenchGaussian.cpp index a1976581d..2f534207f 100644 --- a/bench/BenchGaussian.cpp +++ b/bench/BenchGaussian.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchGaussianNoise.cpp b/bench/BenchGaussianNoise.cpp index 09dcd04e4..3806e7524 100644 --- a/bench/BenchGaussianNoise.cpp +++ b/bench/BenchGaussianNoise.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchHistogramEq.cpp b/bench/BenchHistogramEq.cpp index 74bcb9d46..0af7ea793 100644 --- a/bench/BenchHistogramEq.cpp +++ b/bench/BenchHistogramEq.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchInpaint.cpp b/bench/BenchInpaint.cpp index ed6dbd055..19ae66060 100644 --- a/bench/BenchInpaint.cpp +++ b/bench/BenchInpaint.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchJointBilateralFilter.cpp b/bench/BenchJointBilateralFilter.cpp index 2aa748048..c9b8472e1 100644 --- a/bench/BenchJointBilateralFilter.cpp +++ b/bench/BenchJointBilateralFilter.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchLabel.cpp b/bench/BenchLabel.cpp index 5e1870f50..c5cd7b218 100644 --- a/bench/BenchLabel.cpp +++ b/bench/BenchLabel.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchLaplacian.cpp b/bench/BenchLaplacian.cpp index 7956d8c22..1b59dc8e2 100644 --- a/bench/BenchLaplacian.cpp +++ b/bench/BenchLaplacian.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchMedianBlur.cpp b/bench/BenchMedianBlur.cpp index 0520f5f26..45ab427d6 100644 --- a/bench/BenchMedianBlur.cpp +++ b/bench/BenchMedianBlur.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchMinMaxLoc.cpp b/bench/BenchMinMaxLoc.cpp index 40e8385bf..7f1b28980 100644 --- a/bench/BenchMinMaxLoc.cpp +++ b/bench/BenchMinMaxLoc.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchMorphology.cpp b/bench/BenchMorphology.cpp index f357dbffb..dffb0b6b4 100644 --- a/bench/BenchMorphology.cpp +++ b/bench/BenchMorphology.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchNormalize.cpp b/bench/BenchNormalize.cpp index 9e7cc09e6..ae7003c4d 100644 --- a/bench/BenchNormalize.cpp +++ b/bench/BenchNormalize.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchPillowResize.cpp b/bench/BenchPillowResize.cpp index 1340a9f26..6dd5ae67d 100644 --- a/bench/BenchPillowResize.cpp +++ b/bench/BenchPillowResize.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchRandomResizedCrop.cpp b/bench/BenchRandomResizedCrop.cpp index 661a5e42c..55161a982 100644 --- a/bench/BenchRandomResizedCrop.cpp +++ b/bench/BenchRandomResizedCrop.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchRemap.cpp b/bench/BenchRemap.cpp index 3f3825c82..4f35e016d 100644 --- a/bench/BenchRemap.cpp +++ b/bench/BenchRemap.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchResize.cpp b/bench/BenchResize.cpp index b8fb517a0..9cb608bc9 100644 --- a/bench/BenchResize.cpp +++ b/bench/BenchResize.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchResizeCropConvertReformat.cpp b/bench/BenchResizeCropConvertReformat.cpp index f058a10e0..b9e9e3125 100644 --- a/bench/BenchResizeCropConvertReformat.cpp +++ b/bench/BenchResizeCropConvertReformat.cpp @@ -18,7 +18,7 @@ #include "BenchUtils.hpp" #include -#include +#include #include diff --git a/bench/BenchRotate.cpp b/bench/BenchRotate.cpp index bfd58527b..00d905210 100644 --- a/bench/BenchRotate.cpp +++ b/bench/BenchRotate.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchThreshold.cpp b/bench/BenchThreshold.cpp index 1c87a7995..bfc438623 100644 --- a/bench/BenchThreshold.cpp +++ b/bench/BenchThreshold.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchUtils.hpp b/bench/BenchUtils.hpp index 3875928ee..2fddd2cef 100644 --- a/bench/BenchUtils.hpp +++ b/bench/BenchUtils.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,15 +19,15 @@ #define CVCUDA_BENCH_UTILS_HPP #include +#include +#include +#include #include #include #include #include #include #include -#include -#include -#include #include #include diff --git a/bench/BenchWarpAffine.cpp b/bench/BenchWarpAffine.cpp index a028e28b9..e103051e8 100644 --- a/bench/BenchWarpAffine.cpp +++ b/bench/BenchWarpAffine.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/BenchWarpPerspective.cpp b/bench/BenchWarpPerspective.cpp index f18108e87..32c47fea0 100644 --- a/bench/BenchWarpPerspective.cpp +++ b/bench/BenchWarpPerspective.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/CMakeLists.txt b/bench/CMakeLists.txt index a685f08ab..c38328e03 100644 --- a/bench/CMakeLists.txt +++ b/bench/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_adaptivethreshold.py b/bench/python/all_ops/op_adaptivethreshold.py index ddc316cc6..1d7b09fbc 100644 --- a/bench/python/all_ops/op_adaptivethreshold.py +++ b/bench/python/all_ops/op_adaptivethreshold.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_averageblur.py b/bench/python/all_ops/op_averageblur.py index cf591e3f1..e6dee83eb 100644 --- a/bench/python/all_ops/op_averageblur.py +++ b/bench/python/all_ops/op_averageblur.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_blurbox.py b/bench/python/all_ops/op_blurbox.py index 8f24740d5..ea37e00ba 100644 --- a/bench/python/all_ops/op_blurbox.py +++ b/bench/python/all_ops/op_blurbox.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_boundingbox.py b/bench/python/all_ops/op_boundingbox.py index 5b9f1ba3d..cc08e9703 100644 --- a/bench/python/all_ops/op_boundingbox.py +++ b/bench/python/all_ops/op_boundingbox.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_brightnesscontrast.py b/bench/python/all_ops/op_brightnesscontrast.py index 1cd38e679..58d9cb833 100644 --- a/bench/python/all_ops/op_brightnesscontrast.py +++ b/bench/python/all_ops/op_brightnesscontrast.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_centercrop.py b/bench/python/all_ops/op_centercrop.py index 907c31cf0..e70e964fc 100644 --- a/bench/python/all_ops/op_centercrop.py +++ b/bench/python/all_ops/op_centercrop.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_composite.py b/bench/python/all_ops/op_composite.py index d42e5063b..2ef027469 100644 --- a/bench/python/all_ops/op_composite.py +++ b/bench/python/all_ops/op_composite.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_convertto.py b/bench/python/all_ops/op_convertto.py index 48e4fa21c..443db43e9 100644 --- a/bench/python/all_ops/op_convertto.py +++ b/bench/python/all_ops/op_convertto.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_copymakeborder.py b/bench/python/all_ops/op_copymakeborder.py index 2f57475d0..39c79d5e8 100644 --- a/bench/python/all_ops/op_copymakeborder.py +++ b/bench/python/all_ops/op_copymakeborder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_customcrop.py b/bench/python/all_ops/op_customcrop.py index 0618a4821..4cb31c2fd 100644 --- a/bench/python/all_ops/op_customcrop.py +++ b/bench/python/all_ops/op_customcrop.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_cvtcolor.py b/bench/python/all_ops/op_cvtcolor.py index 6eafee402..35a7322fe 100644 --- a/bench/python/all_ops/op_cvtcolor.py +++ b/bench/python/all_ops/op_cvtcolor.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_flip.py b/bench/python/all_ops/op_flip.py index d93a1c148..0e0a1eb51 100644 --- a/bench/python/all_ops/op_flip.py +++ b/bench/python/all_ops/op_flip.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_gaussianblur.py b/bench/python/all_ops/op_gaussianblur.py index cd306ec93..45f57eda3 100644 --- a/bench/python/all_ops/op_gaussianblur.py +++ b/bench/python/all_ops/op_gaussianblur.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_hqresize.py b/bench/python/all_ops/op_hqresize.py index a5514ab72..765256873 100644 --- a/bench/python/all_ops/op_hqresize.py +++ b/bench/python/all_ops/op_hqresize.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_inpaint.py b/bench/python/all_ops/op_inpaint.py index c2419545b..6fa8af553 100644 --- a/bench/python/all_ops/op_inpaint.py +++ b/bench/python/all_ops/op_inpaint.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_jointbilateral.py b/bench/python/all_ops/op_jointbilateral.py index 99b0cc0f7..9b9e694e3 100644 --- a/bench/python/all_ops/op_jointbilateral.py +++ b/bench/python/all_ops/op_jointbilateral.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_laplacian.py b/bench/python/all_ops/op_laplacian.py index ee9d4b75a..829268aed 100644 --- a/bench/python/all_ops/op_laplacian.py +++ b/bench/python/all_ops/op_laplacian.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_morphology.py b/bench/python/all_ops/op_morphology.py index f13434e05..10744b3d1 100644 --- a/bench/python/all_ops/op_morphology.py +++ b/bench/python/all_ops/op_morphology.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_nms.py b/bench/python/all_ops/op_nms.py index dd9abfa9f..c73def6c5 100644 --- a/bench/python/all_ops/op_nms.py +++ b/bench/python/all_ops/op_nms.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_normalize.py b/bench/python/all_ops/op_normalize.py index a17fd296f..fa7b3eddf 100644 --- a/bench/python/all_ops/op_normalize.py +++ b/bench/python/all_ops/op_normalize.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_randomresizedcrop.py b/bench/python/all_ops/op_randomresizedcrop.py index 0dc1f5c03..7da248a42 100644 --- a/bench/python/all_ops/op_randomresizedcrop.py +++ b/bench/python/all_ops/op_randomresizedcrop.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_reformat.py b/bench/python/all_ops/op_reformat.py index eb4c2ddc8..016b4acb7 100644 --- a/bench/python/all_ops/op_reformat.py +++ b/bench/python/all_ops/op_reformat.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_remap.py b/bench/python/all_ops/op_remap.py index 31175d66e..9e6b94d29 100644 --- a/bench/python/all_ops/op_remap.py +++ b/bench/python/all_ops/op_remap.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_reshape.py b/bench/python/all_ops/op_reshape.py index 37bc63950..438aa1af6 100644 --- a/bench/python/all_ops/op_reshape.py +++ b/bench/python/all_ops/op_reshape.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_resize.py b/bench/python/all_ops/op_resize.py index 0a3d4fcf5..b60abe92f 100644 --- a/bench/python/all_ops/op_resize.py +++ b/bench/python/all_ops/op_resize.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_resize_crop_convert_reformat.py b/bench/python/all_ops/op_resize_crop_convert_reformat.py index 46001f068..32dccfbc7 100644 --- a/bench/python/all_ops/op_resize_crop_convert_reformat.py +++ b/bench/python/all_ops/op_resize_crop_convert_reformat.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_rotate.py b/bench/python/all_ops/op_rotate.py index b7d0697ee..9681f67e1 100644 --- a/bench/python/all_ops/op_rotate.py +++ b/bench/python/all_ops/op_rotate.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_sift.py b/bench/python/all_ops/op_sift.py index 1d0e23567..724cdd2af 100644 --- a/bench/python/all_ops/op_sift.py +++ b/bench/python/all_ops/op_sift.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_threshold.py b/bench/python/all_ops/op_threshold.py index 6cd277fc3..70208114b 100644 --- a/bench/python/all_ops/op_threshold.py +++ b/bench/python/all_ops/op_threshold.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_warpaffine.py b/bench/python/all_ops/op_warpaffine.py index 9a4f062b1..5130e3953 100644 --- a/bench/python/all_ops/op_warpaffine.py +++ b/bench/python/all_ops/op_warpaffine.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/all_ops/op_warpperspective.py b/bench/python/all_ops/op_warpperspective.py index c73ae25d2..af3bced1f 100644 --- a/bench/python/all_ops/op_warpperspective.py +++ b/bench/python/all_ops/op_warpperspective.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/assets/NOTICE.md b/bench/python/assets/NOTICE.md index 7dd764391..acde2a72e 100644 --- a/bench/python/assets/NOTICE.md +++ b/bench/python/assets/NOTICE.md @@ -1,5 +1,5 @@ -[//]: # "SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved." +[//]: # "SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved." [//]: # "SPDX-License-Identifier: Apache-2.0" [//]: # "" [//]: # "Licensed under the Apache License, Version 2.0 (the 'License');" diff --git a/bench/python/bench_utils.py b/bench/python/bench_utils.py index 23578438e..36874890b 100644 --- a/bench/python/bench_utils.py +++ b/bench/python/bench_utils.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/bench/python/run_bench.py b/bench/python/run_bench.py index 8c00b43c1..9bab6c1e7 100644 --- a/bench/python/run_bench.py +++ b/bench/python/run_bench.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/ci/build.sh b/ci/build.sh index b5114d3cd..668352920 100755 --- a/ci/build.sh +++ b/ci/build.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -62,6 +62,12 @@ mkdir -p "$build_dir" # Set build configuration cmake_args="-DBUILD_TESTS=1" +if [[ "$ENABLE_SANITIZER" == 'true' || "$ENABLE_SANITIZER" == '1' ]]; then + cmake_args="$cmake_args -DENABLE_SANITIZER=ON" +else + cmake_args="$cmake_args -DENABLE_SANITIZER=OFF" +fi + # Python build configuration if [[ "$ENABLE_PYTHON" == '0' || "$ENABLE_PYTHON" == 'no' ]]; then cmake_args="$cmake_args -DBUILD_PYTHON=0" diff --git a/ci/build_docs.sh b/ci/build_docs.sh index addfb8e02..3d5b04794 100755 --- a/ci/build_docs.sh +++ b/ci/build_docs.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/ci/build_samples.sh b/ci/build_samples.sh index 27b5c383c..013cd6301 100755 --- a/ci/build_samples.sh +++ b/ci/build_samples.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/cmake/ConfigCPack.cmake b/cmake/ConfigCPack.cmake index e0bec6ada..4790ef918 100644 --- a/cmake/ConfigCPack.cmake +++ b/cmake/ConfigCPack.cmake @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/cmake/ConfigCUDA.cmake b/cmake/ConfigCUDA.cmake index 88a2707c5..319a157eb 100644 --- a/cmake/ConfigCUDA.cmake +++ b/cmake/ConfigCUDA.cmake @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -32,6 +32,9 @@ set(CMAKE_CUDA_STANDARD ${CMAKE_CXX_STANDARD}) # Compress kernels to generate smaller executables set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Xfatbin=--compress-all") +# Enable device lambdas +set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --extended-lambda") + if(NOT USE_CMAKE_CUDA_ARCHITECTURES) set(CMAKE_CUDA_ARCHITECTURES "$ENV{CUDAARCHS}") diff --git a/cmake/ConfigCompiler.cmake b/cmake/ConfigCompiler.cmake index b011ace1b..b75165e09 100644 --- a/cmake/ConfigCompiler.cmake +++ b/cmake/ConfigCompiler.cmake @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/cmake/ConfigVersion.cmake b/cmake/ConfigVersion.cmake index c256e9442..f0e98a2f4 100644 --- a/cmake/ConfigVersion.cmake +++ b/cmake/ConfigVersion.cmake @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/cmake/GetGitRevisionDescription.cmake b/cmake/GetGitRevisionDescription.cmake index b18506492..2aec0dc1b 100644 --- a/cmake/GetGitRevisionDescription.cmake +++ b/cmake/GetGitRevisionDescription.cmake @@ -167,7 +167,7 @@ function(git_local_changes _var) endif() endfunction() -# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES # SPDX-License-Identifier: Apache-2.0 # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual diff --git a/cmake/InstallTests.cmake b/cmake/InstallTests.cmake index ff34de54a..2f35998e0 100644 --- a/cmake/InstallTests.cmake +++ b/cmake/InstallTests.cmake @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/build20.04/Dockerfile b/docker/build20.04/Dockerfile index d689ce3ea..d99223968 100644 --- a/docker/build20.04/Dockerfile +++ b/docker/build20.04/Dockerfile @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/build20.04/ccache.conf b/docker/build20.04/ccache.conf index 3ea1d6a35..1fb25208b 100644 --- a/docker/build20.04/ccache.conf +++ b/docker/build20.04/ccache.conf @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/build20.04/deadsnakes-ubuntu-ppa-focal.list b/docker/build20.04/deadsnakes-ubuntu-ppa-focal.list index b9cba6f58..2cdc0fb1d 100644 --- a/docker/build20.04/deadsnakes-ubuntu-ppa-focal.list +++ b/docker/build20.04/deadsnakes-ubuntu-ppa-focal.list @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/build22.04/Dockerfile b/docker/build22.04/Dockerfile index e974b5cd0..216860511 100644 --- a/docker/build22.04/Dockerfile +++ b/docker/build22.04/Dockerfile @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/build22.04/ccache.conf b/docker/build22.04/ccache.conf index 3ea1d6a35..1fb25208b 100644 --- a/docker/build22.04/ccache.conf +++ b/docker/build22.04/ccache.conf @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/build22.04/deadsnakes-ubuntu-ppa-jammy.list b/docker/build22.04/deadsnakes-ubuntu-ppa-jammy.list index b60c2e7b4..8fac5bd8a 100644 --- a/docker/build22.04/deadsnakes-ubuntu-ppa-jammy.list +++ b/docker/build22.04/deadsnakes-ubuntu-ppa-jammy.list @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/config b/docker/config index 563a3a99c..d597b477b 100644 --- a/docker/config +++ b/docker/config @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/devel20.04/Dockerfile b/docker/devel20.04/Dockerfile index 5d7fd499e..d71475ded 100644 --- a/docker/devel20.04/Dockerfile +++ b/docker/devel20.04/Dockerfile @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/devel20.04/gdbinit b/docker/devel20.04/gdbinit index 9ba78c2dc..babb83389 100644 --- a/docker/devel20.04/gdbinit +++ b/docker/devel20.04/gdbinit @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/devel20.04/vimrc b/docker/devel20.04/vimrc index 59a3426ac..2320f4f50 100644 --- a/docker/devel20.04/vimrc +++ b/docker/devel20.04/vimrc @@ -1,4 +1,4 @@ -" SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +" SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. " SPDX-License-Identifier: Apache-2.0 " " NVIDIA CORPORATION, its affiliates and licensors retain all intellectual diff --git a/docker/devel22.04/Dockerfile b/docker/devel22.04/Dockerfile index 55b652779..51f9c9abe 100644 --- a/docker/devel22.04/Dockerfile +++ b/docker/devel22.04/Dockerfile @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/devel22.04/gdbinit b/docker/devel22.04/gdbinit index 9ba78c2dc..babb83389 100644 --- a/docker/devel22.04/gdbinit +++ b/docker/devel22.04/gdbinit @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/devel22.04/vimrc b/docker/devel22.04/vimrc index 59a3426ac..2320f4f50 100644 --- a/docker/devel22.04/vimrc +++ b/docker/devel22.04/vimrc @@ -1,4 +1,4 @@ -" SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +" SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. " SPDX-License-Identifier: Apache-2.0 " " NVIDIA CORPORATION, its affiliates and licensors retain all intellectual diff --git a/docker/env_devel_linux.sh b/docker/env_devel_linux.sh index 0c16ee742..f031be358 100755 --- a/docker/env_devel_linux.sh +++ b/docker/env_devel_linux.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/samples/Dockerfile b/docker/samples/Dockerfile index 0a8e70f65..89a6b902b 100644 --- a/docker/samples/Dockerfile +++ b/docker/samples/Dockerfile @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/test20.04/Dockerfile b/docker/test20.04/Dockerfile index edd979a1b..b139a2f6a 100644 --- a/docker/test20.04/Dockerfile +++ b/docker/test20.04/Dockerfile @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/test20.04/deadsnakes-ubuntu-ppa-focal.list b/docker/test20.04/deadsnakes-ubuntu-ppa-focal.list index b9cba6f58..2cdc0fb1d 100644 --- a/docker/test20.04/deadsnakes-ubuntu-ppa-focal.list +++ b/docker/test20.04/deadsnakes-ubuntu-ppa-focal.list @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/test22.04/Dockerfile b/docker/test22.04/Dockerfile index 63b0d4a08..a23c61889 100644 --- a/docker/test22.04/Dockerfile +++ b/docker/test22.04/Dockerfile @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/test22.04/deadsnakes-ubuntu-ppa-jammy.list b/docker/test22.04/deadsnakes-ubuntu-ppa-jammy.list index b60c2e7b4..8fac5bd8a 100644 --- a/docker/test22.04/deadsnakes-ubuntu-ppa-jammy.list +++ b/docker/test22.04/deadsnakes-ubuntu-ppa-jammy.list @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/update_build_image.sh b/docker/update_build_image.sh index 32e4eab3f..2a5cb4494 100755 --- a/docker/update_build_image.sh +++ b/docker/update_build_image.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/update_devel_image.sh b/docker/update_devel_image.sh index aa7504149..1bc93a10b 100755 --- a/docker/update_devel_image.sh +++ b/docker/update_devel_image.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/update_samples_image.sh b/docker/update_samples_image.sh index 6dbfc907b..7623cffd3 100755 --- a/docker/update_samples_image.sh +++ b/docker/update_samples_image.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docker/update_test_image.sh b/docker/update_test_image.sh index c69598d32..6ac9564b3 100755 --- a/docker/update_test_image.sh +++ b/docker/update_test_image.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/Doxyfile.in b/docs/Doxyfile.in index 5c9873453..fe31051a7 100644 --- a/docs/Doxyfile.in +++ b/docs/Doxyfile.in @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/_python_api/nvcv/cache.rst b/docs/sphinx/_python_api/nvcv/cache.rst index ee48d9df9..720328798 100644 --- a/docs/sphinx/_python_api/nvcv/cache.rst +++ b/docs/sphinx/_python_api/nvcv/cache.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,4 +19,4 @@ Cache .. automodule:: nvcv :noindex: - :members: cache_size, clear_cache + :members: cache_size, clear_cache, get_cache_limit_inbytes, set_cache_limit_inbytes, current_cache_size_inbytes diff --git a/docs/sphinx/_python_api/nvcv/colorspec.rst b/docs/sphinx/_python_api/nvcv/colorspec.rst index 7344dac50..b8c1832b8 100644 --- a/docs/sphinx/_python_api/nvcv/colorspec.rst +++ b/docs/sphinx/_python_api/nvcv/colorspec.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/_python_api/nvcv/format.rst b/docs/sphinx/_python_api/nvcv/format.rst index d51f8fd21..923799f69 100644 --- a/docs/sphinx/_python_api/nvcv/format.rst +++ b/docs/sphinx/_python_api/nvcv/format.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/_python_api/nvcv/image.rst b/docs/sphinx/_python_api/nvcv/image.rst index cd5f3dfa7..b7379b4f1 100644 --- a/docs/sphinx/_python_api/nvcv/image.rst +++ b/docs/sphinx/_python_api/nvcv/image.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/_python_api/nvcv/imagebatch.rst b/docs/sphinx/_python_api/nvcv/imagebatch.rst index 17054ce96..123b114a7 100644 --- a/docs/sphinx/_python_api/nvcv/imagebatch.rst +++ b/docs/sphinx/_python_api/nvcv/imagebatch.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/_python_api/nvcv/recti.rst b/docs/sphinx/_python_api/nvcv/recti.rst index e170207e0..d7f0a1b33 100644 --- a/docs/sphinx/_python_api/nvcv/recti.rst +++ b/docs/sphinx/_python_api/nvcv/recti.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/_python_api/nvcv/resource.rst b/docs/sphinx/_python_api/nvcv/resource.rst new file mode 100644 index 000000000..cd2fbb2cd --- /dev/null +++ b/docs/sphinx/_python_api/nvcv/resource.rst @@ -0,0 +1,22 @@ +.. + # SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-License-Identifier: Apache-2.0 + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + +Resource +======== + +.. automodule:: nvcv + :noindex: + :members: Resource diff --git a/docs/sphinx/_python_api/nvcv/tensor.rst b/docs/sphinx/_python_api/nvcv/tensor.rst index fdd3f4915..12e9dde96 100644 --- a/docs/sphinx/_python_api/nvcv/tensor.rst +++ b/docs/sphinx/_python_api/nvcv/tensor.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/_python_api/nvcv/tensorbatch.rst b/docs/sphinx/_python_api/nvcv/tensorbatch.rst index 636442dc5..90ee0e13d 100644 --- a/docs/sphinx/_python_api/nvcv/tensorbatch.rst +++ b/docs/sphinx/_python_api/nvcv/tensorbatch.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/_python_api/template.rst b/docs/sphinx/_python_api/template.rst index 5ca35a3bc..1e04d0b2d 100644 --- a/docs/sphinx/_python_api/template.rst +++ b/docs/sphinx/_python_api/template.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/bestpractices.rst b/docs/sphinx/bestpractices.rst new file mode 100644 index 000000000..b5c4cd80a --- /dev/null +++ b/docs/sphinx/bestpractices.rst @@ -0,0 +1,28 @@ +.. + # SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-License-Identifier: Apache-2.0 + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + +.. _bestpractices: + + +Best Practices +-------------- + +This guide covers best practices of CV-CUDA. + +.. toctree:: + :maxdepth: 1 + + python diff --git a/docs/sphinx/gen_py_doc_rsts.py b/docs/sphinx/gen_py_doc_rsts.py index b48eccba3..320f97600 100644 --- a/docs/sphinx/gen_py_doc_rsts.py +++ b/docs/sphinx/gen_py_doc_rsts.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -53,11 +53,13 @@ def get_name_of_def(s: str) -> str: return re.findall('"([^"]*)"', s)[0] -def has_exports(file_path, export_calls): - for call in export_calls: - if call in open(file_path).read(): - export_calls.remove(call) - return True +def has_exports(file_path: str, export_calls: List[str]) -> bool: + with open(file_path, "r") as file_str: + file_str_read = file_str.read() + for call in export_calls: + if call in file_str_read: + export_calls.remove(call) + return True return False diff --git a/docs/sphinx/generate_groups.py b/docs/sphinx/generate_groups.py index 8ab19ba2b..7e87ae436 100644 --- a/docs/sphinx/generate_groups.py +++ b/docs/sphinx/generate_groups.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/getting_started.rst b/docs/sphinx/getting_started.rst index f7489ab4d..ce1fd4f1f 100644 --- a/docs/sphinx/getting_started.rst +++ b/docs/sphinx/getting_started.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,11 +19,12 @@ Getting Started =============== -This section guides you step-by-step towards building different types of Computer Vision and Deep Learning pipelines accelerated on the GPU using the CV-CUDA APIs. Before getting started, please review the :ref:`prerequisites`. Once reviewed, head over to the :ref:`samples`' section which showcases various CV-CUDA samples. +This section guides you step-by-step towards building different types of Computer Vision and Deep Learning pipelines accelerated on the GPU using the CV-CUDA APIs. Before getting started, please review the :ref:`prerequisites` and :ref:`bestpractices`. Once reviewed, head over to the :ref:`samples`' section which showcases various CV-CUDA samples. .. toctree:: :maxdepth: 1 prerequisites + bestpractices samples diff --git a/docs/sphinx/index.rst b/docs/sphinx/index.rst index 3817f1bfd..acb87f9da 100644 --- a/docs/sphinx/index.rst +++ b/docs/sphinx/index.rst @@ -123,6 +123,7 @@ Copyright :maxdepth: 1 :hidden: + v0.10.0-beta v0.9.0-beta v0.8.0-beta v0.7.0-beta diff --git a/docs/sphinx/modules/c_algos.rst b/docs/sphinx/modules/c_algos.rst index 81392c4ef..9164df2c5 100644 --- a/docs/sphinx/modules/c_algos.rst +++ b/docs/sphinx/modules/c_algos.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/modules/c_core.rst b/docs/sphinx/modules/c_core.rst index d74af3281..6754c52e9 100644 --- a/docs/sphinx/modules/c_core.rst +++ b/docs/sphinx/modules/c_core.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/modules/c_status.rst b/docs/sphinx/modules/c_status.rst index 0696ccab5..52ff8b891 100644 --- a/docs/sphinx/modules/c_status.rst +++ b/docs/sphinx/modules/c_status.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/modules/c_utils.rst b/docs/sphinx/modules/c_utils.rst index 07d51cefb..f619a768b 100644 --- a/docs/sphinx/modules/c_utils.rst +++ b/docs/sphinx/modules/c_utils.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/modules/cpp_algos.rst b/docs/sphinx/modules/cpp_algos.rst index 83255b27d..c10ae2051 100644 --- a/docs/sphinx/modules/cpp_algos.rst +++ b/docs/sphinx/modules/cpp_algos.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/modules/cpp_core.rst b/docs/sphinx/modules/cpp_core.rst index 23ea8ce5b..8a03ff5f7 100644 --- a/docs/sphinx/modules/cpp_core.rst +++ b/docs/sphinx/modules/cpp_core.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/modules/cpp_cudatools.rst b/docs/sphinx/modules/cpp_cudatools.rst index 0d08d11b6..f0c122b5b 100644 --- a/docs/sphinx/modules/cpp_cudatools.rst +++ b/docs/sphinx/modules/cpp_cudatools.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/modules/cpp_modules.rst b/docs/sphinx/modules/cpp_modules.rst index 7df402929..413d08a8d 100644 --- a/docs/sphinx/modules/cpp_modules.rst +++ b/docs/sphinx/modules/cpp_modules.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/modules/cpp_utils.rst b/docs/sphinx/modules/cpp_utils.rst index fff1ffde8..2ced344b3 100644 --- a/docs/sphinx/modules/cpp_utils.rst +++ b/docs/sphinx/modules/cpp_utils.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/modules/python_algos.rst b/docs/sphinx/modules/python_algos.rst index 50983eacb..bdeb5d6c3 100644 --- a/docs/sphinx/modules/python_algos.rst +++ b/docs/sphinx/modules/python_algos.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/modules/python_core.rst b/docs/sphinx/modules/python_core.rst index 98279fe88..cf8adf679 100644 --- a/docs/sphinx/modules/python_core.rst +++ b/docs/sphinx/modules/python_core.rst @@ -1,5 +1,5 @@ .. 
- # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,6 +25,7 @@ Core components and related functions for the NVIDIA® NVCV library. Color Models <../_python_api/nvcv/colorspec> Image Formats <../_python_api/nvcv/format> Rect <../_python_api/nvcv/recti> + Resource <../_python_api/nvcv/resource> Image <../_python_api/nvcv/image> ImageBatchVarShape <../_python_api/nvcv/imagebatch> Tensor <../_python_api/nvcv/tensor> diff --git a/docs/sphinx/modules/python_modules.rst b/docs/sphinx/modules/python_modules.rst index bf20edd91..b76f8420b 100644 --- a/docs/sphinx/modules/python_modules.rst +++ b/docs/sphinx/modules/python_modules.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/nvcvobjectcache.rst b/docs/sphinx/nvcvobjectcache.rst new file mode 100644 index 000000000..02b223ed9 --- /dev/null +++ b/docs/sphinx/nvcvobjectcache.rst @@ -0,0 +1,136 @@ +.. + # SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-License-Identifier: Apache-2.0 + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License.
+
+.. _nvcvobjectcache:
+
+
+NVCV Object Cache
+=================
+
+CV-CUDA has internal resource management.
+Python objects that are used within CV-CUDA are added to CV-CUDA's NVCV cache.
+
+Note: CV-CUDA is device agnostic, i.e., CV-CUDA does not know on which device the data resides!
+
+Basics
+------
+
+The most prominent cached objects are of the following classes: ``Image``, ``ImageBatch``, ``Stream``, ``Tensor``, ``TensorBatch``, ``ExternalCacheItem`` (e.g., an operator's payload).
+
+With respect to the cache, we differentiate objects by how much cache memory they use:
+non-wrapped objects increase the cache's memory, while wrapped objects do not.
+
+An example of a non-wrapped object that increases the cache's memory::
+
+    import nvcv
+    import numpy as np
+
+    tensor = nvcv.Tensor((16, 32, 4), np.float32, nvcv.TensorLayout.HWC)
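+
+A quick way to observe this effect is to compare the reported cache size before and after creating such an object. The snippet below is a minimal sketch; it assumes the byte-size query ``nvcv.current_cache_size_inbytes()`` that is introduced further down this page::
+
+    import nvcv
+    import numpy as np
+
+    before = nvcv.current_cache_size_inbytes()
+    tensor = nvcv.Tensor((16, 32, 4), np.float32, nvcv.TensorLayout.HWC)
+    after = nvcv.current_cache_size_inbytes()
+
+    # The tensor's memory is hosted by CV-CUDA, so the reported cache size grew.
+    print(after - before)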
+
+Wrapped objects are objects whose memory is not hosted by CV-CUDA; hence, they do not increase the cache's memory.
+In the following Python snippet, ``cvcuda_tensor`` is a wrapped tensor, which does not increase the cache's memory::
+
+    import nvcv
+    import torch
+
+    torch_tensor = torch.tensor([1], device="cuda", dtype=torch.uint8)
+    cvcuda_tensor = nvcv.as_tensor(torch_tensor)
+
+
+Cache Re-use
+--------------
+
+If a CV-CUDA object is created and runs out of scope, we can leverage the cache to efficiently create a new CV-CUDA object with the same specifics, e.g., of the same shape and data type::
+
+    import nvcv
+    import numpy as np
+
+    def create_tensor1():
+        tensor1 = nvcv.Tensor((16, 32, 4), np.float32, nvcv.TensorLayout.HWC)
+        return
+
+    def create_tensor2():
+        # re-use the cache
+        tensor2 = nvcv.Tensor((16, 32, 4), np.float32, nvcv.TensorLayout.HWC)
+        return
+
+    create_tensor1()
+    # tensor1 runs out of scope after leaving ``create_tensor1()``
+    create_tensor2()
+
+
+In this case, no new memory is allocated for ``tensor2``: the memory of ``tensor1`` is re-used, because ``tensor1`` and ``tensor2`` have the same shape and data type.
+
+Cache re-use is also possible for wrapped objects (even though they do not increase the cache's memory, re-using the cache is still more efficient).
+
+Controlling the cache limit
+---------------------------
+
+Some workflows can cause the cache to grow significantly, e.g., if one keeps creating non-wrapped tensors of different shapes and hence rarely re-uses the cache::
+
+    import nvcv
+    import numpy as np
+    import random
+
+    def create_tensor(h, w):
+        tensor1 = nvcv.Tensor((h, w, 3), np.float32, nvcv.TensorLayout.HWC)
+        return
+
+    while True:
+        h = random.randint(1000, 2000)
+        w = random.randint(1000, 2000)
+        create_tensor(h, w)
+
+To control that cache growth, CV-CUDA implements a user-configurable cache limit and an automatic clearance mechanism.
+When the cache hits that limit, it is automatically cleared.
+Similarly, if a single object is larger than the cache limit, it is not added to the cache.
+The cache limit can be controlled in the following manner::
+
+    import nvcv
+
+    # Get the cache limit (in bytes)
+    current_cache_limit = nvcv.get_cache_limit_inbytes()
+
+    # Set the cache limit (in bytes)
+    my_new_cache_limit = 12345  # in bytes
+    nvcv.set_cache_limit_inbytes(my_new_cache_limit)
+
+By default, the cache limit is set to half the total GPU memory of the current device when importing ``cvcuda``, e.g.::
+
+    import nvcv
+    import torch
+
+    # Set the cache limit (in bytes)
+    total_mem = torch.cuda.mem_get_info()[1]
+    nvcv.set_cache_limit_inbytes(total_mem // 2)
+
+It is also possible to set the cache limit to a value larger than the total memory of a single GPU.
+Because CV-CUDA is device agnostic, the cached data may be spread across multiple devices.
+Consider a scenario with two GPUs of 24GB each, where 20GB of data resides on each GPU.
+Setting the cache limit to more than 40GB keeps all data in the cache, even though the limit exceeds a single GPU's total memory.
+It is, however, the user's responsibility to distribute the data accordingly.
+
+A cache limit of 0 effectively disables the cache.
+However, a low cache limit or a disabled cache can hurt performance, as previously allocated memory is not re-used and new memory has to be allocated and deallocated instead.
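+
+Besides the automatic clearance mechanism, the cache can also be cleared manually. The following is a minimal sketch; it assumes the ``clear_cache`` and ``cache_size`` helpers listed in the Python API reference, where ``cache_size`` reports the number of items currently held in the cache::
+
+    import nvcv
+    import numpy as np
+
+    # Create a few non-wrapped tensors; they are added to the cache.
+    for _ in range(4):
+        nvcv.Tensor((16, 32, 4), np.float32, nvcv.TensorLayout.HWC)
+
+    print(nvcv.cache_size())
+
+    # Drop all cached items, releasing the memory owned by the cache.
+    nvcv.clear_cache()
+    print(nvcv.cache_size())
+
+Manually clearing the cache can be useful right before a memory-intensive phase of a pipeline that does not involve CV-CUDA.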
+
+CV-CUDA also provides a way to query the current cache size (in bytes). This can be helpful for debugging::
+
+    import nvcv
+
+    print(nvcv.current_cache_size_inbytes())
+    img = nvcv.Image.zeros((1, 1), nvcv.Format.F32)
+    print(nvcv.current_cache_size_inbytes())
diff --git a/docs/sphinx/prerequisites.rst b/docs/sphinx/prerequisites.rst index 540bef62e..550d67a83 100644 --- a/docs/sphinx/prerequisites.rst +++ b/docs/sphinx/prerequisites.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/python.rst b/docs/sphinx/python.rst new file mode 100644 index 000000000..1b5ef0815 --- /dev/null +++ b/docs/sphinx/python.rst @@ -0,0 +1,28 @@ +.. + # SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-License-Identifier: Apache-2.0 + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + +.. _Python: + + +Python +-------------- + +This guide covers best practices of CV-CUDA for Python. + +.. toctree:: + :maxdepth: 1 + + nvcvobjectcache diff --git a/docs/sphinx/relnotes/v0.1.0-prealpha.rst b/docs/sphinx/relnotes/v0.1.0-prealpha.rst index c7b75d22e..f5be9593a 100644 --- a/docs/sphinx/relnotes/v0.1.0-prealpha.rst +++ b/docs/sphinx/relnotes/v0.1.0-prealpha.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/relnotes/v0.10.0-beta.rst b/docs/sphinx/relnotes/v0.10.0-beta.rst new file mode 100644 index 000000000..7f446dd77 --- /dev/null +++ b/docs/sphinx/relnotes/v0.10.0-beta.rst @@ -0,0 +1,70 @@ +.. + # SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-License-Identifier: Apache-2.0 + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License.
+
+.. _v0.10.0-beta:
+
+v0.10.0-beta
+============
+
+Release Highlights
+------------------
+
+CV-CUDA v0.10.0 includes a critical bug fix (cache growth management) alongside the following changes:
+
+* **New Features**:
+
+  * Added mechanism to limit and manage cache memory consumption (includes new "Best Practices" documentation).
+  * Performance improvements of color conversion operators (e.g., 2x faster RGB2YUV).
+  * Refactored codebase to allow independent build of NVCV library (data structures).
+
+* **Bug Fixes**:
+
+  * Fixed unbounded cache memory consumption issue.
+  * Improved management of Python-created object lifetimes, decoupled from cache management.
+  * Fixed potential crash in Resize operator's linear and nearest neighbor interpolation from non-aligned vectorized writes.
+  * Fixed Python CvtColor operator to correctly handle NV12 and NV21 outputs.
+  * Fixed Resize and RandomResizedCrop linear interpolation weight for border rows and columns.
+  * Fixed missing parameter in C API for fused ResizeCropConvertReformat.
+  * Fixed several minor documentation and error output issues.
+  * Fixed minor compiler warning while building Resize operator.
+
+Compatibility and Known Limitations
+-----------------------------------
+
+* **New limitations**:
+
+  * Cache/resource management introduced in v0.10 adds microsecond-level overhead to Python operator calls. Based on the performance analysis of our Python samples, we expect the production- and pipeline-level impact to be negligible. CUDA kernel and C++ call performance is not affected. We aim to investigate and reduce this overhead further in a future release.
+  * Sporadic Pybind11 deallocation crashes have been reported in long-lasting multi-threaded Python pipelines with externally allocated memory (e.g., wrapped PyTorch buffers). We are evaluating an upgrade of Pybind11 (currently using 2.10) as a potential fix in an upcoming release.
+
+For the full list, see the main README on `CV-CUDA GitHub `_.
+
+License
+-------
+
+CV-CUDA is licensed under the `Apache 2.0 `_ license.
+
+Resources
+---------
+
+1. `CV-CUDA GitHub `_
+2. `CV-CUDA Increasing Throughput and Reducing Costs for AI-Based Computer Vision with CV-CUDA `_
+3. `NVIDIA Announces Microsoft, Tencent, Baidu Adopting CV-CUDA for Computer Vision AI `_
+4. `CV-CUDA helps Tencent Cloud audio and video PaaS platform achieve full-process GPU acceleration for video enhancement AI `_
+
+Acknowledgements
+----------------
+
+CV-CUDA is developed jointly by NVIDIA and the ByteDance Machine Learning team.
diff --git a/docs/sphinx/relnotes/v0.2.0-alpha.rst b/docs/sphinx/relnotes/v0.2.0-alpha.rst index d9bef9512..8454c2283 100644 --- a/docs/sphinx/relnotes/v0.2.0-alpha.rst +++ b/docs/sphinx/relnotes/v0.2.0-alpha.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/relnotes/v0.2.1-alpha.rst b/docs/sphinx/relnotes/v0.2.1-alpha.rst index 4455e9237..68fb92c91 100644 --- a/docs/sphinx/relnotes/v0.2.1-alpha.rst +++ b/docs/sphinx/relnotes/v0.2.1-alpha.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/relnotes/v0.3.0-beta.rst b/docs/sphinx/relnotes/v0.3.0-beta.rst index 5c7d784c9..e83ad49bd 100644 --- a/docs/sphinx/relnotes/v0.3.0-beta.rst +++ b/docs/sphinx/relnotes/v0.3.0-beta.rst @@ -1,5 +1,5 @@ ..
- # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/relnotes/v0.3.1-beta.rst b/docs/sphinx/relnotes/v0.3.1-beta.rst index 24058d6d7..55332d408 100644 --- a/docs/sphinx/relnotes/v0.3.1-beta.rst +++ b/docs/sphinx/relnotes/v0.3.1-beta.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/relnotes/v0.4.0-beta.rst b/docs/sphinx/relnotes/v0.4.0-beta.rst index 1a0f4eed1..54f6dd052 100644 --- a/docs/sphinx/relnotes/v0.4.0-beta.rst +++ b/docs/sphinx/relnotes/v0.4.0-beta.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/relnotes/v0.5.0-beta.rst b/docs/sphinx/relnotes/v0.5.0-beta.rst index 57abe2f20..ecdeaa20c 100644 --- a/docs/sphinx/relnotes/v0.5.0-beta.rst +++ b/docs/sphinx/relnotes/v0.5.0-beta.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/samples/cpp_samples/cropresize.rst b/docs/sphinx/samples/cpp_samples/cropresize.rst index 0e10f8d88..3288e9146 100644 --- a/docs/sphinx/samples/cpp_samples/cropresize.rst +++ b/docs/sphinx/samples/cpp_samples/cropresize.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/samples/python_samples/classification/classification_pytorch.rst b/docs/sphinx/samples/python_samples/classification/classification_pytorch.rst index 941c3b1e3..eb466a9c4 100644 --- a/docs/sphinx/samples/python_samples/classification/classification_pytorch.rst +++ b/docs/sphinx/samples/python_samples/classification/classification_pytorch.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/samples/python_samples/classification/classification_tensorrt.rst b/docs/sphinx/samples/python_samples/classification/classification_tensorrt.rst index afcb4e428..76cd53ac2 100644 --- a/docs/sphinx/samples/python_samples/classification/classification_tensorrt.rst +++ b/docs/sphinx/samples/python_samples/classification/classification_tensorrt.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/samples/python_samples/classification/postprocessor_cvcuda.rst b/docs/sphinx/samples/python_samples/classification/postprocessor_cvcuda.rst index 96abd4efc..55e2140a8 100644 --- a/docs/sphinx/samples/python_samples/classification/postprocessor_cvcuda.rst +++ b/docs/sphinx/samples/python_samples/classification/postprocessor_cvcuda.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/samples/python_samples/classification/preprocessor_cvcuda.rst b/docs/sphinx/samples/python_samples/classification/preprocessor_cvcuda.rst index cb75a09f7..613cb7b4d 100644 --- a/docs/sphinx/samples/python_samples/classification/preprocessor_cvcuda.rst +++ b/docs/sphinx/samples/python_samples/classification/preprocessor_cvcuda.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/samples/python_samples/object_detection/objectdetection_tensorflow.rst b/docs/sphinx/samples/python_samples/object_detection/objectdetection_tensorflow.rst index 129aebe5f..ce5d96d24 100644 --- a/docs/sphinx/samples/python_samples/object_detection/objectdetection_tensorflow.rst +++ b/docs/sphinx/samples/python_samples/object_detection/objectdetection_tensorflow.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/samples/python_samples/object_detection/postprocessor_cvcuda.rst b/docs/sphinx/samples/python_samples/object_detection/postprocessor_cvcuda.rst index 59c20333a..5eb9570c5 100644 --- a/docs/sphinx/samples/python_samples/object_detection/postprocessor_cvcuda.rst +++ b/docs/sphinx/samples/python_samples/object_detection/postprocessor_cvcuda.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/samples/python_samples/object_detection/preprocessor_cvcuda.rst b/docs/sphinx/samples/python_samples/object_detection/preprocessor_cvcuda.rst index 7f39501af..5ce2c534f 100644 --- a/docs/sphinx/samples/python_samples/object_detection/preprocessor_cvcuda.rst +++ b/docs/sphinx/samples/python_samples/object_detection/preprocessor_cvcuda.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/samples/python_samples/segmentation/postprocessor_cvcuda.rst b/docs/sphinx/samples/python_samples/segmentation/postprocessor_cvcuda.rst index 02e54d352..f662ebbde 100644 --- a/docs/sphinx/samples/python_samples/segmentation/postprocessor_cvcuda.rst +++ b/docs/sphinx/samples/python_samples/segmentation/postprocessor_cvcuda.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/samples/python_samples/segmentation/preprocessor_cvcuda.rst b/docs/sphinx/samples/python_samples/segmentation/preprocessor_cvcuda.rst index cc0649879..e8ea4df1b 100644 --- a/docs/sphinx/samples/python_samples/segmentation/preprocessor_cvcuda.rst +++ b/docs/sphinx/samples/python_samples/segmentation/preprocessor_cvcuda.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/samples/python_samples/segmentation/segmentation_pytorch.rst b/docs/sphinx/samples/python_samples/segmentation/segmentation_pytorch.rst index acf9de781..4586bdbcf 100644 --- a/docs/sphinx/samples/python_samples/segmentation/segmentation_pytorch.rst +++ b/docs/sphinx/samples/python_samples/segmentation/segmentation_pytorch.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/samples/python_samples/segmentation/segmentation_tensorrt.rst b/docs/sphinx/samples/python_samples/segmentation/segmentation_tensorrt.rst index 211ff89ef..e0ba7810a 100644 --- a/docs/sphinx/samples/python_samples/segmentation/segmentation_tensorrt.rst +++ b/docs/sphinx/samples/python_samples/segmentation/segmentation_tensorrt.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/sphinx/samples/python_samples/segmentation_triton.rst b/docs/sphinx/samples/python_samples/segmentation_triton.rst index 549edd597..b2607bea8 100644 --- a/docs/sphinx/samples/python_samples/segmentation_triton.rst +++ b/docs/sphinx/samples/python_samples/segmentation_triton.rst @@ -1,5 +1,5 @@ .. - # SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/lint/commitlint.config.js b/lint/commitlint.config.js deleted file mode 100644 index 986b826dc..000000000 --- a/lint/commitlint.config.js +++ /dev/null @@ -1,23 +0,0 @@ -/* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -module.exports = { - extends : [ "@commitlint/config-conventional" ], - helpUrl : 'https://confluence.nvidia.com/display/CVCUDA/Commit+message+format', - rules : {'references-empty' : [ 2, 'never' ]}, - parserPreset : {parserOpts : {issuePrefixes : [ '^CVCUDA-' ]}} -} diff --git a/lint/copyright_check.sh b/lint/copyright_check.sh deleted file mode 100755 index d46ecb7c6..000000000 --- a/lint/copyright_check.sh +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash -eE - -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Check if input files have valid copyright message -# Ref: https://confluence.nvidia.com/display/RP/Standardizing+on+SPDX+Identifiers - -valid_licenses=('Apache-2.0' 'LicenseRef-NvidiaProprietary') - -# Detects that the line is a comment. -# The following line detects comments in source code and mark down files. -# It can detect c++ style comments, python style comments or markdown style comments. 
-rgx_comment='^[[:space:]]*[[:graph:]]\+[[:space:]]\+[[:graph:]]*[[:space:]]*["]*' - -function get_tag() -{ - local tag=$1 - shift - - local rgx="s@^\($rgx_comment\)\?$tag:[[:space:]]*\([^\"]*\)\"*@\2@p" - - sed -n "$rgx" "$file" -} - -function error() -{ - local file=$1 - local msg=$2 - shift 2 - - echo -e "In $file:\n\t$msg" && false -} - -function check_license() -{ - local file=$1 - shift - - # Get tag value - local tag='SPDX-License-Identifier' - local license - license=$(get_tag "$tag") - if [ -z "$license" ]; then - error "$file" "No well-formed $tag tag found." - fi - - # Check if it is valid - if [[ ! " ${valid_licenses[*]} " =~ [[:space:]]${license}[[:space:]] ]]; then - valid_licenses_str="${valid_licenses[*]}" - error "$file" "License '$license' not valid. Must be a value from '${valid_licenses_str//${IFS:0:1}/, }'." && false - fi -} - -function get_copyright_year_range() -{ - local file=$1 - shift - - local tag='SPDX-FileCopyrightText' - copyright=$(get_tag "$tag") - if [ "$copyright" ]; then - local rgx_copyright_year_range='Copyright[[:space:]]*([Cc])[[:space:]]\+\([[:digit:]]\+-\?[[:digit:]]\+\),\?[[:space:]]*NVIDIA CORPORATION & AFFILIATES\. All rights reserved\.' - - # If copyright text is limited to fit 80 characters, - if [ "$copyright" = 'NVIDIA CORPORATION & AFFILIATES' ]; then - # Look for the non-tagged copyright message - copyright=$(sed -n 's@^\('"$rgx_comment"'\)\?\('"$rgx_copyright_year_range"'\)@\2@p' "$file") - fi - fi - - echo "$copyright" | sed -n "s@$rgx_copyright_year_range@\1@p" -} - -function check_copyright_message() -{ - local file=$1 - shift - - # Get tag value - local tag='SPDX-FileCopyrightText' - local copyright - copyright=$(get_tag "$tag") - if [ -z "$copyright" ]; then - error "$file" "No well-formed $tag tag found." && false - fi - - # Check if year range is valid - local year_range - year_range=$(get_copyright_year_range "$file") - if [[ -z "$year_range" ]]; then - error "$file" "Malformed copyright message '$copyright'. Must be 'Copyright (c) beg_year[-end_year] NVIDIA CORPORATION & AFFILIATES. All rights reserved.'" && false - fi -} - -function check_copyright_year() -{ - local file=$1 - shift - - local year_range - year_range=$(get_copyright_year_range "$file") - - # Get copyright year range - local rgx_year_range='\([[:digit:]]\{4\}\)\(-[[:digit:]]\{4\}\)\?' - local beg_year end_year - beg_year=$(echo "$year_range" | sed -n "s@$rgx_year_range@\1@p") - end_year=$(echo "$year_range" | sed -n "s@$rgx_year_range@\2@p") - end_year=${end_year:1} # remove '-' at beginning - if [[ -z "$beg_year" ]]; then - error "$file" "Malformed copyright year range '$year_range'. Must be beg_year[-end_year]." && false - fi - - # Check if range is valid - - # Get the year when file was last modified. - local cur_year - - # If file is staged, - local is_staged - is_staged=$(git diff --name-only --cached "$file") - if [ "$is_staged" ]; then - # it was modified now - cur_year=$(date +'%Y') # YYYY - else - local is_grafted - is_grafted=$(git log --oneline -1 --decorate "$file" | grep grafted || true) - # if most recent commit is "grafted", it means that the repository is shallow, - # and the last commit this file was touched is not present. In this situation, - # we can't tell when it was last changed. Since this commit is old, we can assume - # that the copyright year is correct, i.e., the file wasn't touched recently. 
- if [ "$is_grafted" ]; then - return 0 - else - # get last modification time - cur_year=$(git log -1 --pretty="format:%cs" "$file") # YYYY-MM-DD - cur_year=${cur_year%%-*} # YYYY - fi - fi - - # Only start year? - if [ -z "$end_year" ]; then - if [[ $beg_year != "$cur_year" ]]; then - error "$file" "Invalid year '$beg_year' in copyright message. Must be '$beg_year-$cur_year'." && false - fi - # Range doesn't include current year? - elif [[ $beg_year -ge $cur_year || $end_year -lt $cur_year ]]; then - error "$file" "Invalid year range '$year_range' in copyright message. '$cur_year' must be in range ($beg_year;$end_year]." && false - fi -} - -for file in "$@"; do - check_license "$file" - check_copyright_message "$file" - check_copyright_year "$file" -done diff --git a/lint/lfs_check.sh b/lint/lfs_check.sh deleted file mode 100755 index 78fada48c..000000000 --- a/lint/lfs_check.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -e - -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Check if files that should be handled by LFS are being committed as -# LFS objects - -lfs_files=$(echo "$@" | xargs git check-attr filter | grep 'filter: lfs$' | sed -e 's@: filter: lfs@@') - -binary_files='' - -for file in $lfs_files; do - soft_sha=$(git hash-object -w $file) - raw_sha=$(git hash-object -w --no-filters $file) - - if [ $soft_sha == $raw_sha ]; then - binary_files="* $file\n$binary_files" - fi -done - -if [[ "$binary_files" ]]; then - echo "The following files tracked by git-lfs are being committed as standard git objects:" - echo -e "$binary_files" - echo "Revert your changes and commit those with git-lfs installed." - echo "In repo's root directory, run: sudo apt-get git-lfs && git lfs install" - exit 1 -fi diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 65f61d879..2f175f089 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -30,7 +30,8 @@ add_subdirectory(${PYBIND11_SOURCE_DIR} pybind11) find_package(nvcv_types REQUIRED) -add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../src/util nvcv_util) +add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../src/nvcv/util nvcv_util) +add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../src/cvcuda/util cvcuda_util) set(PYTHON_MODULE_NAME python${PYTHON_VERSION}) string(REPLACE "." "" PYTHON_MODULE_NAME "${PYTHON_MODULE_NAME}") diff --git a/python/common/CMakeLists.txt b/python/common/CMakeLists.txt index 1b31d6408..80fa81c9d 100644 --- a/python/common/CMakeLists.txt +++ b/python/common/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +find_package(cvcuda REQUIRED) find_package(CUDAToolkit REQUIRED) add_library(nvcv_python_common STATIC @@ -22,6 +23,16 @@ add_library(nvcv_python_common STATIC String.cpp ) -target_include_directories(nvcv_python_common PUBLIC ..) +target_include_directories(nvcv_python_common + PUBLIC + .. +) -target_link_libraries(nvcv_python_common PUBLIC nvcv_types nvcv_util CUDA::cudart_static pybind11::pybind11) +target_link_libraries(nvcv_python_common + PUBLIC + nvcv_types + nvcv_util + cvcuda + CUDA::cudart_static + pybind11::pybind11 +) diff --git a/python/common/Hash.hpp b/python/common/Hash.hpp index 9ca9bd9b6..121e2f631 100644 --- a/python/common/Hash.hpp +++ b/python/common/Hash.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,9 +18,9 @@ #ifndef NVCV_PYTHON_HASH_HPP #define NVCV_PYTHON_HASH_HPP +#include #include -#include -#include +#include #include #include diff --git a/python/common/PyUtil.hpp b/python/common/PyUtil.hpp index af6377de8..de8348b05 100644 --- a/python/common/PyUtil.hpp +++ b/python/common/PyUtil.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,8 +18,8 @@ #ifndef NVCV_PYTHON_PYUTIL_HPP #define NVCV_PYTHON_PYUTIL_HPP +#include #include -#include #include #include diff --git a/python/mod_cvcuda/CMakeLists.txt b/python/mod_cvcuda/CMakeLists.txt index 2ab990d14..92da9a8d0 100644 --- a/python/mod_cvcuda/CMakeLists.txt +++ b/python/mod_cvcuda/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -97,6 +97,7 @@ target_link_libraries(cvcuda_module_python pynvcv nvcv_util_compat cvcuda + cvcuda_util nvcv_python_common nvcv_util cuda diff --git a/python/mod_cvcuda/ChannelManipType.cpp b/python/mod_cvcuda/ChannelManipType.cpp index e32085100..623f28289 100644 --- a/python/mod_cvcuda/ChannelManipType.cpp +++ b/python/mod_cvcuda/ChannelManipType.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_cvcuda/ChannelManipType.hpp b/python/mod_cvcuda/ChannelManipType.hpp index ee0a30ff1..b1909464a 100644 --- a/python/mod_cvcuda/ChannelManipType.hpp +++ b/python/mod_cvcuda/ChannelManipType.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_cvcuda/CvtColorUtil.cpp b/python/mod_cvcuda/CvtColorUtil.cpp index 60fad0e71..5d4df214e 100644 --- a/python/mod_cvcuda/CvtColorUtil.cpp +++ b/python/mod_cvcuda/CvtColorUtil.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,16 +17,9 @@ #include "CvtColorUtil.hpp" -//#include #include -//#include #include -#include -//#include -//#include -//#include -//#include -//#include +#include #include #include @@ -281,3 +274,55 @@ nvcv::ImageFormat GetOutputFormat(nvcv::DataType in, NVCVColorConversionCode cod return outFormat; } + +int64_t GetOutputHeight(int64_t height, NVCVColorConversionCode code) +{ + switch (code) + { + case NVCVColorConversionCode::NVCV_COLOR_YUV2RGB_NV12: + case NVCVColorConversionCode::NVCV_COLOR_YUV2BGR_NV12: + case NVCVColorConversionCode::NVCV_COLOR_YUV2RGB_NV21: + case NVCVColorConversionCode::NVCV_COLOR_YUV2BGR_NV21: + return (2 * height) / 3; // output height must be 2/3 of input height from NV12 or NV21 + + case NVCVColorConversionCode::NVCV_COLOR_RGB2YUV_NV12: + case NVCVColorConversionCode::NVCV_COLOR_BGR2YUV_NV12: + case NVCVColorConversionCode::NVCV_COLOR_RGB2YUV_NV21: + case NVCVColorConversionCode::NVCV_COLOR_BGR2YUV_NV21: + return (3 * height) / 2; // output height must be 3/2 of input height for UV plane + + default: + return height; + } +} + +nvcv::TensorShape GetOutputTensorShape(nvcv::TensorShape inputShape, nvcv::ImageFormat outputFormat, + NVCVColorConversionCode code) +{ + if (inputShape.rank() < 3 || inputShape.rank() > 4) + { + throw std::runtime_error("Invalid input tensor shape, only NHWC or HWC are supported"); + } + + int64_t outputShape[4] = {}; + bool heightIndex = inputShape.rank() == 4 ? 1 : 0; + for (int i = 0; i < inputShape.rank(); i++) + { + outputShape[i] = inputShape[i]; + } + int channelIndex = inputShape.rank() == 4 ? 3 : 2; + + outputShape[heightIndex] = GetOutputHeight(outputShape[heightIndex], code); + outputShape[channelIndex] = outputFormat.numChannels(); + + if (inputShape.rank() == 4) + { + return nvcv::TensorShape({outputShape[0], outputShape[1], outputShape[2], outputShape[3]}, "NHWC"); + } + else + { + assert(inputShape.rank() == 3); + + return nvcv::TensorShape({outputShape[0], outputShape[1], outputShape[2]}, "HWC"); + } +} diff --git a/python/mod_cvcuda/CvtColorUtil.hpp b/python/mod_cvcuda/CvtColorUtil.hpp index cfde7f9fa..caf277375 100644 --- a/python/mod_cvcuda/CvtColorUtil.hpp +++ b/python/mod_cvcuda/CvtColorUtil.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,7 +20,13 @@ #include #include +#include nvcv::ImageFormat GetOutputFormat(nvcv::DataType in, NVCVColorConversionCode code); +int64_t GetOutputHeight(int64_t inputHeight, NVCVColorConversionCode code); + +nvcv::TensorShape GetOutputTensorShape(nvcv::TensorShape inputShape, nvcv::ImageFormat outputFormat, + NVCVColorConversionCode code); + #endif // NVCV_COLOR_CONVERSION_UTIL_HPP diff --git a/python/mod_cvcuda/InterpolationType.cpp b/python/mod_cvcuda/InterpolationType.cpp index eb1c934e1..35c968aa0 100644 --- a/python/mod_cvcuda/InterpolationType.cpp +++ b/python/mod_cvcuda/InterpolationType.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_cvcuda/Main.cpp b/python/mod_cvcuda/Main.cpp index 780773ba1..7336e79fc 100644 --- a/python/mod_cvcuda/Main.cpp +++ b/python/mod_cvcuda/Main.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_cvcuda/OpAdaptiveThreshold.cpp b/python/mod_cvcuda/OpAdaptiveThreshold.cpp index 503820171..a7003211d 100644 --- a/python/mod_cvcuda/OpAdaptiveThreshold.cpp +++ b/python/mod_cvcuda/OpAdaptiveThreshold.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -110,7 +110,8 @@ void ExportOpAdaptiveThreshold(py::module &m) m.def("adaptivethreshold", &AdaptiveThreshold, "src"_a, "max_value"_a, "adaptive_method"_a = NVCV_ADAPTIVE_THRESH_MEAN_C, "threshold_type"_a = NVCV_THRESH_BINARY, "block_size"_a, "c"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.adaptivethreshold(src: nvcv.Tensor, max_value: double, adaptive_method: NVCVAdaptiveThresholdType = < NVCV_ADAPTIVE_THRESH_MEAN_C >, threshold_type: NVCVThresholdType = < NVCV_THRESH_BINARY >, block_size: int, c: double, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.adaptivethreshold(src: nvcv.Tensor, max_value: float, adaptive_method: cvcuda.AdaptiveThresholdType = cvcuda.AdaptiveThresholdType.MEAN_C, + threshold_type: cvcuda.ThresholdType = cvcuda.ThresholdType.BINARY, block_size: int, c: float, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the adaptive threshold operation on the given cuda stream. @@ -119,16 +120,16 @@ void ExportOpAdaptiveThreshold(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - max_value (double): Non-zero value assigned to the pixels for which the condition is satisfied. - adaptive_method (NVCVAdaptiveThresholdType): Adaptive threshold algorithm to use. 
- threshold_type (NVCVThresholdType): Threshold type that must be either THRESH_BINARY or THRESH_BINARY_INV. + src (nvcv.Tensor): Input tensor containing one or more images. + max_value (float): Non-zero value assigned to the pixels for which the condition is satisfied. + adaptive_method (cvcuda.AdaptiveThresholdType): Adaptive threshold algorithm to use. + threshold_type (cvcuda.ThresholdType): Threshold type that must be either cvcuda.ThresholdType.BINARY or cvcuda.ThresholdType.BINARY_INV. block_size (int): Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on. - c (double): Constant subtracted from the mean or weighted mean. Normally, it is positive but may be zero or negative as well. - stream (Stream, optional): CUDA Stream on which to perform the operation. + c (float): Constant subtracted from the mean or weighted mean. Normally, it is positive but may be zero or negative as well. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output image. + nvcv.Tensor: The output image. Caution: Restrictions to several arguments may apply. Check the C @@ -138,8 +139,9 @@ void ExportOpAdaptiveThreshold(py::module &m) m.def("adaptivethreshold_into", &AdaptiveThresholdInto, "dst"_a, "src"_a, "max_value"_a, "adaptive_method"_a = NVCV_ADAPTIVE_THRESH_MEAN_C, "threshold_type"_a = NVCV_THRESH_BINARY, "block_size"_a, "c"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.adaptivethreshold_into(dst: nvcv.Tensor, src: nvcv.Tensor, max_value: double, adaptive_method: NVCVAdaptiveThresholdType = < NVCV_ADAPTIVE_TH -RESH_MEAN_C >, threshold_type: NVCVThresholdType = < NVCV_THRESH_BINARY >, block_size: int, c: double, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.adaptivethreshold_into(dst: nvcv.Tensor, src: nvcv.Tensor, max_value: float, adaptive_method: cvcuda.AdaptiveThresholdType = < + cvcuda.AdaptiveThresholdType.MEAN_C >, threshold_type: cvcuda.ThresholdType = cvcuda.ThresholdType.BINARY, + block_size: int, c: float, stream: Optional[nvcv.cuda.Stream] = None) Executes the adaptive threshold operation on the given cuda stream. @@ -148,14 +150,14 @@ RESH_MEAN_C >, threshold_type: NVCVThresholdType = < NVCV_THRESH_BINARY >, block for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - max_value (double): Non-zero value assigned to the pixels for which the condition is satisfied. - adaptive_method (NVCVAdaptiveThresholdType): Adaptive threshold algorithm to use. - threshold_type (NVCVThresholdType): Threshold type that must be either THRESH_BINARY or THRESH_BINARY_INV. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + max_value (float): Non-zero value assigned to the pixels for which the condition is satisfied. + adaptive_method (cvcuda.AdaptiveThresholdType): Adaptive threshold algorithm to use. + threshold_type (cvcuda.ThresholdType): Threshold type that must be either cvcuda.ThresholdType.BINARY or cvcuda.ThresholdType.BINARY_INV. block_size (int): Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on. - c (double): Constant subtracted from the mean or weighted mean. Normally, it is positive but may be zero or negative as well. - stream (Stream, optional): CUDA Stream on which to perform the operation. 
+ c (float): Constant subtracted from the mean or weighted mean. Normally, it is positive but may be zero or negative as well. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -168,7 +170,7 @@ RESH_MEAN_C >, threshold_type: NVCVThresholdType = < NVCV_THRESH_BINARY >, block m.def("adaptivethreshold", &AdaptiveThresholdVarShape, "src"_a, "max_value"_a, "adaptive_method"_a = NVCV_ADAPTIVE_THRESH_MEAN_C, "threshold_type"_a = NVCV_THRESH_BINARY, "max_block_size"_a, "block_size"_a, "c"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.adaptivethreshold(src: nvcv.ImageBatchVarShape, max_value: nvcv.Tensor, adaptive_method: NVCVAdaptiveThresholdType = < NVCV_ADAPTIVE_THRESH_MEAN_C >, threshold_type: NVCVThresholdType = < NVCV_THRESH_BINARY > , block_size: int, c: double, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.adaptivethreshold(src: nvcv.ImageBatchVarShape, max_value: nvcv.Tensor, adaptive_method: cvcuda.AdaptiveThresholdType = cvcuda.AdaptiveThresholdType.MEAN_C, threshold_type: cvcuda.ThresholdType = cvcuda.ThresholdType.BINARY, block_size: int, c: float, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the adaptive threshold operation on the given cuda stream. @@ -177,17 +179,17 @@ RESH_MEAN_C >, threshold_type: NVCVThresholdType = < NVCV_THRESH_BINARY >, block for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing the result of the operation. - max_value (Tensor): Non-zero value assigned to the pixels for which the condition is satisfied, specified per image. - adaptive_method (NVCVAdaptiveThresholdType): Adaptive threshold algorithm to use. - threshold_type (NVCVThresholdType): Threshold type that must be either THRESH_BINARY or THRESH_BINARY_INV. + src (nvcv.ImageBatchVarShape): Input image batch containing the result of the operation. + max_value (nvcv.Tensor): Non-zero value assigned to the pixels for which the condition is satisfied, specified per image. + adaptive_method (cvcuda.AdaptiveThresholdType): Adaptive threshold algorithm to use. + threshold_type (cvcuda.ThresholdType): Threshold type that must be either cvcuda.ThresholdType.BINARY or cvcuda.ThresholdType.BINARY_INV. max_block_size (int): The maximum block size that will be used by the operator. - block_size (Tensor): Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on, specified per image. - c (Tensor): Constant subtracted from the mean or weighted mean. Normally, it is positive but may be zero or negative as well, specified per image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + block_size (nvcv.Tensor): Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on, specified per image. + c (nvcv.Tensor): Constant subtracted from the mean or weighted mean. Normally, it is positive but may be zero or negative as well, specified per image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. 
Check the C @@ -197,9 +199,9 @@ RESH_MEAN_C >, threshold_type: NVCVThresholdType = < NVCV_THRESH_BINARY >, block m.def("adaptivethreshold_into", &AdaptiveThresholdVarShapeInto, "dst"_a, "src"_a, "max_value"_a, "adaptive_method"_a = NVCV_ADAPTIVE_THRESH_MEAN_C, "threshold_type"_a = NVCV_THRESH_BINARY, "max_block_size"_a, "block_size"_a, "c"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.adaptivethreshold_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, max_value: nvcv.Tensor, adaptive_method: NVCVAdaptiveThres -holdType = < NVCV_ADAPTIVE_THRESH_MEAN_C > , threshold_type: NVCVThresholdType = < NVCV_THRESH_BINARY >, block_size: int, c: double, stream: Optional[nvcv.cu -da.Stream] = None) + cvcuda.adaptivethreshold_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, max_value: nvcv.Tensor, + adaptive_method: cvcuda.AdaptiveThresholdType = cvcuda.AdaptiveThresholdType.MEAN_C, threshold_type: cvcuda.ThresholdType = cvcuda.ThresholdType.BINARY, + block_size: int, c: float, stream: Optional[nvcv.cuda.Stream] = None) Executes the adaptive threshold operation on the given cuda stream. @@ -208,15 +210,15 @@ da.Stream] = None) for more details and usage examples. Args: - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - src (ImageBatchVarShape): Input image batch containing the result of the operation. - max_value (Tensor): Non-zero value assigned to the pixels for which the condition is satisfied, specified per image. - adaptive_method (NVCVAdaptiveThresholdType): Adaptive threshold algorithm to use. - threshold_type (NVCVThresholdType): Threshold type that must be either THRESH_BINARY or THRESH_BINARY_INV. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing the result of the operation. + max_value (nvcv.Tensor): Non-zero value assigned to the pixels for which the condition is satisfied, specified per image. + adaptive_method (cvcuda.AdaptiveThresholdType): Adaptive threshold algorithm to use. + threshold_type (cvcuda.ThresholdType): Threshold type that must be either cvcuda.ThresholdType.BINARY or cvcuda.ThresholdType.BINARY_INV. max_block_size (int): The maximum block size that will be used by the operator. - block_size (Tensor): Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on, specified per image. - c (Tensor): Constant subtracted from the mean or weighted mean. Normally, it is positive but may be zero or negative as well, specified per image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + block_size (nvcv.Tensor): Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on, specified per image. + c (nvcv.Tensor): Constant subtracted from the mean or weighted mean. Normally, it is positive but may be zero or negative as well, specified per image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpAdvCvtColor.cpp b/python/mod_cvcuda/OpAdvCvtColor.cpp index d2b3cc04d..72f0155d7 100644 --- a/python/mod_cvcuda/OpAdvCvtColor.cpp +++ b/python/mod_cvcuda/OpAdvCvtColor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
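As a usage illustration of the adaptivethreshold signature documented above, a minimal sketch follows. It assumes `src` is an existing single-channel, 8-bit nvcv.Tensor in HWC layout; how that tensor is created is outside the scope of this patch.

    import cvcuda

    # Assumption: `src` is an existing single-channel, uint8 nvcv.Tensor in HWC
    # layout, wrapped from a device buffer elsewhere in the application.
    dst = cvcuda.adaptivethreshold(
        src,
        max_value=255.0,                                      # value written where the condition holds
        adaptive_method=cvcuda.AdaptiveThresholdType.MEAN_C,  # default, per the docstring above
        threshold_type=cvcuda.ThresholdType.BINARY,           # must be BINARY or BINARY_INV
        block_size=11,                                        # odd neighborhood size: 3, 5, 7, ...
        c=2.0,                                                # constant subtracted from the (weighted) mean
    )

The adaptivethreshold_into variant documented above takes a preallocated dst tensor as its first argument and returns None instead of allocating the output.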
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -46,56 +46,12 @@ Tensor AdvCvtColorInto(Tensor &output, Tensor &input, NVCVColorConversionCode co Tensor AdvCvtColor(Tensor &input, NVCVColorConversionCode code, NVCVColorSpec spec, std::optional pstream) { - if (input.shape().rank() < 3 || input.shape().rank() > 4) - { - throw std::runtime_error("Invalid input tensor shape"); - } - - int64_t outputShape[4] = {}; - bool heightIndex = input.shape().rank() == 4 ? 1 : 0; - for (int i = 0; i < input.shape().rank(); i++) - { - outputShape[i] = input.shape()[i]; - } - - switch (code) - { - case NVCVColorConversionCode::NVCV_COLOR_YUV2RGB_NV12: - case NVCVColorConversionCode::NVCV_COLOR_YUV2BGR_NV12: - case NVCVColorConversionCode::NVCV_COLOR_YUV2RGB_NV21: - case NVCVColorConversionCode::NVCV_COLOR_YUV2BGR_NV21: - { - outputShape[heightIndex] = (2 * outputShape[heightIndex]) / 3; // output height must be 2/3 of input height - outputShape[heightIndex + 2] = 3; // output channels must be 3 - break; - } + nvcv::ImageFormat outputFormat = GetOutputFormat(input.dtype(), code); + nvcv::TensorShape outputShape = GetOutputTensorShape(input.shape(), outputFormat, code); - case NVCVColorConversionCode::NVCV_COLOR_RGB2YUV_NV12: - case NVCVColorConversionCode::NVCV_COLOR_BGR2YUV_NV12: - case NVCVColorConversionCode::NVCV_COLOR_RGB2YUV_NV21: - case NVCVColorConversionCode::NVCV_COLOR_BGR2YUV_NV21: - { - outputShape[heightIndex] - = (3 * outputShape[heightIndex]) / 2; // output height must be 3/2 of input height for UV plane - outputShape[heightIndex + 2] = 1; // output channels must be 1 for NV - break; - } - default: - break; - } + Tensor output = Tensor::Create(outputShape, input.dtype()); - if (input.shape().rank() == 4) - { - nvcv::TensorShape yuvCorrectedShape({outputShape[0], outputShape[1], outputShape[2], outputShape[3]}, "NHWC"); - Tensor output = Tensor::Create(yuvCorrectedShape, input.dtype()); - return AdvCvtColorInto(output, input, code, spec, pstream); - } - else - { - nvcv::TensorShape yuvCorrectedShape({outputShape[0], outputShape[1], outputShape[2]}, "HWC"); - Tensor output = Tensor::Create(yuvCorrectedShape, input.dtype()); - return AdvCvtColorInto(output, input, code, spec, pstream); - } + return AdvCvtColorInto(output, input, code, spec, pstream); } } // namespace @@ -113,13 +69,13 @@ void ExportOpAdvCvtColor(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - code (NVCVColorConversionCode): Code describing the desired color conversion. - spec (NVCVColorSpec): Color specification for the conversion. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + code (cvcuda.ColorConversion): Code describing the desired color conversion. + spec (cvcuda.ColorSpec): Color specification for the conversion. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output color converted image. + nvcv.Tensor: The output color converted image. Caution: Restrictions to several arguments may apply. Check the C @@ -136,11 +92,11 @@ void ExportOpAdvCvtColor(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - code (NVCVColorConversionCode): Code describing the desired color conversion. 
- spec (NVCVColorSpec): Color specification for the conversion. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + code (cvcuda.ColorConversion): Code describing the desired color conversion. + spec (cvcuda.ColorSpec): Color specification for the conversion. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpAverageBlur.cpp b/python/mod_cvcuda/OpAverageBlur.cpp index dc37c337f..991410f6d 100644 --- a/python/mod_cvcuda/OpAverageBlur.cpp +++ b/python/mod_cvcuda/OpAverageBlur.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -114,7 +114,7 @@ void ExportOpAverageBlur(py::module &m) m.def("averageblur", &AverageBlur, "src"_a, "kernel_size"_a, "kernel_anchor"_a = def_anchor, "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.averageblur(src: nvcv.Tensor, kernel_size: Tuple [int,int], kernel_anchor: Tuple [int,int], border: NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.averageblur(src: nvcv.Tensor, kernel_size: Tuple[int, int], kernel_anchor: Tuple[int, int], border: cvcuda.Border = cvcuda.Border.CONSTANT, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the AverageBlur operation on the given cuda stream. @@ -123,18 +123,18 @@ void ExportOpAverageBlur(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - kernel_size (Tuple [int,int]): Specifies the size of the blur kernel. - kernel_anchor (Tuple [int,int]): Kernel anchor, use (-1,-1) to indicate kernel center. - border (NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + kernel_size (Tuple[int, int]): Specifies the size of the blur kernel. + kernel_anchor (Tuple[int, int]): Kernel anchor, use (-1,-1) to indicate kernel center. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. See also: Refer to the CV-CUDA C API reference for the AverageBlur operator for more details and usage examples. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. 
Check the C @@ -143,7 +143,7 @@ void ExportOpAverageBlur(py::module &m) m.def("averageblur_into", &AverageBlurInto, "dst"_a, "src"_a, "kernel_size"_a, "kernel_anchor"_a = def_anchor, "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.averageblur_into(dst: nvcv.Tensor, src: nvcv.Tensor, kernel_size: Tuple [int,int], kernel_anchor: Tuple [int,int], border: NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.averageblur_into(dst: nvcv.Tensor, src: nvcv.Tensor, kernel_size: Tuple[int, int], kernel_anchor: Tuple[int, int], border: cvcuda.Border = cvcuda.Border.CONSTANT, stream: Optional[nvcv.cuda.Stream] = None) Executes the AverageBlur operation on the given cuda stream and writes the result into the 'dst' tensor. @@ -152,12 +152,12 @@ void ExportOpAverageBlur(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - kernel_size (Tuple [int,int]): Specifies the size of the blur kernel. - kernel_anchor (Tuple [int,int]): Kernel anchor, use (-1,-1) to indicate kernel center. - border (NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + kernel_size (Tuple[int, int]): Specifies the size of the blur kernel. + kernel_anchor (Tuple[int, int]): Kernel anchor, use (-1,-1) to indicate kernel center. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -169,20 +169,20 @@ void ExportOpAverageBlur(py::module &m) m.def("averageblur", &AverageBlurVarShape, "src"_a, "max_kernel_size"_a, "kernel_size"_a, "kernel_anchor"_a, "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.averageblur(src: nvcv.ImageBatchVarShape, kernel_size: Tuple [int,int], kernel_anchor: Tuple [int,int], border: NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.averageblur(src: nvcv.ImageBatchVarShape, kernel_size: Tuple[int, int], kernel_anchor: Tuple[int, int], border: cvcuda.Border = cvcuda.Border.CONSTANT, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the AverageBlur operation with a variable shape tensors on the given cuda stream. Args: - src (Tensor): Input tensor containing one or more images. - max_kernel_size (Tuple [int,int]): Specifies the maximum size of the blur kernel. - kernel_size (Tuple [int,int]): Specifies the size of the blur kernel within the maximum kernel size. - kernel_anchor (Tuple [int,int]): Kernel anchor, use (-1,-1) to indicate kernel center. - border (NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + max_kernel_size (Tuple[int, int]): Specifies the maximum size of the blur kernel. + kernel_size (Tuple[int, int]): Specifies the size of the blur kernel within the maximum kernel size. 
+ kernel_anchor (Tuple[int, int]): Kernel anchor, use (-1,-1) to indicate kernel center. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -192,18 +192,18 @@ void ExportOpAverageBlur(py::module &m) m.def("averageblur_into", &AverageBlurVarShapeInto, "dst"_a, "src"_a, "max_kernel_size"_a, "kernel_size"_a, "kernel_anchor"_a, "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.averageblur_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, kernel_size: Tuple [int,int], kernel_anchor: Tuple [int,int], border: NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT > , stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.averageblur_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, kernel_size: Tuple[int, int], kernel_anchor: Tuple[int, int], border: cvcuda.Border = cvcuda.Border.CONSTANT, stream: Optional[nvcv.cuda.Stream] = None) Executes the AverageBlur operation with a variable shape tensors on the given cuda stream. Args: - dst (ImageBatchVarShape): Output containing one or more images. - src (ImageBatchVarShape): Input containing one or more images. - max_kernel_size (Tuple [int,int]): Specifies the maximum size of the blur kernel. - kernel_size (Tuple [int,int]): Specifies the size of the blur kernel within the maximum kernel size. - kernel_anchor (Tuple [int,int]): Kernel anchor, use (-1,-1) to indicate kernel center. - border (NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.ImageBatchVarShape): Output containing one or more images. + src (nvcv.ImageBatchVarShape): Input containing one or more images. + max_kernel_size (Tuple[int, int]): Specifies the maximum size of the blur kernel. + kernel_size (Tuple[int, int]): Specifies the size of the blur kernel within the maximum kernel size. + kernel_anchor (Tuple[int, int]): Kernel anchor, use (-1,-1) to indicate kernel center. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpBilateralFilter.cpp b/python/mod_cvcuda/OpBilateralFilter.cpp index 8e844351d..e5d60d5a2 100644 --- a/python/mod_cvcuda/OpBilateralFilter.cpp +++ b/python/mod_cvcuda/OpBilateralFilter.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
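A short usage sketch for the averageblur signature documented above; it assumes `src` is an existing nvcv.Tensor holding one or more images, whose creation is not shown in this patch.

    import cvcuda

    # Assumption: `src` is an existing nvcv.Tensor holding one or more images
    # (e.g. NHWC, uint8); its creation is not part of this change.
    blurred = cvcuda.averageblur(
        src,
        kernel_size=(5, 5),      # width/height of the box kernel
        kernel_anchor=(-1, -1),  # (-1, -1) selects the kernel center
        border=cvcuda.Border.CONSTANT,
    )

averageblur_into writes the result into a preallocated dst tensor passed as the first argument and returns None, as documented above.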
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -108,7 +108,7 @@ void ExportOpBilateralFilter(py::module &m) m.def("bilateral_filter", &BilateralFilter, "src"_a, "diameter"_a, "sigma_color"_a, "sigma_space"_a, "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.bilateral_filter(src: nvcv.Tensor, diameter: int, sigma_color: float, sigma_space: float, border:NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT > , stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.bilateral_filter(src: nvcv.Tensor, diameter: int, sigma_color: float, sigma_space: float, border:cvcuda.Border = cvcuda.Border.CONSTANT, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Bilateral Filter operation on the given cuda stream. @@ -117,15 +117,15 @@ void ExportOpBilateralFilter(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. + src (nvcv.Tensor): Input tensor containing one or more images. diameter (int): Bilateral filter diameter. sigma_color (float): Gaussian exponent for color difference. sigma_space (float): Gaussian exponent for position difference. - border (NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -136,7 +136,7 @@ void ExportOpBilateralFilter(py::module &m) "sigma_space"_a, "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.bilateral_filter_into(dst: nvcv.Tensor, src: nvcv.Tensor, diameter: int, sigma_color: float, sigma_space: float, border:NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.bilateral_filter_into(dst: nvcv.Tensor, src: nvcv.Tensor, diameter: int, sigma_color: float, sigma_space: float, border: cvcuda.Border = cvcuda.Border.CONSTANT, stream: Optional[nvcv.cuda.Stream] = None) Executes the Bilateral Filter operation on the given cuda stream. @@ -145,13 +145,13 @@ void ExportOpBilateralFilter(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. diameter (int): Bilateral filter diameter. sigma_color (float): Gaussian exponent for color difference. sigma_space (float): Gaussian exponent for position difference. - border (NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. 
Returns: None @@ -164,7 +164,7 @@ void ExportOpBilateralFilter(py::module &m) m.def("bilateral_filter", &VarShapeBilateralFilter, "src"_a, "diameter"_a, "sigma_color"_a, "sigma_space"_a, py::kw_only(), "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, "stream"_a = nullptr, R"pbdoc( - cvcuda.bilateral_filter(src: nvcv.ImageBatchVarShape, diameter: nvcv.Tensor, sigma_color: nvcv.Tensor, sigma_space: nvcv.Tensor, border:NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT > , stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.bilateral_filter(src: nvcv.ImageBatchVarShape, diameter: nvcv.Tensor, sigma_color: nvcv.Tensor, sigma_space: nvcv.Tensor, border: cvcuda.Border = cvcuda.Border.CONSTANT, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the Bilateral Filter operation on the given cuda stream. @@ -173,15 +173,15 @@ void ExportOpBilateralFilter(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input tensor containing one or more images. - diameter (Tensor): Bilateral filter diameters in each image. - sigma_color (Tensor): Gaussian exponents for color difference in each image. - sigma_space (Tensor): Gaussian exponents for position difference in each image. - border (NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input tensor containing one or more images. + diameter (nvcv.Tensor): Bilateral filter diameters in each image. + sigma_color (nvcv.Tensor): Gaussian exponents for color difference in each image. + sigma_space (nvcv.Tensor): Gaussian exponents for position difference in each image. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -192,7 +192,7 @@ void ExportOpBilateralFilter(py::module &m) "sigma_space"_a, py::kw_only(), "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, "stream"_a = nullptr, R"pbdoc( - cvcuda.bilateral_filter_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, diameter: nvcv.Tensor, sigma_color: nvcv.Tensor, sigma_space: nvcv.Tensor, border:NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT > , stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.bilateral_filter_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, diameter: nvcv.Tensor, sigma_color: nvcv.Tensor, sigma_space: nvcv.Tensor, border: cvcuda.Border = cvcuda.Border.CONSTANT, stream: Optional[nvcv.cuda.Stream] = None) Executes the Bilateral Filter operation on the given cuda stream. @@ -201,13 +201,13 @@ void ExportOpBilateralFilter(py::module &m) for more details and usage examples. Args: - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - src (ImageBatchVarShape): Input image batch containing one or more images. - diameter (Tensor): Bilateral filter diameters in each image. - sigma_color (Tensor): Gaussian exponents for color difference in each image. - sigma_space (Tensor): Gaussian exponents for position difference in each image. - border (NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. 
- stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + diameter (nvcv.Tensor): Bilateral filter diameters in each image. + sigma_color (nvcv.Tensor): Gaussian exponents for color difference in each image. + sigma_space (nvcv.Tensor): Gaussian exponents for position difference in each image. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpBndBox.cpp b/python/mod_cvcuda/OpBndBox.cpp index 1551832f7..7688cdfbb 100644 --- a/python/mod_cvcuda/OpBndBox.cpp +++ b/python/mod_cvcuda/OpBndBox.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -67,12 +67,12 @@ void ExportOpBndBox(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - bboxes (NVCVBndBoxesI): Bounding boxes in reference to the input tensor. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + bboxes (cvcuda.BndBoxesI): Bounding boxes in reference to the input tensor. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -88,10 +88,10 @@ void ExportOpBndBox(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - bboxes (NVCVBndBoxesI): Bounding boxes in reference to the input tensor. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + bboxes (cvcuda.BndBoxesI): Bounding boxes in reference to the input tensor. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpBoxBlur.cpp b/python/mod_cvcuda/OpBoxBlur.cpp index 2c1b21dab..13a6735fd 100644 --- a/python/mod_cvcuda/OpBoxBlur.cpp +++ b/python/mod_cvcuda/OpBoxBlur.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -66,12 +66,12 @@ void ExportOpBoxBlur(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - bboxes (NVCVBlurBoxesI): Blur boxes in reference to the input tensor. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + bboxes (cvcuda.BlurBoxesI): Blur boxes in reference to the input tensor. 
+ stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -87,10 +87,10 @@ void ExportOpBoxBlur(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - bboxes (NVCVBlurBoxesI): Blur boxes in reference to the input tensor. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + bboxes (cvcuda.BlurBoxesI): Blur boxes in reference to the input tensor. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpBrightnessContrast.cpp b/python/mod_cvcuda/OpBrightnessContrast.cpp index f0c106dd9..cc0c7a6a1 100644 --- a/python/mod_cvcuda/OpBrightnessContrast.cpp +++ b/python/mod_cvcuda/OpBrightnessContrast.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -134,7 +134,32 @@ void ExportOpBrightnessContrast(py::module &m) cvcuda.brightness_contrast(src: nvcv.Tensor, brightness: nvcv.Tensor, contrast: nvcv.Tensor, brightness_shift: nvcv.Tensor, contrast_center: nvcv.Tensor, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Adjusts the brightness and contrast of the images according to the formula: - `out = brightness_shift + brightness * (contrast_center + contrast * (in - contrast_center))`. + ``out = brightness_shift + brightness * (contrast_center + contrast * (in - contrast_center))``. + + See also: + Refer to the CV-CUDA C API reference for the BrightnessContrast operator + for more details and usage examples. + + Args: + src (nvcv.Tensor): Input tensor. + brightness (nvcv.Tensor, optional): Optional tensor describing brightness multiplier. + If specified, it must contain only 1 element. If not specified, the neutral ``1.`` + is used. + contrast (nvcv.Tensor, optional): Optional tensor describing contrast multiplier. + If specified, it must contain only 1 element. If not specified, the neutral ``1.`` + is used. + brightness_shift (nvcv.Tensor, optional): Optional tensor describing brightness shift. + If specified, it must contain only 1 element. If not specified, the neutral ``0.`` + is used. + contrast_center (nvcv.Tensor, optional): Optional tensor describing contrast center. + If specified, it must contain only 1 element. If not specified, the middle of the + assumed input type range is used. For floats it is ``0.5``, for unsigned integer + types it is ``2 * (number_of_bits - 1)``, for signed integer types it is + ``2 * (number_of_bits - 2)``. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. + + Returns: + nvcv.Tensor: The output tensor. 
)pbdoc"); m.def("brightness_contrast_into", &BrightnessContrastInto, "dst"_a, "src"_a, "brightness"_a = nullptr, "contrast"_a = nullptr, "brightness_shift"_a = nullptr, "contrast_center"_a = nullptr, py::kw_only(), @@ -144,7 +169,33 @@ void ExportOpBrightnessContrast(py::module &m) cvcuda.brightness_contrast_into(dst: nvcv.Tensor, src: nvcv.Tensor, brightness: nvcv.Tensor, contrast: nvcv.Tensor, brightness_shift: nvcv.Tensor, contrast_center: nvcv.Tensor, stream: Optional[nvcv.cuda.Stream] = None) Adjusts the brightness and contrast of the images according to the formula: - `out = brightness_shift + brightness * (contrast_center + contrast * (in - contrast_center))`. + ``out = brightness_shift + brightness * (contrast_center + contrast * (in - contrast_center))``. + + See also: + Refer to the CV-CUDA C API reference for the BrightnessContrast operator + for more details and usage examples. + + Args: + src (nvcv.Tensor): Input tensor. + dst (nvcv.Tensor): Output tensor containing the result of the operation. + brightness (nvcv.Tensor, optional): Optional tensor describing brightness multiplier. + If specified, it must contain only 1 element. If not specified, the neutral ``1.`` + is used. + contrast (nvcv.Tensor, optional): Optional tensor describing contrast multiplier. + If specified, it must contain only 1 element. If not specified, the neutral ``1.`` + is used. + brightness_shift (nvcv.Tensor, optional): Optional tensor describing brightness shift. + If specified, it must contain only 1 element. If not specified, the neutral ``0.`` + is used. + contrast_center (nvcv.Tensor, optional): Optional tensor describing contrast center. + If specified, it must contain only 1 element. If not specified, the middle of the + assumed input type range is used. For floats it is ``0.5``, for unsigned integer + types it is ``2 * (number_of_bits - 1)``, for signed integer types it is + ``2 * (number_of_bits - 2)``. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. + + Returns: + None )pbdoc"); // VarShape variants @@ -155,10 +206,41 @@ void ExportOpBrightnessContrast(py::module &m) cvcuda.brightness_contrast(src: nvcv.ImageBatchVarShape, brightness: nvcv.Tensor, contrast: nvcv.Tensor, brightness_shift: nvcv.Tensor, contrast_center: nvcv.Tensor, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Adjusts the brightness and contrast of the images according to the formula: - `out = brightness_shift + brightness * (contrast_center + contrast * (in - contrast_center))`. + ``out = brightness_shift + brightness * (contrast_center + contrast * (in - contrast_center))``. The brightness/brightness_shift/contrast/contrast_center tensors' length must match the number of samples in the batch. + + + See also: + Refer to the CV-CUDA C API reference for the BrightnessContrast operator + for more details and usage examples. + + Args: + src (nvcv.ImageBatchVarShape): Input tensor. + brightness (nvcv.Tensor, optional): Optional tensor describing brightness multiplier. + If specified, it must contain 1 or N elements where N is the number of input + images. If it contains a single element, the same value is used for all input + images. If not specified, the neutral ``1.`` is used. + contrast (nvcv.Tensor, optional): Optional tensor describing contrast multiplier. + If specified, it must contain either 1 or N elements where N is the number of + input images. If it contains a single element, the same value is used for all + input images. 
If not specified, the neutral ``1.`` is used. + brightness_shift (nvcv.Tensor, optional): Optional tensor describing brightness shift. + If specified, it must contain either 1 or N elements where N is the number of + input images. If it contains a single element, the same value is used for all + input images. If not specified, the neutral ``0.`` is used. + contrast_center (nvcv.Tensor, optional): Optional tensor describing contrast center. + If specified, it must contain either 1 or N elements where N is the number of input + images. If it contains a single element, the same value is used for all input + images. If not specified, the middle of the assumed input type range is used. For + floats it is ``0.5``, for unsigned integer types it is + ``2 * (number_of_bits - 1)``, for signed integer types it is + ``2 * (number_of_bits - 2)``. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. + + Returns: + nvcv.ImageBatchVarShape: The output image batch. )pbdoc"); m.def("brightness_contrast_into", &VarShapeBrightnessContrastInto, "dst"_a, "src"_a, "brightness"_a = nullptr, "contrast"_a = nullptr, "brightness_shift"_a = nullptr, "contrast_center"_a = nullptr, py::kw_only(), @@ -168,7 +250,42 @@ void ExportOpBrightnessContrast(py::module &m) cvcuda.brightness_contrast_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, brightness: nvcv.Tensor, contrast: nvcv.Tensor, brightness_shift: nvcv.Tensor, contrast_center: nvcv.Tensor, stream: Optional[nvcv.cuda.Stream] = None) Adjusts the brightness and contrast of the images according to the formula: - `out = brightness_shift + brightness * (contrast_center + contrast * (in - contrast_center))`. + ``out = brightness_shift + brightness * (contrast_center + contrast * (in - contrast_center))``. + + The brightness/brightness_shift/contrast/contrast_center tensors' length must match the + number of samples in the batch. + + + See also: + Refer to the CV-CUDA C API reference for the BrightnessContrast operator + for more details and usage examples. + + Args: + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + brightness (nvcv.ImageBatchVarShape, optional): Optional tensor describing brightness multiplier. + If specified, it must contain 1 or N elements where N is the number of input + images. If it contains a single element, the same value is used for all input + images. If not specified, the neutral ``1.`` is used. + contrast (nvcv.Tensor, optional): Optional tensor describing contrast multiplier. + If specified, it must contain either 1 or N elements where N is the number of + input images. If it contains a single element, the same value is used for all + input images. If not specified, the neutral ``1.`` is used. + brightness_shift (nvcv.Tensor, optional): Optional tensor describing brightness shift. + If specified, it must contain either 1 or N elements where N is the number of + input images. If it contains a single element, the same value is used for all + input images. If not specified, the neutral ``0.`` is used. + contrast_center (nvcv.Tensor, optional): Optional tensor describing contrast center. + If specified, it must contain either 1 or N elements where N is the number of input + images. If it contains a single element, the same value is used for all input + images. If not specified, the middle of the assumed input type range is used. 
For + floats it is ``0.5``, for unsigned integer types it is + ``2 * (number_of_bits - 1)``, for signed integer types it is + ``2 * (number_of_bits - 2)``. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. + + Returns: + None )pbdoc"); } diff --git a/python/mod_cvcuda/OpCenterCrop.cpp b/python/mod_cvcuda/OpCenterCrop.cpp index 259928511..537d33c5f 100644 --- a/python/mod_cvcuda/OpCenterCrop.cpp +++ b/python/mod_cvcuda/OpCenterCrop.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,8 +22,8 @@ #include #include #include +#include #include -#include #include #include #include @@ -94,12 +94,12 @@ void ExportOpCenterCrop(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - crop_size (Tuple [int,int]): Crop size in width and height. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + crop_size (Tuple[int, int]): Crop size in width and height. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -116,10 +116,10 @@ void ExportOpCenterCrop(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - crop_size (Tuple [int,int]): Crop size in width and height. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + crop_size (Tuple[int, int]): Crop size in width and height. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpChannelReorder.cpp b/python/mod_cvcuda/OpChannelReorder.cpp index 653dd359e..6e677cc70 100644 --- a/python/mod_cvcuda/OpChannelReorder.cpp +++ b/python/mod_cvcuda/OpChannelReorder.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -85,15 +85,15 @@ void ExportOpChannelReorder(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input tensor containing one or more images. - order(Tensor): 2D tensor with layout "NC" which specifies, for each output image sample in the batch, + src (nvcv.ImageBatchVarShape): Input tensor containing one or more images. + order (nvcv.Tensor): 2D tensor with layout "NC" which specifies, for each output image sample in the batch, the index of the input channel to copy to the output channel. - format(ImageFormat): Format of the destination image. + format (nvcv.Format): Format of the destination image. 
- stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -110,13 +110,13 @@ void ExportOpChannelReorder(py::module &m) for more details and usage examples. Args: - dst (ImageBatchVarShape): Output tensor to store the result of the operation. - src (ImageBatchVarShape): Input tensor containing one or more images. - order(Tensor): 2D tensor with layout "NC" which specifies, for each output image sample in the batch, + dst (nvcv.ImageBatchVarShape): Output tensor to store the result of the operation. + src (nvcv.ImageBatchVarShape): Input tensor containing one or more images. + order (nvcv.Tensor): 2D tensor with layout "NC" which specifies, for each output image sample in the batch, the index of the input channel to copy to the output channel. - format(ImageFormat): Format of the destination image. + format (nvcv.Format): Format of the destination image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpColorTwist.cpp b/python/mod_cvcuda/OpColorTwist.cpp index 54c44404e..84c52dc80 100644 --- a/python/mod_cvcuda/OpColorTwist.cpp +++ b/python/mod_cvcuda/OpColorTwist.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -98,21 +98,101 @@ void ExportOpColorTwist(py::module &m) m.def("color_twist", &ColorTwistMatrix, "src"_a, "twist"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - Transforms batch of images by applying affine transformation to channels extent. - The twist should be 2D tensor describing 3x4 affine transformation matrix or 3D tensor specifying - separate transformations for each sample in the input batch. + + cvcuda.color_twist(src: nvcv.Tensor, twist: nvcv.Tensor, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + + Transforms an image by applying affine transformation to the channels extent. + + See Also: + Refer to the CV-CUDA C API reference for the ColorTwist operator for more details and + usage examples. + + Args: + src (nvcv.Tensor): Tensor corresponding to the input image. It must have + either 3 or 4 channels. In the case of 4 channels, the alpha channel is + unmodified. + twist (nvcv.Tensor): A 2D tensor describing a 3x4 affine transformation matrix. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. + + Returns: + nvcv.Tensor: The output tensor. + )pbdoc"); + m.def("color_twist_into", &ColorTwistMatrixInto, "dst"_a, "src"_a, "twist"_a, py::kw_only(), "stream"_a = nullptr, + R"pbdoc( + + cvcuda.color_twist_into(dst: nvcv.Tensor, src: nvcv.Tensor, twist: nvcv.Tensor, stream: Optional[nvcv.cuda.Stream] = None) + + Transforms an image by applying affine transformation to the channels extent. + + See Also: + Refer to the CV-CUDA C API reference for the ColorTwist operator for more details and + usage examples. + + Args: + dst (nvcv.Tensor): Tensor corresponding to the output image. 
Must match the shape of + the input image. + src (nvcv.Tensor): Tensor corresponding to the input image. It must have + either 3 or 4 channels. In the case of 4 channels, the alpha channel is + unmodified. + twist (nvcv.Tensor): A 2D tensor describing a 3x4 affine transformation matrix. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. + + Returns: + None )pbdoc"); - m.def("color_twist_into", &ColorTwistMatrixInto, "dst"_a, "src"_a, "twist"_a, py::kw_only(), "stream"_a = nullptr); // VarShape variants m.def("color_twist", &VarShapeColorTwistMatrix, "src"_a, "twist"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - Transforms batch of images by applying affine transformation to channels extent. - The twist should be 2D tensor describing 3x4 affine transformation matrix or 3D tensor specifying - separate transformations for each sample in the input tensor. + + cvcuda.color_twist(src: nvcv.ImageBatchVarShape, twist: nvcv.Tensor, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + + Transforms a batch of images by applying affine transformation to the channels extent. + + See Also: + Refer to the CV-CUDA C API reference for the ColorTwist operator for more details and + usage examples. + + Args: + src (nvcv.ImageBatchVarShape): Input image batch. Each image must have either 3 or 4 + channels. In the case of 4 channels the alpha channel is unmodified. + twist (nvcv.Tensor): A 3x4 2D tensor describing an affine transformation matrix or a + Nx3x4 3D tensor specifying separate transformations for each sample in the input + image batch. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. + + Returns: + nvcv.ImageBatchVarShape: The output image batch. + )pbdoc"); m.def("color_twist_into", &VarShapeColorTwistMatrixInto, "dst"_a, "src"_a, "twist"_a, py::kw_only(), - "stream"_a = nullptr); + "stream"_a = nullptr, + R"pbdoc( + + cvcuda.color_twist_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, twist: nvcv.Tensor, stream: Optional[nvcv.cuda.Stream] = None) + + Transforms a batch of images by applying affine transformation to the channels extent. + + The twist should be a 2D tensor describing 3x4 affine transformation matrix or a 3D tensor specifying + separate transformations for each sample in the input image batch. + + See Also: + Refer to the CV-CUDA C API reference for the ColorTwist operator for more details and + usage examples. + + Args: + dst (nvcv.ImageBatchVarShape): Output image batch. The shapes of the output images + must match the input image batch. + src (nvcv.ImageBatchVarShape): Input image batch. Each image must have either 3 or 4 + channels. In the case of 4 channels the alpha channel is unmodified. + twist (nvcv.Tensor): A 3x4 2D tensor describing an affine transformation matrix or an + Nx3x4 3D tensor specifying separate transformations for each sample in the input + image batch. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. + + Returns: + None + )pbdoc"); } } // namespace cvcudapy diff --git a/python/mod_cvcuda/OpComposite.cpp b/python/mod_cvcuda/OpComposite.cpp index 935ff4556..9dfe6cf5a 100644 --- a/python/mod_cvcuda/OpComposite.cpp +++ b/python/mod_cvcuda/OpComposite.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
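As a usage illustration of the color_twist signature documented above, a minimal sketch follows. It assumes `src` and `twist` are pre-existing nvcv.Tensor objects on the device (a 3- or 4-channel image tensor and a 3x4 float matrix, respectively); neither creation step is part of this change.

    import cvcuda

    # Assumptions: `src` is an existing 3- or 4-channel nvcv.Tensor and `twist`
    # is a 3x4 float nvcv.Tensor already resident on the device (for example
    # wrapped from a CUDA buffer); neither creation step is shown in this patch.
    #
    # A twist matrix of the form below would add an offset of 10 to each channel:
    #     [[1, 0, 0, 10],
    #      [0, 1, 0, 10],
    #      [0, 0, 1, 10]]
    out = cvcuda.color_twist(src, twist)

    # color_twist_into(dst, src, twist) writes into a preallocated output tensor
    # of matching shape and returns None, as documented above.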
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include
@@ -109,7 +109,7 @@ void ExportOpComposite(py::module &m) m.def("composite", &Composite, "foreground"_a, "background"_a, "fgmask"_a, "outchannels"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.composite(foreground: nvcv.Tensor, background: nvcv.Tensor, fgmask : nvcv.Tensor, outchannels: int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.composite(foreground: nvcv.Tensor, background: nvcv.Tensor, fgmask: nvcv.Tensor, outchannels: int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Composite operation on the given cuda stream.
@@ -118,14 +118,14 @@ void ExportOpComposite(py::module &m) for more details and usage examples. Args: - foreground (Tensor): Input tensor containing one or more foreground images. Each image is BGR (3-channel) 8-bit. - background (Tensor): Input tensor containing one or more background images. Each image is BGR (3-channel) 8-bit. - fgmask(Tensor): Input foreground mask tensor. Each mask image is grayscale 8-bit - outchannels(int): Specifies 3 channel for RGB and 4 channel for BGRA. - stream (Stream, optional): CUDA Stream on which to perform the operation. + foreground (nvcv.Tensor): Input tensor containing one or more foreground images. Each image is BGR (3-channel) 8-bit. + background (nvcv.Tensor): Input tensor containing one or more background images. Each image is BGR (3-channel) 8-bit. + fgmask (nvcv.Tensor): Input foreground mask tensor. Each mask image is grayscale 8-bit. + outchannels (int): Specifies 3 channels for RGB and 4 channels for BGRA. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C
@@ -135,7 +135,7 @@ void ExportOpComposite(py::module &m) m.def("composite_into", &CompositeInto, "dst"_a, "foreground"_a, "background"_a, "fgmask"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.composite_into(dst: nvcv.Tensor, foreground: nvcv.Tensor, background: nvcv.Tensor, fgmask : nvcv.Tensor, outchannels: int, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.composite_into(dst: nvcv.Tensor, foreground: nvcv.Tensor, background: nvcv.Tensor, fgmask: nvcv.Tensor, outchannels: int, stream: Optional[nvcv.cuda.Stream] = None) Executes the Composite operation on the given cuda stream.
@@ -144,11 +144,11 @@ void ExportOpComposite(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - foreground (Tensor): Input tensor containing one or more foreground images. Each image is BGR (3-channel) 8-bit. - background (Tensor): Input tensor containing one or more background images. Each image is BGR (3-channel) 8-bit. - fgmask(Tensor): Input foreground mask tensor. Each mask image is grayscale 8-bit. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + foreground (nvcv.Tensor): Input tensor containing one or more foreground images. Each image is BGR (3-channel) 8-bit. + background (nvcv.Tensor): Input tensor containing one or more background images. Each image is BGR (3-channel) 8-bit. + fgmask (nvcv.Tensor): Input foreground mask tensor. Each mask image is grayscale 8-bit.
+ stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -161,7 +161,7 @@ void ExportOpComposite(py::module &m) m.def("composite", &CompositeVarShape, "foreground"_a, "background"_a, "fgmask"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.composite(foreground: nvcv.ImageBatchVarShape, background: nvcv.ImageBatchVarShape, fgmask : nvcv.ImageBatchVarShape, outchannels: int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.composite(foreground: nvcv.ImageBatchVarShape, background: nvcv.ImageBatchVarShape, fgmask: nvcv.ImageBatchVarShape, outchannels: int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the Composite operation on the given cuda stream. @@ -170,13 +170,13 @@ void ExportOpComposite(py::module &m) for more details and usage examples. Args: - foreground (ImageBatchVarShape): Input tensor containing one or more foreground images. Each image is BGR (3-channel) 8-bit. - background (ImageBatchVarShape): Input tensor containing one or more background images. Each image is BGR (3-channel) 8-bit. - fgmask(ImageBatchVarShape): Input foreground mask image batch. Each mask image is grayscale 8-bit. - stream (Stream, optional): CUDA Stream on which to perform the operation. + foreground (nvcv.ImageBatchVarShape): Input tensor containing one or more foreground images. Each image is BGR (3-channel) 8-bit. + background (nvcv.ImageBatchVarShape): Input tensor containing one or more background images. Each image is BGR (3-channel) 8-bit. + fgmask (nvcv.ImageBatchVarShape): Input foreground mask image batch. Each mask image is grayscale 8-bit. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -186,7 +186,7 @@ void ExportOpComposite(py::module &m) m.def("composite_into", &CompositeVarShapeInto, "dst"_a, "foreground"_a, "background"_a, "fgmask"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.composite_into(dst: nvcv.ImageBatchVarShape, foreground: nvcv.ImageBatchVarShape, background: nvcv.ImageBatchVarShape, fgmask : nvcv.ImageBatchVarShape, outchannels: int, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.composite_into(dst: nvcv.ImageBatchVarShape, foreground: nvcv.ImageBatchVarShape, background: nvcv.ImageBatchVarShape, fgmask: nvcv.ImageBatchVarShape, outchannels: int, stream: Optional[nvcv.cuda.Stream] = None) Executes the Composite operation on the given cuda stream. @@ -195,11 +195,11 @@ void ExportOpComposite(py::module &m) for more details and usage examples. Args: - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - foreground (ImageBatchVarShape): Input tensor containing one or more foreground images. Each image is BGR (3-channel) 8-bit. - background (ImageBatchVarShape): Input tensor containing one or more background images. Each image is BGR (3-channel) 8-bit. - fgmask(ImageBatchVarShape): Input foreground mask image batch. Each mask image is grayscale 8-bit. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + foreground (nvcv.ImageBatchVarShape): Input tensor containing one or more foreground images. Each image is BGR (3-channel) 8-bit. 
+ background (nvcv.ImageBatchVarShape): Input tensor containing one or more background images. Each image is BGR (3-channel) 8-bit. + fgmask (nvcv.ImageBatchVarShape): Input foreground mask image batch. Each mask image is grayscale 8-bit. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpConv2D.cpp b/python/mod_cvcuda/OpConv2D.cpp index 41d6f64c2..7b7ac4387 100644 --- a/python/mod_cvcuda/OpConv2D.cpp +++ b/python/mod_cvcuda/OpConv2D.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -85,16 +85,16 @@ void ExportOpConv2D(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - kernel(Tensor): Convolution kernels (one for each batch image) to be used. Each image width and height + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + kernel (nvcv.Tensor): Convolution kernels (one for each batch image) to be used. Each image width and height correspond to the kernel width and height. (must be float) - kernel_anchor(Tensor): 1D Tensor with the anchor of each kernel (one for each batch image). The anchor (x, y) + kernel_anchor (nvcv.Tensor): 1D Tensor with the anchor of each kernel (one for each batch image). The anchor (x, y) indicates the relative position of a filtered point within the kernel. (-1, -1) means that the anchor is at the kernel center. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -111,14 +111,14 @@ void ExportOpConv2D(py::module &m) for more details and usage examples. Args: - dst (ImageBatchVarShape): Output image batch to store the result of the operation. - src (ImageBatchVarShape): Input image batch containing one or more images. - kernel(Tensor): Convolution kernels (one for each batch image) to be used. Each image width and height + dst (nvcv.ImageBatchVarShape): Output image batch to store the result of the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + kernel (nvcv.Tensor): Convolution kernels (one for each batch image) to be used. Each image width and height correspond to the kernel width and height. (must be float) - kernel_anchor(Tensor): 1D Tensor with the anchor of each kernel (one for each batch image). The anchor (x, y) + kernel_anchor (nvcv.Tensor): 1D Tensor with the anchor of each kernel (one for each batch image). The anchor (x, y) indicates the relative position of a filtered point within the kernel. (-1, -1) means that the anchor is at the kernel center. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. 
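
As a usage sketch of the composite binding documented above (assumptions: NHWC uint8 placeholder tensors with uninitialized contents, a single-channel mask of the same height and width, and the outchannels keyword as listed in the Args section):

    import numpy as np
    import cvcuda
    import nvcv

    n, h, w = 2, 480, 640
    fg     = nvcv.Tensor((n, h, w, 3), np.uint8, "NHWC")  # BGR foreground batch
    bg     = nvcv.Tensor((n, h, w, 3), np.uint8, "NHWC")  # BGR background batch
    fgmask = nvcv.Tensor((n, h, w, 1), np.uint8, "NHWC")  # grayscale matte

    out3 = cvcuda.composite(fg, bg, fgmask, outchannels=3)  # 3-channel output
    out4 = cvcuda.composite(fg, bg, fgmask, outchannels=4)  # 4-channel output with alpha
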
Returns: None diff --git a/python/mod_cvcuda/OpConvertTo.cpp b/python/mod_cvcuda/OpConvertTo.cpp index 767c54fcd..e6c017ded 100644 --- a/python/mod_cvcuda/OpConvertTo.cpp +++ b/python/mod_cvcuda/OpConvertTo.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -68,13 +68,13 @@ void ExportOpConvertTo(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - scale (float): Scalar for output data. - offset (float): Offset for the data. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + scale (float, optional): Scalar for output data. + offset (float, optional): Offset for the data. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -91,11 +91,11 @@ void ExportOpConvertTo(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - scale (float): Scalar for output data. - offset (float): Offset for the data. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + scale (float, optional): Scalar for output data. + offset (float, optional): Offset for the data. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpCopyMakeBorder.cpp b/python/mod_cvcuda/OpCopyMakeBorder.cpp index 8a3075699..5ef960d3e 100644 --- a/python/mod_cvcuda/OpCopyMakeBorder.cpp +++ b/python/mod_cvcuda/OpCopyMakeBorder.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,8 +21,8 @@ #include #include #include +#include #include -#include #include #include #include @@ -204,7 +204,7 @@ void ExportOpCopyMakeBorder(py::module &m) "border_value"_a = std::vector(), py::kw_only(), "top"_a, "bottom"_a, "left"_a, "right"_a, "stream"_a = nullptr, R"pbdoc( - cvcuda.copymakeborder(src: nvcv.Tensor, border_mode : NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, border_value : float array , top : int, bottom : int, right : int , stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.copymakeborder(src: nvcv.Tensor, border_mode: cvcuda.Border = cvcuda.Border.CONSTANT, border_value: List[float], top: int, bottom: int, right: int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Copy Make Border operation on the given cuda stream. @@ -213,18 +213,18 @@ void ExportOpCopyMakeBorder(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. 
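
A short sketch of the convertto_into variant documented above, assuming numpy dtypes, a pre-allocated float32 destination of the same shape, and the scale/offset keyword names from the Args list:

    import numpy as np
    import cvcuda
    import nvcv

    src = nvcv.Tensor((1, 224, 224, 3), np.uint8, "NHWC")    # placeholder input
    dst = nvcv.Tensor((1, 224, 224, 3), np.float32, "NHWC")  # pre-allocated output

    # dst = src * scale + offset, e.g. map uint8 [0, 255] to float32 [0, 1].
    cvcuda.convertto_into(dst, src, scale=1.0 / 255.0, offset=0.0)
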
- border_mode(NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - border_value(float array, optional): Border value to be used for constant border mode, each element of the array corresponds to the + src (nvcv.Tensor): Input tensor containing one or more images. + border_mode (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + border_value (List[float], optional): Border value to be used for constant border mode, each element of the array corresponds to the image color channel must be a size <= 4 and dim of 1, where the values specify the border color for each color channel. top (int): The top pixel position. left (int): The left pixel position. bottom (int): The bottom pixel position. right (int): The right pixel position. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -235,7 +235,7 @@ void ExportOpCopyMakeBorder(py::module &m) "border_mode"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, "border_value"_a = std::vector(), py::kw_only(), "top"_a, "left"_a, "stream"_a = nullptr, R"pbdoc( - cvcuda.copymakeborder_into(dst: nvcv.Tensor, src: nvcv.Tensor, border_mode : NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, border_value : float array, top : int, bottom : int, right : int , stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.copymakeborder_into(dst: nvcv.Tensor, src: nvcv.Tensor, border_mode: cvcuda.Border = cvcuda.Border.CONSTANT, border_value: List[float], top: int, bottom: int, right: int, stream: Optional[nvcv.cuda.Stream] = None) Executes the Copy Make Border operation on the given cuda stream. @@ -244,14 +244,14 @@ void ExportOpCopyMakeBorder(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - border_mode(NVCVBorderType): Border mode to be used when accessing elements outside input image. - border_value(float array, optional): Border value to be used for constant border mode, each element of the array corresponds to the + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + border_mode (cvcuda.Border): Border mode to be used when accessing elements outside input image. + border_value (List[float], optional): Border value to be used for constant border mode, each element of the array corresponds to the image color channel must be a size <= 4 and dim of 1, where the values specify the border color for each color channel. top (int): The top pixel position. left (int): The left pixel position. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. 
Returns: None @@ -265,7 +265,7 @@ void ExportOpCopyMakeBorder(py::module &m) "border_mode"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, "border_value"_a = std::vector(), py::kw_only(), "top"_a, "left"_a, "out_height"_a, "out_width"_a, "stream"_a = nullptr, R"pbdoc( - cvcuda.copymakeborderstack(src: nvcv.ImageBatchVarShape, border_mode : NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, border_value : float array, top : int, bottom : int, right : int , stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.copymakeborderstack(src: nvcv.ImageBatchVarShape, border_mode: cvcuda.Border = cvcuda.Border.CONSTANT, border_value: List[float], top: int, bottom: int, right: int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Copy Make Border Stack operation on the given cuda stream. @@ -274,18 +274,18 @@ void ExportOpCopyMakeBorder(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - border_mode(NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - border_value(float array, optional): Border value to be used for constant border mode, each element of the array corresponds to the + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + border_mode (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + border_value (List[float], optional): Border value to be used for constant border mode, each element of the array corresponds to the image color channel must be a size <= 4 and dim of 1, where the values specify the border color for each color channel. - top (Tensor): The top pixel position for each image. - left (Tensor): The left pixel position for each image. + top (nvcv.Tensor): The top pixel position for each image. + left (nvcv.Tensor): The left pixel position for each image. out_height (int): The height of the output. out_width (int): The width of the output. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output images. + nvcv.Tensor: The output images. Caution: Restrictions to several arguments may apply. Check the C @@ -296,7 +296,7 @@ void ExportOpCopyMakeBorder(py::module &m) "border_mode"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, "border_value"_a = std::vector(), py::kw_only(), "top"_a, "left"_a, "stream"_a = nullptr, R"pbdoc( - cvcuda.copymakeborderstack_into(dst: nvcv.Tensor, src: nvcv.ImageBatchVarShape, border_mode : NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, border_value : float array, top : int, bottom : int, right : int , stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.copymakeborderstack_into(dst: nvcv.Tensor, src: nvcv.ImageBatchVarShape, border_mode: cvcuda.Border = cvcuda.Border.CONSTANT, border_value: List[float], top: int, bottom: int, right: int, stream: Optional[nvcv.cuda.Stream] = None) Executes the Copy Make Border Stack operation on the given cuda stream. @@ -305,16 +305,16 @@ void ExportOpCopyMakeBorder(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (ImageBatchVarShape): Input image batch containing one or more images. - border_mode(NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. 
- border_value(float array, optional): Border value to be used for constant border mode, each element of the array corresponds to the + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + border_mode (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + border_value (List[float], optional): Border value to be used for constant border mode, each element of the array corresponds to the image color channel must be a size <= 4 and dim of 1, where the values specify the border color for each color channel. - top (Tensor): The top pixel position for each image. - left (Tensor): The left pixel position for each image. + top (nvcv.Tensor): The top pixel position for each image. + left (nvcv.Tensor): The left pixel position for each image. out_height (int): The height of the output. out_width (int): The width of the output. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -328,7 +328,7 @@ void ExportOpCopyMakeBorder(py::module &m) "border_value"_a = std::vector(), py::kw_only(), "top"_a, "left"_a, "out_heights"_a, "out_widths"_a, "stream"_a = nullptr, R"pbdoc( - cvcuda.copymakeborder(src: nvcv.ImageBatchVarShape, border_mode : NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, border_value : float array, top : int, bottom : int, right : int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.copymakeborder(src: nvcv.ImageBatchVarShape, border_mode: cvcuda.Border = cvcuda.Border.CONSTANT, border_value: List[float], top: int, bottom: int, right: int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the Copy Make Border operation on the given cuda stream. @@ -337,18 +337,18 @@ void ExportOpCopyMakeBorder(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - border_mode(NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - border_value(float array, optional): Border value to be used for constant border mode, each element of the array corresponds to the + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + border_mode (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + border_value (List[float], optional): Border value to be used for constant border mode, each element of the array corresponds to the image color channel must be a size <= 4 and dim of 1, where the values specify the border color for each color channel. - top (Tensor): The top pixel position for each image. - left (Tensor): The left pixel position for each image. - out_heights (Tensor): The heights of each output image. - out_widths (Tensor): The widths of each output image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + top (nvcv.Tensor): The top pixel position for each image. + left (nvcv.Tensor): The left pixel position for each image. + out_heights (nvcv.Tensor): The heights of each output image. + out_widths (nvcv.Tensor): The widths of each output image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. 
Caution: Restrictions to several arguments may apply. Check the C @@ -359,7 +359,7 @@ void ExportOpCopyMakeBorder(py::module &m) "border_mode"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, "border_value"_a = std::vector(), py::kw_only(), "top"_a, "left"_a, "stream"_a = nullptr, R"pbdoc( - cvcuda.copymakeborder_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, border_mode : NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, border_value : float array, top : int, bottom : int, right : int , stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.copymakeborder_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, border_mode: cvcuda.Border = cvcuda.Border.CONSTANT, border_value: List[float], top: int, bottom: int, right: int, stream: Optional[nvcv.cuda.Stream] = None) Executes the Copy Make Border operation on the given cuda stream. @@ -368,16 +368,16 @@ void ExportOpCopyMakeBorder(py::module &m) for more details and usage examples. Args: - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - src (ImageBatchVarShape): Input image batch containing one or more images. - border_mode(NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - border_value(float array, optional): Border value to be used for constant border mode, each element of the array corresponds to the + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + border_mode (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + border_value (List[float], optional): Border value to be used for constant border mode, each element of the array corresponds to the image color channel must be a size <= 4 and dim of 1, where the values specify the border color for each color channel. - top (Tensor): The top pixel position for each image. - left (Tensor): The left pixel position for each image. - out_heights (Tensor): The heights of each output image. - out_widths (Tensor): The widths of each output image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + top (nvcv.Tensor): The top pixel position for each image. + left (nvcv.Tensor): The left pixel position for each image. + out_heights (nvcv.Tensor): The heights of each output image. + out_widths (nvcv.Tensor): The widths of each output image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpCropFlipNormalizeReformat.cpp b/python/mod_cvcuda/OpCropFlipNormalizeReformat.cpp index f5f8af139..833fea5ec 100644 --- a/python/mod_cvcuda/OpCropFlipNormalizeReformat.cpp +++ b/python/mod_cvcuda/OpCropFlipNormalizeReformat.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -95,27 +95,27 @@ void ExportOpCropFlipNormalizeReformat(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - out_shape (Shape): The shape of the output. - out_dtype (DataType): The data type of the output. - out_layout (TensorLayout): The layout of the output. 
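
A minimal sketch of the tensor-variant copymakeborder call, using the keyword names from the binding above; the 16-pixel padding and the black border value are arbitrary example values, and the input is an uninitialized placeholder:

    import numpy as np
    import cvcuda
    import nvcv

    src = nvcv.Tensor((1, 480, 640, 3), np.uint8, "NHWC")

    padded = cvcuda.copymakeborder(
        src,
        border_mode=cvcuda.Border.CONSTANT,
        border_value=[0.0, 0.0, 0.0],   # one value per color channel
        top=16, bottom=16, left=16, right=16,
    )
    # padded has shape (1, 480 + 32, 640 + 32, 3)
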
- rect (Tensor): The crop rectangle tensor which has shape of [batch_size, 1, 1, 4] in reference to the input tensor. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + out_shape (tuple): The shape of the output. + out_dtype (numpy.dtype): The data type of the output. + out_layout (nvcv.TensorLayout): The layout of the output. + rect (nvcv.Tensor): The crop rectangle tensor which has shape of [batch_size, 1, 1, 4] in reference to the input tensor. The crop value of [crop_x, crop_y, crop_width, crop_height] stored in the final dimension of the crop tensor, provided per image. - flip_code (Tensor): A tensor flag to specify how to flip the array; 0 means flipping around the x-axis, + flip_code (nvcv.Tensor): A tensor flag to specify how to flip the array; 0 means flipping around the x-axis, 1 means flipping around the y-axis, -1 means flipping around both axes, and any other value will result in no flip, provided per image. - base (Tensor): Tensor providing base values for normalization. - scale (Tensor): Tensor providing scale values for normalization. - globalscale (float ,optional): Additional scale value to be used in addition to scale - globalshift (float ,optional): Additional bias value to be used in addition to base. - epsilon (float ,optional): Epsilon to use when CVCUDA_NORMALIZE_SCALE_IS_STDDEV flag is set as a regularizing term to be + base (nvcv.Tensor): Tensor providing base values for normalization. + scale (nvcv.Tensor): Tensor providing scale values for normalization. + globalscale (float, optional): Additional scale value to be used in addition to scale + globalshift (float, optional): Additional bias value to be used in addition to base. + epsilon (float, optional): Epsilon to use when CVCUDA_NORMALIZE_SCALE_IS_STDDEV flag is set as a regularizing term to be added to variance. - flags (int ,optional): Algorithm flags, use CVCUDA_NORMALIZE_SCALE_IS_STDDEV if scale passed as argument + flags (int, optional): Algorithm flags, use CVCUDA_NORMALIZE_SCALE_IS_STDDEV if scale passed as argument is standard deviation instead or 0 if it is scaling. - border (BorderType ,optional): Border mode to be used when accessing elements outside input image. - bvalue (float ,optional): Border value to be used for constant border mode NVCV_BORDER_CONSTANT. - stream (Stream, optional): CUDA Stream on which to perform the operation. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + bvalue (float, optional): Border value to be used for constant border mode cvcuda.Border.CONSTANT. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -137,25 +137,25 @@ void ExportOpCropFlipNormalizeReformat(py::module &m) for more details and usage examples. Args: - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - src (ImageBatchVarShape): Input image batch containing one or more images. - rect (Tensor): The crop rectangle tensor which has shape of [batch_size, 1, 1, 4] in reference to the input tensor. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + rect (nvcv.Tensor): The crop rectangle tensor which has shape of [batch_size, 1, 1, 4] in reference to the input tensor. The crop value of [crop_x, crop_y, crop_width, crop_height] stored in the final dimension of the crop tensor, provided per image. 
- flip_code (Tensor): A tensor flag to specify how to flip the array; 0 means flipping around the x-axis, + flip_code (nvcv.Tensor): A tensor flag to specify how to flip the array; 0 means flipping around the x-axis, 1 means flipping around the y-axis, -1 means flipping around both axes, and any other value will result in no flip, provided per image. - base (Tensor): Tensor providing base values for normalization. - scale (Tensor): Tensor providing scale values for normalization. - globalscale (float ,optional): Additional scale value to be used in addition to scale - globalshift (float ,optional): Additional bias value to be used in addition to base. - epsilon (float ,optional): Epsilon to use when CVCUDA_NORMALIZE_SCALE_IS_STDDEV flag is set as a regularizing term to be + base (nvcv.Tensor): Tensor providing base values for normalization. + scale (nvcv.Tensor): Tensor providing scale values for normalization. + globalscale (float, optional): Additional scale value to be used in addition to scale + globalshift (float, optional): Additional bias value to be used in addition to base. + epsilon (float, optional): Epsilon to use when CVCUDA_NORMALIZE_SCALE_IS_STDDEV flag is set as a regularizing term to be added to variance. - flags (int ,optional): Algorithm flags, use CVCUDA_NORMALIZE_SCALE_IS_STDDEV if scale passed as argument + flags (int, optional): Algorithm flags, use CVCUDA_NORMALIZE_SCALE_IS_STDDEV if scale passed as argument is standard deviation instead or 0 if it is scaling. - border (BorderType ,optional): Border mode to be used when accessing elements outside input image. - bvalue (float ,optional): Border value to be used for constant border mode NVCV_BORDER_CONSTANT. - stream (Stream, optional): CUDA Stream on which to perform the operation. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + bvalue (float, optional): Border value to be used for constant border mode cvcuda.Border.CONSTANT. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpCustomCrop.cpp b/python/mod_cvcuda/OpCustomCrop.cpp index c448eccda..123646efe 100644 --- a/python/mod_cvcuda/OpCustomCrop.cpp +++ b/python/mod_cvcuda/OpCustomCrop.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -97,12 +97,12 @@ void ExportOpCustomCrop(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - rect (RectI): Crop rectangle in reference to the input tensor. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + rect (nvcv.RectI): Crop rectangle in reference to the input tensor. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -118,10 +118,10 @@ void ExportOpCustomCrop(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. 
- rect (RectI): Crop rectangle in reference to the input tensor. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + rect (nvcv.RectI): Crop rectangle in reference to the input tensor. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpCvtColor.cpp b/python/mod_cvcuda/OpCvtColor.cpp index 39118b477..161f087cb 100644 --- a/python/mod_cvcuda/OpCvtColor.cpp +++ b/python/mod_cvcuda/OpCvtColor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include @@ -56,25 +56,21 @@ Tensor CvtColorInto(Tensor &output, Tensor &input, NVCVColorConversionCode code, Tensor CvtColor(Tensor &input, NVCVColorConversionCode code, std::optional pstream) { - int ndim = input.shape().size(); - auto layout = input.layout(); - auto outFormat = GetOutputFormat(input.dtype(), code); - auto out_dtype = outFormat.planeDataType(0).channelType(0); - if (ndim < 3) + nvcv::ImageFormat outputFormat = GetOutputFormat(input.dtype(), code); + nvcv::TensorShape outputShape = GetOutputTensorShape(input.shape(), outputFormat, code); + nvcv::DataType outputDType = outputFormat.planeDataType(0).channelType(0); + +#ifndef NDEBUG + assert(outputFormat.numPlanes() == 1); + nvcv::DataType channelDType = outputFormat.planeDataType(0).channelType(0); + for (int c = 1; c < outputFormat.planeDataType(0).numChannels(); ++c) { - throw std::runtime_error("Invalid input tensor shape"); + assert(channelDType == outputFormat.planeDataType(0).channelType(c)); } +#endif + + Tensor output = Tensor::Create(outputShape, outputDType); - std::array shape_data; - for (int d = 0; d < ndim; d++) - { - if (layout[d] == 'C') - shape_data[d] = outFormat.numChannels(); - else - shape_data[d] = input.shape()[d]; - } - nvcv::TensorShape out_shape(shape_data.data(), ndim, layout); - Tensor output = Tensor::Create(out_shape, out_dtype); return CvtColorInto(output, input, code, pstream); } @@ -131,7 +127,7 @@ void ExportOpCvtColor(py::module &m) m.def("cvtcolor", &CvtColor, "src"_a, "code"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.cvtcolor(src: nvcv.Tensor, code : NVCVColorConversionCode, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.cvtcolor(src: nvcv.Tensor, code: cvcuda.ColorConversion, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the CVT Color operation on the given cuda stream. @@ -140,12 +136,12 @@ void ExportOpCvtColor(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - code (NVCVColorConversionCode): Code describing the desired color conversion. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + code (cvcuda.ColorConversion): Code describing the desired color conversion. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. 
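
A small usage sketch for the cvtcolor binding whose output-shape logic is reworked above, assuming an NHWC uint8 RGB placeholder input and the RGB2GRAY conversion code (other codes follow the same pattern):

    import numpy as np
    import cvcuda
    import nvcv

    rgb = nvcv.Tensor((1, 480, 640, 3), np.uint8, "NHWC")

    # The output tensor shape and dtype are derived from the conversion code.
    gray = cvcuda.cvtcolor(rgb, cvcuda.ColorConversion.RGB2GRAY)

    # Or write into a pre-allocated destination:
    dst = nvcv.Tensor((1, 480, 640, 1), np.uint8, "NHWC")
    cvcuda.cvtcolor_into(dst, rgb, cvcuda.ColorConversion.RGB2GRAY)
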
Caution: Restrictions to several arguments may apply. Check the C @@ -154,7 +150,7 @@ void ExportOpCvtColor(py::module &m) m.def("cvtcolor_into", &CvtColorInto, "dst"_a, "src"_a, "code"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.cvtcolor_into(ds : nvcv.Tensor, src: nvcv.Tensor, code : NVCVColorConversionCode, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.cvtcolor_into(dst: nvcv.Tensor, src: nvcv.Tensor, code: cvcuda.ColorConversion, stream: Optional[nvcv.cuda.Stream] = None) Executes the CVT Color operation on the given cuda stream. @@ -163,10 +159,10 @@ void ExportOpCvtColor(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - code (NVCVColorConversionCode): Code describing the desired color conversion. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + code (cvcuda.ColorConversion): Code describing the desired color conversion. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -178,7 +174,7 @@ void ExportOpCvtColor(py::module &m) m.def("cvtcolor", &CvtColorVarShape, "src"_a, "code"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.cvtcolor(src: nvcv.ImageBatchVarShape, code : NVCVColorConversionCode, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.cvtcolor(src: nvcv.ImageBatchVarShape, code: cvcuda.ColorConversion, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the CVT Color operation on the given cuda stream. @@ -187,12 +183,12 @@ void ExportOpCvtColor(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - code (NVCVColorConversionCode): Code describing the desired color conversion. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + code (cvcuda.ColorConversion): Code describing the desired color conversion. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -202,7 +198,7 @@ void ExportOpCvtColor(py::module &m) m.def("cvtcolor_into", &CvtColorVarShapeInto, "dst"_a, "src"_a, "code"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.cvtcolor_into(dst : nvcv.ImageBatchVarShape , src: nvcv.ImageBatchVarShape, code : NVCVColorConversionCode, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.cvtcolor_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, code: cvcuda.ColorConversion, stream: Optional[nvcv.cuda.Stream] = None) Executes the CVT Color operation on the given cuda stream. @@ -211,10 +207,10 @@ void ExportOpCvtColor(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - code (NVCVColorConversionCode): Code describing the desired color conversion. - stream (Stream, optional): CUDA Stream on which to perform the operation. 
+ src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + code (cvcuda.ColorConversion): Code describing the desired color conversion. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpErase.cpp b/python/mod_cvcuda/OpErase.cpp index 7f7503e26..8e84fa216 100644 --- a/python/mod_cvcuda/OpErase.cpp +++ b/python/mod_cvcuda/OpErase.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -123,7 +123,7 @@ void ExportOpErase(py::module &m) m.def("erase", &Erase, "src"_a, "anchor"_a, "erasing"_a, "values"_a, "imgIdx"_a, py::kw_only(), "random"_a = false, "seed"_a = 0, "stream"_a = nullptr, R"pbdoc( - cvcuda.erase(src: nvcv.Tensor, anchor : nvcv.Tensor, erasing : nvcv.Tensor, values : nvcv.Tensor, imgIdx : nvcv.Tensor, random : int8, seed : int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.erase(src: nvcv.Tensor, anchor: nvcv.Tensor, erasing: nvcv.Tensor, values: nvcv.Tensor, imgIdx: nvcv.Tensor, random: int, seed: int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Erase operation on the given cuda stream. @@ -132,20 +132,20 @@ void ExportOpErase(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - anchor (Tensor): anchor an array of size num_erasing_area that gives the + src (nvcv.Tensor): Input tensor containing one or more images. + anchor (nvcv.Tensor): anchor an array of size num_erasing_area that gives the x coordinate and y coordinate of the top left point in the eraseing areas. - erasing (Tensor): Eraisng an array of size num_erasing_area that gives the widths of the eraseing areas, + erasing (nvcv.Tensor): Eraisng an array of size num_erasing_area that gives the widths of the eraseing areas, the heights of the eraseing areas and integers in range 0-15, each of whose bits indicates whether or not the corresponding channel need to be erased. - values (Tensor): An array of size num_erasing_area*4 that gives the filling value for each erase area. - imgIdx (Tensor): An array of size num_erasing_area that maps a erase area idx to img idx in the batch. - random (int8 , optional): random an value for random op. - seed (int ,optional): seed random seed for random filling erase area. - stream (Stream, optional): CUDA Stream on which to perform the operation. + values (nvcv.Tensor): An array of size num_erasing_area*4 that gives the filling value for each erase area. + imgIdx (nvcv.Tensor): An array of size num_erasing_area that maps a erase area idx to img idx in the batch. + random (int, optional): 8-bit integer value for random op. + seed (int, optional): seed random seed for random filling erase area. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. 
Check the C @@ -155,7 +155,7 @@ void ExportOpErase(py::module &m) m.def("erase_into", &EraseInto, "dst"_a, "src"_a, "anchor"_a, "erasing"_a, "values"_a, "imgIdx"_a, py::kw_only(), "random"_a = false, "seed"_a = 0, "stream"_a = nullptr, R"pbdoc( - cvcuda.erase_into(dst: nvcv.Tensor, src: nvcv.Tensor, anchor : nvcv.Tensor, erasing : nvcv.Tensor, values : nvcv.Tensor, imgIdx : nvcv.Tensor, random : int8, seed : int, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.erase_into(dst: nvcv.Tensor, src: nvcv.Tensor, anchor: nvcv.Tensor, erasing: nvcv.Tensor, values: nvcv.Tensor, imgIdx: nvcv.Tensor, random: int, seed: int, stream: Optional[nvcv.cuda.Stream] = None) Executes the Erase operation on the given cuda stream. @@ -164,18 +164,18 @@ void ExportOpErase(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - anchor (Tensor): anchor an array of size num_erasing_area that gives the + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + anchor (nvcv.Tensor): anchor an array of size num_erasing_area that gives the x coordinate and y coordinate of the top left point in the eraseing areas. - erasing (Tensor): Eraisng an array of size num_erasing_area that gives the widths of the eraseing areas, + erasing (nvcv.Tensor): Eraisng an array of size num_erasing_area that gives the widths of the eraseing areas, the heights of the eraseing areas and integers in range 0-15, each of whose bits indicates whether or not the corresponding channel need to be erased. - values (Tensor): An array of size num_erasing_area*4 that gives the filling value for each erase area. - imgIdx (Tensor): An array of size num_erasing_area that maps a erase area idx to img idx in the batch. - random (int8 , optional): random an value for random op. - seed (int ,optional): seed random seed for random filling erase area. - stream (Stream, optional): CUDA Stream on which to perform the operation. + values (nvcv.Tensor): An array of size num_erasing_area*4 that gives the filling value for each erase area. + imgIdx (nvcv.Tensor): An array of size num_erasing_area that maps a erase area idx to img idx in the batch. + random (int, optional): 8-bit integer value for random op. + seed (int, optional): seed random seed for random filling erase area. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -188,7 +188,7 @@ void ExportOpErase(py::module &m) m.def("erase", &EraseVarShape, "src"_a, "anchor"_a, "erasing"_a, "values"_a, "imgIdx"_a, py::kw_only(), "random"_a = false, "seed"_a = 0, "stream"_a = nullptr, R"pbdoc( - cvcuda.erase(src: nvcv.ImageBatchVarShape, anchor : nvcv.Tensor, erasing : nvcv.Tensor, values : nvcv.Tensor, imgIdx : nvcv.Tensor, random : int8, seed : int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.erase(src: nvcv.ImageBatchVarShape, anchor: nvcv.Tensor, erasing: nvcv.Tensor, values: nvcv.Tensor, imgIdx: nvcv.Tensor, random: int, seed: int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the Erase operation on the given cuda stream. @@ -197,21 +197,21 @@ void ExportOpErase(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. 
- anchor (Tensor): anchor an array of size num_erasing_area that gives the + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + anchor (nvcv.Tensor): anchor an array of size num_erasing_area that gives the x coordinate and y coordinate of the top left point in the eraseing areas. - erasing (Tensor): Eraisng an array of size num_erasing_area that gives the widths of the eraseing areas, + erasing (nvcv.Tensor): Eraisng an array of size num_erasing_area that gives the widths of the eraseing areas, the heights of the eraseing areas and integers in range 0-15, each of whose bits indicates whether or not the corresponding channel need to be erased. - values (Tensor): An array of size num_erasing_area*4 that gives the filling value for each erase area. - imgIdx (Tensor): An array of size num_erasing_area that maps a erase area idx to img idx in the batch. - random (int8 , optional): random an value for random op. - seed (int ,optional): seed random seed for random filling erase area. + values (nvcv.Tensor): An array of size num_erasing_area*4 that gives the filling value for each erase area. + imgIdx (nvcv.Tensor): An array of size num_erasing_area that maps a erase area idx to img idx in the batch. + random (int, optional): 8-bit integer value for random op. + seed (int, optional): seed random seed for random filling erase area. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -221,7 +221,7 @@ void ExportOpErase(py::module &m) m.def("erase_into", &EraseVarShapeInto, "dst"_a, "src"_a, "anchor"_a, "erasing"_a, "values"_a, "imgIdx"_a, py::kw_only(), "random"_a = false, "seed"_a = 0, "stream"_a = nullptr, R"pbdoc( - cvcuda.erase_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, anchor : nvcv.Tensor, erasing : nvcv.Tensor, values : nvcv.Tensor, imgIdx : nvcv.Tensor, random : int8, seed : int, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.erase_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, anchor: nvcv.Tensor, erasing: nvcv.Tensor, values: nvcv.Tensor, imgIdx: nvcv.Tensor, random: int, seed: int, stream: Optional[nvcv.cuda.Stream] = None) Executes the Erase operation on the given cuda stream. @@ -230,17 +230,17 @@ void ExportOpErase(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - anchor (Tensor): anchor an array of size num_erasing_area that gives the + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + anchor (nvcv.Tensor): anchor an array of size num_erasing_area that gives the x coordinate and y coordinate of the top left point in the eraseing areas. - erasing (Tensor): Eraisng an array of size num_erasing_area that gives the widths of the eraseing areas, + erasing (nvcv.Tensor): Eraisng an array of size num_erasing_area that gives the widths of the eraseing areas, the heights of the eraseing areas and integers in range 0-15, each of whose bits indicates whether or not the corresponding channel need to be erased. 
- values (Tensor): An array of size num_erasing_area*4 that gives the filling value for each erase area. - imgIdx (Tensor): An array of size num_erasing_area that maps a erase area idx to img idx in the batch. - random (int8 , optional): random an value for random op. - stream (Stream, optional): CUDA Stream on which to perform the operation. + values (nvcv.Tensor): An array of size num_erasing_area*4 that gives the filling value for each erase area. + imgIdx (nvcv.Tensor): An array of size num_erasing_area that maps a erase area idx to img idx in the batch. + random (int, optional): 8-bit integer value for random op. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpFindHomography.cpp b/python/mod_cvcuda/OpFindHomography.cpp index 3560cc91f..4fe28c41c 100644 --- a/python/mod_cvcuda/OpFindHomography.cpp +++ b/python/mod_cvcuda/OpFindHomography.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,8 +22,8 @@ #include #include #include +#include #include -#include #include #include #include @@ -243,12 +243,12 @@ void ExportOpFindHomography(py::module &m) for more details and usage examples. Args: - srcPts (Tensor): Input source coordinates tensor containing 2D coordinates in the source image. - dstPts (Tensor): Input destination coordinates tensor containing 2D coordinates in the target image. + srcPts (nvcv.Tensor): Input source coordinates tensor containing 2D coordinates in the source image. + dstPts (nvcv.Tensor): Input destination coordinates tensor containing 2D coordinates in the target image. stream (Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The model homography matrix tensor. + nvcv.Tensor: The model homography matrix tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -266,13 +266,13 @@ void ExportOpFindHomography(py::module &m) for more details and usage examples. Args: - models (Tensor) : Output model tensor containing 3x3 homography matrices. - srcPts (Tensor): Input source coordinates tensor containing 2D coordinates in the source image. - dstPts (Tensor): Input destination coordinates tensor containing 2D coordinates in the target image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + models (nvcv.Tensor): Output model tensor containing 3x3 homography matrices. + srcPts (nvcv.Tensor): Input source coordinates tensor containing 2D coordinates in the source image. + dstPts (nvcv.Tensor): Input destination coordinates tensor containing 2D coordinates in the target image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The model homography matrix tensor. + nvcv.Tensor: The model homography matrix tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -290,12 +290,12 @@ void ExportOpFindHomography(py::module &m) for more details and usage examples. Args: - srcPts (TensorBatch): Input source coordinates tensor containing 2D coordinates in the source image. - dstPts (TensorBatch): Input destination coordinates tensor containing 2D coordinates in the target image. 
- stream (Stream, optional): CUDA Stream on which to perform the operation. + srcPts (nvcv.TensorBatch): Input source coordinates tensor containing 2D coordinates in the source image. + dstPts (nvcv.TensorBatch): Input destination coordinates tensor containing 2D coordinates in the target image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.TensorBatch: The model homography matrix tensor batch. + nvcv.TensorBatch: The model homography matrix tensor batch. Caution: Restrictions to several arguments may apply. Check the C @@ -314,13 +314,13 @@ void ExportOpFindHomography(py::module &m) for more details and usage examples. Args: - models (TensorBatch) : Output model tensor containing 3x3 homography matrices. - srcPts (TensorBatch): Input source coordinates tensor containing 2D coordinates in the source image. - dstPts (TensorBatch): Input destination coordinates tensor containing 2D coordinates in the target image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + models (nvcv.TensorBatch): Output model tensor containing 3x3 homography matrices. + srcPts (nvcv.TensorBatch): Input source coordinates tensor containing 2D coordinates in the source image. + dstPts (nvcv.TensorBatch): Input destination coordinates tensor containing 2D coordinates in the target image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.TensorBatch: The model homography matrix tensor batch. + nvcv.TensorBatch: The model homography matrix tensor batch. Caution: diff --git a/python/mod_cvcuda/OpFlip.cpp b/python/mod_cvcuda/OpFlip.cpp index 72dce09d3..9bb883c70 100644 --- a/python/mod_cvcuda/OpFlip.cpp +++ b/python/mod_cvcuda/OpFlip.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -102,7 +102,7 @@ void ExportOpFlip(py::module &m) m.def("flip", &Flip, "src"_a, "flipCode"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.flip(src: nvcv.Tensor, flipCode : int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.flip(src: nvcv.Tensor, flipCode: int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Flip operation on the given cuda stream. @@ -111,15 +111,15 @@ void ExportOpFlip(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. + src (nvcv.Tensor): Input tensor containing one or more images. flipCode (int): Flag to specify how to flip the array; 0 means flipping around the x-axis and positive value (for example, 1) means flipping around y-axis. Negative value (for example, -1) means flipping around both axes. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. 
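
A sketch of the flip binding documented above; flipCode follows the convention in the Args list (0 flips around the x-axis, a positive value around the y-axis, a negative value around both), and the input is an uninitialized NHWC uint8 placeholder:

    import numpy as np
    import cvcuda
    import nvcv

    src = nvcv.Tensor((1, 480, 640, 3), np.uint8, "NHWC")

    flipped_y  = cvcuda.flip(src, flipCode=1)    # around the y-axis
    flipped_x  = cvcuda.flip(src, flipCode=0)    # around the x-axis
    flipped_xy = cvcuda.flip(src, flipCode=-1)   # around both axes

    dst = nvcv.Tensor((1, 480, 640, 3), np.uint8, "NHWC")
    cvcuda.flip_into(dst, src, flipCode=1)
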
Check the C @@ -128,7 +128,7 @@ void ExportOpFlip(py::module &m) m.def("flip_into", &FlipInto, "dst"_a, "src"_a, "flipCode"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.flip_into(dst : nvcv.Tensor, src: nvcv.Tensor, flipCode : int, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.flip_into(dst: nvcv.Tensor, src: nvcv.Tensor, flipCode: int, stream: Optional[nvcv.cuda.Stream] = None) Executes the Flip operation on the given cuda stream. @@ -137,13 +137,13 @@ void ExportOpFlip(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. flipCode (int): Flag to specify how to flip the array; 0 means flipping around the x-axis and positive value (for example, 1) means flipping around y-axis. Negative value (for example, -1) means flipping around both axes. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -155,7 +155,7 @@ void ExportOpFlip(py::module &m) m.def("flip", &FlipVarShape, "src"_a, "flipCode"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.flip(src: nvcv.ImageBatchVarShape, flipCode : nvcv.Tensor , stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.flip(src: nvcv.ImageBatchVarShape, flipCode: nvcv.Tensor, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the Flip operation on the given cuda stream. @@ -164,15 +164,15 @@ void ExportOpFlip(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - flipCode (Tensor): Flag to specify how to flip the array; 0 means flipping + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + flipCode (nvcv.Tensor): Flag to specify how to flip the array; 0 means flipping around the x-axis and positive value (for example, 1) means flipping around y-axis. Negative value (for example, -1) means flipping around both axes. Specified for all images in batch. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -181,7 +181,7 @@ void ExportOpFlip(py::module &m) m.def("flip_into", &FlipVarShapeInto, "dst"_a, "src"_a, "flipCode"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.flip_into(dst:nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, flipCode : nvcv.Tensor , stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.flip_into(dst:nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, flipCode: nvcv.Tensor, stream: Optional[nvcv.cuda.Stream] = None) Executes the Flip operation on the given cuda stream. @@ -190,13 +190,13 @@ void ExportOpFlip(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - dst (ImageBatchVarShape): Output image batch containing the result of the operation. 
- flipCode (Tensor): Flag to specify how to flip the array; 0 means flipping + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + flipCode (nvcv.Tensor): Flag to specify how to flip the array; 0 means flipping around the x-axis and positive value (for example, 1) means flipping around y-axis. Negative value (for example, -1) means flipping around both axes. Specified for all images in batch. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpGammaContrast.cpp b/python/mod_cvcuda/OpGammaContrast.cpp index 8df72480b..10a422471 100644 --- a/python/mod_cvcuda/OpGammaContrast.cpp +++ b/python/mod_cvcuda/OpGammaContrast.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -81,12 +81,12 @@ void ExportOpGammaContrast(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input tensor containing one or more images. - gamma (Tensor): 1D Tensor with the the gamma value for each image / image channel. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input tensor containing one or more images. + gamma (nvcv.Tensor): 1D Tensor with the gamma value for each image / image channel. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -103,10 +103,10 @@ void ExportOpGammaContrast(py::module &m) for more details and usage examples. Args: - dst (ImageBatchVarShape): Output tensor to store the result of the operation. - src (ImageBatchVarShape): Input tensor containing one or more images. - gamma (Tensor): 1D Tensor with the the gamma value for each image / image channel. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.ImageBatchVarShape): Output tensor to store the result of the operation. + src (nvcv.ImageBatchVarShape): Input tensor containing one or more images. + gamma (nvcv.Tensor): 1D Tensor with the gamma value for each image / image channel. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpGaussian.cpp b/python/mod_cvcuda/OpGaussian.cpp index fdf9de806..e029cc644 100644 --- a/python/mod_cvcuda/OpGaussian.cpp +++ b/python/mod_cvcuda/OpGaussian.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
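For reference, a minimal sketch of the flip bindings documented above. It is illustrative only and assumes src is an existing NHWC nvcv.Tensor on the device, with dst, where used, preallocated with the same shape:

    import cvcuda

    def flip_both_axes(src):
        # flipCode=-1 flips around both axes, per the Args description above.
        return cvcuda.flip(src, flipCode=-1)

    def flip_both_axes_into(dst, src):
        # In-place variant: writes the result into the preallocated dst tensor.
        cvcuda.flip_into(dst, src, flipCode=-1)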
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -110,7 +110,7 @@ void ExportOpGaussian(py::module &m) m.def("gaussian", &Gaussian, "src"_a, "kernel_size"_a, "sigma"_a, "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.gaussian(src : nvcv.Tensor, kernel_size: Tuple [int,int], sigma : Tuple [double,double], border : border_mode:NVCVBorderType, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.gaussian(src: nvcv.Tensor, kernel_size: Tuple[int, int], sigma: Tuple[double, double], border: cvcuda.Border, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Gaussian operation on the given cuda stream. See also: @@ -118,14 +118,14 @@ void ExportOpGaussian(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - kernel_size (Tuple [int,int]): Kernel width, height. - sigma (Tuple [double,double]): Gaussian kernel standard deviation in X,Y directions. - border (NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + kernel_size (Tuple[int, int]): Kernel width, height. + sigma (Tuple[double, double]): Gaussian kernel standard deviation in X,Y directions. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -135,7 +135,7 @@ void ExportOpGaussian(py::module &m) m.def("gaussian_into", &GaussianInto, "dst"_a, "src"_a, "kernel_size"_a, "sigma"_a, "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.gaussian_into(dst : nvcv.Tensor, src : Tensor, kernel_size: Tuple [int,int], sigma : Tuple [double,double], border : border_mode:NVCVBorderType, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.gaussian_into(dst: nvcv.Tensor, src: nvcv.Tensor, kernel_size: Tuple[int, int], sigma: Tuple[double, double], border: cvcuda.Border, stream: Optional[nvcv.cuda.Stream] = None) Executes the Gaussian operation on the given cuda stream. See also: @@ -143,12 +143,12 @@ void ExportOpGaussian(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - kernel_size (Tuple [int,int]): Kernel width, height. - sigma (Tuple [double,double]): Gaussian kernel standard deviation in X,Y directions. - border (NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + kernel_size (Tuple[int, int]): Kernel width, height. + sigma (Tuple[double, double]): Gaussian kernel standard deviation in X,Y directions. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image.
+ stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -161,7 +161,7 @@ void ExportOpGaussian(py::module &m) m.def("gaussian", &VarShapeGaussian, "src"_a, "max_kernel_size"_a, "kernel_size"_a, "sigma"_a, "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.gaussian(src : nvcv.ImageBatchVarShape, kernel_size: nvcv.Tensor, sigma : nvcv.Tensor, border : border_mode:NVCVBorderType, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.gaussian(src: nvcv.ImageBatchVarShape, kernel_size: nvcv.Tensor, sigma: nvcv.Tensor, border: cvcuda.Border, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the Gaussian operation on the given cuda stream. @@ -170,14 +170,14 @@ void ExportOpGaussian(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - kernel_size (Tensor): Kernel width, height. - sigma (Tensor): Gaussian kernel standard deviation in X,Y directions. - border (NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + kernel_size (nvcv.Tensor): Kernel width, height. + sigma (nvcv.Tensor): Gaussian kernel standard deviation in X,Y directions. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -187,7 +187,7 @@ void ExportOpGaussian(py::module &m) m.def("gaussian_into", &VarShapeGaussianInto, "dst"_a, "src"_a, "max_kernel_size"_a, "kernel_size"_a, "sigma"_a, "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.gaussian_into(dst : nvcv.ImageBatchVarShape, src : nvcv.ImageBatchVarShape, kernel_size: nvcv.Tensor, sigma : nvcv.Tensor, border : border_mode:NVCVBorderType, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.gaussian_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, kernel_size: nvcv.Tensor, sigma: nvcv.Tensor, border: cvcuda.Border, stream: Optional[nvcv.cuda.Stream] = None) Executes the Gaussian operation on the given cuda stream. @@ -196,12 +196,12 @@ void ExportOpGaussian(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - kernel_size (Tensor): Kernel width, height. - sigma (Tensor): Gaussian kernel standard deviation in X,Y directions. - border (NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + kernel_size (nvcv.Tensor): Kernel width, height. + sigma (nvcv.Tensor): Gaussian kernel standard deviation in X,Y directions.
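A minimal sketch of the gaussian binding documented above, illustrative only; it assumes src is an existing nvcv.Tensor and relies on the default border mode shown in the signature:

    import cvcuda

    def gaussian_blur(src):
        # 5x5 kernel with sigma 1.5 in both X and Y; the border argument is
        # left at its documented default (constant border).
        return cvcuda.gaussian(src, kernel_size=(5, 5), sigma=(1.5, 1.5))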
+ border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpGaussianNoise.cpp b/python/mod_cvcuda/OpGaussianNoise.cpp index 94280ceba..5d254a623 100644 --- a/python/mod_cvcuda/OpGaussianNoise.cpp +++ b/python/mod_cvcuda/OpGaussianNoise.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -111,7 +111,7 @@ void ExportOpGaussianNoise(py::module &m) m.def("gaussiannoise", &GaussianNoise, "src"_a, "mu"_a, "sigma"_a, "per_channel"_a, "seed"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.gaussiannoise(src : nvcv.Tensor, mu : nvcv.Tensor, sigma : nvcv.Tensor, per_channel : bool, seed : int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.gaussiannoise(src: nvcv.Tensor, mu: nvcv.Tensor, sigma: nvcv.Tensor, per_channel: bool, seed: int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the GaussianNoise operation on the given cuda stream. @@ -120,15 +120,15 @@ void ExportOpGaussianNoise(py::module &m) for more details and usage examples. Args: - src (Tensor): Input image batch containing one or more images. - mu (Tensor): An array of size batch that gives the mu value of each image. - sigma (Tensor): An array of size batch that gives the sigma value of each image. + src (nvcv.Tensor): Input image batch containing one or more images. + mu (nvcv.Tensor): An array of size batch that gives the mu value of each image. + sigma (nvcv.Tensor): An array of size batch that gives the sigma value of each image. per_channel (bool): Whether to add the same noise for all channels. seed (int): Seed for random numbers. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output image batch. + nvcv.Tensor: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -138,7 +138,7 @@ void ExportOpGaussianNoise(py::module &m) m.def("gaussiannoise_into", &GaussianNoiseInto, "dst"_a, "src"_a, "mu"_a, "sigma"_a, "per_channel"_a, "seed"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.gaussiannoise_into(dst : nvcv.Tensor, src : nvcv.Tensor, mu : nvcv.Tensor, sigma : nvcv.Tensor, per_channel : bool, seed : int, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.gaussiannoise_into(dst: nvcv.Tensor, src: nvcv.Tensor, mu: nvcv.Tensor, sigma: nvcv.Tensor, per_channel: bool, seed: int, stream: Optional[nvcv.cuda.Stream] = None) Executes the GaussianNoise operation on the given cuda stream. @@ -147,13 +147,13 @@ void ExportOpGaussianNoise(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output image batch containing the result of the operation. - src (Tensor): Input image batch containing one or more images. - mu (Tensor): An array of size batch that gives the mu value of each image. - sigma (Tensor): An array of size batch that gives the sigma value of each image. + dst (nvcv.Tensor): Output image batch containing the result of the operation. + src (nvcv.Tensor): Input image batch containing one or more images. 
+ mu (nvcv.Tensor): An array of size batch that gives the mu value of each image. + sigma (nvcv.Tensor): An array of size batch that gives the sigma value of each image. per_channel (bool): Whether to add the same noise for all channels. seed (int): Seed for random numbers. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -166,7 +166,7 @@ void ExportOpGaussianNoise(py::module &m) m.def("gaussiannoise", &GaussianNoiseVarShape, "src"_a, "mu"_a, "sigma"_a, "per_channel"_a, "seed"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.gaussiannoise(src : nvcv.ImageBatchVarShape, mu : nvcv.Tensor, sigma : nvcv.Tensor, per_channel : bool, seed : int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.gaussiannoise(src: nvcv.ImageBatchVarShape, mu: nvcv.Tensor, sigma: nvcv.Tensor, per_channel: bool, seed: int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the GaussianNoise operation on the given cuda stream. @@ -176,11 +176,11 @@ void ExportOpGaussianNoise(py::module &m) Args: src (ImageBatchVarShape): Input image batch containing one or more images. - mu (Tensor): An array of size batch that gives the mu value of each image. - sigma (Tensor): An array of size batch that gives the sigma value of each image. + mu (nvcv.Tensor): An array of size batch that gives the mu value of each image. + sigma (nvcv.Tensor): An array of size batch that gives the sigma value of each image. per_channel (bool): Whether to add the same noise for all channels. seed (int): Seed for random numbers. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: cvcuda.ImageBatchVarShape: The output image batch. @@ -193,7 +193,7 @@ void ExportOpGaussianNoise(py::module &m) m.def("gaussiannoise_into", &GaussianNoiseVarShapeInto, "dst"_a, "src"_a, "mu"_a, "sigma"_a, "per_channel"_a, "seed"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.gaussiannoise_into(dst : nvcv.ImageBatchVarShape, src : nvcv.ImageBatchVarShape, mu : nvcv.Tensor, sigma : nvcv.Tensor, per_channel : bool, seed : int, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.gaussiannoise_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, mu: nvcv.Tensor, sigma: nvcv.Tensor, per_channel: bool, seed: int, stream: Optional[nvcv.cuda.Stream] = None) Executes the GaussianNoise operation on the given cuda stream. @@ -204,11 +204,11 @@ void ExportOpGaussianNoise(py::module &m) Args: dst (ImageBatchVarShape): Output image batch containing the result of the operation. src (ImageBatchVarShape): Input image batch containing one or more images. - mu (Tensor): An array of size batch that gives the mu value of each image. - sigma (Tensor): An array of size batch that gives the sigma value of each image. + mu (nvcv.Tensor): An array of size batch that gives the mu value of each image. + sigma (nvcv.Tensor): An array of size batch that gives the sigma value of each image. per_channel (bool): Whether to add the same noise for all channels. seed (int): Seed for random numbers. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. 
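A sketch of the gaussiannoise call documented above, illustrative only; mu and sigma are assumed to be caller-built nvcv.Tensor objects holding one value per image in the batch:

    import cvcuda

    def add_gaussian_noise(src, mu, sigma):
        # per_channel=False applies the same noise to every channel; seed
        # fixes the random number generator, per the Args above.
        return cvcuda.gaussiannoise(src, mu, sigma, per_channel=False, seed=1234)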
Returns: None diff --git a/python/mod_cvcuda/OpHQResize.cpp b/python/mod_cvcuda/OpHQResize.cpp index 295771013..28ef3ef17 100644 --- a/python/mod_cvcuda/OpHQResize.cpp +++ b/python/mod_cvcuda/OpHQResize.cpp @@ -561,23 +561,23 @@ void ExportOpHQResize(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. + src (nvcv.Tensor): Input tensor containing one or more images. The tensor layout must match: (N)(D)HW(C). - out_size (Shape): Tuple of 2 or 3 ints describing the output shape in (D)HW layout. + out_size (tuple): Tuple of 2 or 3 ints describing the output shape in (D)HW layout. antialias (bool): If set to true, an antialiasing is enabled for scaling down. - roi(Tuple): Optional bounding box describing the input's region of interest. + roi (Tuple): Optional bounding box describing the input's region of interest. For 2D resampling it should be (lowH, lowW, highH, highW), for 3D: (lowD, lowH, lowW, highD, highH, highW). If, for some axis, the low bound is bigger than the high bound, the image is flipped across the axis. - interpolation(Interp): Interpolation type used. Used both for scaling down and up, + interpolation (cvcuda.Interp): Interpolation type used. Used both for scaling down and up, cannot be specified together with (min_interpolation or mag_interpolation). - min_interpolation(Interp): Interpolation type used for scaling down. - mag_interpolation(Interp): Interpolation type used for scaling up. - stream (Stream, optional): CUDA Stream on which to perform the operation. + min_interpolation (cvcuda.Interp): Interpolation type used for scaling down. + mag_interpolation (cvcuda.Interp): Interpolation type used for scaling up. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -593,23 +593,23 @@ void ExportOpHQResize(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input batch of images. - out_size (Shape): Tuple of 2 ints describing the output shape in HW layout. + src (nvcv.ImageBatchVarShape): Input batch of images. + out_size (tuple): Tuple of 2 ints describing the output shape in HW layout. antialias (bool): If set to true, an antialiasing is enabled for scaling down. - roi(List[Tuple]): Optional bounding boxes describing the input's region of interest. + roi (List[Tuple[int]]): Optional bounding boxes describing the input's region of interest. It should be a list of tuples. The list length must match the number of input tensors or be 1 (so that the same ROI is used for all samples). Each tuple must be of the form (lowH, lowW, highH, highW). If, for some axis, the low bound is bigger than the high bound, the image is flipped across the axis. - interpolation(Interp): Interpolation type used. Used both for scaling down and up, + interpolation (cvcuda.Interp): Interpolation type used. Used both for scaling down and up, cannot be specified together with (min_interpolation or mag_interpolation). - min_interpolation(Interp): Interpolation type used for scaling down. - mag_interpolation(Interp): Interpolation type used for scaling up. - stream (Stream, optional): CUDA Stream on which to perform the operation. + min_interpolation (cvcuda.Interp): Interpolation type used for scaling down. + mag_interpolation (cvcuda.Interp): Interpolation type used for scaling up. 
+ stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The batch of resized images. + nvcv.ImageBatchVarShape: The batch of resized images. Caution: Restrictions to several arguments may apply. Check the C @@ -626,10 +626,10 @@ void ExportOpHQResize(py::module &m) for more details and usage examples. Args: - src (TensorBatch): Input batch containing one or more tensors of (D)HW(C) layout. - out_size (Shape): Tuple of 2 or 3 ints describing the output shape in (D)HW layout. + src (nvcv.TensorBatch): Input batch containing one or more tensors of (D)HW(C) layout. + out_size (tuple): Tuple of 2 or 3 ints describing the output shape in (D)HW layout. antialias (bool): If set to true, an antialiasing is enabled for scaling down. - roi(List[Tuple]): Optional bounding boxes describing the input's region of interest. + roi (List[Tuple[int]]): Optional bounding boxes describing the input's region of interest. It should be a list of tuples. The list length must match the number of input tensors or be 1 (so that the same ROI is used for all samples). Each tuple must be of the form: @@ -637,14 +637,14 @@ void ExportOpHQResize(py::module &m) * for 3D: (lowD, lowH, lowW, highD, highH, highW). If, for some axis, the low bound is bigger than the high bound, the tensor is flipped across the axis. - interpolation(Interp): Interpolation type used. Used both for scaling down and up, + interpolation (cvcuda.Interp): Interpolation type used. Used both for scaling down and up, cannot be specified together with (min_interpolation or mag_interpolation). - min_interpolation(Interp): Interpolation type used for scaling down. - mag_interpolation(Interp): Interpolation type used for scaling up. - stream (Stream, optional): CUDA Stream on which to perform the operation. + min_interpolation (cvcuda.Interp): Interpolation type used for scaling down. + mag_interpolation (cvcuda.Interp): Interpolation type used for scaling up. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.TensorBatch: The batch of resized tensors. + nvcv.TensorBatch: The batch of resized tensors. Caution: Restrictions to several arguments may apply. Check the C @@ -661,25 +661,25 @@ void ExportOpHQResize(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor. It's layout must match the src tensor. + dst (nvcv.Tensor): Output tensor. It's layout must match the src tensor. The size of D, H, and W extents may be different. The dst type must match the src's type or be float32. - src (Tensor): Input tensor containing one or more images. + src (nvcv.Tensor): Input tensor containing one or more images. The tensor layout must match: (N)(D)HW(C). antialias (bool): If set to true, an antialiasing is enabled for scaling down. - roi(Tuple): Optional bounding box describing the input's region of interest. + roi (Tuple[int]): Optional bounding box describing the input's region of interest. For 2D resampling it should be (lowH, lowW, highH, highW), for 3D: (lowD, lowH, lowW, highD, highH, highW). If, for some axis, the low bound is bigger than the high bound, the image is flipped across the axis. - interpolation(Interp): Interpolation type used. Used both for scaling down and up, + interpolation (cvcuda.Interp): Interpolation type used. Used both for scaling down and up, cannot be specified together with (min_interpolation or mag_interpolation). - min_interpolation(Interp): Interpolation type used for scaling down. 
- mag_interpolation(Interp): Interpolation type used for scaling up. - stream (Stream, optional): CUDA Stream on which to perform the operation. + min_interpolation (cvcuda.Interp): Interpolation type used for scaling down. + mag_interpolation (cvcuda.Interp): Interpolation type used for scaling up. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -695,25 +695,25 @@ void ExportOpHQResize(py::module &m) for more details and usage examples. Args: - dst (ImageBatchVarShape): Output batch. The layout must match the input batch. + dst (nvcv.ImageBatchVarShape): Output batch. The layout must match the input batch. The size of D, H, and W extents may be different. The dst type must match the src's type or be float32. - src (ImageBatchVarShape): Input batch of images. + src (nvcv.ImageBatchVarShape): Input batch of images. antialias (bool): If set to true, an antialiasing is enabled for scaling down. - roi(List[Tuple]): Optional bounding boxes describing the input's region of interest. + roi (List[Tuple[int]]): Optional bounding boxes describing the input's region of interest. It should be a list of tuples. The list length must match the number of input tensors or be 1 (so that the same ROI is used for all samples). Each tuple must be of the form (lowH, lowW, highH, highW). If, for some axis, the low bound is bigger than the high bound, the image is flipped across the axis. - interpolation(Interp): Interpolation type used. Used both for scaling down and up, + interpolation (cvcuda.Interp): Interpolation type used. Used both for scaling down and up, cannot be specified together with (min_interpolation or mag_interpolation). - min_interpolation(Interp): Interpolation type used for scaling down. - mag_interpolation(Interp): Interpolation type used for scaling up. - stream (Stream, optional): CUDA Stream on which to perform the operation. + min_interpolation (cvcuda.Interp): Interpolation type used for scaling down. + mag_interpolation (cvcuda.Interp): Interpolation type used for scaling up. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The batch of resized images. + nvcv.ImageBatchVarShape: The batch of resized images. Caution: Restrictions to several arguments may apply. Check the C @@ -730,12 +730,12 @@ void ExportOpHQResize(py::module &m) for more details and usage examples. Args: - dst (TensorBatch): Output batch. The layout must match the input batch. + dst (nvcv.TensorBatch): Output batch. The layout must match the input batch. The size of D, H, and W extents may be different. The dst type must match the src's type or be float32. - src (TensorBatch): Input batch containing one or more tensors of (D)HW(C) layout. + src (nvcv.TensorBatch): Input batch containing one or more tensors of (D)HW(C) layout. antialias (bool): If set to true, an antialiasing is enabled for scaling down. - roi(List[Tuple]): Optional bounding boxes describing the input's region of interest. + roi (List[Tuple[int]]): Optional bounding boxes describing the input's region of interest. It should be a list of tuples. The list length must match the number of input tensors or be 1 (so that the same ROI is used for all samples). Each tuple must be of the form: @@ -743,14 +743,14 @@ void ExportOpHQResize(py::module &m) * for 3D: (lowD, lowH, lowW, highD, highH, highW). 
If, for some axis, the low bound is bigger than the high bound, the tensor is flipped across the axis. - interpolation(Interp): Interpolation type used. Used both for scaling down and up, + interpolation (cvcuda.Interp): Interpolation type used. Used both for scaling down and up, cannot be specified together with (min_interpolation or mag_interpolation). - min_interpolation(Interp): Interpolation type used for scaling down. - mag_interpolation(Interp): Interpolation type used for scaling up. - stream (Stream, optional): CUDA Stream on which to perform the operation. + min_interpolation (cvcuda.Interp): Interpolation type used for scaling down. + mag_interpolation (cvcuda.Interp): Interpolation type used for scaling up. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.TensorBatch: The batch of resized tensors. + nvcv.TensorBatch: The batch of resized tensors. Caution: Restrictions to several arguments may apply. Check the C diff --git a/python/mod_cvcuda/OpHistogram.cpp b/python/mod_cvcuda/OpHistogram.cpp index 715882433..b626b46dc 100644 --- a/python/mod_cvcuda/OpHistogram.cpp +++ b/python/mod_cvcuda/OpHistogram.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -101,12 +101,12 @@ void ExportOpHistogram(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images, input tensor must be (N)HWC, currently only grayscale uint8 is supported. - mask (Tensor, optional): Input tensor containing the mask of the pixels to be considered for the histogram, must be the same shape as src. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images, input tensor must be (N)HWC, currently only grayscale uint8 is supported. + mask (nvcv.Tensor, optional): Input tensor containing the mask of the pixels to be considered for the histogram, must be the same shape as src. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor containing the histogram. The tensor is formatted as HWC with W = 256 and H = number of input tensors, and C = 1. + nvcv.Tensor: The output tensor containing the histogram. The tensor is formatted as HWC with W = 256 and H = number of input tensors, and C = 1. Caution: Restrictions to several arguments may apply. Check the C @@ -123,10 +123,10 @@ void ExportOpHistogram(py::module &m) for more details and usage examples. Args: - histogram (Tensor): Output tensor containing the histogram. The tensor is formatted as HWC with W = 256 and H = number of input tensors, and C = 1. - src (Tensor): Input tensor containing one or more images, input tensor must be (N)HWC, currently only grayscale uint8 is supported. - mask (Tensor, optional): Input tensor containing the bit mask of the pixels to be considered for the histogram, must be the same shape as src. - stream (Stream, optional): CUDA Stream on which to perform the operation. + histogram (nvcv.Tensor): Output tensor containing the histogram. The tensor is formatted as HWC with W = 256 and H = number of input tensors, and C = 1. 
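A sketch of the HQ resize calls whose docstrings are updated above, illustrative only; it assumes the operator is exposed as cvcuda.hq_resize and that src is an (N)HW(C) nvcv.Tensor:

    import cvcuda

    def downscale_with_antialias(src):
        # out_size is (H, W); antialiasing is enabled for the downscale,
        # matching the antialias argument documented above.
        return cvcuda.hq_resize(src, (540, 960), antialias=True)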
+ src (nvcv.Tensor): Input tensor containing one or more images, input tensor must be (N)HWC, currently only grayscale uint8 is supported. + mask (nvcv.Tensor, optional): Input tensor containing the bit mask of the pixels to be considered for the histogram, must be the same shape as src. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None. diff --git a/python/mod_cvcuda/OpHistogramEq.cpp b/python/mod_cvcuda/OpHistogramEq.cpp index 30bf40384..51e82f3c6 100644 --- a/python/mod_cvcuda/OpHistogramEq.cpp +++ b/python/mod_cvcuda/OpHistogramEq.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -102,7 +102,7 @@ void ExportOpHistogramEq(py::module &m) m.def("histogrameq", &HistogramEq, "src"_a, "dtype"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.histogrameq(src: nvcv.Tensor, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.histogrameq(src: nvcv.Tensor, dtype: numpy.dtype, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the histogram equalization operation on the given cuda stream. @@ -111,11 +111,12 @@ void ExportOpHistogramEq(py::module &m) for more details and usage examples. Args: - src (Tensor): Input image batch containing one or more images. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input image batch containing one or more images. + dtype (numpy.dtype): The data type of the output. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output image batch. + nvcv.Tensor: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -133,9 +134,9 @@ void ExportOpHistogramEq(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output image batch containing the result of the operation. - src (Tensor): Input image batch containing one or more images. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output image batch containing the result of the operation. + src (nvcv.Tensor): Input image batch containing one or more images. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -156,11 +157,11 @@ void ExportOpHistogramEq(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -178,9 +179,9 @@ void ExportOpHistogramEq(py::module &m) for more details and usage examples. Args: - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - src (ImageBatchVarShape): Input image batch containing one or more images. - stream (Stream, optional): CUDA Stream on which to perform the operation.
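As a usage sketch for the histogram and histogram-equalization bindings documented in these hunks, illustrative only; it assumes the operators are exposed as cvcuda.histogram and cvcuda.histogrameq, that src is a grayscale uint8 (N)HWC nvcv.Tensor as the histogram docstring requires, and that numpy scalar types are accepted where numpy.dtype is documented:

    import numpy as np
    import cvcuda

    def histogram_then_equalize(src):
        # Histogram: HWC output with W = 256 bins, H = number of input images,
        # C = 1, as described in the Returns section above.
        hist = cvcuda.histogram(src)
        # Equalization: dtype selects the output data type, per the Args above.
        equalized = cvcuda.histogrameq(src, np.uint8)
        return hist, equalized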
+ dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpInpaint.cpp b/python/mod_cvcuda/OpInpaint.cpp index 36af7a0ec..41a54486e 100644 --- a/python/mod_cvcuda/OpInpaint.cpp +++ b/python/mod_cvcuda/OpInpaint.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -195,7 +195,7 @@ void ExportOpInpaint(py::module &m) m.def("inpaint", &Inpaint, "src"_a, "masks"_a, "inpaintRadius"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.inpaint(src: nvcv.Tensor, masks:Tensor, inpaintRadius:double, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.inpaint(src: nvcv.Tensor, masks: Tensor, inpaintRadius: float, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Inpaint operation on the given cuda stream. @@ -204,13 +204,13 @@ void ExportOpInpaint(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - masks (Tensor): Mask tensor, 8-bit 1-channel images. Non-zero pixels indicate the area that needs to be inpainted. - inpaintRadius (double): Radius of a circular neighborhood of each point inpainted that is considered by the algorithm. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + masks (nvcv.Tensor): Mask tensor, 8-bit 1-channel images. Non-zero pixels indicate the area that needs to be inpainted. + inpaintRadius (float): Radius of a circular neighborhood of each point inpainted that is considered by the algorithm. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -220,7 +220,7 @@ void ExportOpInpaint(py::module &m) m.def("inpaint_into", &InpaintInto, "dst"_a, "src"_a, "masks"_a, "inpaintRadius"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.inpaint_into(dst: nvcv.Tensor,src: nvcv.Tensor, masks:Tensor, inpaintRadius:double, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.inpaint_into(dst: nvcv.Tensor, src: nvcv.Tensor, masks: Tensor, inpaintRadius: float, stream: Optional[nvcv.cuda.Stream] = None) Executes the Inpaint operation on the given cuda stream. @@ -229,11 +229,11 @@ void ExportOpInpaint(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - masks (Tensor): Mask tensor, 8-bit 1-channel images. Non-zero pixels indicate the area that needs to be inpainted. - inpaintRadius (double): Radius of a circular neighborhood of each point inpainted that is considered by the algorithm. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + masks (nvcv.Tensor): Mask tensor, 8-bit 1-channel images. 
Non-zero pixels indicate the area that needs to be inpainted. + inpaintRadius (float): Radius of a circular neighborhood of each point inpainted that is considered by the algorithm. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -246,7 +246,7 @@ void ExportOpInpaint(py::module &m) m.def("inpaint", &InpaintVarShape, "src"_a, "masks"_a, "inpaintRadius"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.inpaint(src: nvcv.ImageBatchVarShape, masks:ImageBatchVarShape, inpaintRadius:double, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.inpaint(src: nvcv.ImageBatchVarShape, masks:ImageBatchVarShape, inpaintRadius: float, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the Inpaint operation on the given cuda stream. @@ -255,13 +255,13 @@ void ExportOpInpaint(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - masks (ImageBatchVarShape): Mask image batch, 8-bit 1-channel images. Non-zero pixels indicate the area that needs to be inpainted. - inpaintRadius (double): Radius of a circular neighborhood of each point inpainted that is considered by the algorithm. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + masks (nvcv.ImageBatchVarShape): Mask image batch, 8-bit 1-channel images. Non-zero pixels indicate the area that needs to be inpainted. + inpaintRadius (float): Radius of a circular neighborhood of each point inpainted that is considered by the algorithm. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -272,7 +272,7 @@ void ExportOpInpaint(py::module &m) "stream"_a = nullptr, R"pbdoc( - cvcuda.inpaint_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, masks:ImageBatchVarShape, inpaintRadius:double, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.inpaint_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, masks:ImageBatchVarShape, inpaintRadius: float, stream: Optional[nvcv.cuda.Stream] = None) Executes the Inpaint operation on the given cuda stream. @@ -281,11 +281,11 @@ void ExportOpInpaint(py::module &m) for more details and usage examples. Args: - dst (ImageBatchVarShape): Output image batch to store the result of the operation. - src (ImageBatchVarShape): Input image batch containing one or more images. - masks (ImageBatchVarShape): Mask image batch, 8-bit 1-channel images. Non-zero pixels indicate the area that needs to be inpainted. - inpaintRadius (double): Radius of a circular neighborhood of each point inpainted that is considered by the algorithm. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.ImageBatchVarShape): Output image batch to store the result of the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + masks (nvcv.ImageBatchVarShape): Mask image batch, 8-bit 1-channel images. Non-zero pixels indicate the area that needs to be inpainted. + inpaintRadius (float): Radius of a circular neighborhood of each point inpainted that is considered by the algorithm. 
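A sketch of the inpaint call documented above, illustrative only; masks is assumed to be an 8-bit single-channel nvcv.Tensor whose non-zero pixels mark the region to fill, as the docstring states:

    import cvcuda

    def fill_masked_regions(src, masks):
        # inpaintRadius is the neighborhood radius considered by the algorithm.
        return cvcuda.inpaint(src, masks, inpaintRadius=3.0)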
+ stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpJointBilateralFilter.cpp b/python/mod_cvcuda/OpJointBilateralFilter.cpp index 243054c4a..798e1a715 100644 --- a/python/mod_cvcuda/OpJointBilateralFilter.cpp +++ b/python/mod_cvcuda/OpJointBilateralFilter.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -113,7 +113,7 @@ void ExportOpJointBilateralFilter(py::module &m) "sigma_space"_a, "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.joint_bilateral_filter(src: nvcv.Tensor, srcColor:Tensor, diameter: int, sigma_color: float, sigma_space: float, border:NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.joint_bilateral_filter(src: nvcv.Tensor, srcColor: Tensor, diameter: int, sigma_color: float, sigma_space: float, border: cvcuda.Border = cvcuda.Border.CONSTANT, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Joint Bilateral Filter operation on the given cuda stream. @@ -122,16 +122,16 @@ void ExportOpJointBilateralFilter(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - srcColor (Tensor): Input tensor for color distance. + src (nvcv.Tensor): Input tensor containing one or more images. + srcColor (nvcv.Tensor): Input tensor for color distance. diameter (int): Bilateral filter diameter. sigma_color (float): Gaussian exponent for color difference. sigma_space (float): Gaussian exponent for position difference. - border (NVCVBorderType, optional): Texture border mode for input tensor. - stream (Stream, optional): CUDA Stream on which to perform the operation. + border (cvcuda.Border, optional): Border mode for input tensor. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -142,7 +142,7 @@ void ExportOpJointBilateralFilter(py::module &m) "sigma_color"_a, "sigma_space"_a, "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.joint_bilateral_filter_into(dst: nvcv.Tensor,src: nvcv.Tensor, srcColor:Tensor, diameter: int, sigma_color: float, sigma_space: float, border:NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.joint_bilateral_filter_into(dst: nvcv.Tensor, src: nvcv.Tensor, srcColor: Tensor, diameter: int, sigma_color: float, sigma_space: float, border: cvcuda.Border = cvcuda.Border.CONSTANT, stream: Optional[nvcv.cuda.Stream] = None) Executes the Joint Bilateral Filter operation on the given cuda stream. @@ -151,14 +151,14 @@ void ExportOpJointBilateralFilter(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - srcColor (Tensor): Input tensor for color distance. 
+ dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + srcColor (nvcv.Tensor): Input tensor for color distance. diameter (int): Bilateral filter diameter. sigma_color (float): Gaussian exponent for color difference. sigma_space (float): Gaussian exponent for position difference. - border (NVCVBorderType, optional): Texture border mode for input tensor. - stream (Stream, optional): CUDA Stream on which to perform the operation. + border (cvcuda.Border, optional): Border mode for input tensor. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -172,7 +172,7 @@ void ExportOpJointBilateralFilter(py::module &m) "sigma_space"_a, py::kw_only(), "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, "stream"_a = nullptr, R"pbdoc( - cvcuda.joint_bilateral_filter(src: nvcv.ImageBatchVarShape, srcColor:ImageBatchVarShape,*, diameter: int, sigma_color: float, sigma_space: float, border:NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.joint_bilateral_filter(src: nvcv.ImageBatchVarShape, srcColor:ImageBatchVarShape,*, diameter: int, sigma_color: float, sigma_space: float, border: cvcuda.Border = cvcuda.Border.CONSTANT, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the Joint Bilateral operation on the given cuda stream. @@ -181,15 +181,16 @@ void ExportOpJointBilateralFilter(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - srcColor (ImageBatchVarShape): Input images for color distance. - diameter (Tensor): Bilateral filter diameter per image. - sigma_color (Tensor): Gaussian exponent for color difference per image. - sigma_space (Tensor): Gaussian exponent for position difference per image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + srcColor (nvcv.ImageBatchVarShape): Input images for color distance. + diameter (nvcv.Tensor): Bilateral filter diameter per image. + sigma_color (nvcv.Tensor): Gaussian exponent for color difference per image. + sigma_space (nvcv.Tensor): Gaussian exponent for position difference per image. + border (cvcuda.Border, optional): Border mode for input tensor. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -200,7 +201,7 @@ void ExportOpJointBilateralFilter(py::module &m) "diameter"_a, "sigma_color"_a, "sigma_space"_a, py::kw_only(), "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, "stream"_a = nullptr, R"pbdoc( - cvcuda.joint_bilateral_filter_into(dst: nvcv.ImageBatchVarShape,src: nvcv.ImageBatchVarShape, srcColor:ImageBatchVarShape,*, diameter: int, sigma_color: float, sigma_space: float, border:NVCVBorderType = , stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.joint_bilateral_filter_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, srcColor:ImageBatchVarShape,*, diameter: int, sigma_color: float, sigma_space: float, border: cvcuda.Border = cvcuda.Border.CONSTANT, stream: Optional[nvcv.cuda.Stream] = None)
Executes the Joint Bilateral operation on the given cuda stream. @@ -209,12 +210,13 @@ void ExportOpJointBilateralFilter(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - diameter (Tensor): Bilateral filter diameter per image. - sigma_color (Tensor): Gaussian exponent for color difference per image. - sigma_space (Tensor): Gaussian exponent for position difference per image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + diameter (nvcv.Tensor): Bilateral filter diameter per image. + sigma_color (nvcv.Tensor): Gaussian exponent for color difference per image. + sigma_space (nvcv.Tensor): Gaussian exponent for position difference per image. + border (cvcuda.Border, optional): Border mode for input tensor. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpLabel.cpp b/python/mod_cvcuda/OpLabel.cpp index c93158acd..a82d52baa 100644 --- a/python/mod_cvcuda/OpLabel.cpp +++ b/python/mod_cvcuda/OpLabel.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -158,7 +158,7 @@ void ExportOpLabel(py::module &m) Refer to the CV-CUDA C API reference for the Label operator for more details and usage examples. Args: - src (Tensor): Input tensor to label connected-component regions. + src (nvcv.Tensor): Input tensor to label connected-component regions. connectivity (cvcuda.ConnectivityType, optional): Choice to control connectivity of input elements, default is cvcuda.CONNECTIVITY_4_2D. assign_labels (cvcuda.LABEL, optional): Choice on how labels are assigned, @@ -168,20 +168,20 @@ void ExportOpLabel(py::module &m) count (bool, optional): Use True to return the count of valid labeled regions. stats (bool, optional): Use True to return the statistics of valid labeled regions. max_labels (Number, optional): Maximum number of labels to compute statistics for, default is 10000. - bg_label (Tensor, optional): Background tensor to define input values to be considered background + bg_label (nvcv.Tensor, optional): Background tensor to define input values to be considered background labels and thus ignored. - min_thresh (Tensor, optional): Minimum threshold tensor to mask input values below it to be 0, and others 1. - max_thresh (Tensor, optional): Maximum threshold tensor to mask input values above it to be 0, and others 1. - min_size (Tensor, optional): Minimum size tensor to remove islands, i.e. labeled regions with number of + min_thresh (nvcv.Tensor, optional): Minimum threshold tensor to mask input values below it to be 0, and others 1. + max_thresh (nvcv.Tensor, optional): Maximum threshold tensor to mask input values above it to be 0, and others 1. + min_size (nvcv.Tensor, optional): Minimum size tensor to remove islands, i.e. labeled regions with number of elements less than the minimum size.
- mask (Tensor, optional): Mask tensor, its behavior is controlled by \ref mask_type. One choice is to + mask (nvcv.Tensor, optional): Mask tensor, its behavior is controlled by \ref mask_type. One choice is to control island removal in addition to \ref min_size, i.e. regions with at least one element inside the mask (non-zero values) are not removed in case mask_type is cvcuda.REMOVE_ISLANDS_OUTSIDE_MASK_ONLY. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - Tuple[Tensor, Tensor, Tensor]: A tuple wih output labels, count of regions and their statistics. + Tuple[nvcv.Tensor, nvcv.Tensor, nvcv.Tensor]: A tuple with output labels, count of regions and their statistics. The count or stats tensors may be None if theirs arguments are False. Caution: @@ -200,30 +200,30 @@ void ExportOpLabel(py::module &m) Refer to the CV-CUDA C API reference for the Label operator for more details and usage examples. Args: - dst (Tensor): Output tensor with labels. - count (Tensor, optional): Output tensor with count number of labeled regions. - stats (Tensor, optional): Output tensor with statistics for each labeled region. - src (Tensor): Input tensor to label connected-component regions. + dst (nvcv.Tensor): Output tensor with labels. + count (nvcv.Tensor, optional): Output tensor with count number of labeled regions. + stats (nvcv.Tensor, optional): Output tensor with statistics for each labeled region. + src (nvcv.Tensor): Input tensor to label connected-component regions. connectivity (cvcuda.ConnectivityType, optional): Choice to control connectivity of input elements, default is cvcuda.CONNECTIVITY_4_2D. assign_labels (cvcuda.LABEL, optional): Choice on how labels are assigned, default is cvcuda.LABEL.FAST. mask_type (cvcuda.LabelMaskType, optional): Choice on how the mask is used, default is cvcuda.REMOVE_ISLANDS_OUTSIDE_MASK_ONLY. - bg_label (Tensor, optional): Background tensor to define input values to be considered background + bg_label (nvcv.Tensor, optional): Background tensor to define input values to be considered background labels and thus ignored. - min_thresh (Tensor, optional): Minimum threshold tensor to mask input values below it to be 0, and others 1. - max_thresh (Tensor, optional): Maximum threshold tensor to mask input values above it to be 0, and others 1. - min_size (Tensor, optional): Minimum size tensor to remove islands, i.e. labeled regions with number of + min_thresh (nvcv.Tensor, optional): Minimum threshold tensor to mask input values below it to be 0, and others 1. + max_thresh (nvcv.Tensor, optional): Maximum threshold tensor to mask input values above it to be 0, and others 1. + min_size (nvcv.Tensor, optional): Minimum size tensor to remove islands, i.e. labeled regions with number of elements less than the minimum size. - mask (Tensor, optional): Mask tensor, its behavior is controlled by \ref mask_type. One choice is to + mask (nvcv.Tensor, optional): Mask tensor, its behavior is controlled by \ref mask_type. One choice is to control island removal in addition to \ref min_size, i.e. regions with at least one element inside the mask (non-zero values) are not removed in case mask_type is cvcuda.REMOVE_ISLANDS_OUTSIDE_MASK_ONLY. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. 
Returns: - Tuple[Tensor, Tensor, Tensor]: A tuple wih output labels, count of regions and their statistics. + Tuple[nvcv.Tensor, nvcv.Tensor, nvcv.Tensor]: A tuple with output labels, count of regions and their statistics. The count or stats tensors may be None if theirs arguments are None. Caution: diff --git a/python/mod_cvcuda/OpLaplacian.cpp b/python/mod_cvcuda/OpLaplacian.cpp index 3b5655837..71b4d57e8 100644 --- a/python/mod_cvcuda/OpLaplacian.cpp +++ b/python/mod_cvcuda/OpLaplacian.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -116,13 +116,13 @@ void ExportOpLaplacian(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. + src (nvcv.Tensor): Input tensor containing one or more images. ksize (int): Aperture size used to compute the second-derivative filters, it can be 1 or 3. scale (float): Scale factor for the Laplacian values (use 1 for no scale). - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -141,11 +141,11 @@ void ExportOpLaplacian(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. ksize (int): Aperture size used to compute the second-derivative filters, it can be 1 or 3. scale (float): Scale factor for the Laplacian values (use 1 for no scale). - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -167,13 +167,13 @@ void ExportOpLaplacian(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - ksize (Tensor): Aperture size used to compute the second-derivative filters, it can be 1 or 3 for each image. - scale (Tensor): Scale factor for the Laplacian values (use 1 for no scale) for each image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + ksize (nvcv.Tensor): Aperture size used to compute the second-derivative filters, it can be 1 or 3 for each image. + scale (nvcv.Tensor): Scale factor for the Laplacian values (use 1 for no scale) for each image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -192,11 +192,11 @@ void ExportOpLaplacian(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. 
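For the Label docstrings updated above, a minimal Python usage sketch follows. It assumes the operator is exposed as cvcuda.label, that the input is a single-channel NHWC uint8 mask, and that tensors are wrapped from torch via cvcuda.as_tensor as in the CV-CUDA samples; it is illustrative only.

    import cvcuda
    import torch

    # Hypothetical binary mask: one 480x640 single-channel image, NHWC layout, uint8, on the GPU.
    mask = (torch.rand((1, 480, 640, 1), device="cuda") > 0.5).to(torch.uint8)
    src = cvcuda.as_tensor(mask, "NHWC")

    # Request the region count and per-region statistics along with the label map;
    # count/stats come back as None when their flags are left False.
    labels, count, stats = cvcuda.label(src, count=True, stats=True)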
- dst (ImageBatchVarShape): Output image batch containing the result of the operation. - ksize (Tensor): Aperture size used to compute the second-derivative filters, it can be 1 or 3 for each image. - scale (Tensor): Scale factor for the Laplacian values (use 1 for no scale) for each image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + ksize (nvcv.Tensor): Aperture size used to compute the second-derivative filters, it can be 1 or 3 for each image. + scale (nvcv.Tensor): Scale factor for the Laplacian values (use 1 for no scale) for each image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpMedianBlur.cpp b/python/mod_cvcuda/OpMedianBlur.cpp index 2122945ba..993e1b2b5 100644 --- a/python/mod_cvcuda/OpMedianBlur.cpp +++ b/python/mod_cvcuda/OpMedianBlur.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -105,7 +105,7 @@ void ExportOpMedianBlur(py::module &m) m.def("median_blur", &MedianBlur, "src"_a, "ksize"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.median_blur(src: nvcv.Tensor, ksize:Tuple [int,int], stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.median_blur(src: nvcv.Tensor, ksize: Tuple[int, int], stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Median Blur operation on the given cuda stream. @@ -114,12 +114,12 @@ void ExportOpMedianBlur(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - ksize (Tuple [int,int]): Width and Height of the kernel. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + ksize (Tuple[int, int]): Width and Height of the kernel. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -129,7 +129,7 @@ void ExportOpMedianBlur(py::module &m) m.def("median_blur_into", &MedianBlurInto, "dst"_a, "src"_a, "ksize"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.median_blur_into(dst: nvcv.Tensor,src: nvcv.Tensor, ksize:Tuple [int,int], stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.median_blur_into(dst: nvcv.Tensor, src: nvcv.Tensor, ksize: Tuple[int, int], stream: Optional[nvcv.cuda.Stream] = None) Executes the Median Blur operation on the given cuda stream. @@ -138,10 +138,10 @@ void ExportOpMedianBlur(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - ksize (Tuple [int,int]): Width and Height of the kernel. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. 
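For the Laplacian bindings above, a minimal usage sketch, assuming the operator is exposed as cvcuda.laplacian and using the torch-based tensor wrapping from the CV-CUDA samples:

    import cvcuda
    import torch

    # Hypothetical grayscale batch: one 480x640 single-channel image, NHWC, uint8.
    frame = torch.zeros((1, 480, 640, 1), dtype=torch.uint8, device="cuda")
    src = cvcuda.as_tensor(frame, "NHWC")

    # 3x3 second-derivative aperture, no extra scaling of the response.
    dst = cvcuda.laplacian(src, ksize=3, scale=1.0)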
+ src (nvcv.Tensor): Input tensor containing one or more images. + ksize (Tuple[int, int]): Width and Height of the kernel. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -153,7 +153,7 @@ void ExportOpMedianBlur(py::module &m) m.def("median_blur", &VarShapeMedianBlur, "src"_a, "ksize"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.median_blur(src: nvcv.ImageBatchVarShape, ksize:Tuple [int,int], stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.median_blur(src: nvcv.ImageBatchVarShape, ksize: Tuple[int, int], stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the Median Blur operation on the given cuda stream. @@ -162,12 +162,12 @@ void ExportOpMedianBlur(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - ksize (Tensor): Width and Height of the kernel for each image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + ksize (nvcv.Tensor): Width and Height of the kernel for each image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -177,7 +177,7 @@ void ExportOpMedianBlur(py::module &m) m.def("median_blur_into", &VarShapeMedianBlurInto, "dst"_a, "src"_a, "ksize"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.median_blur_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, ksize:Tuple [int,int], stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.median_blur_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, ksize: Tuple[int, int], stream: Optional[nvcv.cuda.Stream] = None) Executes the Median Blur operation on the given cuda stream. @@ -186,10 +186,10 @@ void ExportOpMedianBlur(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - ksize (Tensor): Width and Height of the kernel for each image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + ksize (nvcv.Tensor): Width and Height of the kernel for each image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpMinAreaRect.cpp b/python/mod_cvcuda/OpMinAreaRect.cpp index 30fcb7d6b..3bc010feb 100644 --- a/python/mod_cvcuda/OpMinAreaRect.cpp +++ b/python/mod_cvcuda/OpMinAreaRect.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -84,15 +84,15 @@ void ExportOpMinAreaRect(py::module &m) for more details and usage examples. 
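The tensor-variant cvcuda.median_blur signature shown above can be exercised as in the sketch below; the torch-based tensor wrapping is an assumption and not part of the binding itself.

    import cvcuda
    import torch

    # Hypothetical RGB batch: one 480x640 image, NHWC, uint8, on the GPU.
    frame = torch.zeros((1, 480, 640, 3), dtype=torch.uint8, device="cuda")
    src = cvcuda.as_tensor(frame, "NHWC")

    # 5x5 median filter; a new tensor with the same shape and dtype is returned.
    dst = cvcuda.median_blur(src, ksize=(5, 5))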
        Args:
-            src (Tensor): Input tensor containing one or more contours.src[i,j,k] is the set of contours
+            src (nvcv.Tensor): Input tensor containing one or more contours. src[i,j,k] is the set of contours
                 where i ranges from 0 to batch-1, j ranges from 0 to max number of points in cotours
                 k is the coordinate of each points which is in [0,1]
-            numPointsInContour (Tensor): Input tensor containing the number of points in each input contours.
-            totalContours (Int) : Number of input contours
-            stream (Stream, optional): CUDA Stream on which to perform the operation.
+            numPointsInContour (nvcv.Tensor): Input tensor containing the number of points in each input contours.
+            totalContours (int): Number of input contours.
+            stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation.
        Returns:
-            cvcuda.Tensor: The output tensor of rotated bounding boxes.The output will give 4 points' cooridinate(x,y)
+            nvcv.Tensor: The output tensor of rotated bounding boxes. The output will give 4 points' coordinate(x,y)
             of each contour's minimum rotated bounding boxes
        Caution:
@@ -110,15 +110,14 @@ void ExportOpMinAreaRect(py::module &m)
            for more details and usage examples.
        Args:
-            dst (Tensor): Output tensor will give 4 points' cooridinate(x,y)
+            dst (nvcv.Tensor): Output tensor will give 4 points' coordinate(x,y)
             of each contour's minimum rotated bounding boxes
-            src (Tensor): Input tensor containing one or more contours. src[i,j,k] is the set of contours
+            src (nvcv.Tensor): Input tensor containing one or more contours. src[i,j,k] is the set of contours
                 where i ranges from 0 to batch-1, j ranges from 0 to max number of points in cotours
                 k is the coordinate of each points which is in [0,1]
-            numPointsInContour (Tensor): Input tensor containing the number of points in each input contours.
-            totalContours (Int) : Number of input contours
-            stream (Stream, optional): CUDA Stream on which to perform the operation.
-            stream (Stream, optional): CUDA Stream on which to perform the operation.
+            numPointsInContour (nvcv.Tensor): Input tensor containing the number of points in each input contours.
+            totalContours (int): Number of input contours.
+            stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation.
        Returns:
            None
diff --git a/python/mod_cvcuda/OpMinMaxLoc.cpp b/python/mod_cvcuda/OpMinMaxLoc.cpp
index eb1eaa80b..ff74eb587 100644
--- a/python/mod_cvcuda/OpMinMaxLoc.cpp
+++ b/python/mod_cvcuda/OpMinMaxLoc.cpp
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -275,12 +275,12 @@ inline std::string GetDocString(const std::string &strInto, const std::string &s if (strTensor.find("tensor") != std::string::npos) { strSrc = std::string(R"pbdoc( - src (Tensor): Input tensor to get minimum/maximum values/locations.)pbdoc"); + src (nvcv.Tensor): Input tensor to get minimum/maximum values/locations.)pbdoc"); } else if (strTensor.find("batch") != std::string::npos) { strSrc = std::string(R"pbdoc( - src (ImageBatchVarShape): Input image batch to get minimum/maximum values/locations.)pbdoc"); + src (nvcv.ImageBatchVarShape): Input image batch to get minimum/maximum values/locations.)pbdoc"); } std::string strArgs; @@ -289,16 +289,16 @@ inline std::string GetDocString(const std::string &strInto, const std::string &s if (strMinMax.find("min") != std::string::npos) { strArgs += std::string(R"pbdoc( - min_val (Tensor): Output tensor with minimum value. - min_loc (Tensor): Output tensor with minimum locations. - num_min (Tensor): Output tensor with number of minimum locations found.)pbdoc"); + min_val (nvcv.Tensor): Output tensor with minimum value. + min_loc (nvcv.Tensor): Output tensor with minimum locations. + num_min (nvcv.Tensor): Output tensor with number of minimum locations found.)pbdoc"); } if (strMinMax.find("max") != std::string::npos) { strArgs += std::string(R"pbdoc( - max_val (Tensor): Output tensor with maximum value. - max_loc (Tensor): Output tensor with maximum locations. - num_max (Tensor): Output tensor with number of maximum locations found.)pbdoc"); + max_val (nvcv.Tensor): Output tensor with maximum value. + max_loc (nvcv.Tensor): Output tensor with maximum locations. + num_max (nvcv.Tensor): Output tensor with number of maximum locations found.)pbdoc"); } strArgs += strSrc; } @@ -314,19 +314,19 @@ inline std::string GetDocString(const std::string &strInto, const std::string &s if (strMinMax.find("minimum/maximum") != std::string::npos) { strReturns = std::string(R"pbdoc( - Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: A tuple with minimum value, locations and number + Tuple[nvcv.Tensor, nvcv.Tensor, nvcv.Tensor, nvcv.Tensor, nvcv.Tensor, nvcv.Tensor]: A tuple with minimum value, locations and number of minima, and also maximum value, locations and number of maxima.)pbdoc"); } else if (strMinMax.find("min") != std::string::npos) { strReturns = std::string(R"pbdoc( - Tuple[Tensor, Tensor, Tensor]: A tuple with minimum value, locations and number + Tuple[nvcv.Tensor, nvcv.Tensor, nvcv.Tensor]: A tuple with minimum value, locations and number of minima.)pbdoc"); } else if (strMinMax.find("max") != std::string::npos) { strReturns = std::string(R"pbdoc( - Tuple[Tensor, Tensor, Tensor]: A tuple with maximum value, locations and number + Tuple[nvcv.Tensor, nvcv.Tensor, nvcv.Tensor]: A tuple with maximum value, locations and number of maxima.)pbdoc"); } @@ -341,7 +341,7 @@ inline std::string GetDocString(const std::string &strInto, const std::string &s Args:)pbdoc") + strArgs + std::string(R"pbdoc( - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. 
Returns:)pbdoc") + strReturns + std::string(R"pbdoc( diff --git a/python/mod_cvcuda/OpMorphology.cpp b/python/mod_cvcuda/OpMorphology.cpp index e3e91cc63..cf7e91a39 100644 --- a/python/mod_cvcuda/OpMorphology.cpp +++ b/python/mod_cvcuda/OpMorphology.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -140,7 +140,7 @@ void ExportOpMorphology(py::module &m) "workspace"_a = nullptr, "iteration"_a = 1, "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, "stream"_a = nullptr, R"pbdoc( - cvcuda.morphology(src : nvcv.Tensor, morphologyType: MorphologyType, maskSize: Tuple [int,int], anchor: Tuple [int,int], workspace: nvcv.Tensor, iteration: int, border: NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.morphology(src: nvcv.Tensor, morphologyType: cvcuda.MorphologyType, maskSize: Tuple[int, int], anchor: Tuple[int, int], workspace: nvcv.Tensor, iteration: int, border: cvcuda.Border = cvcuda.Border.CONSTANT, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Morphology operation on the given cuda stream. @@ -149,17 +149,18 @@ void ExportOpMorphology(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - morphologyType (MorphologyType): Type of operation to perform (Erode/Dilate). - maskSize (Tuple [int,int]): Mask width and height for morphology operation. - anchor (Tuple [int,int]): X,Y offset of kernel, use -1,-1 for center. - workspace (Tensor, optional): Workspace tensor for intermediate results, must be the same size as src. Can be omitted if operation is Dilate/Erode with iteration = 1. + src (nvcv.Tensor): Input tensor containing one or more images. + morphologyType (cvcuda.MorphologyType): Type of operation to perform (e.g. cvcuda.MorphologyType.ERODE or + cvcuda.MorphologyType.DILATE). + maskSize (Tuple[int, int]): Mask width and height for morphology operation. + anchor (Tuple[int, int]): X,Y offset of kernel, use -1,-1 for center. + workspace (nvcv.Tensor, optional): Workspace tensor for intermediate results, must be the same size as src. Can be omitted if operation is Dilate/Erode with iteration = 1. iteration (int, optional): Number of times to run the kernel. - border (NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. 
Check the C @@ -171,7 +172,7 @@ void ExportOpMorphology(py::module &m) "stream"_a = nullptr, R"pbdoc( - cvcuda.morphology_into(dst: nvcv.Tensor, src: nvcv.Tensor, morphologyType: MorphologyType, maskSize: Tuple [int,int], anchor: Tuple [int,int], workspace: nvcv.Tensor, iteration: int, border: NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.morphology_into(dst: nvcv.Tensor, src: nvcv.Tensor, morphologyType: cvcuda.MorphologyType, maskSize: Tuple[int, int], anchor: Tuple[int, int], workspace: nvcv.Tensor, iteration: int, border: cvcuda.Border = cvcuda.Border.CONSTANT, stream: Optional[nvcv.cuda.Stream] = None) Executes the Morphology operation on the given cuda stream. @@ -180,15 +181,16 @@ void ExportOpMorphology(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - morphologyType (MorphologyType): Type of operation to perform (Erode/Dilate). - maskSize (Tuple [int,int]): Mask width and height for morphology operation. - anchor (Tuple [int,int]): X,Y offset of kernel, use -1,-1 for center. - workspace (Tensor, optional): Workspace tensor for intermediate results, must be the same size as src. Can be omitted if operation is Dilate/Erode with iteration = 1. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + morphologyType (cvcuda.MorphologyType): Type of operation to perform (e.g. cvcuda.MorphologyType.ERODE or + cvcuda.MorphologyType.DILATE). + maskSize (Tuple[int, int]): Mask width and height for morphology operation. + anchor (Tuple[int, int]): X,Y offset of kernel, use -1,-1 for center. + workspace (nvcv.Tensor, optional): Workspace tensor for intermediate results, must be the same size as src. Can be omitted if operation is Dilate/Erode with iteration = 1. iteration (int, optional): Number of times to run the kernel. - border (NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -202,7 +204,7 @@ void ExportOpMorphology(py::module &m) "workspace"_a = nullptr, "iteration"_a = 1, "border"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, "stream"_a = nullptr, R"pbdoc( - cvcuda.morphology(src: nvcv.ImageBatchVarShape, morphologyType: MorphologyType, maskSize: nvcv.Tensor, anchor : nvcv.Tensor, workspace: nvcv.ImageBatchVarShape, iteration: int, border: NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.morphology(src: nvcv.ImageBatchVarShape, morphologyType: cvcuda.MorphologyType, maskSize: nvcv.Tensor, anchor: nvcv.Tensor, workspace: nvcv.ImageBatchVarShape, iteration: int, border: cvcuda.Border = cvcuda.Border.CONSTANT, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the Morphology operation on the given cuda stream. @@ -211,17 +213,18 @@ void ExportOpMorphology(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - morphologyType (MorphologyType): Type of operation to perform (Erode/Dilate). 
- maskSize (Tensor): Mask width and height for morphology operation for every image. - anchor (Tensor): X,Y offset of kernel for every image, use -1,-1 for center. - workspace (ImageBatchVarShape, optional): Workspace tensor for intermediate results, must be the same size as src. Can be omitted if operation is Dilate/Erode with iteration = 1. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + morphologyType (cvcuda.MorphologyType): Type of operation to perform (e.g. cvcuda.MorphologyType.ERODE or + cvcuda.MorphologyType.DILATE). + maskSize (nvcv.Tensor): Mask width and height for morphology operation for every image. + anchor (nvcv.Tensor): X,Y offset of kernel for every image, use -1,-1 for center. + workspace (nvcv.ImageBatchVarShape, optional): Workspace tensor for intermediate results, must be the same size as src. Can be omitted if operation is Dilate/Erode with iteration = 1. iteration (int, optional): Number of times to run the kernel. - border (NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -233,7 +236,7 @@ void ExportOpMorphology(py::module &m) "stream"_a = nullptr, R"pbdoc( - cvcuda.morphology_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, morphologyType: MorphologyType, maskSize: nvcv.Tensor, anchor : nvcv.Tensor, workspace: nvcv.ImageBatchVarShape, iteration: int, border: NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.morphology_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, morphologyType: cvcuda.MorphologyType, maskSize: nvcv.Tensor, anchor: nvcv.Tensor, workspace: nvcv.ImageBatchVarShape, iteration: int, border: cvcuda.Border = cvcuda.Border.CONSTANT, stream: Optional[nvcv.cuda.Stream] = None) Executes the Morphology operation on the given cuda stream. @@ -242,15 +245,16 @@ void ExportOpMorphology(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - morphologyType (MorphologyType): Type of operation to perform (Erode/Dilate). - maskSize (Tensor): Mask width and height for morphology operation for every image. - anchor (Tensor): X,Y offset of kernel for every image, use -1,-1 for center. - workspace (ImageBatchVarShape, optional): Workspace tensor for intermediate results, must be the same size as src. Can be omitted if operation is Dilate/Erode with iteration = 1. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + morphologyType (cvcuda.MorphologyType): Type of operation to perform (e.g. cvcuda.MorphologyType.ERODE or + cvcuda.MorphologyType.DILATE). + maskSize (nvcv.Tensor): Mask width and height for morphology operation for every image. + anchor (nvcv.Tensor): X,Y offset of kernel for every image, use -1,-1 for center. 
+ workspace (nvcv.ImageBatchVarShape, optional): Workspace tensor for intermediate results, must be the same size as src. Can be omitted if operation is Dilate/Erode with iteration = 1. iteration (int, optional): Number of times to run the kernel. - border (NVCVBorderType, optional): Border mode to be used when accessing elements outside input image. - stream (Stream, optional): CUDA Stream on which to perform the operation. + border (cvcuda.Border, optional): Border mode to be used when accessing elements outside input image. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpNonMaximumSuppression.cpp b/python/mod_cvcuda/OpNonMaximumSuppression.cpp index 2df26ca84..240dcf2e2 100644 --- a/python/mod_cvcuda/OpNonMaximumSuppression.cpp +++ b/python/mod_cvcuda/OpNonMaximumSuppression.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -84,21 +84,21 @@ void ExportOpNonMaximumSuppression(py::module &m) for more details and usage examples. Args: - src (Tensor): src[i, j] is the set of input bounding box proposals + src (nvcv.Tensor): src[i, j] is the set of input bounding box proposals for an image where i ranges from 0 to batch-1, j ranges from 0 to number of bounding box proposals anchored at the top-left of the bounding box area - scores (Tensor): scores[i, j] are the associated scores for each + scores (nvcv.Tensor): scores[i, j] are the associated scores for each bounding box proposal in ``src`` considered during the reduce operation of NMS score_threshold (float): Minimum score of a bounding box proposals iou_threshold (float): Maximum overlap between bounding box proposals covering the same effective image region as calculated by Intersection-over-Union (IoU) - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor of selected bounding boxes. + nvcv.Tensor: The output tensor of selected bounding boxes. Caution: Restrictions to several arguments may apply. Check the C @@ -118,22 +118,22 @@ void ExportOpNonMaximumSuppression(py::module &m) for more details and usage examples. 
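A minimal sketch of the tensor-variant cvcuda.morphology call documented above; per the Args description, the workspace tensor can be omitted for a single-iteration erode or dilate. The torch-based input wrapping is an assumption.

    import cvcuda
    import torch

    frame = torch.zeros((1, 480, 640, 3), dtype=torch.uint8, device="cuda")
    src = cvcuda.as_tensor(frame, "NHWC")

    # Single-iteration erode with a 3x3 mask anchored at its center (-1, -1), so no workspace is needed.
    dst = cvcuda.morphology(src, cvcuda.MorphologyType.ERODE,
                            maskSize=(3, 3), anchor=(-1, -1))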
Args: - dst (Tensor): dst[i, j] is the output boolean mask marking selected + dst (nvcv.Tensor): dst[i, j] is the output boolean mask marking selected bounding boxes, where i ranges from 0 to batch-1, j ranges from 0 to the number of bounding box proposals anchored at the top-left of the bounding box area - src (Tensor): src[i, j] is the set of input bounding box proposals + src (nvcv.Tensor): src[i, j] is the set of input bounding box proposals for an image where i ranges from 0 to batch-1, j ranges from 0 to number of bounding box proposals anchored at the top-left of the bounding box area - scores (Tensor): scores[i, j] are the associated scores for each + scores (nvcv.Tensor): scores[i, j] are the associated scores for each bounding box proposal in ``src`` considered during the reduce operation of NMS score_threshold (float): Minimum score of a bounding box proposals iou_threshold (float): Maximum overlap between bounding box proposals covering the same effective image region as calculated by Intersection-over-Union (IoU) - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpNormalize.cpp b/python/mod_cvcuda/OpNormalize.cpp index 4cdb2d392..5b092b01f 100644 --- a/python/mod_cvcuda/OpNormalize.cpp +++ b/python/mod_cvcuda/OpNormalize.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -130,7 +130,7 @@ void ExportOpNormalize(py::module &m) "globalscale"_a = defGlobalScale, "globalshift"_a = defGlobalShift, "epsilon"_a = defEpsilon, "stream"_a = nullptr, R"pbdoc( - cvcuda.normalize(src: nvcv.Tensor, base:Tensor, scale:Tensor, flags:int, globalscale: float, globalshift: float, epsilon: float, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.normalize(src: nvcv.Tensor, base: nvcv.Tensor, scale: nvcv.Tensor, flags: int, globalscale: float, globalshift: float, epsilon: float, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Normalize operation on the given cuda stream. @@ -139,19 +139,19 @@ void ExportOpNormalize(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - base (Tensor): Tensor providing base values for normalization. - scale (Tensor): Tensor providing scale values for normalization. - flags (int ,optional): Algorithm flags, use CVCUDA_NORMALIZE_SCALE_IS_STDDEV if scale passed as argument - is standard deviation instead or 0 if it is scaling. - globalscale (float ,optional): Additional scale value to be used in addition to scale. - globalshift (float ,optional): Additional bias value to be used in addition to base. - epsilon (float ,optional): Epsilon to use when CVCUDA_NORMALIZE_SCALE_IS_STDDEV flag is set as a regularizing term to be - added to variance. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + base (nvcv.Tensor): Tensor providing base values for normalization. + scale (nvcv.Tensor): Tensor providing scale values for normalization. 
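For the non-maximum-suppression bindings above, a usage sketch assuming the operator is exposed as cvcuda.nms; the box layout ([x, y, width, height] anchored at the top-left) follows the Args text, while the int16/float32 dtypes are assumptions to be checked against the C API reference.

    import cvcuda
    import torch

    # Hypothetical proposals for a batch of one image: 100 boxes plus one score per box.
    boxes = cvcuda.as_tensor(torch.zeros((1, 100, 4), dtype=torch.int16, device="cuda"))
    scores = cvcuda.as_tensor(torch.rand((1, 100), dtype=torch.float32, device="cuda"))

    # Keep boxes scoring at least 0.5 and suppress overlaps above 0.45 IoU;
    # the result marks the selected proposals.
    mask = cvcuda.nms(boxes, scores, score_threshold=0.5, iou_threshold=0.45)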
+ flags (int, optional): Algorithm flags, use cvcuda.NormalizeFlags.SCALE_IS_STDDEV if scale passed as argument + is standard deviation instead or 0 if it is scaling. + globalscale (float, optional): Additional scale value to be used in addition to scale. + globalshift (float, optional): Additional bias value to be used in addition to base. + epsilon (float, optional): Epsilon to use when cvcuda.NormalizeFlags.SCALE_IS_STDDEV flag is set as a regularizing + term to be added to variance. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -162,7 +162,7 @@ void ExportOpNormalize(py::module &m) py::kw_only(), "globalscale"_a = defGlobalScale, "globalshift"_a = defGlobalShift, "epsilon"_a = defEpsilon, "stream"_a = nullptr, R"pbdoc( - cvcuda.normalize_into(dst* Tensor, src: nvcv.Tensor, base:Tensor, scale:Tensor, flags:int, globalscale: float, globalshift: float, epsilon: float, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.normalize_into(dst: nvcv.Tensor, src: nvcv.Tensor, base: nvcv.Tensor, scale: nvcv.Tensor, flags: int, globalscale: float, globalshift: float, epsilon: float, stream: Optional[nvcv.cuda.Stream] = None) Executes the Normalize operation on the given cuda stream. @@ -171,17 +171,17 @@ void ExportOpNormalize(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - base (Tensor): Tensor providing base values for normalization. - scale (Tensor): Tensor providing scale values for normalization. - flags (int ,optional): Algorithm flags, use CVCUDA_NORMALIZE_SCALE_IS_STDDEV if scale passed as argument - is standard deviation instead or 0 if it is scaling. - globalscale (float ,optional): Additional scale value to be used in addition to scale. - globalshift (float ,optional): Additional bias value to be used in addition to base. - epsilon (float ,optional): Epsilon to use when CVCUDA_NORMALIZE_SCALE_IS_STDDEV flag is set as a regularizing term to be - added to variance. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + base (nvcv.Tensor): Tensor providing base values for normalization. + scale (nvcv.Tensor): Tensor providing scale values for normalization. + flags (int, optional): Algorithm flags, use cvcuda.NormalizeFlags.SCALE_IS_STDDEV if scale passed as argument + is standard deviation instead or 0 if it is scaling. + globalscale (float, optional): Additional scale value to be used in addition to scale. + globalshift (float, optional): Additional bias value to be used in addition to base. + epsilon (float, optional): Epsilon to use when cvcuda.NormalizeFlags.SCALE_IS_STDDEV flag is set as a regularizing + term to be added to variance. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. 
Returns: None @@ -195,7 +195,7 @@ void ExportOpNormalize(py::module &m) "globalscale"_a = defGlobalScale, "globalshift"_a = defGlobalShift, "epsilon"_a = defEpsilon, "stream"_a = nullptr, R"pbdoc( - cvcuda.normalize(src: nvcv.ImageBatchVarShape, base:Tensor, scale:Tensor, flags:int, globalscale: float, globalshift: float, epsilon: float, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.normalize(src: nvcv.ImageBatchVarShape, base: nvcv.Tensor, scale: nvcv.Tensor, flags: int, globalscale: float, globalshift: float, epsilon: float, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the Normalize operation on the given cuda stream. @@ -204,19 +204,19 @@ void ExportOpNormalize(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - base (Tensor): Tensor providing base values for normalization. - scale (Tensor): Tensor providing scale values for normalization. - flags (int ,optional): Algorithm flags, use CVCUDA_NORMALIZE_SCALE_IS_STDDEV if scale passed as argument - is standard deviation instead or 0 if it is scaling. - globalscale (float ,optional): Additional scale value to be used in addition to scale. - globalshift (float ,optional): Additional bias value to be used in addition to base. - epsilon (float ,optional): Epsilon to use when CVCUDA_NORMALIZE_SCALE_IS_STDDEV flag is set as a regularizing term to be - added to variance. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + base (nvcv.Tensor): Tensor providing base values for normalization. + scale (nvcv.Tensor): Tensor providing scale values for normalization. + flags (int, optional): Algorithm flags, use cvcuda.NormalizeFlags.SCALE_IS_STDDEV if scale passed as argument + is standard deviation instead or 0 if it is scaling. + globalscale (float, optional): Additional scale value to be used in addition to scale. + globalshift (float, optional): Additional bias value to be used in addition to base. + epsilon (float, optional): Epsilon to use when cvcuda.NormalizeFlags.SCALE_IS_STDDEV flag is set as a regularizing + term to be added to variance. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -227,7 +227,7 @@ void ExportOpNormalize(py::module &m) py::kw_only(), "globalscale"_a = defGlobalScale, "globalshift"_a = defGlobalShift, "epsilon"_a = defEpsilon, "stream"_a = nullptr, R"pbdoc( - cvcuda.normalize_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, base:Tensor, scale:Tensor, flags:int, globalscale: float, globalshift: float, epsilon: float, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.normalize_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, base: nvcv.Tensor, scale: nvcv.Tensor, flags: int, globalscale: float, globalshift: float, epsilon: float, stream: Optional[nvcv.cuda.Stream] = None) Executes the Normalize operation on the given cuda stream. @@ -236,17 +236,17 @@ void ExportOpNormalize(py::module &m) for more details and usage examples. Args: - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - src (ImageBatchVarShape): Input image batch containing one or more images. 
- base (Tensor): Tensor providing base values for normalization. - scale (Tensor): Tensor providing scale values for normalization. - flags (int ,optional): Algorithm flags, use CVCUDA_NORMALIZE_SCALE_IS_STDDEV if scale passed as argument - is standard deviation instead or 0 if it is scaling. - globalscale (float ,optional): Additional scale value to be used in addition to scale. - globalshift (float ,optional): Additional bias value to be used in addition to base. - epsilon (float ,optional): Epsilon to use when CVCUDA_NORMALIZE_SCALE_IS_STDDEV flag is set as a regularizing term to be - added to variance. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + base (nvcv.Tensor): Tensor providing base values for normalization. + scale (nvcv.Tensor): Tensor providing scale values for normalization. + flags (int, optional): Algorithm flags, use cvcuda.NormalizeFlags.SCALE_IS_STDDEV if scale passed as argument + is standard deviation instead or 0 if it is scaling. + globalscale (float, optional): Additional scale value to be used in addition to scale. + globalshift (float, optional): Additional bias value to be used in addition to base. + epsilon (float, optional): Epsilon to use when cvcuda.NormalizeFlags.SCALE_IS_STDDEV flag is set as a regularizing + term to be added to variance. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpOSD.cpp b/python/mod_cvcuda/OpOSD.cpp index fa0dcd93b..21ac9d4f2 100644 --- a/python/mod_cvcuda/OpOSD.cpp +++ b/python/mod_cvcuda/OpOSD.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -67,12 +67,12 @@ void ExportOpOSD(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - elements (NVCVElements): OSD elements in reference to the input tensor. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + elements (cvcuda.Elements): OSD elements in reference to the input tensor. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -88,10 +88,10 @@ void ExportOpOSD(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - elements (NVCVElements): OSD elements in reference to the input tensor. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + elements (cvcuda.Elements): OSD elements in reference to the input tensor. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. 
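The cvcuda.normalize signature above can be driven with per-channel mean/std tensors as sketched below; the (1, 1, 1, C) broadcast shape for base and scale and the torch wrapping are assumptions, while cvcuda.NormalizeFlags.SCALE_IS_STDDEV is the flag named in the Args text.

    import cvcuda
    import torch

    frame = torch.zeros((1, 224, 224, 3), dtype=torch.float32, device="cuda")
    src = cvcuda.as_tensor(frame, "NHWC")

    # Per-channel base (mean) and scale (std) tensors, broadcast over N, H and W.
    base = cvcuda.as_tensor(torch.tensor([[[[0.485, 0.456, 0.406]]]], device="cuda"), "NHWC")
    scale = cvcuda.as_tensor(torch.tensor([[[[0.229, 0.224, 0.225]]]], device="cuda"), "NHWC")

    # SCALE_IS_STDDEV tells the operator that `scale` holds standard deviations rather than multipliers.
    dst = cvcuda.normalize(src, base=base, scale=scale,
                           flags=cvcuda.NormalizeFlags.SCALE_IS_STDDEV)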
Returns: None diff --git a/python/mod_cvcuda/OpPadAndStack.cpp b/python/mod_cvcuda/OpPadAndStack.cpp index 295f80589..d0ecfa31a 100644 --- a/python/mod_cvcuda/OpPadAndStack.cpp +++ b/python/mod_cvcuda/OpPadAndStack.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -77,15 +77,15 @@ void ExportOpPadAndStack(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): input image batch containing one or more images. - top (Tensor): Top tensor to store amount of top padding per batch input image. - left (Tensor): Left tensor to store amount of left padding per batch input image. - border (Border): Border mode to be used when accessing elements outside input image. + src (nvcv.ImageBatchVarShape): input image batch containing one or more images. + top (nvcv.Tensor): Top tensor to store amount of top padding per batch input image. + left (nvcv.Tensor): Left tensor to store amount of left padding per batch input image. + border (cvcuda.Border): Border mode to be used when accessing elements outside input image. bvalue (float): Border value to be used for constant border mode. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -102,13 +102,13 @@ void ExportOpPadAndStack(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (ImageBatchVarShape): input image batch containing one or more images. - top (Tensor): Top tensor to store amount of top padding per batch input image. - left (Tensor): Left tensor to store amount of left padding per batch input image. - border (Border): Border mode to be used when accessing elements outside input image. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.ImageBatchVarShape): input image batch containing one or more images. + top (nvcv.Tensor): Top tensor to store amount of top padding per batch input image. + left (nvcv.Tensor): Left tensor to store amount of left padding per batch input image. + border (cvcuda.Border): Border mode to be used when accessing elements outside input image. bvalue (float): Border value to be used for constant border mode. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpPairwiseMatcher.cpp b/python/mod_cvcuda/OpPairwiseMatcher.cpp index 687570a03..2d0b6b30f 100644 --- a/python/mod_cvcuda/OpPairwiseMatcher.cpp +++ b/python/mod_cvcuda/OpPairwiseMatcher.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
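For the PadAndStack bindings above, a sketch assuming the operator is exposed as cvcuda.padandstack, that `batch` is an already-populated nvcv.ImageBatchVarShape holding four images (for example produced by a decoder), and that the per-image top/left padding tensors are int32 with one entry per image; the exact padding-tensor layout should be checked against the C API reference.

    import cvcuda
    import torch

    # One padding value per image in the hypothetical 4-image batch.
    top = cvcuda.as_tensor(torch.zeros((1, 4), dtype=torch.int32, device="cuda"))
    left = cvcuda.as_tensor(torch.zeros((1, 4), dtype=torch.int32, device="cuda"))

    # Pad each image with a constant border value and stack the results into one output tensor.
    out = cvcuda.padandstack(batch, top=top, left=left,
                             border=cvcuda.Border.CONSTANT, bvalue=0.0)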
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -137,11 +137,11 @@ void ExportOpPairwiseMatcher(py::module &m) Refer to the CV-CUDA C API reference for this operator for more details and usage examples. Args: - set1 (Tensor): Input tensor with 1st set of points. - set2 (Tensor): Input tensor with 2nd set of points. - num_set1 (Tensor, optional): Input tensor with number of valid points in the 1st set. If not provided, + set1 (nvcv.Tensor): Input tensor with 1st set of points. + set2 (nvcv.Tensor): Input tensor with 2nd set of points. + num_set1 (nvcv.Tensor, optional): Input tensor with number of valid points in the 1st set. If not provided, consider the entire set1 containing valid points. - num_set2 (Tensor, optional): Input tensor with number of valid points in the 2nd set. If not provided, + num_set2 (nvcv.Tensor, optional): Input tensor with number of valid points in the 2nd set. If not provided, consider the entire set2 containing valid points. num_matches (bool, optional): Use True to return the number of matches. If not provided, it is set to True if crossCheck=True and False otherwise. @@ -151,10 +151,10 @@ void ExportOpPairwiseMatcher(py::module &m) matches_per_point (Number, optional): Number of best matches to return per point. norm_type (cvcuda.Norm, optional): Choice on how distances are normalized. Defaults to cvcuda.Norm.L2. algo_choice (cvcuda.Matcher, optional): Choice of the algorithm to perform the match. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - Tuple[Tensor, Tensor, Tensor]: A tuple wih output matches, number of matches and their distances. + Tuple[nvcv.Tensor, nvcv.Tensor, nvcv.Tensor]: A tuple with output matches, number of matches and their distances. The number of matches tensor may be None if its argument is False. The distances tensor may be None if its argument is False. @@ -173,24 +173,24 @@ void ExportOpPairwiseMatcher(py::module &m) Refer to the CV-CUDA C API reference for this operator for more details and usage examples. Args: - matches (Tensor): Output tensor with matches. - num_matches (Tensor, optional): Output tensor with number of matches. - distances (Tensor, optional): Output tensor with match distances. - set1 (Tensor): Input tensor with 1st set of points. - set2 (Tensor): Input tensor with 2nd set of points. - num_set1 (Tensor, optional): Input tensor with number of valid points in the 1st set. If not provided, + matches (nvcv.Tensor): Output tensor with matches. + num_matches (nvcv.Tensor, optional): Output tensor with number of matches. + distances (nvcv.Tensor, optional): Output tensor with match distances. + set1 (nvcv.Tensor): Input tensor with 1st set of points. + set2 (nvcv.Tensor): Input tensor with 2nd set of points. + num_set1 (nvcv.Tensor, optional): Input tensor with number of valid points in the 1st set. If not provided, consider the entire set1 containing valid points. - num_set2 (Tensor, optional): Input tensor with number of valid points in the 2nd set. If not provided, + num_set2 (nvcv.Tensor, optional): Input tensor with number of valid points in the 2nd set. If not provided, consider the entire set2 containing valid points. cross_check (bool, optional): Use True to cross check best matches, a best match is only returned if it is the best match (minimum distance) from 1st set to 2nd set and vice versa. 
matches_per_point (Number, optional): Number of best matches to return per point. norm_type (cvcuda.Norm, optional): Choice on how distances are normalized. Defaults to cvcuda.Norm.L2. algo_choice (cvcuda.Matcher, optional): Choice of the algorithm to perform the match. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - Tuple[Tensor, Tensor, Tensor]: A tuple wih output matches, number of matches and their distances. + Tuple[nvcv.Tensor, nvcv.Tensor, nvcv.Tensor]: A tuple with output matches, number of matches and their distances. The number of matches tensor may be None if its argument is None. The distances tensor may be None if its argument is None. diff --git a/python/mod_cvcuda/OpPillowResize.cpp b/python/mod_cvcuda/OpPillowResize.cpp index c66231248..b940b5f23 100644 --- a/python/mod_cvcuda/OpPillowResize.cpp +++ b/python/mod_cvcuda/OpPillowResize.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,8 +22,8 @@ #include #include #include +#include #include -#include #include #include #include @@ -265,7 +265,7 @@ void ExportOpPillowResize(py::module &m) m.def("pillowresize", &PillowResize, "src"_a, "shape"_a, "format"_a, "interp"_a = NVCV_INTERP_LINEAR, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.pillowresize(src: nvcv.Tensor, shape:Shape, format:ImageFormat, interp: Interp = < NVCV_INTERP_LINEAR >, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.pillowresize(src: nvcv.Tensor, shape:Shape, format:ImageFormat, interp: Interp = cvcuda.Interp.LINEAR, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Pillow Resize operation on the given cuda stream. @@ -274,14 +274,14 @@ void ExportOpPillowResize(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - shape (Shape): Shape of the output image. - format (ImageFormat): Format of the input and output images. - interp(Interp): Interpolation type used for transform. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + shape (tuple): Shape of the output image. + format (nvcv.Format): Format of the input and output images. + interp (cvcuda.Interp, optional): Interpolation type used for transform. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. 
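A usage sketch for the pairwise-matcher bindings above, assuming the operator is exposed as cvcuda.match and that the point sets are batches of fixed-length float descriptors (shapes and dtype are assumptions); cross-checking and the default cvcuda.Norm.L2 distance follow the Args text.

    import cvcuda
    import torch

    # Hypothetical 128-dimensional descriptors: 100 points in the first set, 80 in the second.
    set1 = cvcuda.as_tensor(torch.rand((1, 100, 128), device="cuda"))
    set2 = cvcuda.as_tensor(torch.rand((1, 80, 128), device="cuda"))

    # cross_check=True keeps only mutual best matches; per the docstring, num_matches then
    # defaults to True, and the distances entry may be None unless explicitly requested.
    matches, num_matches, distances = cvcuda.match(set1, set2, cross_check=True)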
Check the C @@ -291,7 +291,7 @@ void ExportOpPillowResize(py::module &m) m.def("pillowresize_into", &PillowResizeInto, "dst"_a, "src"_a, "format"_a, "interp"_a = NVCV_INTERP_LINEAR, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.pillowresize_into(dst: nvcv.Tensor, src: nvcv.Tensor, shape:Shape, format:ImageFormat, interp: Interp = < NVCV_INTERP_LINEAR >, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.pillowresize_into(dst: nvcv.Tensor, src: nvcv.Tensor, shape: Tuple[int], format: nvcv.Format, interp: Interp = cvcuda.Interp.LINEAR, stream: Optional[nvcv.cuda.Stream] = None) Executes the Pillow Resize operation on the given cuda stream. @@ -300,12 +300,12 @@ void ExportOpPillowResize(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - shape (Shape): Shape of the output image. - format (ImageFormat): Format of the input and output images. - interp(Interp): Interpolation type used for transform. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + shape (tuple): Shape of the output image. + format (nvcv.Format): Format of the input and output images. + interp (cvcuda.Interp, optional): Interpolation type used for transform. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -318,7 +318,7 @@ void ExportOpPillowResize(py::module &m) m.def("pillowresize", &VarShapePillowResize, "src"_a, "sizes"_a, "interp"_a = NVCV_INTERP_LINEAR, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.pillowresize(src: nvcv.ImageBatchVarShape, shape:Shape, format:ImageFormat, interp: Interp = < NVCV_INTERP_LINEAR >, stream: Optional[nvcv.cuda.Stream] = None) ->ImageBatchVarShape + cvcuda.pillowresize(src: nvcv.ImageBatchVarShape, shape: Tuple[int], format: nvcv.Format, interp: Interp = cvcuda.Interp.LINEAR, stream: Optional[nvcv.cuda.Stream] = None) ->ImageBatchVarShape Executes the Pillow Resize operation on the given cuda stream. @@ -327,13 +327,13 @@ void ExportOpPillowResize(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - sizes (Tuple vector): Shapes of output images. - interp(Interp): Interpolation type used for transform. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + sizes (Tuple[int]): Shapes of output images. + interp (cvcuda.Interp, optional): Interpolation type used for transform. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. 
Check the C @@ -343,7 +343,7 @@ void ExportOpPillowResize(py::module &m) m.def("pillowresize_into", &VarShapePillowResizeInto, "dst"_a, "src"_a, "interp"_a = NVCV_INTERP_LINEAR, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.pillowresize(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, shape:Shape, format:ImageFormat, interp: Interp = < NVCV_INTERP_LINEAR >, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.pillowresize(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, shape: Tuple[int], format: nvcv.Format, interp: cvcuda.Interp = cvcuda.Interp.LINEAR, stream: Optional[nvcv.cuda.Stream] = None) Executes the Pillow Resize operation on the given cuda stream. @@ -352,11 +352,10 @@ void ExportOpPillowResize(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - sizes (Tuple vector): Shapes of output images. - interp(Interp): Interpolation type used for transform. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + interp (cvcuda.Interp, optional): Interpolation type used for transform. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpRandomResizedCrop.cpp b/python/mod_cvcuda/OpRandomResizedCrop.cpp index da428a656..3245bf84a 100644 --- a/python/mod_cvcuda/OpRandomResizedCrop.cpp +++ b/python/mod_cvcuda/OpRandomResizedCrop.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -121,7 +121,7 @@ void ExportOpRandomResizedCrop(py::module &m) py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.random_resized_crop(src: nvcv.Tensor, shape:Tuple, min_scale:double, max_scale:double, min_ratio:double, max_ratio:double, interp: Interp = < NVCV_INTERP_LINEAR >, seed: int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.random_resized_crop(src: nvcv.Tensor, shape: Tuple, min_scale: double, max_scale: double, min_ratio: double, max_ratio: double, interp: Interp = cvcuda.Interp.LINEAR, seed: int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the RandomResizedCrop operation on the given cuda stream. @@ -130,18 +130,18 @@ void ExportOpRandomResizedCrop(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. + src (nvcv.Tensor): Input tensor containing one or more images. shape (Tuple): Shape of output tensor. min_scale (double, optional): Lower bound for the random area of the crop, before resizing. The scale is defined with respect to the area of the original image. max_scale (double, optional): Upper bound for the random area of the crop, before resizing. The scale is defined with respect to the area of the original image. min_ratio (double, optional): Lower bound for the random aspect ratio of the crop, before resizing. max_ratio (double, optional): Upper bound for the random aspect ratio of the crop, before resizing. 
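The tensor-variant cvcuda.pillowresize signature above takes an output shape and an image format; the sketch below assumes `shape` is the full NHWC output shape and uses nvcv.Format.RGB8, with the usual torch-based wrapping as an assumption.

    import cvcuda
    import nvcv
    import torch

    frame = torch.zeros((1, 480, 640, 3), dtype=torch.uint8, device="cuda")
    src = cvcuda.as_tensor(frame, "NHWC")

    # Pillow-equivalent resize of the batch to 224x224 RGB8 with linear interpolation.
    dst = cvcuda.pillowresize(src, (1, 224, 224, 3), nvcv.Format.RGB8,
                              interp=cvcuda.Interp.LINEAR)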
- interp (Interp, optional): Interpolation type used for transform. + interp (cvcuda.Interp, optional): Interpolation type used for transform. seed (int, optional): Random seed, should be unsigned int32. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -152,7 +152,7 @@ void ExportOpRandomResizedCrop(py::module &m) "max_scale"_a = 1.0, "min_ratio"_a = 0.75, "max_ratio"_a = 1.3333333333333333, "interp"_a = NVCV_INTERP_LINEAR, "seed"_a = 0, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.random_resized_crop_into(dst: nvcv.Tensor,src: nvcv.Tensor, shape:Tuple, min_scale:double, max_scale:double, min_ratio:double, max_ratio:double, interp: Interp = < NVCV_INTERP_LINEAR >, seed: int, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.random_resized_crop_into(dst: nvcv.Tensor, src: nvcv.Tensor, shape: Tuple, min_scale: double, max_scale: double, min_ratio: double, max_ratio: double, interp: Interp = cvcuda.Interp.LINEAR, seed: int, stream: Optional[nvcv.cuda.Stream] = None) Executes the RandomResizedCrop operation on the given cuda stream. @@ -161,15 +161,15 @@ void ExportOpRandomResizedCrop(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. min_scale (double, optional): Lower bound for the random area of the crop, before resizing. The scale is defined with respect to the area of the original image. max_scale (double, optional): Upper bound for the random area of the crop, before resizing. The scale is defined with respect to the area of the original image. min_ratio (double, optional): Lower bound for the random aspect ratio of the crop, before resizing. max_ratio (double, optional): Upper bound for the random aspect ratio of the crop, before resizing. - interp (Interp, optional): Interpolation type used for transform. + interp (cvcuda.Interp, optional): Interpolation type used for transform. seed (int, optional): Random seed, should be unsigned int32. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -183,7 +183,7 @@ void ExportOpRandomResizedCrop(py::module &m) "max_scale"_a = 1.0, "min_ratio"_a = 0.75, "max_ratio"_a = 1.3333333333333333, "interp"_a = NVCV_INTERP_LINEAR, "seed"_a = 0, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.random_resized_crop(src: nvcv.ImageBatchVarShape, shape:Tuple, min_scale:double, max_scale:double, min_ratio:double, max_ratio:double, interp: Interp = < NVCV_INTERP_LINEAR >, seed: int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.random_resized_crop(src: nvcv.ImageBatchVarShape, shape: Tuple, min_scale: double, max_scale: double, min_ratio: double, max_ratio: double, interp: Interp = cvcuda.Interp.LINEAR, seed: int, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the RandomResizedCrop operation on the given cuda stream. @@ -192,18 +192,18 @@ void ExportOpRandomResizedCrop(py::module &m) for more details and usage examples. 
Args:
- src (ImageBatchVarShape): Input image batch containing one or more images.
+ src (nvcv.ImageBatchVarShape): Input image batch containing one or more images.
 sizes (Tuple vector): Shapes of output images.
 min_scale (double, optional): Lower bound for the random area of the crop, before resizing.
 The scale is defined with respect to the area of the original image.
 max_scale (double, optional): Upper bound for the random area of the crop, before resizing.
 The scale is defined with respect to the area of the original image.
 min_ratio (double, optional): Lower bound for the random aspect ratio of the crop, before resizing.
 max_ratio (double, optional): Upper bound for the random aspect ratio of the crop, before resizing.
- interp (Interp, optional): Interpolation type used for transform.
+ interp (cvcuda.Interp, optional): Interpolation type used for transform.
 seed (int, optional): Random seed, should be unsigned int32.
- stream (Stream, optional): CUDA Stream on which to perform the operation.
+ stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation.
 Returns:
- cvcuda.Tensor: The output tensor.
+ nvcv.ImageBatchVarShape: The output image batch.
 Caution:
 Restrictions to several arguments may apply. Check the C
@@ -214,7 +214,7 @@ void ExportOpRandomResizedCrop(py::module &m)
 "max_scale"_a = 1.0, "min_ratio"_a = 0.75, "max_ratio"_a = 1.3333333333333333, "interp"_a = NVCV_INTERP_LINEAR,
 "seed"_a = 0, py::kw_only(), "stream"_a = nullptr, R"pbdoc(
- cvcuda.random_resized_crop_into(dst: nvcv.ImageBatchVarShape,src: nvcv.ImageBatchVarShape, shape:Tuple, min_scale:double, max_scale:double, min_ratio:double, max_ratio:double, interp: Interp = < NVCV_INTERP_LINEAR >, seed: int, stream: Optional[nvcv.cuda.Stream] = None)
+ cvcuda.random_resized_crop_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, shape: Tuple, min_scale: double, max_scale: double, min_ratio: double, max_ratio: double, interp: Interp = cvcuda.Interp.LINEAR, seed: int, stream: Optional[nvcv.cuda.Stream] = None)
 Executes the RandomResizedCrop operation on the given cuda stream.
@@ -223,15 +223,15 @@ void ExportOpRandomResizedCrop(py::module &m)
 for more details and usage examples.
 Args:
- src (ImageBatchVarShape): Input image batch containing one or more images.
- dst (ImageBatchVarShape): Output image batch containing the result of the operation.
+ src (nvcv.ImageBatchVarShape): Input image batch containing one or more images.
+ dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation.
 min_scale (double, optional): Lower bound for the random area of the crop, before resizing.
 The scale is defined with respect to the area of the original image.
 max_scale (double, optional): Upper bound for the random area of the crop, before resizing.
 The scale is defined with respect to the area of the original image.
 min_ratio (double, optional): Lower bound for the random aspect ratio of the crop, before resizing.
 max_ratio (double, optional): Upper bound for the random aspect ratio of the crop, before resizing.
- interp (Interp, optional): Interpolation type used for transform.
+ interp (cvcuda.Interp, optional): Interpolation type used for transform.
 seed (int, optional): Random seed, should be unsigned int32.
- stream (Stream, optional): CUDA Stream on which to perform the operation.
+ stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation.
Returns:
 None
diff --git a/python/mod_cvcuda/OpReformat.cpp b/python/mod_cvcuda/OpReformat.cpp
index 227ba0a0f..aa33fe5ac 100644
--- a/python/mod_cvcuda/OpReformat.cpp
+++ b/python/mod_cvcuda/OpReformat.cpp
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
@@ -69,12 +69,12 @@ void ExportOpReformat(py::module &m)
 for more details and usage examples.
 Args:
- src (Tensor): Input tensor containing one or more images.
- layout (TensorLayout): The tensor layout of the output Tensor ((N)CHW/(N)HWC).
- stream (Stream, optional): CUDA Stream on which to perform the operation.
+ src (nvcv.Tensor): Input tensor containing one or more images.
+ layout (nvcv.TensorLayout): The tensor layout of the output tensor ((N)CHW/(N)HWC).
+ stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation.
 Returns:
- cvcuda.Tensor: The output tensor.
+ nvcv.Tensor: The output tensor.
 Caution:
 Restrictions to several arguments may apply. Check the C
@@ -90,9 +90,9 @@ void ExportOpReformat(py::module &m)
 for more details and usage examples.
 Args:
- dst (Tensor): Output tensor to store the result of the operation.
- src (Tensor): Input tensor containing one or more images.
- stream (Stream, optional): CUDA Stream on which to perform the operation.
+ dst (nvcv.Tensor): Output tensor to store the result of the operation.
+ src (nvcv.Tensor): Input tensor containing one or more images.
+ stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation.
 Returns:
 None
diff --git a/python/mod_cvcuda/OpRemap.cpp b/python/mod_cvcuda/OpRemap.cpp
index 3ad42fca7..875333421 100644
--- a/python/mod_cvcuda/OpRemap.cpp
+++ b/python/mod_cvcuda/OpRemap.cpp
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
@@ -164,17 +164,186 @@ void ExportOpRemap(py::module &m)
 m.def("remap", &Remap, "src"_a, "map"_a, "src_interp"_a = NVCV_INTERP_NEAREST, "map_interp"_a = NVCV_INTERP_NEAREST,
 "map_type"_a = NVCV_REMAP_ABSOLUTE, "align_corners"_a = false, "border"_a = NVCV_BORDER_CONSTANT,
- "border_value"_a = pyarray{}, py::kw_only(), "stream"_a = nullptr);
+ "border_value"_a = pyarray{}, py::kw_only(), "stream"_a = nullptr, R"pbdoc(
+
+ cvcuda.remap(src: nvcv.Tensor, map: nvcv.Tensor, src_interp: cvcuda.Interp = cvcuda.Interp.NEAREST, map_interp: cvcuda.Interp = cvcuda.Interp.NEAREST, map_type: cvcuda.Remap = cvcuda.Remap.ABSOLUTE, align_corners: bool = False, border: cvcuda.Border = cvcuda.Border.CONSTANT, border_value: numpy.ndarray = np.ndarray((0,)), stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor
+
+ Executes the Remap operation on the given cuda stream.
+
+ See also:
+ Refer to the CV-CUDA C API reference for the Remap operator for more details and usage
+ examples.
+
+ Args:
+ src (nvcv.Tensor): Input tensor.
+ src_interp (cvcuda.Interp, optional): Interpolation type used when fetching values
+ from the source image.
+ map_interp (cvcuda.Interp, optional): Interpolation type used when fetching values
+ from the map tensor.
+ map_type (cvcuda.Remap, optional): This determines how the values inside the map are
+ interpreted. If it is cvcuda.Remap.ABSOLUTE the map values are absolute,
+ denormalized positions in the input tensor to fetch values from. If it is
+ cvcuda.Remap.ABSOLUTE_NORMALIZED the map values are absolute, normalized
+ positions in [-1, 1] range to fetch values from the input tensor in a resolution
+ agnostic way. If it is cvcuda.Remap.RELATIVE_NORMALIZED the map values are
+ relative, normalized offsets to be applied to each output position to fetch values
+ from the input tensor, also resolution agnostic.
+ align_corners (bool, optional): The remap operation from output to input via the map
+ is done in the floating-point domain. If ``True``, they are aligned by the center
+ points of their corner pixels. Otherwise, they are aligned by the corner points of
+ their corner pixels.
+ border (cvcuda.Border, optional): pixel extrapolation method (cvcuda.Border.CONSTANT,
+ cvcuda.Border.REPLICATE, cvcuda.Border.REFLECT, cvcuda.Border.REFLECT_101, or
+ cvcuda.Border.WRAP).
+ border_value (numpy.ndarray, optional): Used to specify values for a constant border,
+ should have size <= 4 and dim of 1, where the values specify the border color for
+ each color channel.
+ stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation.
+
+ Returns:
+ nvcv.Tensor: The output tensor.
+
+ Caution:
+ Restrictions to several arguments may apply. Check the C API references of the CV-CUDA
+ operator.
+ )pbdoc");
 m.def("remap_into", &RemapInto, "dst"_a, "src"_a, "map"_a, "src_interp"_a = NVCV_INTERP_NEAREST,
 "map_interp"_a = NVCV_INTERP_NEAREST, "map_type"_a = NVCV_REMAP_ABSOLUTE, "align_corners"_a = false,
- "border"_a = NVCV_BORDER_CONSTANT, "border_value"_a = pyarray{}, py::kw_only(), "stream"_a = nullptr);
-
+ "border"_a = NVCV_BORDER_CONSTANT, "border_value"_a = pyarray{}, py::kw_only(), "stream"_a = nullptr, R"pbdoc(
+
+ cvcuda.remap_into(dst: nvcv.Tensor, src: nvcv.Tensor, map: nvcv.Tensor, src_interp: cvcuda.Interp = cvcuda.Interp.NEAREST, map_interp: cvcuda.Interp = cvcuda.Interp.NEAREST, map_type: cvcuda.Remap = cvcuda.Remap.ABSOLUTE, align_corners: bool = False, border: cvcuda.Border = cvcuda.Border.CONSTANT, border_value: numpy.ndarray = np.ndarray((0,)), stream: Optional[nvcv.cuda.Stream] = None)
+
+ Executes the Remap operation on the given cuda stream.
+
+ See also:
+ Refer to the CV-CUDA C API reference for the Remap operator for more details and usage
+ examples.
+
+ Args:
+ dst (nvcv.Tensor): Output tensor.
+ src (nvcv.Tensor): Input tensor.
+ src_interp (cvcuda.Interp, optional): Interpolation type used when fetching values
+ from the source image.
+ map_interp (cvcuda.Interp, optional): Interpolation type used when fetching values
+ from the map tensor.
+ map_type (cvcuda.Remap, optional): This determines how the values inside the map are
+ interpreted. If it is cvcuda.Remap.ABSOLUTE the map values are absolute,
+ denormalized positions in the input tensor to fetch values from. If it is
+ cvcuda.Remap.ABSOLUTE_NORMALIZED the map values are absolute, normalized
+ positions in [-1, 1] range to fetch values from the input tensor in a resolution
+ agnostic way. If it is cvcuda.Remap.RELATIVE_NORMALIZED the map values are
+ relative, normalized offsets to be applied to each output position to fetch values
+ from the input tensor, also resolution agnostic.
+ align_corners (bool, optional): The remap operation from output to input via the map
+ is done in the floating-point domain. If ``True``, they are aligned by the center
+ points of their corner pixels. Otherwise, they are aligned by the corner points of
+ their corner pixels.
+ border (cvcuda.Border, optional): pixel extrapolation method (cvcuda.Border.CONSTANT,
+ cvcuda.Border.REPLICATE, cvcuda.Border.REFLECT, cvcuda.Border.REFLECT_101, or
+ cvcuda.Border.WRAP).
+ border_value (numpy.ndarray, optional): Used to specify values for a constant border,
+ should have size <= 4 and dim of 1, where the values specify the border color for
+ each color channel.
+ stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation.
+
+ Returns:
+ None
+
+ Caution:
+ Restrictions to several arguments may apply. Check the C API references of the CV-CUDA
+ operator.
+ )pbdoc");
 m.def("remap", &VarShapeRemap, "src"_a, "map"_a, "src_interp"_a = NVCV_INTERP_NEAREST,
 "map_interp"_a = NVCV_INTERP_NEAREST, "map_type"_a = NVCV_REMAP_ABSOLUTE, "align_corners"_a = false,
- "border"_a = NVCV_BORDER_CONSTANT, "border_value"_a = pyarray{}, py::kw_only(), "stream"_a = nullptr);
+ "border"_a = NVCV_BORDER_CONSTANT, "border_value"_a = pyarray{}, py::kw_only(), "stream"_a = nullptr, R"pbdoc(
+
+ cvcuda.remap(src: nvcv.ImageBatchVarShape, map: nvcv.Tensor, src_interp: cvcuda.Interp = cvcuda.Interp.NEAREST, map_interp: cvcuda.Interp = cvcuda.Interp.NEAREST, map_type: cvcuda.Remap = cvcuda.Remap.ABSOLUTE, align_corners: bool = False, border: cvcuda.Border = cvcuda.Border.CONSTANT, border_value: numpy.ndarray = np.ndarray((0,)), stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape
+
+ Executes the Remap operation on the given cuda stream.
+
+ See also:
+ Refer to the CV-CUDA C API reference for the Remap operator for more details and usage
+ examples.
+
+ Args:
+ src (nvcv.ImageBatchVarShape): Input image batch.
+ src_interp (cvcuda.Interp, optional): Interpolation type used when fetching values
+ from the source image.
+ map_interp (cvcuda.Interp, optional): Interpolation type used when fetching values
+ from the map tensor.
+ map_type (cvcuda.Remap, optional): This determines how the values inside the map are
+ interpreted. If it is cvcuda.Remap.ABSOLUTE the map values are absolute,
+ denormalized positions in the input tensor to fetch values from. If it is
+ cvcuda.Remap.ABSOLUTE_NORMALIZED the map values are absolute, normalized
+ positions in [-1, 1] range to fetch values from the input tensor in a resolution
+ agnostic way. If it is cvcuda.Remap.RELATIVE_NORMALIZED the map values are
+ relative, normalized offsets to be applied to each output position to fetch values
+ from the input tensor, also resolution agnostic.
+ align_corners (bool, optional): The remap operation from output to input via the map
+ is done in the floating-point domain. If ``True``, they are aligned by the center
+ points of their corner pixels. Otherwise, they are aligned by the corner points of
+ their corner pixels.
+ border (cvcuda.Border, optional): pixel extrapolation method (cvcuda.Border.CONSTANT,
+ cvcuda.Border.REPLICATE, cvcuda.Border.REFLECT, cvcuda.Border.REFLECT_101, or
+ cvcuda.Border.WRAP).
+ border_value (numpy.ndarray, optional): Used to specify values for a constant border,
+ should have size <= 4 and dim of 1, where the values specify the border color for
+ each color channel.
+ stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation.
+
+ Returns:
+ nvcv.ImageBatchVarShape: The output image batch.
+
+ Caution:
+ Restrictions to several arguments may apply. Check the C API references of the CV-CUDA
+ operator.
+ )pbdoc");
 m.def("remap_into", &VarShapeRemapInto, "dst"_a, "src"_a, "map"_a, "src_interp"_a = NVCV_INTERP_NEAREST,
 "map_interp"_a = NVCV_INTERP_NEAREST, "map_type"_a = NVCV_REMAP_ABSOLUTE, "align_corners"_a = false,
- "border"_a = NVCV_BORDER_CONSTANT, "border_value"_a = pyarray{}, py::kw_only(), "stream"_a = nullptr);
+ "border"_a = NVCV_BORDER_CONSTANT, "border_value"_a = pyarray{}, py::kw_only(), "stream"_a = nullptr, R"pbdoc(
+
+ cvcuda.remap_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, map: nvcv.Tensor, src_interp: cvcuda.Interp = cvcuda.Interp.NEAREST, map_interp: cvcuda.Interp = cvcuda.Interp.NEAREST, map_type: cvcuda.Remap = cvcuda.Remap.ABSOLUTE, align_corners: bool = False, border: cvcuda.Border = cvcuda.Border.CONSTANT, border_value: numpy.ndarray = np.ndarray((0,)), stream: Optional[nvcv.cuda.Stream] = None)
+
+ Executes the Remap operation on the given cuda stream.
+
+ See also:
+ Refer to the CV-CUDA C API reference for the Remap operator for more details and usage
+ examples.
+
+ Args:
+ dst (nvcv.ImageBatchVarShape): Output image batch.
+ src (nvcv.ImageBatchVarShape): Input image batch.
+ src_interp (cvcuda.Interp, optional): Interpolation type used when fetching values
+ from the source image.
+ map_interp (cvcuda.Interp, optional): Interpolation type used when fetching values
+ from the map tensor.
+ map_type (cvcuda.Remap, optional): This determines how the values inside the map are
+ interpreted. If it is cvcuda.Remap.ABSOLUTE the map values are absolute,
+ denormalized positions in the input tensor to fetch values from. If it is
+ cvcuda.Remap.ABSOLUTE_NORMALIZED the map values are absolute, normalized
+ positions in [-1, 1] range to fetch values from the input tensor in a resolution
+ agnostic way. If it is cvcuda.Remap.RELATIVE_NORMALIZED the map values are
+ relative, normalized offsets to be applied to each output position to fetch values
+ from the input tensor, also resolution agnostic.
+ align_corners (bool, optional): The remap operation from output to input via the map
+ is done in the floating-point domain. If ``True``, they are aligned by the center
+ points of their corner pixels. Otherwise, they are aligned by the corner points of
+ their corner pixels.
+ border (cvcuda.Border, optional): pixel extrapolation method (cvcuda.Border.CONSTANT,
+ cvcuda.Border.REPLICATE, cvcuda.Border.REFLECT, cvcuda.Border.REFLECT_101, or
+ cvcuda.Border.WRAP).
+ border_value (numpy.ndarray, optional): Used to specify values for a constant border,
+ should have size <= 4 and dim of 1, where the values specify the border color for
+ each color channel.
+ stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation.
+
+ Returns:
+ None
+
+ Caution:
+ Restrictions to several arguments may apply. Check the C API references of the CV-CUDA
+ operator.
+ )pbdoc"); } } // namespace cvcudapy diff --git a/python/mod_cvcuda/OpResize.cpp b/python/mod_cvcuda/OpResize.cpp index a8e41fab7..b4cd35114 100644 --- a/python/mod_cvcuda/OpResize.cpp +++ b/python/mod_cvcuda/OpResize.cpp @@ -108,7 +108,7 @@ void ExportOpResize(py::module &m) m.def("resize", &Resize, "src"_a, "shape"_a, "interp"_a = NVCV_INTERP_LINEAR, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.resize(src: nvcv.Tensor, shape:Tuple, interp: Interp = < NVCV_INTERP_LINEAR >, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.resize(src: nvcv.Tensor, shape: Tuple[int], interp: cvcuda.Interp = cvcuda.Interp.LINEAR, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Resize operation on the given cuda stream. @@ -117,13 +117,13 @@ void ExportOpResize(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - shape (Tuple): Shape of output tensor. - interp(Interp): Interpolation type used for transform. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + shape (Tuple[int]): Shape of output tensor. + interp (cvcuda.Interp, optional): Interpolation type used for transform. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -133,7 +133,7 @@ void ExportOpResize(py::module &m) m.def("resize_into", &ResizeInto, "dst"_a, "src"_a, "interp"_a = NVCV_INTERP_LINEAR, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.resize_into(dst: nvcv.Tensor, src: nvcv.Tensor, shape:Tuple, interp: Interp = < NVCV_INTERP_LINEAR >, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.resize_into(dst: nvcv.Tensor, src: nvcv.Tensor, interp: cvcuda.Interp = cvcuda.Interp.LINEAR, stream: Optional[nvcv.cuda.Stream] = None) Executes the Resize operation on the given cuda stream. @@ -142,10 +142,10 @@ void ExportOpResize(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - interp(Interp): Interpolation type used for transform. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + interp (cvcuda.Interp, optional): Interpolation type used for transform. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -158,7 +158,7 @@ void ExportOpResize(py::module &m) m.def("resize", &ResizeVarShape, "src"_a, "sizes"_a, "interp"_a = NVCV_INTERP_LINEAR, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.resize(src: nvcv.ImageBatchVarShape, shape:Tuple, interp: Interp = < NVCV_INTERP_LINEAR >, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.resize(src: nvcv.ImageBatchVarShape, interp: cvcuda.Interp = cvcuda.Interp.LINEAR, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the Resize operation on the given cuda stream. @@ -167,13 +167,13 @@ void ExportOpResize(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. 
+ src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. sizes (Tuple vector): Shapes of output images. - interp(Interp): Interpolation type used for transform. - stream (Stream, optional): CUDA Stream on which to perform the operation. + interp (cvcuda.Interp, optional): Interpolation type used for transform. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -183,7 +183,7 @@ void ExportOpResize(py::module &m) m.def("resize_into", &ResizeVarShapeInto, "dst"_a, "src"_a, "interp"_a = NVCV_INTERP_LINEAR, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.resize_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, shape:Tuple, interp: Interp = < NVCV_INTERP_LINEAR >, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.resize_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, interp: cvcuda.Interp = cvcuda.Interp.LINEAR, stream: Optional[nvcv.cuda.Stream] = None) Executes the Resize operation on the given cuda stream. @@ -192,10 +192,10 @@ void ExportOpResize(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - interp(Interp): Interpolation type used for transform. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + interp (cvcuda.Interp, optional): Interpolation type used for transform. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpResizeCropConvertReformat.cpp b/python/mod_cvcuda/OpResizeCropConvertReformat.cpp index 3dcb22f34..b498a3be3 100644 --- a/python/mod_cvcuda/OpResizeCropConvertReformat.cpp +++ b/python/mod_cvcuda/OpResizeCropConvertReformat.cpp @@ -231,8 +231,8 @@ void ExportOpResizeCropConvertReformat(py::module &m) Args: src (nvcv.Tensor): Input tensor containing one or more images. resize_dim (tuple[int,int]): Dimensions, width & height, of resized tensor (prior to cropping). - interp (cvcuda.Interp): Interpolation type used for resizing. Currently, only NVCV_INTERP_NEAREST and - NVCV_INTERP_LINEAR are available. + interp (cvcuda.Interp): Interpolation type used for resizing. Currently, only cvcuda.Interp.NEAREST and + cvcuda.Interp.LINEAR are available. crop_rect (nvcv.RectI): Crop rectangle, (top, left, width, height), specifying the top-left corner and width & height dimensions of the region to crop from the resized images. layout(string, optional): String specifying output tensor layout (e.g., 'NHWC' or 'CHW'). Empty string @@ -288,8 +288,8 @@ void ExportOpResizeCropConvertReformat(py::module &m) or float) and tensor layout (e.g., 'NHWC' or 'NCHW'). src (nvcv.Tensor): Input tensor containing one or more images. resize_dim (tuple[int,int]): Dimensions, width & height, of resized tensor (prior to cropping). - interp (cvcuda.Interp): Interpolation type used for resizing. Currently, only NVCV_INTERP_NEAREST and - NVCV_INTERP_LINEAR are available. + interp (cvcuda.Interp): Interpolation type used for resizing. 
Currently, only cvcuda.Interp.NEAREST and + cvcuda.Interp.LINEAR are available. cropPos (tuple[int,int]): Crop position, (top, left), specifying the top-left corner of the region to crop from the resized images. The crop region's width and height is specified by the output tensor's width & height. @@ -339,11 +339,11 @@ void ExportOpResizeCropConvertReformat(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images of varying sizes, but all images + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images of varying sizes, but all images must have the same data type, channels, and layout. resize_dim (tuple[int,int]): Dimensions, width & height, of resized tensor (prior to cropping). - interp (cvcuda.Interp): Interpolation type used for resizing. Currently, only NVCV_INTERP_NEAREST and - NVCV_INTERP_LINEAR are available. + interp (cvcuda.Interp): Interpolation type used for resizing. Currently, only cvcuda.Interp.NEAREST and + cvcuda.Interp.LINEAR are available. crop_rect (nvcv.RectI): Crop rectangle, (top, left, width, height), specifying the top-left corner and width & height dimensions of the region to crop from the resized images. layout(string, optional): String specifying output tensor layout (e.g., 'NHWC' or 'CHW'). Empty string @@ -397,11 +397,11 @@ void ExportOpResizeCropConvertReformat(py::module &m) dst (nvcv.Tensor): Output tensor to store the result of the operation. Output tensor also specifies the crop dimensions (i.e., width & height), as well as the output data type (e.g., uchar3 or float) and tensor layout (e.g., 'NHWC' or 'NCHW'). - src (ImageBatchVarShape): Input image batch containing one or more images of varying sizes, but all images + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images of varying sizes, but all images must have the same data type, channels, and layout. resize_dim (tuple[int,int]): Dimensions, width & height, of resized tensor (prior to cropping). - interp (cvcuda.Interp): Interpolation type used for resizing. Currently, only NVCV_INTERP_NEAREST and - NVCV_INTERP_LINEAR are available. + interp (cvcuda.Interp): Interpolation type used for resizing. Currently, only cvcuda.Interp.NEAREST and + cvcuda.Interp.LINEAR are available. cropPos (tuple[int,int]): Crop position, (top, left), specifying the top-left corner of the region to crop from the resized images. The crop region's width and height is specified by the output tensor's width & height. diff --git a/python/mod_cvcuda/OpRotate.cpp b/python/mod_cvcuda/OpRotate.cpp index a12965f4e..037fef968 100644 --- a/python/mod_cvcuda/OpRotate.cpp +++ b/python/mod_cvcuda/OpRotate.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -109,7 +109,7 @@ void ExportOpRotate(py::module &m) m.def("rotate", &Rotate, "src"_a, "angle_deg"_a, "shift"_a, "interpolation"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.rotate(src: nvcv.Tensor, angle_deg: double, shift: Tuple [double, double], interpolation : Interp, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.rotate(src: nvcv.Tensor, angle_deg: float, shift: Tuple[float, float], interpolation: cvcuda.Interp, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Rotate operation on the given cuda stream. @@ -118,14 +118,14 @@ void ExportOpRotate(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - angle_deg (double): Angle used for rotation in degrees. - shift (Tuple [double, double]): Value of shift in {x, y} directions to move the center at the same coord after rotation. - interpolation (Interp): Interpolation type used for transform. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + angle_deg (float): Angle used for rotation in degrees. + shift (Tuple[float, float]): Value of shift in {x, y} directions to move the center at the same coord after rotation. + interpolation (cvcuda.Interp): Interpolation type used for transform. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -135,7 +135,7 @@ void ExportOpRotate(py::module &m) m.def("rotate_into", &RotateInto, "dst"_a, "src"_a, "angle_deg"_a, "shift"_a, "interpolation"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.rotate_into(dst: nvcv.Tensor, src: nvcv.Tensor, angle_deg: double, shift: Tuple [double, double], interpolation : Interp, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.rotate_into(dst: nvcv.Tensor, src: nvcv.Tensor, angle_deg: float, shift: Tuple[float, float], interpolation: cvcuda.Interp, stream: Optional[nvcv.cuda.Stream] = None) Executes the Rotate operation on the given cuda stream. @@ -144,12 +144,12 @@ void ExportOpRotate(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - angle_deg (double): Angle used for rotation in degrees. - shift (Tuple [double, double]): Value of shift in {x, y} directions to move the center at the same coord after rotation. - interpolation (Interp): Interpolation type used for transform. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + angle_deg (float): Angle used for rotation in degrees. + shift (Tuple[float, float]): Value of shift in {x, y} directions to move the center at the same coord after rotation. + interpolation (cvcuda.Interp): Interpolation type used for transform. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. 
Returns: None @@ -162,7 +162,7 @@ void ExportOpRotate(py::module &m) m.def("rotate", &VarShapeRotate, "src"_a, "angle_deg"_a, "shift"_a, "interpolation"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.rotate(src: nvcv.ImageBatchVarShape, angle_deg: double, shift: Tuple [double, double], interpolation : Interp, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.rotate(src: nvcv.ImageBatchVarShape, angle_deg: float, shift: Tuple[float, float], interpolation: cvcuda.Interp, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the Rotate operation on the given cuda stream. @@ -172,15 +172,15 @@ void ExportOpRotate(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - angle_deg (Tensor): Angle used for rotation in degrees for each image. - shift (Tensor): Value of shift in {x, y} directions to move the center at the same coord after rotation for each image. - interpolation (Interp): Interpolation type used for transform. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + angle_deg (float): Angle used for rotation in degrees for each image. + shift (Tuple[float, float]): Value of shift in {x, y} directions to move the center at the same coord after rotation for each image. + interpolation (cvcuda.Interp): Interpolation type used for transform. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -190,7 +190,7 @@ void ExportOpRotate(py::module &m) m.def("rotate_into", &VarShapeRotateInto, "dst"_a, "src"_a, "angle_deg"_a, "shift"_a, "interpolation"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.rotate_into(dst:ImageBatchVarShape, src: nvcv.ImageBatchVarShape, angle_deg: double, shift: Tuple [double, double], interpolation : Interp, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.rotate_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, angle_deg: nvcv.Tensor, shift: nvcv.Tensor, interpolation: cvcuda.Interp, stream: Optional[nvcv.cuda.Stream] = None) Executes the Rotate operation on the given cuda stream. @@ -199,12 +199,12 @@ void ExportOpRotate(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - angle_deg (Tensor): Angle used for rotation in degrees for each image. - shift (Tensor): Value of shift in {x, y} directions to move the center at the same coord after rotation for each image. - interpolation (Interp): Interpolation type used for transform. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + angle_deg (nvcv.Tensor): Angle used for rotation in degrees for each image. 
+ shift (nvcv.Tensor): Value of shift in {x, y} directions to move the center at the same coord after rotation for each image. + interpolation (cvcuda.Interp): Interpolation type used for transform. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpSIFT.cpp b/python/mod_cvcuda/OpSIFT.cpp index f82fd1dad..fd1b32a26 100644 --- a/python/mod_cvcuda/OpSIFT.cpp +++ b/python/mod_cvcuda/OpSIFT.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -219,7 +219,7 @@ void ExportOpSIFT(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor to extract features and compute descriptors from. + src (nvcv.Tensor): Input tensor to extract features and compute descriptors from. max_features (Number, optional): Maximum number of features to be extracted, default is 5% of total pixels at a minimum of 1. num_octave_layers (Number, optional): Number of octave layers, default is 3. @@ -228,10 +228,10 @@ void ExportOpSIFT(py::module &m) init_sigma (Number, optional): Initial sigma, default is 1.6. flags (cvcuda.SIFT, optional): Flag to whether to expand the input or not, default is cvcuda.SIFT.USE_EXPANDED_INPUT. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - Tuple[Tensor, Tensor, Tensor, Tensor]: A tuple wih feature coordinates, metadata, descriptors and + Tuple[nvcv.Tensor, nvcv.Tensor, nvcv.Tensor, nvcv.Tensor]: A tuple with feature coordinates, metadata, descriptors and number of features. Caution: @@ -250,21 +250,21 @@ void ExportOpSIFT(py::module &m) for more details and usage examples. Args: - feat_coords (Tensor): Output tensor with feature coordinates. - feat_metadata (Tensor): Output tensor with feature metadata. - feat_descriptors (Tensor): Output tensor with feature descriptors. - num_features (Tensor): Output tensor with number of features. - src (Tensor): Input tensor to extract features and compute descriptors from. + feat_coords (nvcv.Tensor): Output tensor with feature coordinates. + feat_metadata (nvcv.Tensor): Output tensor with feature metadata. + feat_descriptors (nvcv.Tensor): Output tensor with feature descriptors. + num_features (nvcv.Tensor): Output tensor with number of features. + src (nvcv.Tensor): Input tensor to extract features and compute descriptors from. num_octave_layers (Number, optional): Number of octave layers, default is 3. contrast_threshold (Number, optional): Contrast threshold, default is 0.03. edge_threshold (Number, optional): Edge threshold, default is 10.0. init_sigma (Number, optional): Initial sigma, default is 1.6. flags (cvcuda.SIFT, optional): Flag to whether to expand the input or not, default is cvcuda.SIFT.USE_EXPANDED_INPUT. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - Tuple[Tensor, Tensor, Tensor, Tensor]: A tuple wih feature coordinates, metadata, descriptors and + Tuple[nvcv.Tensor, nvcv.Tensor, nvcv.Tensor, nvcv.Tensor]: A tuple with feature coordinates, metadata, descriptors and number of features. 
Caution: diff --git a/python/mod_cvcuda/OpStack.cpp b/python/mod_cvcuda/OpStack.cpp index da815a876..49174b827 100644 --- a/python/mod_cvcuda/OpStack.cpp +++ b/python/mod_cvcuda/OpStack.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -143,11 +143,11 @@ void ExportOpStack(py::module &m) for more details and usage examples. Args: - src (Tensor List): Input tensors containing one or more samples each images all tensors must be N(HWC/CHW) or HWC/CHW and have the same data type and shape. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (List[nvcv.Tensor]): Input tensors containing one or more samples each images all tensors must be N(HWC/CHW) or HWC/CHW and have the same data type and shape. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor containing the stacked input tensors. + nvcv.Tensor: The output tensor containing the stacked input tensors. Caution: Restrictions to several arguments may apply. Check the C @@ -163,9 +163,9 @@ void ExportOpStack(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output N(CHW/HWC) tensor to store the result of the operation. - src (Tensor List): Input tensors containing one or more samples each images all tensors must be N(HWC/CHW) or HWC/CHW and have the same data type and shape. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output N(CHW/HWC) tensor to store the result of the operation. + src (List[nvcv.Tensor]): Input tensors containing one or more samples each images all tensors must be N(HWC/CHW) or HWC/CHW and have the same data type and shape. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpThreshold.cpp b/python/mod_cvcuda/OpThreshold.cpp index 37c398ca5..081f670e5 100644 --- a/python/mod_cvcuda/OpThreshold.cpp +++ b/python/mod_cvcuda/OpThreshold.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -118,15 +118,16 @@ void ExportOpThreshold(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - thresh (Tensor): An array of size batch that gives the threshold value of each image. - maxval (Tensor): An array of size batch that gives the maxval value of each image, - using with the NVCV_THRESH_BINARY and NVCV_THRESH_BINARY_INV threshold types. - type (ThresholdType): Thresholding type. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + thresh (nvcv.Tensor): An array of size batch that gives the threshold value of each image. + maxval (nvcv.Tensor): An array of size batch that gives the maxval value of each image, + using with the cvcuda.ThresholdType.BINARY or cvcuda.ThresholdType.BINARY_INV + threshold types. + type (cvcuda.ThresholdType): Thresholding type. 
+ stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -136,7 +137,7 @@ void ExportOpThreshold(py::module &m) m.def("threshold_into", &ThresholdInto, "dst"_a, "src"_a, "thresh"_a, "maxval"_a, "type"_a, py::kw_only(), "stream"_a = nullptr, R"pbdoc( - cvcuda.threshold_into(dst: nvcv.Tensor,src: nvcv.Tensor, thresh: nvcv.Tensor, maxval: nvcv.Tensor, type:ThresholdType, stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.threshold_into(dst: nvcv.Tensor, src: nvcv.Tensor, thresh: nvcv.Tensor, maxval: nvcv.Tensor, type:ThresholdType, stream: Optional[nvcv.cuda.Stream] = None) Executes the Threshold operation on the given cuda stream. @@ -145,13 +146,14 @@ void ExportOpThreshold(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - thresh (Tensor): An array of size batch that gives the threshold value of each image. - maxval (Tensor): An array of size batch that gives the maxval value of each image, - using with the NVCV_THRESH_BINARY and NVCV_THRESH_BINARY_INV threshold types. - type (ThresholdType): Thresholding type. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + thresh (nvcv.Tensor): An array of size batch that gives the threshold value of each image. + maxval (nvcv.Tensor): An array of size batch that gives the maxval value of each image, + using with the cvcuda.ThresholdType.BINARY or cvcuda.ThresholdType.BINARY_INV + threshold types. + type (cvcuda.ThresholdType): Thresholding type. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -173,15 +175,16 @@ void ExportOpThreshold(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - thresh (Tensor): An array of size batch that gives the threshold value of each image. - maxval (Tensor): An array of size batch that gives the maxval value of each image, - using with the NVCV_THRESH_BINARY and NVCV_THRESH_BINARY_INV threshold types. - type (ThresholdType): Thresholding type. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + thresh (nvcv.Tensor): An array of size batch that gives the threshold value of each image. + maxval (nvcv.Tensor): An array of size batch that gives the maxval value of each image, + using with the cvcuda.ThresholdType.BINARY or cvcuda.ThresholdType.BINARY_INV + threshold types. + type (cvcuda.ThresholdType): Thresholding type. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -200,13 +203,14 @@ void ExportOpThreshold(py::module &m) for more details and usage examples. Args: - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - src (ImageBatchVarShape): Input image batch containing one or more images. 
- thresh (Tensor): An array of size batch that gives the threshold value of each image. - maxval (Tensor): An array of size batch that gives the maxval value of each image, - using with the NVCV_THRESH_BINARY and NVCV_THRESH_BINARY_INV threshold types. - type (ThresholdType): Thresholding type. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + thresh (nvcv.Tensor): An array of size batch that gives the threshold value of each image. + maxval (nvcv.Tensor): An array of size batch that gives the maxval value of each image, + using with the cvcuda.ThresholdType.BINARY or cvcuda.ThresholdType.BINARY_INV + threshold types. + type (cvcuda.ThresholdType): Thresholding type. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpWarpAffine.cpp b/python/mod_cvcuda/OpWarpAffine.cpp index a07c25692..9a5acfb1c 100644 --- a/python/mod_cvcuda/OpWarpAffine.cpp +++ b/python/mod_cvcuda/OpWarpAffine.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -145,7 +145,7 @@ void ExportOpWarpAffine(py::module &m) m.def("warp_affine", &WarpAffine, "src"_a, "xform"_a, "flags"_a, py::kw_only(), "border_mode"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, "border_value"_a = 0, "stream"_a = nullptr, R"pbdoc( - cvcuda.warp_affine(src: nvcv.Tensor, xform: nvcv.Tensor, flags: nvcv.Tensor, border_mode: NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, border_value:pyarray,stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.warp_affine(src: nvcv.Tensor, xform: nvcv.Tensor, flags: nvcv.Tensor, border_mode: cvcuda.Border = cvcuda.Border.CONSTANT, border_value: numpy.ndarray, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Warp Affine operation on the given cuda stream. @@ -154,17 +154,18 @@ void ExportOpWarpAffine(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - xform (Tensor): 2x3 float affine transformation matrix. - flags (int): Combination of interpolation methods(NVCV_INTERP_NEAREST, NVCV_INTERP_LINEAR or NVCV_INTERP_CUBIC) - and the optional flag NVCV_WARP_INVERSE_MAP, that sets xform as the inverse transformation. - border_mode (NVCVBorderType, optional): Pixel extrapolation method (NVCV_BORDER_CONSTANT or NVCV_BORDER_REPLICATE). - border_value (pyarray, optional):Used to specify values for a constant border, must be a size <= 4 and dim of 1, - where the values specify the border color for each color channel. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + xform (nvcv.Tensor): 2x3 float affine transformation matrix. + flags (int): Combination of interpolation methods(cvcuda.Interp.NEAREST, cvcuda.Interp.LINEAR or cvcuda.Interp.CUBIC) + and the optional flag cvcuda.Interp.WARP_INVERSE_MAP, that sets xform as the inverse transformation. 
+ border_mode (cvcuda.Border, optional): Pixel extrapolation method (cvcuda.Border.CONSTANT or + cvcuda.Border.REPLICATE). + border_value (numpy.ndarray, optional):Used to specify values for a constant border, must be a size <= 4 and dim of 1, + where the values specify the border color for each color channel. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -174,7 +175,7 @@ void ExportOpWarpAffine(py::module &m) m.def("warp_affine_into", &WarpAffineInto, "dst"_a, "src"_a, "xform"_a, "flags"_a, py::kw_only(), "border_mode"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, "border_value"_a = 0, "stream"_a = nullptr, R"pbdoc( - cvcuda.warp_affine_into(dst: nvcv.Tensor, src: nvcv.Tensor, xform: nvcv.Tensor, flags: nvcv.Tensor, border_mode: NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, border_value:pyarray,stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.warp_affine_into(dst: nvcv.Tensor, src: nvcv.Tensor, xform: nvcv.Tensor, flags: nvcv.Tensor, border_mode: cvcuda.Border = cvcuda.Border.CONSTANT, border_value: numpy.ndarray, stream: Optional[nvcv.cuda.Stream] = None) Executes the Warp Affine operation on the given cuda stream. @@ -183,15 +184,16 @@ void ExportOpWarpAffine(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - xform (Tensor): 2x3 float affine transformation matrix. - flags (int): Combination of interpolation methods(NVCV_INTERP_NEAREST, NVCV_INTERP_LINEAR or NVCV_INTERP_CUBIC) - and the optional flag NVCV_WARP_INVERSE_MAP, that sets xform as the inverse transformation. - border_mode (NVCVBorderType, optional): Pixel extrapolation method (NVCV_BORDER_CONSTANT or NVCV_BORDER_REPLICATE). - border_value (pyarray, optional):Used to specify values for a constant border, must be a size <= 4 and dim of 1, + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + xform (nvcv.Tensor): 2x3 float affine transformation matrix. + flags (int): Combination of interpolation methods(cvcuda.Interp.NEAREST, cvcuda.Interp.LINEAR or cvcuda.Interp.CUBIC) + and the optional flag cvcuda.Interp.WARP_INVERSE_MAP, that sets xform as the inverse transformation. + border_mode (cvcuda.Border, optional): Pixel extrapolation method (cvcuda.Border.CONSTANT or + cvcuda.Border.REPLICATE). + border_value (numpy.ndarray, optional):Used to specify values for a constant border, must be a size <= 4 and dim of 1, where the values specify the border color for each color channel. - stream (Stream, optional): CUDA Stream on which to perform the operation. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. 
Returns: None @@ -204,7 +206,7 @@ void ExportOpWarpAffine(py::module &m) m.def("warp_affine", &WarpAffineVarShape, "src"_a, "xform"_a, "flags"_a, py::kw_only(), "border_mode"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, "border_value"_a = 0, "stream"_a = nullptr, R"pbdoc( - cvcuda.warp_affine(src: nvcv.ImageBatchVarShape, xform: nvcv.Tensor, flags: nvcv.Tensor, border_mode: NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, border_value:pyarray,stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.warp_affine(src: nvcv.ImageBatchVarShape, xform: nvcv.Tensor, flags: nvcv.Tensor, border_mode: cvcuda.Border = cvcuda.Border.CONSTANT, border_value: numpy.ndarray, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the Warp Affine operation on the given cuda stream. @@ -213,17 +215,18 @@ void ExportOpWarpAffine(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - xform (Tensor): 2x3 float affine transformation matrix(s) for each image. - flags (int): Combination of interpolation methods(NVCV_INTERP_NEAREST, NVCV_INTERP_LINEAR or NVCV_INTERP_CUBIC) - and the optional flag NVCV_WARP_INVERSE_MAP, that sets xform as the inverse transformation. - border_mode (NVCVBorderType, optional): Pixel extrapolation method (NVCV_BORDER_CONSTANT or NVCV_BORDER_REPLICATE). - border_value (pyarray, optional):Used to specify values for a constant border, must be a size <= 4 and dim of 1, - where the values specify the border color for each color channel. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + xform (nvcv.Tensor): 2x3 float affine transformation matrix(s) for each image. + flags (int): Combination of interpolation methods(cvcuda.Interp.NEAREST, cvcuda.Interp.LINEAR or cvcuda.Interp.CUBIC) + and the optional flag cvcuda.Interp.WARP_INVERSE_MAP, that sets xform as the inverse transformation. + border_mode (cvcuda.Border, optional): Pixel extrapolation method (cvcuda.Border.CONSTANT or + cvcuda.Border.REPLICATE). + border_value (numpy.ndarray, optional):Used to specify values for a constant border, must be a size <= 4 and dim of 1, + where the values specify the border color for each color channel. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. Check the C @@ -233,7 +236,7 @@ void ExportOpWarpAffine(py::module &m) m.def("warp_affine_into", &WarpAffineVarShapeInto, "dst"_a, "src"_a, "xform"_a, "flags"_a, py::kw_only(), "border_mode"_a = NVCVBorderType::NVCV_BORDER_CONSTANT, "border_value"_a = 0, "stream"_a = nullptr, R"pbdoc( - cvcuda.warp_affine_into(dst: nvcv.ImageBatchVarShape,src: nvcv.ImageBatchVarShape, xform: nvcv.Tensor, flags: nvcv.Tensor, border_mode: NVCVBorderType = < NVCVBorderType::NVCV_BORDER_CONSTANT >, border_value:pyarray,stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.warp_affine_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, xform: nvcv.Tensor, flags: nvcv.Tensor, border_mode: cvcuda.Border = cvcuda.Border.CONSTANT, border_value: numpy.ndarray, stream: Optional[nvcv.cuda.Stream] = None) Executes the Warp Affine operation on the given cuda stream. 
@@ -242,15 +245,16 @@ void ExportOpWarpAffine(py::module &m) for more details and usage examples. Args: - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - src (ImageBatchVarShape): Input image batch containing one or more images. - xform (Tensor): 2x3 float affine transformation matrix(s) for each image in batch. - flags (int): Combination of interpolation methods(NVCV_INTERP_NEAREST, NVCV_INTERP_LINEAR or NVCV_INTERP_CUBIC) - and the optional flag NVCV_WARP_INVERSE_MAP, that sets xform as the inverse transformation. - border_mode (NVCVBorderType, optional): Pixel extrapolation method (NVCV_BORDER_CONSTANT or NVCV_BORDER_REPLICATE). - border_value (pyarray, optional):Used to specify values for a constant border, must be a size <= 4 and dim of 1, - where the values specify the border color for each color channel. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + xform (nvcv.Tensor): 2x3 float affine transformation matrix(s) for each image in batch. + flags (int): Combination of interpolation methods(cvcuda.Interp.NEAREST, cvcuda.Interp.LINEAR or cvcuda.Interp.CUBIC) + and the optional flag cvcuda.Interp.WARP_INVERSE_MAP, that sets xform as the inverse transformation. + border_mode (cvcuda.Border, optional): Pixel extrapolation method (cvcuda.Border.CONSTANT or + cvcuda.Border.REPLICATE). + border_value (numpy.ndarray, optional):Used to specify values for a constant border, must be a size <= 4 and dim of 1, + where the values specify the border color for each color channel. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/OpWarpPerspective.cpp b/python/mod_cvcuda/OpWarpPerspective.cpp index 33536467f..6df68713a 100644 --- a/python/mod_cvcuda/OpWarpPerspective.cpp +++ b/python/mod_cvcuda/OpWarpPerspective.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -144,7 +144,7 @@ void ExportOpWarpPerspective(py::module &m) m.def("warp_perspective", &WarpPerspective, "src"_a, "xform"_a, "flags"_a, py::kw_only(), "border_mode"_a, "border_value"_a, "stream"_a = nullptr, R"pbdoc( - cvcuda.warp_perspective(src: nvcv.Tensor, xform: nvcv.Tensor, flags: int, border_mode:NVCVBorderType, border_value:pyarray,stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor + cvcuda.warp_perspective(src: nvcv.Tensor, xform: nvcv.Tensor, flags: int, border_mode: cvcuda.Border, border_value: numpy.ndarray, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.Tensor Executes the Warp Perspective operation on the given cuda stream. @@ -153,17 +153,18 @@ void ExportOpWarpPerspective(py::module &m) for more details and usage examples. Args: - src (Tensor): Input tensor containing one or more images. - xform (pyarray: 3x3 perspective transformation matrix. 
- flags (int): Combination of interpolation methods(NVCV_INTERP_NEAREST, NVCV_INTERP_LINEAR or NVCV_INTERP_CUBIC) - and the optional flag NVCV_WARP_INVERSE_MAP, that sets trans_matrix as the inverse transformation. - border_mode (NVCVBorderType): pixel extrapolation method (NVCV_BORDER_CONSTANT or NVCV_BORDER_REPLICATE). - border_value (pyarray): Used to specify values for a constant border, should be a size <= 4 and dim of 1, - where the values specify the border color for each color channel. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + xform (numpy.ndarray: 3x3 perspective transformation matrix. + flags (int): Combination of interpolation methods(cvcuda.Interp.NEAREST, cvcuda.Interp.LINEAR or cvcuda.Interp.CUBIC) + and the optional flag cvcuda.Interp.WARP_INVERSE_MAP, that sets trans_matrix as the inverse transformation. + border_mode (cvcuda.Border): pixel extrapolation method (cvcuda.Border.CONSTANT or + cvcuda.Border.REPLICATE). + border_value (numpy.ndarray): Used to specify values for a constant border, should be a size <= 4 and dim of 1, + where the values specify the border color for each color channel. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.Tensor: The output tensor. + nvcv.Tensor: The output tensor. Caution: Restrictions to several arguments may apply. Check the C @@ -173,7 +174,7 @@ void ExportOpWarpPerspective(py::module &m) m.def("warp_perspective_into", &WarpPerspectiveInto, "dst"_a, "src"_a, "xform"_a, "flags"_a, py::kw_only(), "border_mode"_a, "border_value"_a, "stream"_a = nullptr, R"pbdoc( - cvcuda.warp_perspective_into(dst: nvcv.Tensor,src: nvcv.Tensor, xform: nvcv.Tensor, flags: int, border_mode:NVCVBorderType, border_value:pyarray,stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.warp_perspective_into(dst: nvcv.Tensor, src: nvcv.Tensor, xform: nvcv.Tensor, flags: int, border_mode: cvcuda.Border, border_value: numpy.ndarray, stream: Optional[nvcv.cuda.Stream] = None) Executes the Warp Perspective operation on the given cuda stream. @@ -182,15 +183,16 @@ void ExportOpWarpPerspective(py::module &m) for more details and usage examples. Args: - dst (Tensor): Output tensor to store the result of the operation. - src (Tensor): Input tensor containing one or more images. - xform (pyarray: 3x3 perspective transformation matrix. - flags (int): Combination of interpolation methods(NVCV_INTERP_NEAREST, NVCV_INTERP_LINEAR or NVCV_INTERP_CUBIC) - and the optional flag NVCV_WARP_INVERSE_MAP, that sets trans_matrix as the inverse transformation. - border_mode (NVCVBorderType): pixel extrapolation method (NVCV_BORDER_CONSTANT or NVCV_BORDER_REPLICATE). - border_value (pyarray): Used to specify values for a constant border, should be a size <= 4 and dim of 1, - where the values specify the border color for each color channel. - stream (Stream, optional): CUDA Stream on which to perform the operation. + dst (nvcv.Tensor): Output tensor to store the result of the operation. + src (nvcv.Tensor): Input tensor containing one or more images. + xform (numpy.ndarray: 3x3 perspective transformation matrix. + flags (int): Combination of interpolation methods(cvcuda.Interp.NEAREST, cvcuda.Interp.LINEAR or cvcuda.Interp.CUBIC) + and the optional flag cvcuda.Interp.WARP_INVERSE_MAP, that sets trans_matrix as the inverse transformation. 
+ border_mode (cvcuda.Border): pixel extrapolation method (cvcuda.Border.CONSTANT or + cvcuda.Border.REPLICATE). + border_value (numpy.ndarray): Used to specify values for a constant border, should be a size <= 4 and dim of 1, + where the values specify the border color for each color channel. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None @@ -203,7 +205,7 @@ void ExportOpWarpPerspective(py::module &m) m.def("warp_perspective", &WarpPerspectiveVarShape, "src"_a, "xform"_a, "flags"_a, py::kw_only(), "border_mode"_a, "border_value"_a, "stream"_a = nullptr, R"pbdoc( - cvcuda.warp_perspective(src: nvcv.ImageBatchVarShape, xform: nvcv.Tensor, flags: int, border_mode:NVCVBorderType, border_value:pyarray,stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape + cvcuda.warp_perspective(src: nvcv.ImageBatchVarShape, xform: nvcv.Tensor, flags: int, border_mode: cvcuda.Border, border_value: numpy.ndarray, stream: Optional[nvcv.cuda.Stream] = None) -> nvcv.ImageBatchVarShape Executes the Warp Perspective operation on the given cuda stream. @@ -212,17 +214,18 @@ void ExportOpWarpPerspective(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - xform (Tensor): 3x3 perspective transformation matrix for each image in the batch. - flags (int): Combination of interpolation methods(NVCV_INTERP_NEAREST, NVCV_INTERP_LINEAR or NVCV_INTERP_CUBIC) - and the optional flag NVCV_WARP_INVERSE_MAP, that sets trans_matrix as the inverse transformation. - border_mode (NVCVBorderType): pixel extrapolation method (NVCV_BORDER_CONSTANT or NVCV_BORDER_REPLICATE). - border_value (pyarray): Used to specify values for a constant border, must be a size <= 4 and dim of 1, - where the values specify the border color for each color channel. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + xform (nvcv.Tensor): 3x3 perspective transformation matrix for each image in the batch. + flags (int): Combination of interpolation methods(cvcuda.Interp.NEAREST, cvcuda.Interp.LINEAR or cvcuda.Interp.CUBIC) + and the optional flag cvcuda.Interp.WARP_INVERSE_MAP, that sets trans_matrix as the inverse transformation. + border_mode (cvcuda.Border): pixel extrapolation method (cvcuda.Border.CONSTANT or + cvcuda.Border.REPLICATE). + border_value (numpy.ndarray): Used to specify values for a constant border, must be a size <= 4 and dim of 1, + where the values specify the border color for each color channel. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: - cvcuda.ImageBatchVarShape: The output image batch. + nvcv.ImageBatchVarShape: The output image batch. Caution: Restrictions to several arguments may apply. 
Check the C @@ -232,7 +235,7 @@ void ExportOpWarpPerspective(py::module &m) m.def("warp_perspective_into", &WarpPerspectiveVarShapeInto, "dst"_a, "src"_a, "xform"_a, "flags"_a, py::kw_only(), "border_mode"_a, "border_value"_a, "stream"_a = nullptr, R"pbdoc( - cvcuda.warp_perspective_into(dst: nvcv.ImageBatchVarShape,src: nvcv.ImageBatchVarShape, xform: nvcv.Tensor, flags: int, border_mode:NVCVBorderType, border_value:pyarray,stream: Optional[nvcv.cuda.Stream] = None) + cvcuda.warp_perspective_into(dst: nvcv.ImageBatchVarShape, src: nvcv.ImageBatchVarShape, xform: nvcv.Tensor, flags: int, border_mode: cvcuda.Border, border_value: numpy.ndarray, stream: Optional[nvcv.cuda.Stream] = None) Executes the Warp Perspective operation on the given cuda stream. @@ -241,15 +244,16 @@ void ExportOpWarpPerspective(py::module &m) for more details and usage examples. Args: - src (ImageBatchVarShape): Input image batch containing one or more images. - dst (ImageBatchVarShape): Output image batch containing the result of the operation. - xform (Tensor): 3x3 perspective transformation matrix for each image in the batch. - flags (int): Combination of interpolation methods(NVCV_INTERP_NEAREST, NVCV_INTERP_LINEAR or NVCV_INTERP_CUBIC) - and the optional flag NVCV_WARP_INVERSE_MAP, that sets trans_matrix as the inverse transformation. - border_mode (NVCVBorderType): pixel extrapolation method (NVCV_BORDER_CONSTANT or NVCV_BORDER_REPLICATE). - border_value (pyarray): Used to specify values for a constant border, must be a size <= 4 and dim of 1, - where the values specify the border color for each color channel. - stream (Stream, optional): CUDA Stream on which to perform the operation. + src (nvcv.ImageBatchVarShape): Input image batch containing one or more images. + dst (nvcv.ImageBatchVarShape): Output image batch containing the result of the operation. + xform (nvcv.Tensor): 3x3 perspective transformation matrix for each image in the batch. + flags (int): Combination of interpolation methods(cvcuda.Interp.NEAREST, cvcuda.Interp.LINEAR or cvcuda.Interp.CUBIC) + and the optional flag cvcuda.Interp.WARP_INVERSE_MAP, that sets trans_matrix as the inverse transformation. + border_mode (cvcuda.Border): pixel extrapolation method (cvcuda.Border.CONSTANT or + cvcuda.Border.REPLICATE). + border_value (numpy.ndarray): Used to specify values for a constant border, must be a size <= 4 and dim of 1, + where the values specify the border color for each color channel. + stream (nvcv.cuda.Stream, optional): CUDA Stream on which to perform the operation. Returns: None diff --git a/python/mod_cvcuda/Operators.hpp b/python/mod_cvcuda/Operators.hpp index 55307fcff..208f57f8d 100644 --- a/python/mod_cvcuda/Operators.hpp +++ b/python/mod_cvcuda/Operators.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +16,7 @@ */ #include -#include +#include #include #include #include diff --git a/python/mod_cvcuda/OsdElement.cpp b/python/mod_cvcuda/OsdElement.cpp index d1b00de37..a6b602989 100644 --- a/python/mod_cvcuda/OsdElement.cpp +++ b/python/mod_cvcuda/OsdElement.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_cvcuda/WorkspaceCache.hpp b/python/mod_cvcuda/WorkspaceCache.hpp index ffd56a212..19761e32a 100644 --- a/python/mod_cvcuda/WorkspaceCache.hpp +++ b/python/mod_cvcuda/WorkspaceCache.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,11 +20,11 @@ #include #include +#include +#include +#include +#include #include -#include -#include -#include -#include #include #include diff --git a/python/mod_nvcv/Array.cpp b/python/mod_nvcv/Array.cpp index 710ad7bec..70f2ac752 100644 --- a/python/mod_nvcv/Array.cpp +++ b/python/mod_nvcv/Array.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -173,12 +173,14 @@ std::shared_ptr Array::Resize(Shape shape) Array::Array(const nvcv::Array::Requirements &reqs) : m_impl{reqs} , m_key{reqs} + , m_size_inbytes{doComputeSizeInBytes(reqs)} { } Array::Array(const nvcv::ArrayData &data, py::object wrappedObject) : m_impl{nvcv::ArrayWrapData(data)} , m_key{} + , m_size_inbytes{doComputeSizeInBytes(nvcv::Array::Requirements())} , m_wrappedObject(wrappedObject) { } @@ -186,9 +188,24 @@ Array::Array(const nvcv::ArrayData &data, py::object wrappedObject) Array::Array(nvcv::Array &&array) : m_impl{std::move(array)} , m_key{} + , m_size_inbytes{doComputeSizeInBytes(nvcv::Array::Requirements())} { } +int64_t Array::doComputeSizeInBytes(const nvcv::Array::Requirements &reqs) +{ + int64_t size_inbytes; + util::CheckThrow(nvcvMemRequirementsCalcTotalSizeBytes(&(reqs.mem.cudaMem), &size_inbytes)); + return size_inbytes; +} + +int64_t Array::GetSizeInBytes() const +{ + // m_size_inbytes == -1 indicates failure case and value has not been computed yet + NVCV_ASSERT(m_size_inbytes != -1 && "Array has m_size_inbytes == -1, ie m_size_inbytes has not been correctly set"); + return m_size_inbytes; +} + std::shared_ptr Array::shared_from_this() { return std::static_pointer_cast(Container::shared_from_this()); diff --git a/python/mod_nvcv/Array.hpp b/python/mod_nvcv/Array.hpp index 1cb32b942..638db0b73 100644 --- a/python/mod_nvcv/Array.hpp +++ b/python/mod_nvcv/Array.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -83,6 +83,8 @@ class Array : public Container virtual const Key &key() const override; + int64_t GetSizeInBytes() const override; + py::object cuda() const; private: @@ -90,9 +92,11 @@ class Array : public Container Array(const nvcv::ArrayData &data, py::object wrappedObject); Array(nvcv::Array &&array); - // m_impl must come before m_key - nvcv::Array m_impl; + int64_t doComputeSizeInBytes(const nvcv::Array::Requirements &reqs); + + nvcv::Array m_impl; // must come before m_key Key m_key; + int64_t m_size_inbytes = -1; mutable py::object m_cacheExternalObject; diff --git a/python/mod_nvcv/CAPI.cpp b/python/mod_nvcv/CAPI.cpp index f573704ab..41b74b857 100644 --- a/python/mod_nvcv/CAPI.cpp +++ b/python/mod_nvcv/CAPI.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_nvcv/Cache.cpp b/python/mod_nvcv/Cache.cpp index 29c6d8d5f..ecc854c8f 100644 --- a/python/mod_nvcv/Cache.cpp +++ b/python/mod_nvcv/Cache.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,10 @@ #include "Cache.hpp" +#include "Definitions.hpp" + #include +#include #include #include @@ -75,10 +78,13 @@ bool CacheItem::isInUse() const return sthis.use_count() > 2; } +using Items = std::unordered_multimap, HashKey, KeyEqual>; + struct Cache::Impl { - std::mutex mtx; - std::unordered_multimap, HashKey, KeyEqual> items; + std::mutex mtx; + Items items; + int64_t cache_limit_inbytes; }; Cache::Cache() @@ -88,9 +94,21 @@ Cache::Cache() void Cache::add(CacheItem &item) { - std::unique_lock lk(pimpl->mtx); + Items savedItems; + { + std::unique_lock lk(pimpl->mtx); + if (item.GetSizeInBytes() > doGetCacheLimit()) + { + return; + } - pimpl->items.emplace(&item.key(), item.shared_from_this()); + if (item.GetSizeInBytes() + doCurrentSizeInBytes() > doGetCacheLimit()) + { + savedItems = std::move(pimpl->items); + } + + pimpl->items.emplace(&item.key(), item.shared_from_this()); + } } void Cache::removeAllNotInUseMatching(const IKey &key) @@ -184,16 +202,77 @@ std::shared_ptr Cache::fetchOne(const IKey &key) const void Cache::clear() { + Items savedItems; std::unique_lock lk(pimpl->mtx); - pimpl->items.clear(); + savedItems = std::move(pimpl->items); + lk.unlock(); + savedItems.clear(); } -size_t Cache::size() +size_t Cache::size() const { std::unique_lock lk(pimpl->mtx); return pimpl->items.size(); } +void Cache::setCacheLimit(int64_t new_cache_limit_inbytes) +{ + if (new_cache_limit_inbytes < 0) + { + throw std::invalid_argument("Cache limit must be non-negative."); + } + + size_t free_mem, total_mem; + util::CheckThrow(cudaMemGetInfo(&free_mem, &total_mem)); + + if (static_cast(total_mem) < new_cache_limit_inbytes) + { + // Cache is not device aware, so in a multi-gpu scenario it could be ok to have a cache limit larger + // than the total mem of the current device, but we should notify the user about this. 
+ std::cerr << "WARNING: new_cache_limit=" << new_cache_limit_inbytes + << " is more than total available memory on current device: " << total_mem << std::endl; + } + + Items savedItems; + { + std::unique_lock lk(pimpl->mtx); + if (doCurrentSizeInBytes() > new_cache_limit_inbytes) + { + savedItems = std::move(pimpl->items); + } + pimpl->cache_limit_inbytes = new_cache_limit_inbytes; + } +} + +int64_t Cache::getCacheLimit() const +{ + std::unique_lock lk(pimpl->mtx); + return doGetCacheLimit(); +} + +int64_t Cache::doGetCacheLimit() const +{ + return pimpl->cache_limit_inbytes; +} + +int64_t Cache::getCurrentSizeInBytes() +{ + std::unique_lock lk(pimpl->mtx); + return doCurrentSizeInBytes(); +} + +int64_t Cache::doCurrentSizeInBytes() const +{ + int64_t current_size_inbytes = 0; + + for (auto it = pimpl->items.begin(); it != pimpl->items.end(); ++it) + { + current_size_inbytes += it->second->GetSizeInBytes(); + } + + return current_size_inbytes; +} + void Cache::doIterateThroughItems(const std::function &fn) const { // To avoid keeping mutex locked for too long, let's first gather all items @@ -229,6 +308,11 @@ void Cache::Export(py::module &m) py::class_>(nullptr, "ExternalCacheItem", py::module_local()); + // Initialy set cache limit to half the size of the GPU memory + size_t free_mem, total_mem; + util::CheckThrow(cudaMemGetInfo(&free_mem, &total_mem)); + Cache::Instance().setCacheLimit(total_mem / 2); + util::RegisterCleanup(m, [] { @@ -243,6 +327,21 @@ void Cache::Export(py::module &m) "cache_size", [] { return Cache::Instance().size(); }, "Returns the quantity of items in the NVCV Python cache"); + m.def( + "get_cache_limit_inbytes", [] { return Cache::Instance().getCacheLimit(); }, + "Returns the current cache limit [in bytes]"); + m.def( + "set_cache_limit_inbytes", + [](int64_t new_cache_limit_inbytes) { Cache::Instance().setCacheLimit(new_cache_limit_inbytes); }, + "Sets the current cache limit [in bytes]"); + + m.def( + "current_cache_size_inbytes", [] { return Cache::Instance().getCurrentSizeInBytes(); }, + "Returns the current cache size [in bytes]"); + + py::module_ internal = m.attr(INTERNAL_SUBMODULE_NAME); + internal.def("nbytes_in_cache", [](const CacheItem &item) { return item.GetSizeInBytes(); }); + // Just to check if fetchAll compiles, it's harmless Cache::Instance().fetchAll(); } diff --git a/python/mod_nvcv/Cache.hpp b/python/mod_nvcv/Cache.hpp index fdc962911..db29a2b39 100644 --- a/python/mod_nvcv/Cache.hpp +++ b/python/mod_nvcv/Cache.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,6 +20,7 @@ #include "Object.hpp" +#include #include #include #include @@ -40,6 +41,8 @@ class PYBIND11_EXPORT CacheItem : public virtual Object std::shared_ptr shared_from_this(); std::shared_ptr shared_from_this() const; + virtual int64_t GetSizeInBytes() const = 0; + bool isInUse() const; protected: @@ -54,15 +57,34 @@ class ExternalCacheItem : public CacheItem public: ExternalCacheItem(std::shared_ptr obj_) : obj(obj_) + , m_size_inbytes(doComputeSizeInBytes()) { } + int64_t GetSizeInBytes() const override + { + // m_size_inbytes == -1 indicates failure case and value has not been computed yet + NVCV_ASSERT(m_size_inbytes != -1 + && "ExternalCacheItem has m_size_inbytes == -1, ie m_size_inbytes has not been correctly set"); + return m_size_inbytes; + } + std::shared_ptr obj; const IKey &key() const override { return obj->key(); } + +private: + int64_t doComputeSizeInBytes() + { + // ExternalCacheItems (CacheItems outside of nvcv, eg. operators from cvcuda) will not pollute the + // Cache, thus for now we say they've no impact on the Cache + return 0; + } + + int64_t m_size_inbytes = -1; }; class PYBIND11_EXPORT Cache @@ -100,7 +122,11 @@ class PYBIND11_EXPORT Cache } void clear(); - size_t size(); + size_t size() const; + + void setCacheLimit(int64_t new_cache_limit); + int64_t getCacheLimit() const; + int64_t getCurrentSizeInBytes(); private: struct Impl; @@ -108,7 +134,9 @@ class PYBIND11_EXPORT Cache Cache(); - void doIterateThroughItems(const std::function &fn) const; + void doIterateThroughItems(const std::function &fn) const; + int64_t doCurrentSizeInBytes() const; + int64_t doGetCacheLimit() const; }; } // namespace nvcvpy::priv diff --git a/python/mod_nvcv/Container.cpp b/python/mod_nvcv/Container.cpp index 29a0895e8..bdf8089f2 100644 --- a/python/mod_nvcv/Container.cpp +++ b/python/mod_nvcv/Container.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -31,7 +31,7 @@ std::shared_ptr Container::shared_from_this() const void Container::Export(py::module &m) { - py::class_, Resource> cont(m, "Container"); + py::class_, Resource, CacheItem> cont(m, "Container"); py::class_, Resource> extcont( nullptr, "ExternalContainer", py::module_local()); diff --git a/python/mod_nvcv/Container.hpp b/python/mod_nvcv/Container.hpp index 732ca94e4..ecf7e0c17 100644 --- a/python/mod_nvcv/Container.hpp +++ b/python/mod_nvcv/Container.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,6 +21,7 @@ #include "Cache.hpp" #include "Resource.hpp" +#include #include #include @@ -47,12 +48,30 @@ class ExternalContainer : public Container public: explicit ExternalContainer(nvcvpy::Container &extCont) : m_extCont(extCont) + , m_size_inbytes{doComputeSizeInBytes()} { } + int64_t GetSizeInBytes() const override + { + // m_size_inbytes == -1 indicates failure case and value has not been computed yet + NVCV_ASSERT(m_size_inbytes != -1 + && "ExternalContainer has m_size_inbytes == -1, ie m_size_inbytes has not been correctly set"); + return m_size_inbytes; + } + private: nvcvpy::Container &m_extCont; + int64_t doComputeSizeInBytes() + { + // ExternalCacheItems (CacheItems outside of nvcv, eg. operators from cvcuda) will not pollute the + // Cache, thus for now we say they've no impact on the Cache + return 0; + } + + int64_t m_size_inbytes = -1; + const IKey &key() const override { return m_extCont.key(); diff --git a/python/mod_nvcv/DataType.cpp b/python/mod_nvcv/DataType.cpp index 10baef79e..a18e11946 100644 --- a/python/mod_nvcv/DataType.cpp +++ b/python/mod_nvcv/DataType.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,8 +19,8 @@ #include #include +#include #include -#include #include #include diff --git a/python/mod_nvcv/Definitions.hpp b/python/mod_nvcv/Definitions.hpp new file mode 100644 index 000000000..dc29862e9 --- /dev/null +++ b/python/mod_nvcv/Definitions.hpp @@ -0,0 +1,27 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NVCV_PYTHON_PRIV_DEFINITIONS_HPP +#define NVCV_PYTHON_PRIV_DEFINITIONS_HPP + +namespace nvcvpy::priv { + +constexpr const char *INTERNAL_SUBMODULE_NAME = "internal"; + +} + +#endif // NVCV_PYTHON_PRIV_DEFINITIONS_HPP diff --git a/python/mod_nvcv/Image.cpp b/python/mod_nvcv/Image.cpp index 15ffe6730..7eada748e 100644 --- a/python/mod_nvcv/Image.cpp +++ b/python/mod_nvcv/Image.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -452,14 +452,21 @@ nvcv::ImageDataStridedHost CreateNVCVImageDataHost(const std::vector(size), std::get<1>(size)}, fmt, nullptr /* allocator */, - rowAlign == 0 ? 
nvcv::MemAlignment{} : nvcv::MemAlignment{}.rowAddr(rowAlign)) - , m_key{size, fmt} { + nvcv::MemAlignment bufAlign = rowAlign == 0 ? nvcv::MemAlignment{} : nvcv::MemAlignment{}.rowAddr(rowAlign); + NVCVImageRequirements reqs; + + nvcvImageCalcRequirements(std::get<0>(size), std::get<1>(size), fmt, bufAlign.baseAddr(), bufAlign.rowAddr(), + &reqs); + + m_impl = nvcv::Image(reqs, nullptr /* allocator */); + m_key = Key{size, fmt}; + m_size_inbytes = doComputeSizeInBytes(reqs); } Image::Image(std::vector> bufs, const nvcv::ImageDataStridedCuda &imgData) : m_key{} // it's a wrap! + , m_size_inbytes{doComputeSizeInBytes(NVCVImageRequirements())} { m_wrapData.emplace(); @@ -472,8 +479,14 @@ Image::Image(std::vector bufs, const nvcv::ImageDataStridedHost &hos // We'll create a regular image and copy the host data into it. // Create the image with same size and format as host data - m_impl = nvcv::Image(hostData.size(), hostData.format(), nullptr /* allocator */, - nvcv::MemAlignment{}.rowAddr(rowAlign)); + nvcv::MemAlignment bufAlign = nvcv::MemAlignment{}.rowAddr(rowAlign); + NVCVImageRequirements reqs; + + nvcvImageCalcRequirements(hostData.size().w, hostData.size().h, hostData.format(), bufAlign.baseAddr(), + bufAlign.rowAddr(), &reqs); + + m_impl = nvcv::Image(reqs, nullptr /* allocator */); + m_size_inbytes = doComputeSizeInBytes(reqs); auto devData = *m_impl.exportData(); NVCV_ASSERT(hostData.format() == devData.format()); @@ -499,6 +512,20 @@ Image::Image(std::vector bufs, const nvcv::ImageDataStridedHost &hos }; } +int64_t Image::doComputeSizeInBytes(const NVCVImageRequirements &reqs) +{ + int64_t size_inbytes; + util::CheckThrow(nvcvMemRequirementsCalcTotalSizeBytes(&(reqs.mem.cudaMem), &size_inbytes)); + return size_inbytes; +} + +int64_t Image::GetSizeInBytes() const +{ + // m_size_inbytes == -1 indicates failure case and value has not been computed yet + NVCV_ASSERT(m_size_inbytes != -1 && "Image has m_size_inbytes == -1, ie m_size_inbytes has not been correctly set"); + return m_size_inbytes; +} + std::shared_ptr Image::shared_from_this() { return std::static_pointer_cast(Container::shared_from_this()); diff --git a/python/mod_nvcv/Image.hpp b/python/mod_nvcv/Image.hpp index 4bf10a887..bd2a25eb1 100644 --- a/python/mod_nvcv/Image.hpp +++ b/python/mod_nvcv/Image.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -59,6 +59,8 @@ class Image final : public Container int32_t height() const; nvcv::ImageFormat format() const; + int64_t GetSizeInBytes() const override; + friend std::ostream &operator<<(std::ostream &out, const Image &img); nvcv::Image &impl() @@ -108,10 +110,13 @@ class Image final : public Container explicit Image(std::vector> buf, const nvcv::ImageDataStridedCuda &imgData); explicit Image(std::vector buf, const nvcv::ImageDataStridedHost &imgData, int rowalign); + int64_t doComputeSizeInBytes(const NVCVImageRequirements &reqs); + void setWrapData(std::vector> buf, const nvcv::ImageDataStridedCuda &imgData); nvcv::Image m_impl; // must come before m_key Key m_key; + int64_t m_size_inbytes = -1; struct WrapData { diff --git a/python/mod_nvcv/ImageBatch.cpp b/python/mod_nvcv/ImageBatch.cpp index 38c838093..15dfcafca 100644 --- a/python/mod_nvcv/ImageBatch.cpp +++ b/python/mod_nvcv/ImageBatch.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,6 +22,7 @@ #include "Image.hpp" #include +#include namespace nvcvpy::priv { @@ -83,10 +84,26 @@ std::shared_ptr ImageBatchVarShape::WrapExternalBufferVector ImageBatchVarShape::ImageBatchVarShape(int capacity) : m_key(capacity) , m_impl(capacity) + , m_size_inbytes(doComputeSizeInBytes(nvcv::ImageBatchVarShape::CalcRequirements(capacity))) { m_list.reserve(capacity); } +int64_t ImageBatchVarShape::doComputeSizeInBytes(const NVCVImageBatchVarShapeRequirements &reqs) +{ + int64_t size_inbytes; + util::CheckThrow(nvcvMemRequirementsCalcTotalSizeBytes(&(reqs.mem.cudaMem), &size_inbytes)); + return size_inbytes; +} + +int64_t ImageBatchVarShape::GetSizeInBytes() const +{ + // m_size_inbytes == -1 indicates failure case and value has not been computed yet + NVCV_ASSERT(m_size_inbytes != -1 + && "ImageBatchVarShape has m_size_inbytes == -1, ie m_size_inbytes has not been correctly set"); + return m_size_inbytes; +} + const nvcv::ImageBatchVarShape &ImageBatchVarShape::impl() const { return m_impl; diff --git a/python/mod_nvcv/ImageBatch.hpp b/python/mod_nvcv/ImageBatch.hpp index ffa58d865..9c7f15cdd 100644 --- a/python/mod_nvcv/ImageBatch.hpp +++ b/python/mod_nvcv/ImageBatch.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -54,6 +54,8 @@ class ImageBatchVarShape : public Container int32_t numImages() const; Size2D maxSize() const; + int64_t GetSizeInBytes() const override; + void pushBack(Image &img); void pushBackMany(const std::vector> &imgList); void popBack(int imgCount); @@ -84,9 +86,13 @@ class ImageBatchVarShape : public Container private: explicit ImageBatchVarShape(int capacity); + + int64_t doComputeSizeInBytes(const NVCVImageBatchVarShapeRequirements &reqs); + Key m_key; ImageList m_list; nvcv::ImageBatchVarShape m_impl; + int64_t m_size_inbytes = -1; }; } // namespace nvcvpy::priv diff --git a/python/mod_nvcv/Main.cpp b/python/mod_nvcv/Main.cpp index 18d0a4485..10b7979dd 100644 --- a/python/mod_nvcv/Main.cpp +++ b/python/mod_nvcv/Main.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,6 +20,7 @@ #include "ColorSpec.hpp" #include "Container.hpp" #include "DataType.hpp" +#include "Definitions.hpp" #include "ExternalBuffer.hpp" #include "Image.hpp" #include "ImageBatch.hpp" @@ -41,6 +42,22 @@ PYBIND11_MODULE(nvcv, m) using namespace nvcvpy::priv; + // Submodule used for additional functionality needed only by tests + // so that some level of white-box testing is possible. + // + // This guarantees a clear separation of public and private APIs. + // Users are restricted to the public API, allowing us to change the + // private APIs as needed, without worrying about breaking users' code. + // + // To retrieve it from inside the Export call, include "Definitions.hpp" + // and call: + // py::module_ internal = m.attr(INTERNAL_SUBMODULE_NAME); + // Functions and other properties can then be exposed as usual, e.g. + // internal.def("foo", &Foo); + // and accessed in Python as you'd expect: + // nvcv.internal.foo() + m.def_submodule(INTERNAL_SUBMODULE_NAME); + // These will be destroyed in the reverse order here // Since everything is ref counted the order should not matter // but it is safer to init them in order + // Core entities ExportCAPI(m); Resource::Export(m); + Cache::Export(m); Container::Export(m); ExternalBuffer::Export(m); @@ -63,10 +81,14 @@ PYBIND11_MODULE(nvcv, m) Image::Export(m); ImageBatchVarShape::Export(m); - // Cache and Streams - Cache::Export(m); + // Streams { py::module_ cuda = m.def_submodule("cuda"); + // cuda submodule also has its own internal submodule to export internal utilities. + // The code in the export calls below might expect it, as it is unaware that + // its functionality is not being defined directly under the "nvcv" module. + cuda.def_submodule(INTERNAL_SUBMODULE_NAME); + Stream::Export(cuda); } } diff --git a/python/mod_nvcv/Rect.cpp b/python/mod_nvcv/Rect.cpp index 01e6bc003..fb461e5b5 100644 --- a/python/mod_nvcv/Rect.cpp +++ b/python/mod_nvcv/Rect.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
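A short sketch of the Python surface created by the submodule registration above (assuming the nvcv module built from this patch; only helpers explicitly registered by an Export() call exist under these submodules):

    import nvcv

    # Test-only helpers registered via m.attr(INTERNAL_SUBMODULE_NAME) land here:
    print(nvcv.internal)        # e.g. nvcv.internal.nbytes_in_cache(item), from Cache::Export
    print(nvcv.cuda.internal)   # e.g. nvcv.cuda.internal.syncAuxStream(), from Stream::Export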
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_nvcv/Resource.cpp b/python/mod_nvcv/Resource.cpp index 5aea1d631..b809e3e70 100644 --- a/python/mod_nvcv/Resource.cpp +++ b/python/mod_nvcv/Resource.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -89,7 +89,7 @@ std::shared_ptr Resource::shared_from_this() const void Resource::Export(py::module &m) { - py::class_>(m, "Resource") + py::class_>(m, "Resource", "Resource") .def_property_readonly("id", &Resource::id, "Unique resource instance identifier") .def("submitStreamSync", &Resource::submitSync, "Syncs object on new Stream"); } diff --git a/python/mod_nvcv/Resource.hpp b/python/mod_nvcv/Resource.hpp index f66b4b5ac..0936ae1c1 100644 --- a/python/mod_nvcv/Resource.hpp +++ b/python/mod_nvcv/Resource.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_nvcv/Stream.cpp b/python/mod_nvcv/Stream.cpp index 39afa8203..f0bf18d81 100644 --- a/python/mod_nvcv/Stream.cpp +++ b/python/mod_nvcv/Stream.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,6 +18,7 @@ #include "Stream.hpp" #include "Cache.hpp" +#include "Definitions.hpp" #include "StreamStack.hpp" #include @@ -32,6 +33,7 @@ namespace nvcvpy::priv { cudaStream_t Stream::m_auxStream = nullptr; std::atomic Stream::m_instanceCount = 0; std::mutex Stream::m_auxStreamMutex; +std::mutex Stream::m_gcMutex; // Here we define the representation of external cuda streams. // It defines pybind11's type casters from the python object @@ -197,6 +199,7 @@ std::shared_ptr Stream::Create() Stream::Stream() : m_owns(true) + , m_size_inbytes(doComputeSizeInBytes()) { try { @@ -259,6 +262,12 @@ cudaStream_t &Stream::GetAuxStream() return m_auxStream; } +void Stream::SyncAuxStream() +{ + cudaStream_t auxStream = GetAuxStream(); + util::CheckThrow(cudaStreamSynchronize(auxStream)); +} + Stream::~Stream() { destroy(); @@ -291,6 +300,20 @@ void Stream::destroy() } } +int64_t Stream::doComputeSizeInBytes() +{ + // We only cache the stream's handles, which are 8 byte on CPU memory, hence 0 bytes gpu memory. 
+ return 0; +} + +int64_t Stream::GetSizeInBytes() const +{ + // m_size_inbytes == -1 indicates failure case and value has not been computed yet + NVCV_ASSERT(m_size_inbytes != -1 + && "Stream has m_size_inbytes == -1, ie m_size_inbytes has not been correctly set"); + return m_size_inbytes; +} + std::shared_ptr Stream::shared_from_this() { return std::dynamic_pointer_cast(Object::shared_from_this()); @@ -334,17 +357,29 @@ void Stream::deactivate(py::object exc_type, py::object exc_value, py::object ex StreamStack::Instance().pop(); } -void Stream::holdResources(LockResources usedResources) +// Stores the data held by a cuda host callback function in a cuda stream. +// It's used for: +// - Extending the lifetime of the objects it contains until they aren't needed +// by any future cuda kernels in the stream. +struct Stream::HostFunctionClosure { - struct HostFunctionClosure - { - // Also hold the stream reference so that it isn't destroyed before the processing is done. - std::shared_ptr stream; - LockResources resources; - }; + // Also hold the stream reference so that it isn't destroyed before the processing is done. + std::shared_ptr stream; + LockResources resources; +}; +void Stream::holdResources(LockResources usedResources) +{ if (!usedResources.empty()) { + // Looks like a good place to clear the gc bag, as every time we create + // a new closure that eventually gets added to the bag, we empty it. + // The bag shouldn't grow without bound. + // Calling it before allocating a new closure just avoids having two + // closures not inside a cuda stream that are simultaneously alive, but + // in practice it doesn't seem to matter much. + ClearGCBag(); + auto closure = std::make_unique(); closure->stream = this->shared_from_this(); @@ -352,8 +387,9 @@ void Stream::holdResources(LockResources usedResources) auto fn = [](cudaStream_t stream, cudaError_t error, void *userData) -> void { - auto *pclosure = reinterpret_cast(userData); - delete pclosure; + std::unique_ptr pclosure(reinterpret_cast(userData)); + NVCV_ASSERT(pclosure != nullptr); + AddToGCBag(std::move(pclosure)); }; // If we naively execute the callback in the main stream (m_handle), the GPU will wait until the callback @@ -379,15 +415,74 @@ void Stream::holdResources(LockResources usedResources) util::CheckThrow(cudaEventRecord(m_event, m_handle)); // add async record the event in the main stream util::CheckThrow( cudaStreamWaitEvent(GetAuxStream(), m_event)); // add async wait for the event in the aux stream + + // cudaStreamAddCallback pushes a task to the given stream, which at some point (asynchronously) calls + // the given callback (fn), passing to it the closure we created, among other stream states. + // When fn is executed, the refcnt of all objects that the closure holds will eventually be decremented, which + // will trigger their deletion if refcnt==0. This effectively extends the objects' lifetime until + // all tasks that refer to them are finished. + + The callback will be executed in the singleton aux stream; there may be contention with other callbacks and waitEvents from // other streams. However the callback is used to release resources from the cache and should not be a performance bottleneck. // This avoids opening a new aux stream for each stream object. + + // NOTE: cudaStreamAddCallback is slated for deprecation, without a proper replacement (for now). + // The other option we could use is cudaLaunchHostFunc, but it doesn't guarantee that the callback + // will be called.
We need this guarantee to make sure the object's refcount is eventually decremented, + and the closure is freed, avoiding memory leaks. + cudaLaunchHostFunc won't call the callback if the current cuda context is in error state, for instance. + Ref: CUDA SDK docs for both functions. util::CheckThrow( cudaStreamAddCallback(GetAuxStream(), fn, closure.get(), 0)); // add async callback in the aux stream closure.release(); } } +Stream::GCBag &Stream::GetGCBag() +{ + // By defining the gcBag inside this function instead of the global scope, + // we guarantee that it'll be destroyed *before* the global python context + // is destroyed. This is due to this function being called the first time + // (via AddToGCBag or ClearGCBag) only after the python script (and python + // ctx) has already started. + static GCBag gcBag; + return gcBag; +} + +void Stream::AddToGCBag(std::unique_ptr closure) +{ + std::unique_lock lk(m_gcMutex); + GetGCBag().push_back(std::move(closure)); +} + +void Stream::ClearGCBag() +{ + GCBag objectsToBeDestroyed; + + GCBag &gcBag = GetGCBag(); + + std::unique_lock lk(m_gcMutex); + // Do as little as possible while the mutex is locked to avoid + // deadlocks. + + // In the case here, instead of simply emptying the gc bag, + // which might trigger object destruction while the mutex is locked, + // we move its contents to a temporary local bag. + + // take benefit of ADL if available + using std::swap; + swap(objectsToBeDestroyed, gcBag); + + // Now the original bag is left empty, but no objects were + // destroyed yet. + NVCV_ASSERT(gcBag.empty()); // post-condition (can't be guaranteed after unlock) + + lk.unlock(); + + // Let the local object bag go out of scope, the objects in it + // will be finally destroyed with the mutex unlocked. +} + std::ostream &operator<<(std::ostream &out, const Stream &stream) { return out << ", CacheItem> stream(m, "Stream"); stream .def_property_readonly_static( @@ -409,6 +504,9 @@ void Stream::Export(py::module &m) "Get the current CUDA stream for this thread.") .def(py::init(&Stream::Create), "Create a new CUDA stream."); + py::module_ internal = m.attr(INTERNAL_SUBMODULE_NAME); + internal.def("syncAuxStream", &SyncAuxStream); + // Create the global stream object by wrapping cuda stream 0. // It'll be destroyed when python module is deinitialized. static priv::ExternalStream cudaDefaultStream((cudaStream_t)0); @@ -443,6 +541,7 @@ void Stream::Export(py::module &m) stream->sync(); } globalStream->sync(); + Stream::SyncAuxStream(); // There should only be 1 stream in the stack, namely the // global stream. @@ -457,11 +556,16 @@ void Stream::Export(py::module &m) { StreamStack::Instance().pop(); } + + // Make sure the gc bag is also cleaned up *after* all streams are done, + // then we know all remaining items that need to be GC'd are in the bag. + Stream::ClearGCBag(); } catch (const std::exception &e) { //Do nothing here this can happen if someone closes the cuda context prior to exit. - std::cerr << "Warning CVCUDA cleanup may be incomplete due to: " << e.what() << "\n"; + std::cerr << "Warning CVCUDA cleanup may be incomplete due to: " << e.what() + << std::endl; } }); } diff --git a/python/mod_nvcv/Stream.hpp b/python/mod_nvcv/Stream.hpp index 2dcceb726..5fd307b33 100644 --- a/python/mod_nvcv/Stream.hpp +++ b/python/mod_nvcv/Stream.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -63,6 +63,8 @@ class PYBIND11_EXPORT Stream : public CacheItem void holdResources(LockResources usedResources); + int64_t GetSizeInBytes() const override; + void sync(); cudaStream_t handle() const; @@ -77,6 +79,8 @@ class PYBIND11_EXPORT Stream : public CacheItem Stream(Stream &&) = delete; Stream(); + int64_t doComputeSizeInBytes(); + // Singleton access to the auxiliary CUDA stream class Key final : public IKey @@ -98,6 +102,11 @@ class PYBIND11_EXPORT Stream : public CacheItem cudaStream_t m_handle = nullptr; cudaEvent_t m_event = nullptr; py::object m_wrappedObj; + int64_t m_size_inbytes = -1; + + // TODO: these don't have to be static members, but simply defined + // as local entities in Stream.cpp, thereby minimizing code coupling and + // unnecessary rebuilds. //singleton aux stream and protection. this a a bit overkill //for now as python is single threaded, but it is a good practice @@ -108,6 +117,24 @@ class PYBIND11_EXPORT Stream : public CacheItem static void incrementInstanceCount(); static int decrementInstanceCount(); static cudaStream_t &GetAuxStream(); + static void SyncAuxStream(); + + // Adds the object to the garbage-collector's bag to delay its destruction + // until it's safe to destroy it. + // Safe here means: not from a thread that is processing tasks in a cuda stream, + // i.e., not inside the callback given to cudaStreamAddCallback. If this happens, + // cuda calls will be made from within the callback, and CUDA docs prohibit it. + struct HostFunctionClosure; + static void AddToGCBag(std::unique_ptr obj); + + // Clear the garbage-collector's bag. It's supposed to be called by + // functions that + static void ClearGCBag(); + + using GCBag = std::vector>; + static std::mutex m_gcMutex; + + static GCBag &GetGCBag(); }; } // namespace nvcvpy::priv diff --git a/python/mod_nvcv/Tensor.cpp b/python/mod_nvcv/Tensor.cpp index 99d01cc57..dab8d49f4 100644 --- a/python/mod_nvcv/Tensor.cpp +++ b/python/mod_nvcv/Tensor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -207,12 +207,14 @@ std::shared_ptr Tensor::Reshape(Shape shape, std::optional Tensor::shared_from_this() { return std::static_pointer_cast(Container::shared_from_this()); diff --git a/python/mod_nvcv/Tensor.hpp b/python/mod_nvcv/Tensor.hpp index 87f1b7bad..6c8bf4bfa 100644 --- a/python/mod_nvcv/Tensor.hpp +++ b/python/mod_nvcv/Tensor.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
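Tensor::GetSizeInBytes feeds the cache accounting introduced above; from Python the per-item figure can be inspected through the test-only helper. A sketch assuming the nvcv module built from this patch (the tensor constructor arguments are illustrative only):

    import numpy as np
    import nvcv

    # Hypothetical NHWC u8 tensor; any tensor created through nvcv works the same way.
    t = nvcv.Tensor((1, 64, 64, 3), np.uint8, nvcv.TensorLayout.NHWC)
    print(nvcv.internal.nbytes_in_cache(t))    # bytes this cache item accounts for
    print(nvcv.current_cache_size_inbytes())   # should include the tensor created above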
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -86,15 +86,19 @@ class Tensor : public Container py::object cuda() const; + int64_t GetSizeInBytes() const override; + private: Tensor(const nvcv::Tensor::Requirements &reqs); Tensor(const nvcv::TensorData &data, py::object wrappedObject); Tensor(Image &img); Tensor(nvcv::Tensor &&tensor); - // m_impl must come before m_key - nvcv::Tensor m_impl; + int64_t doComputeSizeInBytes(const nvcv::Tensor::Requirements &reqs); + + nvcv::Tensor m_impl; // must come before m_key Key m_key; + int64_t m_size_inbytes = -1; mutable py::object m_cacheExternalObject; mutable std::optional m_cacheExternalObjectLayout; diff --git a/python/mod_nvcv/TensorBatch.cpp b/python/mod_nvcv/TensorBatch.cpp index 2e838c735..8baeaada8 100644 --- a/python/mod_nvcv/TensorBatch.cpp +++ b/python/mod_nvcv/TensorBatch.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,6 +23,7 @@ #include "Tensor.hpp" #include +#include namespace nvcvpy::priv { @@ -81,10 +82,26 @@ std::shared_ptr TensorBatch::WrapExternalBufferVector(std::vector

dtype() const; std::optional layout() const; + int64_t GetSizeInBytes() const override; + void pushBack(Tensor &tensor); void pushBackMany(std::vector> &tensorList); void popBack(int tensorCount); @@ -86,9 +88,13 @@ class TensorBatch : public Container private: TensorBatch(int capacity); + + int64_t doComputeSizeInBytes(const NVCVTensorBatchRequirements &reqs); + Key m_key; nvcv::TensorBatch m_impl; TensorList m_list; + int64_t m_size_inbytes = -1; }; } // namespace nvcvpy::priv diff --git a/python/mod_nvcv/include/nvcv/python/Array.hpp b/python/mod_nvcv/include/nvcv/python/Array.hpp index e80a6a5e8..47af49372 100644 --- a/python/mod_nvcv/include/nvcv/python/Array.hpp +++ b/python/mod_nvcv/include/nvcv/python/Array.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_nvcv/include/nvcv/python/CAPI.hpp b/python/mod_nvcv/include/nvcv/python/CAPI.hpp index 612d6583f..9f0199baf 100644 --- a/python/mod_nvcv/include/nvcv/python/CAPI.hpp +++ b/python/mod_nvcv/include/nvcv/python/CAPI.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_nvcv/include/nvcv/python/Cache.hpp b/python/mod_nvcv/include/nvcv/python/Cache.hpp index 8ef8017e2..022289b8c 100644 --- a/python/mod_nvcv/include/nvcv/python/Cache.hpp +++ b/python/mod_nvcv/include/nvcv/python/Cache.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_nvcv/include/nvcv/python/Container.hpp b/python/mod_nvcv/include/nvcv/python/Container.hpp index b774420d1..315b9ab99 100644 --- a/python/mod_nvcv/include/nvcv/python/Container.hpp +++ b/python/mod_nvcv/include/nvcv/python/Container.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_nvcv/include/nvcv/python/DataType.hpp b/python/mod_nvcv/include/nvcv/python/DataType.hpp index c7d6bca62..ba164e61f 100644 --- a/python/mod_nvcv/include/nvcv/python/DataType.hpp +++ b/python/mod_nvcv/include/nvcv/python/DataType.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_nvcv/include/nvcv/python/Image.hpp b/python/mod_nvcv/include/nvcv/python/Image.hpp index 58b15dc30..98b418a63 100644 --- a/python/mod_nvcv/include/nvcv/python/Image.hpp +++ b/python/mod_nvcv/include/nvcv/python/Image.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_nvcv/include/nvcv/python/ImageBatchVarShape.hpp b/python/mod_nvcv/include/nvcv/python/ImageBatchVarShape.hpp index 24bf0d2f0..994c7a0da 100644 --- a/python/mod_nvcv/include/nvcv/python/ImageBatchVarShape.hpp +++ b/python/mod_nvcv/include/nvcv/python/ImageBatchVarShape.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_nvcv/include/nvcv/python/ImageFormat.hpp b/python/mod_nvcv/include/nvcv/python/ImageFormat.hpp index 366f5a011..ea22ee29a 100644 --- a/python/mod_nvcv/include/nvcv/python/ImageFormat.hpp +++ b/python/mod_nvcv/include/nvcv/python/ImageFormat.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_nvcv/include/nvcv/python/LockMode.hpp b/python/mod_nvcv/include/nvcv/python/LockMode.hpp index 571b10126..154bad6d5 100644 --- a/python/mod_nvcv/include/nvcv/python/LockMode.hpp +++ b/python/mod_nvcv/include/nvcv/python/LockMode.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_nvcv/include/nvcv/python/ResourceGuard.hpp b/python/mod_nvcv/include/nvcv/python/ResourceGuard.hpp index 0be46ff66..889584fdc 100644 --- a/python/mod_nvcv/include/nvcv/python/ResourceGuard.hpp +++ b/python/mod_nvcv/include/nvcv/python/ResourceGuard.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_nvcv/include/nvcv/python/Stream.hpp b/python/mod_nvcv/include/nvcv/python/Stream.hpp index 38ef51700..9d06c8644 100644 --- a/python/mod_nvcv/include/nvcv/python/Stream.hpp +++ b/python/mod_nvcv/include/nvcv/python/Stream.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_nvcv/include/nvcv/python/Tensor.hpp b/python/mod_nvcv/include/nvcv/python/Tensor.hpp index e3ba66053..7c0c61f7f 100644 --- a/python/mod_nvcv/include/nvcv/python/Tensor.hpp +++ b/python/mod_nvcv/include/nvcv/python/Tensor.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/mod_nvcv/include/nvcv/python/TensorBatch.hpp b/python/mod_nvcv/include/nvcv/python/TensorBatch.hpp index c6bf604b4..3729cae7a 100644 --- a/python/mod_nvcv/include/nvcv/python/TensorBatch.hpp +++ b/python/mod_nvcv/include/nvcv/python/TensorBatch.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/classification/CMakeLists.txt b/samples/classification/CMakeLists.txt index a74715d35..a051a0c42 100644 --- a/samples/classification/CMakeLists.txt +++ b/samples/classification/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/classification/ClassificationUtils.hpp b/samples/classification/ClassificationUtils.hpp index 3e98771ee..8843f2e91 100644 --- a/samples/classification/ClassificationUtils.hpp +++ b/samples/classification/ClassificationUtils.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -133,21 +133,21 @@ int ParseArgs(int argc, char *argv[], std::string &modelPath, std::string &image if (!modelFile.good()) { showUsage(); - std::cerr << "Model path '" + modelPath + "' does not exist\n"; + std::cerr << "Model path '" + modelPath + "' does not exist" << std::endl; return -1; } std::ifstream imageFile(imagePath); if (!imageFile.good()) { showUsage(); - std::cerr << "Image path '" + modelPath + "' does not exist\n"; + std::cerr << "Image path '" + imagePath + "' does not exist" << std::endl; return -1; } std::ifstream labelFile(labelPath); if (!labelFile.good()) { showUsage(); - std::cerr << "Label path '" + modelPath + "' does not exist\n"; + std::cerr << "Label path '" + labelPath + "' does not exist" << std::endl; return -1; } return 0; diff --git a/samples/classification/Main.cpp b/samples/classification/Main.cpp index 073c1716f..0e89e982e 100644 --- a/samples/classification/Main.cpp +++ b/samples/classification/Main.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -248,7 +248,7 @@ int main(int argc, char *argv[]) auto numBindings = trtBackend->getBlobCount(); if (numBindings != 2) { - std::cerr << "Number of bindings should be 2\n"; + std::cerr << "Number of bindings should be 2" << std::endl; return -1; } diff --git a/samples/classification/python/main.py b/samples/classification/python/main.py index cae6131e0..e29f4c74b 100644 --- a/samples/classification/python/main.py +++ b/samples/classification/python/main.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/CMakeLists.txt b/samples/common/CMakeLists.txt index a114213e8..1a93da47d 100644 --- a/samples/common/CMakeLists.txt +++ b/samples/common/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/python/perf_utils.py b/samples/common/python/perf_utils.py index 1d854337f..282e93656 100644 --- a/samples/common/python/perf_utils.py +++ b/samples/common/python/perf_utils.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/common/python/vpf_utils.py b/samples/common/python/vpf_utils.py index d688d7944..94311b7fa 100644 --- a/samples/common/python/vpf_utils.py +++ b/samples/common/python/vpf_utils.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/cropandresize/CMakeLists.txt b/samples/cropandresize/CMakeLists.txt index 3e09936f5..f45d7eee2 100644 --- a/samples/cropandresize/CMakeLists.txt +++ b/samples/cropandresize/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/cropandresize/Main.cpp b/samples/cropandresize/Main.cpp index 2776d8500..f22096af8 100644 --- a/samples/cropandresize/Main.cpp +++ b/samples/cropandresize/Main.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -83,7 +83,7 @@ int ParseArgs(int argc, char *argv[], std::string &imagePath, uint32_t &batchSiz if (!imageFile.good()) { showUsage(); - std::cerr << "Image path '" + imagePath + "' does not exist\n"; + std::cerr << "Image path '" + imagePath + "' does not exist" << std::endl; return -1; } return 0; diff --git a/samples/label/python/main.py b/samples/label/python/main.py index cb664f690..ec9aac181 100644 --- a/samples/label/python/main.py +++ b/samples/label/python/main.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/object_detection/python/main.py b/samples/object_detection/python/main.py index 0741ea2c8..1a3f3b1bf 100644 --- a/samples/object_detection/python/main.py +++ b/samples/object_detection/python/main.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/scripts/benchmark.py b/samples/scripts/benchmark.py index 57507f2e6..388c4b3cb 100644 --- a/samples/scripts/benchmark.py +++ b/samples/scripts/benchmark.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/scripts/benchmark_samples.sh b/samples/scripts/benchmark_samples.sh index 669db058e..5e43df461 100755 --- a/samples/scripts/benchmark_samples.sh +++ b/samples/scripts/benchmark_samples.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/scripts/build_samples.sh b/samples/scripts/build_samples.sh index b54d07ff3..ace7fd1de 100755 --- a/samples/scripts/build_samples.sh +++ b/samples/scripts/build_samples.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/scripts/requirements.txt b/samples/scripts/requirements.txt index f5a6af782..a294be171 100644 --- a/samples/scripts/requirements.txt +++ b/samples/scripts/requirements.txt @@ -1,3 +1,13 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + torch==2.2.0 torchvision==0.17.0 onnx==1.15.0 diff --git a/samples/segmentation/python/main.py b/samples/segmentation/python/main.py index 6ee5411a5..6d896ab82 100644 --- a/samples/segmentation/python/main.py +++ b/samples/segmentation/python/main.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/segmentation/python/model_inference.py b/samples/segmentation/python/model_inference.py index 84a3ee538..8f52d6468 100644 --- a/samples/segmentation/python/model_inference.py +++ b/samples/segmentation/python/model_inference.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/samples/segmentation/python/triton_client.py b/samples/segmentation/python/triton_client.py index d6eff764d..106934fcc 100644 --- a/samples/segmentation/python/triton_client.py +++ b/samples/segmentation/python/triton_client.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 09987ca83..c4b4001d9 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,6 +21,5 @@ if(LTO_ENABLED) set(CMAKE_INTERPROCEDURAL_OPTIMIZATION on) endif() -add_subdirectory(util) -add_subdirectory(nvcv_types) +add_subdirectory(nvcv) add_subdirectory(cvcuda) diff --git a/src/cvcuda/CMakeLists.txt b/src/cvcuda/CMakeLists.txt index b2b3b1245..cd892a53e 100644 --- a/src/cvcuda/CMakeLists.txt +++ b/src/cvcuda/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +add_subdirectory(util) + # cvcuda private implementation add_subdirectory(priv) diff --git a/src/cvcuda/OpAdaptiveThreshold.cpp b/src/cvcuda/OpAdaptiveThreshold.cpp index bcaf95da8..6a96bf2b1 100644 --- a/src/cvcuda/OpAdaptiveThreshold.cpp +++ b/src/cvcuda/OpAdaptiveThreshold.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpAdvCvtColor.cpp b/src/cvcuda/OpAdvCvtColor.cpp index 28431a898..161f0d739 100644 --- a/src/cvcuda/OpAdvCvtColor.cpp +++ b/src/cvcuda/OpAdvCvtColor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpAverageBlur.cpp b/src/cvcuda/OpAverageBlur.cpp index 6661e4007..e4ec7fbb7 100644 --- a/src/cvcuda/OpAverageBlur.cpp +++ b/src/cvcuda/OpAverageBlur.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpBilateralFilter.cpp b/src/cvcuda/OpBilateralFilter.cpp index 9483bdd9b..8759f7787 100644 --- a/src/cvcuda/OpBilateralFilter.cpp +++ b/src/cvcuda/OpBilateralFilter.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpBndBox.cpp b/src/cvcuda/OpBndBox.cpp index d94da6bc3..8783b2f27 100644 --- a/src/cvcuda/OpBndBox.cpp +++ b/src/cvcuda/OpBndBox.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpBoxBlur.cpp b/src/cvcuda/OpBoxBlur.cpp index 94414a321..ed0e0346b 100644 --- a/src/cvcuda/OpBoxBlur.cpp +++ b/src/cvcuda/OpBoxBlur.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpBrightnessContrast.cpp b/src/cvcuda/OpBrightnessContrast.cpp index dba789c78..fcfbcdf4b 100644 --- a/src/cvcuda/OpBrightnessContrast.cpp +++ b/src/cvcuda/OpBrightnessContrast.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpCenterCrop.cpp b/src/cvcuda/OpCenterCrop.cpp index c623f8095..ae5970aa8 100644 --- a/src/cvcuda/OpCenterCrop.cpp +++ b/src/cvcuda/OpCenterCrop.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpChannelReorder.cpp b/src/cvcuda/OpChannelReorder.cpp index 85df755cf..c1076b405 100644 --- a/src/cvcuda/OpChannelReorder.cpp +++ b/src/cvcuda/OpChannelReorder.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpColorTwist.cpp b/src/cvcuda/OpColorTwist.cpp index fefcaaa52..7e9e7d45e 100644 --- a/src/cvcuda/OpColorTwist.cpp +++ b/src/cvcuda/OpColorTwist.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpComposite.cpp b/src/cvcuda/OpComposite.cpp index 042b95ab5..2434a84ae 100644 --- a/src/cvcuda/OpComposite.cpp +++ b/src/cvcuda/OpComposite.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpConv2D.cpp b/src/cvcuda/OpConv2D.cpp index 8c8271b0e..e40f9e3b0 100644 --- a/src/cvcuda/OpConv2D.cpp +++ b/src/cvcuda/OpConv2D.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,7 +23,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpConvertTo.cpp b/src/cvcuda/OpConvertTo.cpp index 6646bd5d4..346acc941 100644 --- a/src/cvcuda/OpConvertTo.cpp +++ b/src/cvcuda/OpConvertTo.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpCopyMakeBorder.cpp b/src/cvcuda/OpCopyMakeBorder.cpp index 1d028f38e..15bad1f52 100644 --- a/src/cvcuda/OpCopyMakeBorder.cpp +++ b/src/cvcuda/OpCopyMakeBorder.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpCropFlipNormalizeReformat.cpp b/src/cvcuda/OpCropFlipNormalizeReformat.cpp index 4eca2f99c..5d7cbc5d3 100644 --- a/src/cvcuda/OpCropFlipNormalizeReformat.cpp +++ b/src/cvcuda/OpCropFlipNormalizeReformat.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpCustomCrop.cpp b/src/cvcuda/OpCustomCrop.cpp index 11a47da64..1ea2ea428 100644 --- a/src/cvcuda/OpCustomCrop.cpp +++ b/src/cvcuda/OpCustomCrop.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpCvtColor.cpp b/src/cvcuda/OpCvtColor.cpp index 45c564e15..ce57d4583 100644 --- a/src/cvcuda/OpCvtColor.cpp +++ b/src/cvcuda/OpCvtColor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpErase.cpp b/src/cvcuda/OpErase.cpp index f3f04432d..51c11b0e3 100644 --- a/src/cvcuda/OpErase.cpp +++ b/src/cvcuda/OpErase.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpFindHomography.cpp b/src/cvcuda/OpFindHomography.cpp index bb2d5062e..c3ded42ca 100644 --- a/src/cvcuda/OpFindHomography.cpp +++ b/src/cvcuda/OpFindHomography.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpFlip.cpp b/src/cvcuda/OpFlip.cpp index 9f9cd4806..9278673f1 100644 --- a/src/cvcuda/OpFlip.cpp +++ b/src/cvcuda/OpFlip.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpGammaContrast.cpp b/src/cvcuda/OpGammaContrast.cpp index 0efb6846d..0a3175181 100644 --- a/src/cvcuda/OpGammaContrast.cpp +++ b/src/cvcuda/OpGammaContrast.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpGaussian.cpp b/src/cvcuda/OpGaussian.cpp index 91d1a3f1d..e618caebe 100644 --- a/src/cvcuda/OpGaussian.cpp +++ b/src/cvcuda/OpGaussian.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpGaussianNoise.cpp b/src/cvcuda/OpGaussianNoise.cpp index 80da5c343..490e5edec 100644 --- a/src/cvcuda/OpGaussianNoise.cpp +++ b/src/cvcuda/OpGaussianNoise.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpHQResize.cpp b/src/cvcuda/OpHQResize.cpp index fd9c3ec28..00f283f07 100644 --- a/src/cvcuda/OpHQResize.cpp +++ b/src/cvcuda/OpHQResize.cpp @@ -23,7 +23,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpHistogram.cpp b/src/cvcuda/OpHistogram.cpp index 1e357af08..9cbc8877a 100644 --- a/src/cvcuda/OpHistogram.cpp +++ b/src/cvcuda/OpHistogram.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpHistogramEq.cpp b/src/cvcuda/OpHistogramEq.cpp index 2015cf080..3e24990c7 100644 --- a/src/cvcuda/OpHistogramEq.cpp +++ b/src/cvcuda/OpHistogramEq.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpInpaint.cpp b/src/cvcuda/OpInpaint.cpp index e56bcad33..74cb87445 100644 --- a/src/cvcuda/OpInpaint.cpp +++ b/src/cvcuda/OpInpaint.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpJointBilateralFilter.cpp b/src/cvcuda/OpJointBilateralFilter.cpp index bb256ca40..00b609e4e 100644 --- a/src/cvcuda/OpJointBilateralFilter.cpp +++ b/src/cvcuda/OpJointBilateralFilter.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpLabel.cpp b/src/cvcuda/OpLabel.cpp index 807c99e6a..d2a529f42 100644 --- a/src/cvcuda/OpLabel.cpp +++ b/src/cvcuda/OpLabel.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include CVCUDA_DEFINE_API(0, 5, NVCVStatus, cvcudaLabelCreate, (NVCVOperatorHandle * handle)) { diff --git a/src/cvcuda/OpLaplacian.cpp b/src/cvcuda/OpLaplacian.cpp index 407ff318d..69895da09 100644 --- a/src/cvcuda/OpLaplacian.cpp +++ b/src/cvcuda/OpLaplacian.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpMedianBlur.cpp b/src/cvcuda/OpMedianBlur.cpp index 76e2c7b2c..728541c6a 100644 --- a/src/cvcuda/OpMedianBlur.cpp +++ b/src/cvcuda/OpMedianBlur.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpMinAreaRect.cpp b/src/cvcuda/OpMinAreaRect.cpp index 72c02a135..e274dfca7 100644 --- a/src/cvcuda/OpMinAreaRect.cpp +++ b/src/cvcuda/OpMinAreaRect.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpMinMaxLoc.cpp b/src/cvcuda/OpMinMaxLoc.cpp index 4f9e461c4..449d7d08d 100644 --- a/src/cvcuda/OpMinMaxLoc.cpp +++ b/src/cvcuda/OpMinMaxLoc.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpMorphology.cpp b/src/cvcuda/OpMorphology.cpp index 3f194f067..f3fb6e5d6 100644 --- a/src/cvcuda/OpMorphology.cpp +++ b/src/cvcuda/OpMorphology.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpNonMaximumSuppression.cpp b/src/cvcuda/OpNonMaximumSuppression.cpp index da3d1b7d7..c94793efc 100644 --- a/src/cvcuda/OpNonMaximumSuppression.cpp +++ b/src/cvcuda/OpNonMaximumSuppression.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpNormalize.cpp b/src/cvcuda/OpNormalize.cpp index db5c50346..1a45efa43 100644 --- a/src/cvcuda/OpNormalize.cpp +++ b/src/cvcuda/OpNormalize.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpOSD.cpp b/src/cvcuda/OpOSD.cpp index 1cf34f2b4..b779bd33a 100644 --- a/src/cvcuda/OpOSD.cpp +++ b/src/cvcuda/OpOSD.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpPadAndStack.cpp b/src/cvcuda/OpPadAndStack.cpp index 5a50b2577..bcd9ecd75 100644 --- a/src/cvcuda/OpPadAndStack.cpp +++ b/src/cvcuda/OpPadAndStack.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpPairwiseMatcher.cpp b/src/cvcuda/OpPairwiseMatcher.cpp index 07b0db91b..53434d0cc 100644 --- a/src/cvcuda/OpPairwiseMatcher.cpp +++ b/src/cvcuda/OpPairwiseMatcher.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpPillowResize.cpp b/src/cvcuda/OpPillowResize.cpp index 7ba00b31a..b9f5fadab 100644 --- a/src/cvcuda/OpPillowResize.cpp +++ b/src/cvcuda/OpPillowResize.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,7 +23,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpRandomResizedCrop.cpp b/src/cvcuda/OpRandomResizedCrop.cpp index 192e98885..ac9c25581 100644 --- a/src/cvcuda/OpRandomResizedCrop.cpp +++ b/src/cvcuda/OpRandomResizedCrop.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpReformat.cpp b/src/cvcuda/OpReformat.cpp index a6d7091ed..be5b6b7af 100644 --- a/src/cvcuda/OpReformat.cpp +++ b/src/cvcuda/OpReformat.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpRemap.cpp b/src/cvcuda/OpRemap.cpp index 7bc0c63e5..9681c55b7 100644 --- a/src/cvcuda/OpRemap.cpp +++ b/src/cvcuda/OpRemap.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpResize.cpp b/src/cvcuda/OpResize.cpp index d797df584..01eeec561 100644 --- a/src/cvcuda/OpResize.cpp +++ b/src/cvcuda/OpResize.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpResizeCropConvertReformat.cpp b/src/cvcuda/OpResizeCropConvertReformat.cpp index 4da7dd2cc..97277f6d2 100644 --- a/src/cvcuda/OpResizeCropConvertReformat.cpp +++ b/src/cvcuda/OpResizeCropConvertReformat.cpp @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; @@ -41,24 +41,24 @@ CVCUDA_DEFINE_API(0, 8, NVCVStatus, cvcudaResizeCropConvertReformatCreate, (NVCV }); } -CVCUDA_DEFINE_API(0, 8, NVCVStatus, cvcudaResizeCropConvertReformatSubmit, +CVCUDA_DEFINE_API(0, 10, NVCVStatus, cvcudaResizeCropConvertReformatSubmit, (NVCVOperatorHandle handle, cudaStream_t stream, NVCVTensorHandle in, NVCVTensorHandle out, const NVCVSize2D resizeDim, const NVCVInterpolationType interpolation, const int2 cropPos, - const NVCVChannelManip manip, const float scale, const float offset)) + const NVCVChannelManip manip, const float scale, const float offset, const bool srcCast)) { return nvcv::ProtectCall( [&] { nvcv::TensorWrapHandle input(in), output(out); priv::ToDynamicRef(handle)(stream, input, output, resizeDim, interpolation, - cropPos, manip, scale, offset); + cropPos, manip, scale, offset, srcCast); }); } -CVCUDA_DEFINE_API(0, 8, NVCVStatus, cvcudaResizeCropConvertReformatVarShapeSubmit, +CVCUDA_DEFINE_API(0, 10, NVCVStatus, cvcudaResizeCropConvertReformatVarShapeSubmit, (NVCVOperatorHandle handle, cudaStream_t stream, NVCVImageBatchHandle in, NVCVTensorHandle out, const NVCVSize2D resizeDim, const NVCVInterpolationType interpolation, const int2 cropPos, - const NVCVChannelManip manip, const float scale, const float offset)) + const NVCVChannelManip manip, const float scale, const float offset, const bool srcCast)) { return nvcv::ProtectCall( [&] @@ -66,6 +66,6 @@ CVCUDA_DEFINE_API(0, 8, NVCVStatus, cvcudaResizeCropConvertReformatVarShapeSubmi nvcv::ImageBatchVarShapeWrapHandle input(in); nvcv::TensorWrapHandle output(out); priv::ToDynamicRef(handle)(stream, input, output, resizeDim, interpolation, - cropPos, manip, scale, offset); + cropPos, manip, scale, offset, srcCast); }); } diff --git a/src/cvcuda/OpRotate.cpp b/src/cvcuda/OpRotate.cpp index a6620e0a9..db642a5c8 100644 --- a/src/cvcuda/OpRotate.cpp +++ b/src/cvcuda/OpRotate.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpSIFT.cpp b/src/cvcuda/OpSIFT.cpp index 36abac153..29e6605b3 100644 --- a/src/cvcuda/OpSIFT.cpp +++ b/src/cvcuda/OpSIFT.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpStack.cpp b/src/cvcuda/OpStack.cpp index b7a4a2931..8ebf4dd99 100644 --- a/src/cvcuda/OpStack.cpp +++ b/src/cvcuda/OpStack.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpThreshold.cpp b/src/cvcuda/OpThreshold.cpp index 105fd5a85..02fdce76a 100644 --- a/src/cvcuda/OpThreshold.cpp +++ b/src/cvcuda/OpThreshold.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpWarpAffine.cpp b/src/cvcuda/OpWarpAffine.cpp index 284c38091..f8e10d91d 100644 --- a/src/cvcuda/OpWarpAffine.cpp +++ b/src/cvcuda/OpWarpAffine.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/OpWarpPerspective.cpp b/src/cvcuda/OpWarpPerspective.cpp index aefc0c98b..7de743d53 100644 --- a/src/cvcuda/OpWarpPerspective.cpp +++ b/src/cvcuda/OpWarpPerspective.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include #include -#include +#include namespace priv = cvcuda::priv; diff --git a/src/cvcuda/include/cvcuda/OpCropFlipNormalizeReformat.h b/src/cvcuda/include/cvcuda/OpCropFlipNormalizeReformat.h index 80d2ee002..2978acb72 100644 --- a/src/cvcuda/include/cvcuda/OpCropFlipNormalizeReformat.h +++ b/src/cvcuda/include/cvcuda/OpCropFlipNormalizeReformat.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/cvcuda/include/cvcuda/OpErase.h b/src/cvcuda/include/cvcuda/OpErase.h index b3b9a3bb1..819f2cc38 100644 --- a/src/cvcuda/include/cvcuda/OpErase.h +++ b/src/cvcuda/include/cvcuda/OpErase.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/cvcuda/include/cvcuda/OpFindHomography.h b/src/cvcuda/include/cvcuda/OpFindHomography.h index b1806254d..d40fb875b 100644 --- a/src/cvcuda/include/cvcuda/OpFindHomography.h +++ b/src/cvcuda/include/cvcuda/OpFindHomography.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/cvcuda/include/cvcuda/OpLabel.h b/src/cvcuda/include/cvcuda/OpLabel.h index 06a3a7ac8..12e3e5027 100644 --- a/src/cvcuda/include/cvcuda/OpLabel.h +++ b/src/cvcuda/include/cvcuda/OpLabel.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/cvcuda/include/cvcuda/OpLabel.hpp b/src/cvcuda/include/cvcuda/OpLabel.hpp index 1b6997d9a..f6b9948b0 100644 --- a/src/cvcuda/include/cvcuda/OpLabel.hpp +++ b/src/cvcuda/include/cvcuda/OpLabel.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/cvcuda/include/cvcuda/OpRemap.h b/src/cvcuda/include/cvcuda/OpRemap.h index 4da16a008..df3afd250 100644 --- a/src/cvcuda/include/cvcuda/OpRemap.h +++ b/src/cvcuda/include/cvcuda/OpRemap.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -76,7 +76,7 @@ CVCUDA_PUBLIC NVCVStatus cvcudaRemapCreate(NVCVOperatorHandle *handle); * 16bit Signed | No * 32bit Unsigned | No * 32bit Signed | No - * 32bit Float | Yes + * 32bit Float | Yes (only if Channel=1) * 64bit Float | No * * Output: @@ -91,7 +91,7 @@ CVCUDA_PUBLIC NVCVStatus cvcudaRemapCreate(NVCVOperatorHandle *handle); * 16bit Signed | No * 32bit Unsigned | No * 32bit Signed | No - * 32bit Float | Yes + * 32bit Float | Yes (only if Channel=1) * 64bit Float | No * * Input map: diff --git a/src/cvcuda/include/cvcuda/OpResizeCropConvertReformat.h b/src/cvcuda/include/cvcuda/OpResizeCropConvertReformat.h index ca27f038f..cd70ff631 100644 --- a/src/cvcuda/include/cvcuda/OpResizeCropConvertReformat.h +++ b/src/cvcuda/include/cvcuda/OpResizeCropConvertReformat.h @@ -217,19 +217,15 @@ CVCUDA_PUBLIC NVCVStatus cvcudaResizeCropConvertReformatCreate(NVCVOperatorHandl * @retval #NVCV_SUCCESS Operation executed successfully. 
*/ /** @{ */ -CVCUDA_PUBLIC NVCVStatus cvcudaResizeCropConvertReformatSubmit(NVCVOperatorHandle handle, cudaStream_t stream, - NVCVTensorHandle in, NVCVTensorHandle out, - const NVCVSize2D resizeDim, - const NVCVInterpolationType interpolation, - const int2 cropPos, const NVCVChannelManip manip, - const float scale, const float offset, bool srcCast); +CVCUDA_PUBLIC NVCVStatus cvcudaResizeCropConvertReformatSubmit( + NVCVOperatorHandle handle, cudaStream_t stream, NVCVTensorHandle in, NVCVTensorHandle out, + const NVCVSize2D resizeDim, const NVCVInterpolationType interpolation, const int2 cropPos, + const NVCVChannelManip manip, const float scale, const float offset, const bool srcCast); -CVCUDA_PUBLIC NVCVStatus cvcudaResizeCropConvertReformatVarShapeSubmit(NVCVOperatorHandle handle, cudaStream_t stream, - NVCVImageBatchHandle in, NVCVTensorHandle out, - const NVCVSize2D resizeDim, - const NVCVInterpolationType interpolation, - const int2 cropPos, const NVCVChannelManip manip, - const float scale, float offset, bool srcCast); +CVCUDA_PUBLIC NVCVStatus cvcudaResizeCropConvertReformatVarShapeSubmit( + NVCVOperatorHandle handle, cudaStream_t stream, NVCVImageBatchHandle in, NVCVTensorHandle out, + const NVCVSize2D resizeDim, const NVCVInterpolationType interpolation, const int2 cropPos, + const NVCVChannelManip manip, const float scale, const float offset, const bool srcCast); /** @} */ #ifdef __cplusplus diff --git a/src/cvcuda/include/cvcuda/OpSIFT.h b/src/cvcuda/include/cvcuda/OpSIFT.h index 39e5142f2..b2ba49348 100644 --- a/src/cvcuda/include/cvcuda/OpSIFT.h +++ b/src/cvcuda/include/cvcuda/OpSIFT.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/cvcuda/include/cvcuda/Types.h b/src/cvcuda/include/cvcuda/Types.h index d222b38eb..360905a7e 100644 --- a/src/cvcuda/include/cvcuda/Types.h +++ b/src/cvcuda/include/cvcuda/Types.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/cvcuda/include/cvcuda/Workspace.hpp b/src/cvcuda/include/cvcuda/Workspace.hpp index e878ff00e..6e576e26d 100644 --- a/src/cvcuda/include/cvcuda/Workspace.hpp +++ b/src/cvcuda/include/cvcuda/Workspace.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/ArrayWrap.hpp b/src/cvcuda/include/cvcuda/cuda_tools/ArrayWrap.hpp similarity index 100% rename from src/nvcv_types/include/nvcv/cuda/ArrayWrap.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/ArrayWrap.hpp diff --git a/src/nvcv_types/include/nvcv/cuda/Atomics.hpp b/src/cvcuda/include/cvcuda/cuda_tools/Atomics.hpp similarity index 96% rename from src/nvcv_types/include/nvcv/cuda/Atomics.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/Atomics.hpp index 539c91a61..311639c72 100644 --- a/src/nvcv_types/include/nvcv/cuda/Atomics.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/Atomics.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/BorderVarShapeWrap.hpp b/src/cvcuda/include/cvcuda/cuda_tools/BorderVarShapeWrap.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/cuda/BorderVarShapeWrap.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/BorderVarShapeWrap.hpp index 71f8e0ccd..d7942c83f 100644 --- a/src/nvcv_types/include/nvcv/cuda/BorderVarShapeWrap.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/BorderVarShapeWrap.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/BorderWrap.hpp b/src/cvcuda/include/cvcuda/cuda_tools/BorderWrap.hpp similarity index 100% rename from src/nvcv_types/include/nvcv/cuda/BorderWrap.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/BorderWrap.hpp diff --git a/src/nvcv_types/include/nvcv/cuda/DropCast.hpp b/src/cvcuda/include/cvcuda/cuda_tools/DropCast.hpp similarity index 97% rename from src/nvcv_types/include/nvcv/cuda/DropCast.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/DropCast.hpp index 9d69754e8..d2f7aa379 100644 --- a/src/nvcv_types/include/nvcv/cuda/DropCast.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/DropCast.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/FullTensorWrap.hpp b/src/cvcuda/include/cvcuda/cuda_tools/FullTensorWrap.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/cuda/FullTensorWrap.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/FullTensorWrap.hpp index 7953160b5..13e19690d 100644 --- a/src/nvcv_types/include/nvcv/cuda/FullTensorWrap.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/FullTensorWrap.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/ImageBatchVarShapeWrap.hpp b/src/cvcuda/include/cvcuda/cuda_tools/ImageBatchVarShapeWrap.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/cuda/ImageBatchVarShapeWrap.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/ImageBatchVarShapeWrap.hpp index 482f5b3a7..bbd23775f 100644 --- a/src/nvcv_types/include/nvcv/cuda/ImageBatchVarShapeWrap.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/ImageBatchVarShapeWrap.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/InterpolationVarShapeWrap.hpp b/src/cvcuda/include/cvcuda/cuda_tools/InterpolationVarShapeWrap.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/cuda/InterpolationVarShapeWrap.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/InterpolationVarShapeWrap.hpp index fa2fe717b..e44c5e76c 100644 --- a/src/nvcv_types/include/nvcv/cuda/InterpolationVarShapeWrap.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/InterpolationVarShapeWrap.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/InterpolationWrap.hpp b/src/cvcuda/include/cvcuda/cuda_tools/InterpolationWrap.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/cuda/InterpolationWrap.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/InterpolationWrap.hpp index bb9cebd5b..bb8236d5e 100644 --- a/src/nvcv_types/include/nvcv/cuda/InterpolationWrap.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/InterpolationWrap.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/MathOps.hpp b/src/cvcuda/include/cvcuda/cuda_tools/MathOps.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/cuda/MathOps.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/MathOps.hpp index 87d63cc26..4506cf101 100644 --- a/src/nvcv_types/include/nvcv/cuda/MathOps.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/MathOps.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/MathWrappers.hpp b/src/cvcuda/include/cvcuda/cuda_tools/MathWrappers.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/cuda/MathWrappers.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/MathWrappers.hpp index 46d66da28..89be6e93d 100644 --- a/src/nvcv_types/include/nvcv/cuda/MathWrappers.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/MathWrappers.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/Printer.hpp b/src/cvcuda/include/cvcuda/cuda_tools/Printer.hpp similarity index 100% rename from src/nvcv_types/include/nvcv/cuda/Printer.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/Printer.hpp diff --git a/src/nvcv_types/include/nvcv/cuda/RangeCast.hpp b/src/cvcuda/include/cvcuda/cuda_tools/RangeCast.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/cuda/RangeCast.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/RangeCast.hpp index 510825fbf..64e5b0e7d 100644 --- a/src/nvcv_types/include/nvcv/cuda/RangeCast.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/RangeCast.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/SaturateCast.hpp b/src/cvcuda/include/cvcuda/cuda_tools/SaturateCast.hpp similarity index 97% rename from src/nvcv_types/include/nvcv/cuda/SaturateCast.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/SaturateCast.hpp index b0cde23ad..41840eb4e 100644 --- a/src/nvcv_types/include/nvcv/cuda/SaturateCast.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/SaturateCast.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/StaticCast.hpp b/src/cvcuda/include/cvcuda/cuda_tools/StaticCast.hpp similarity index 97% rename from src/nvcv_types/include/nvcv/cuda/StaticCast.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/StaticCast.hpp index a960cce0e..5f81d14a4 100644 --- a/src/nvcv_types/include/nvcv/cuda/StaticCast.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/StaticCast.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/TensorBatchWrap.hpp b/src/cvcuda/include/cvcuda/cuda_tools/TensorBatchWrap.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/cuda/TensorBatchWrap.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/TensorBatchWrap.hpp index 7023221cc..5076823e3 100644 --- a/src/nvcv_types/include/nvcv/cuda/TensorBatchWrap.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/TensorBatchWrap.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,9 +24,10 @@ #ifndef NVCV_CUDA_TENSOR_BATCH_WRAP_HPP #define NVCV_CUDA_TENSOR_BATCH_WRAP_HPP +#include "TensorWrap.hpp" // for TensorWrap, etc. #include "TypeTraits.hpp" // for HasTypeTraits, etc -#include "nvcv/TensorBatchData.hpp" -#include "nvcv/cuda/TensorWrap.hpp" + +#include #include diff --git a/src/nvcv_types/include/nvcv/cuda/TensorWrap.hpp b/src/cvcuda/include/cvcuda/cuda_tools/TensorWrap.hpp similarity index 100% rename from src/nvcv_types/include/nvcv/cuda/TensorWrap.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/TensorWrap.hpp diff --git a/src/nvcv_types/include/nvcv/cuda/TypeTraits.hpp b/src/cvcuda/include/cvcuda/cuda_tools/TypeTraits.hpp similarity index 100% rename from src/nvcv_types/include/nvcv/cuda/TypeTraits.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/TypeTraits.hpp diff --git a/src/nvcv_types/include/nvcv/cuda/detail/MathWrappersImpl.hpp b/src/cvcuda/include/cvcuda/cuda_tools/detail/MathWrappersImpl.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/cuda/detail/MathWrappersImpl.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/detail/MathWrappersImpl.hpp index 7c519178e..e64481777 100644 --- a/src/nvcv_types/include/nvcv/cuda/detail/MathWrappersImpl.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/detail/MathWrappersImpl.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/detail/Metaprogramming.hpp b/src/cvcuda/include/cvcuda/cuda_tools/detail/Metaprogramming.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/cuda/detail/Metaprogramming.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/detail/Metaprogramming.hpp index 1626b04fa..4e1ccda9b 100644 --- a/src/nvcv_types/include/nvcv/cuda/detail/Metaprogramming.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/detail/Metaprogramming.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/detail/RangeCastImpl.hpp b/src/cvcuda/include/cvcuda/cuda_tools/detail/RangeCastImpl.hpp similarity index 96% rename from src/nvcv_types/include/nvcv/cuda/detail/RangeCastImpl.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/detail/RangeCastImpl.hpp index cbd907059..de51cfd78 100644 --- a/src/nvcv_types/include/nvcv/cuda/detail/RangeCastImpl.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/detail/RangeCastImpl.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/detail/SaturateCastImpl.hpp b/src/cvcuda/include/cvcuda/cuda_tools/detail/SaturateCastImpl.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/cuda/detail/SaturateCastImpl.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/detail/SaturateCastImpl.hpp index b8078db76..b1bcdbbc3 100644 --- a/src/nvcv_types/include/nvcv/cuda/detail/SaturateCastImpl.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/detail/SaturateCastImpl.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/cuda/math/LinAlg.hpp b/src/cvcuda/include/cvcuda/cuda_tools/math/LinAlg.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/cuda/math/LinAlg.hpp rename to src/cvcuda/include/cvcuda/cuda_tools/math/LinAlg.hpp index 4287d485f..2a7d8bd38 100644 --- a/src/nvcv_types/include/nvcv/cuda/math/LinAlg.hpp +++ b/src/cvcuda/include/cvcuda/cuda_tools/math/LinAlg.hpp @@ -24,8 +24,8 @@ #ifndef NVCV_CUDA_MATH_LINALG_HPP #define NVCV_CUDA_MATH_LINALG_HPP -#include // for cuda::max, etc. -#include // for cuda::Require, etc. +#include // for cuda::max, etc. +#include // for cuda::Require, etc. #include // for std::swap, etc. #include // for assert, etc. diff --git a/src/cvcuda/priv/CMakeLists.txt b/src/cvcuda/priv/CMakeLists.txt index 488669f70..8a61def0e 100644 --- a/src/cvcuda/priv/CMakeLists.txt +++ b/src/cvcuda/priv/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/cvcuda/priv/OpAdaptiveThreshold.cpp b/src/cvcuda/priv/OpAdaptiveThreshold.cpp index f1ef45212..d512aecb7 100644 --- a/src/cvcuda/priv/OpAdaptiveThreshold.cpp +++ b/src/cvcuda/priv/OpAdaptiveThreshold.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
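Note on the cuda_tools moves above: for code that includes these utilities, the change is purely a path migration from nvcv/cuda to cvcuda/cuda_tools; the wrappers themselves and the nvcv::cuda namespace they live in are unchanged. A minimal before/after sketch using two of the renamed headers (and assuming no forwarding headers are left at the old locations, which the hunks above do not show):

    // Before this patch the CUDA tool headers lived under nvcv/cuda:
    //   #include <nvcv/cuda/TensorWrap.hpp>
    //   #include <nvcv/cuda/SaturateCast.hpp>
    // After the rename the same headers are found under cvcuda/cuda_tools:
    #include <cvcuda/cuda_tools/TensorWrap.hpp>   // for TensorWrap, etc.
    #include <cvcuda/cuda_tools/SaturateCast.hpp> // for SaturateCast, etc.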
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpAdvCvtColor.cu b/src/cvcuda/priv/OpAdvCvtColor.cu index cac368e71..4872e9755 100644 --- a/src/cvcuda/priv/OpAdvCvtColor.cu +++ b/src/cvcuda/priv/OpAdvCvtColor.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,11 +22,11 @@ #include "legacy/CvCudaUtils.cuh" +#include #include #include #include -#include -#include +#include #define BLOCK 32 diff --git a/src/cvcuda/priv/OpAverageBlur.cpp b/src/cvcuda/priv/OpAverageBlur.cpp index ba79ca60f..78ee4a704 100644 --- a/src/cvcuda/priv/OpAverageBlur.cpp +++ b/src/cvcuda/priv/OpAverageBlur.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpBilateralFilter.cpp b/src/cvcuda/priv/OpBilateralFilter.cpp index 959f6a1aa..a9d51c843 100644 --- a/src/cvcuda/priv/OpBilateralFilter.cpp +++ b/src/cvcuda/priv/OpBilateralFilter.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpBndBox.cpp b/src/cvcuda/priv/OpBndBox.cpp index 36fa702fa..2a6eb5c7f 100644 --- a/src/cvcuda/priv/OpBndBox.cpp +++ b/src/cvcuda/priv/OpBndBox.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpBoxBlur.cpp b/src/cvcuda/priv/OpBoxBlur.cpp index c5a71ede7..b503ca5fc 100644 --- a/src/cvcuda/priv/OpBoxBlur.cpp +++ b/src/cvcuda/priv/OpBoxBlur.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpBrightnessContrast.cu b/src/cvcuda/priv/OpBrightnessContrast.cu index 735febf7c..9e181a8b1 100644 --- a/src/cvcuda/priv/OpBrightnessContrast.cu +++ b/src/cvcuda/priv/OpBrightnessContrast.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,20 +17,20 @@ #include "OpBrightnessContrast.hpp" +#include +#include +#include +#include +#include +#include #include #include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include #include #include diff --git a/src/cvcuda/priv/OpCenterCrop.cpp b/src/cvcuda/priv/OpCenterCrop.cpp index d11cedde6..f35c36a5e 100644 --- a/src/cvcuda/priv/OpCenterCrop.cpp +++ b/src/cvcuda/priv/OpCenterCrop.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpChannelReorder.cpp b/src/cvcuda/priv/OpChannelReorder.cpp index 2815b386b..b689e83cf 100644 --- a/src/cvcuda/priv/OpChannelReorder.cpp +++ b/src/cvcuda/priv/OpChannelReorder.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpColorTwist.cu b/src/cvcuda/priv/OpColorTwist.cu index 4fd6f3995..312033862 100644 --- a/src/cvcuda/priv/OpColorTwist.cu +++ b/src/cvcuda/priv/OpColorTwist.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,21 +17,21 @@ #include "OpColorTwist.hpp" +#include +#include +#include +#include +#include +#include +#include #include #include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include namespace cuda = nvcv::cuda; namespace util = nvcv::util; diff --git a/src/cvcuda/priv/OpComposite.cpp b/src/cvcuda/priv/OpComposite.cpp index b6ea21068..bce123058 100644 --- a/src/cvcuda/priv/OpComposite.cpp +++ b/src/cvcuda/priv/OpComposite.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpConv2D.cpp b/src/cvcuda/priv/OpConv2D.cpp index 2cf6fec61..a9fc049e7 100644 --- a/src/cvcuda/priv/OpConv2D.cpp +++ b/src/cvcuda/priv/OpConv2D.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpConvertTo.cpp b/src/cvcuda/priv/OpConvertTo.cpp index 1525fc319..f2c67c89a 100644 --- a/src/cvcuda/priv/OpConvertTo.cpp +++ b/src/cvcuda/priv/OpConvertTo.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpCopyMakeBorder.cpp b/src/cvcuda/priv/OpCopyMakeBorder.cpp index e74b5bee2..f5fc122c7 100644 --- a/src/cvcuda/priv/OpCopyMakeBorder.cpp +++ b/src/cvcuda/priv/OpCopyMakeBorder.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpCropFlipNormalizeReformat.cu b/src/cvcuda/priv/OpCropFlipNormalizeReformat.cu index 7d38cf156..4ab092897 100644 --- a/src/cvcuda/priv/OpCropFlipNormalizeReformat.cu +++ b/src/cvcuda/priv/OpCropFlipNormalizeReformat.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,18 +17,18 @@ #include "OpCropFlipNormalizeReformat.hpp" +#include +#include +#include +#include +#include +#include +#include #include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include +#include #include #include diff --git a/src/cvcuda/priv/OpCustomCrop.cpp b/src/cvcuda/priv/OpCustomCrop.cpp index cd1069022..f761a10e5 100644 --- a/src/cvcuda/priv/OpCustomCrop.cpp +++ b/src/cvcuda/priv/OpCustomCrop.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpCvtColor.cpp b/src/cvcuda/priv/OpCvtColor.cpp index a6167bc5f..7b12e1bb0 100644 --- a/src/cvcuda/priv/OpCvtColor.cpp +++ b/src/cvcuda/priv/OpCvtColor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpErase.cpp b/src/cvcuda/priv/OpErase.cpp index 51817a478..e778f569b 100644 --- a/src/cvcuda/priv/OpErase.cpp +++ b/src/cvcuda/priv/OpErase.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpFindHomography.cu b/src/cvcuda/priv/OpFindHomography.cu index 7e8beef82..abd413dbc 100644 --- a/src/cvcuda/priv/OpFindHomography.cu +++ b/src/cvcuda/priv/OpFindHomography.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,6 +18,13 @@ #include "OpFindHomography.hpp" #include +#include +#include +#include +#include +#include +#include +#include #include #include #include @@ -26,16 +33,9 @@ #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include #include diff --git a/src/cvcuda/priv/OpFlip.cpp b/src/cvcuda/priv/OpFlip.cpp index b55c8773c..7428ba11d 100644 --- a/src/cvcuda/priv/OpFlip.cpp +++ b/src/cvcuda/priv/OpFlip.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpGammaContrast.cpp b/src/cvcuda/priv/OpGammaContrast.cpp index 20b71526e..c63f8b355 100644 --- a/src/cvcuda/priv/OpGammaContrast.cpp +++ b/src/cvcuda/priv/OpGammaContrast.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpGaussian.cpp b/src/cvcuda/priv/OpGaussian.cpp index d8be1bc06..643d2e168 100644 --- a/src/cvcuda/priv/OpGaussian.cpp +++ b/src/cvcuda/priv/OpGaussian.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpGaussianNoise.cpp b/src/cvcuda/priv/OpGaussianNoise.cpp index 76b4160c1..0899ec2e3 100644 --- a/src/cvcuda/priv/OpGaussianNoise.cpp +++ b/src/cvcuda/priv/OpGaussianNoise.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpHQResize.cu b/src/cvcuda/priv/OpHQResize.cu index 86c069f51..dfdc8bb03 100644 --- a/src/cvcuda/priv/OpHQResize.cu +++ b/src/cvcuda/priv/OpHQResize.cu @@ -16,13 +16,20 @@ */ #include "OpHQResize.hpp" +#include "WorkspaceUtil.hpp" #include "cvcuda/Workspace.hpp" #include "OpHQResizeBatchWrap.cuh" #include "OpHQResizeFilter.cuh" #include -#include +#include +#include +#include +#include +#include +#include +#include #include #include #include @@ -30,16 +37,9 @@ #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include #include #include diff --git a/src/cvcuda/priv/OpHQResizeBatchWrap.cuh b/src/cvcuda/priv/OpHQResizeBatchWrap.cuh index 02232975f..50c92314e 100644 --- a/src/cvcuda/priv/OpHQResizeBatchWrap.cuh +++ b/src/cvcuda/priv/OpHQResizeBatchWrap.cuh @@ -17,18 +17,18 @@ #ifndef CVCUDA_PRIV_HQ_RESIZE_BATCH_WRAP_CUH #define CVCUDA_PRIV_HQ_RESIZE_BATCH_WRAP_CUH +#include "WorkspaceUtil.hpp" #include "cvcuda/Workspace.hpp" #include -#include +#include +#include +#include #include #include #include -#include -#include -#include -#include -#include +#include +#include // This file contains three kind of helpers // 1. Helpers to wrap contigious batch with uniform sample stride into TensorWrap diff --git a/src/cvcuda/priv/OpHQResizeFilter.cuh b/src/cvcuda/priv/OpHQResizeFilter.cuh index e32f5d270..7c7b1dcdb 100644 --- a/src/cvcuda/priv/OpHQResizeFilter.cuh +++ b/src/cvcuda/priv/OpHQResizeFilter.cuh @@ -20,9 +20,9 @@ #include #include // for NVCVInterpolationType, etc. #include -#include -#include -#include +#include +#include +#include #include #include diff --git a/src/cvcuda/priv/OpHistogram.cpp b/src/cvcuda/priv/OpHistogram.cpp index 88ad2770b..d601c122b 100644 --- a/src/cvcuda/priv/OpHistogram.cpp +++ b/src/cvcuda/priv/OpHistogram.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpHistogramEq.cpp b/src/cvcuda/priv/OpHistogramEq.cpp index d9b8fff1f..1ccf1bb83 100644 --- a/src/cvcuda/priv/OpHistogramEq.cpp +++ b/src/cvcuda/priv/OpHistogramEq.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpInpaint.cpp b/src/cvcuda/priv/OpInpaint.cpp index bc00dca1a..a9e1105e5 100644 --- a/src/cvcuda/priv/OpInpaint.cpp +++ b/src/cvcuda/priv/OpInpaint.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpJointBilateralFilter.cpp b/src/cvcuda/priv/OpJointBilateralFilter.cpp index 189f24b3a..6e841cc38 100644 --- a/src/cvcuda/priv/OpJointBilateralFilter.cpp +++ b/src/cvcuda/priv/OpJointBilateralFilter.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpLabel.cu b/src/cvcuda/priv/OpLabel.cu index 0ea67c5f8..374ae63b3 100644 --- a/src/cvcuda/priv/OpLabel.cu +++ b/src/cvcuda/priv/OpLabel.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -49,17 +49,17 @@ #include "OpLabel.hpp" #include +#include +#include +#include +#include +#include +#include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include #include diff --git a/src/cvcuda/priv/OpLabel.hpp b/src/cvcuda/priv/OpLabel.hpp index d397d90e8..4db4b88b8 100644 --- a/src/cvcuda/priv/OpLabel.hpp +++ b/src/cvcuda/priv/OpLabel.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/cvcuda/priv/OpLaplacian.cpp b/src/cvcuda/priv/OpLaplacian.cpp index a76eb9ac3..0b6c027a9 100644 --- a/src/cvcuda/priv/OpLaplacian.cpp +++ b/src/cvcuda/priv/OpLaplacian.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpMedianBlur.cpp b/src/cvcuda/priv/OpMedianBlur.cpp index 9a243f416..efc77a044 100644 --- a/src/cvcuda/priv/OpMedianBlur.cpp +++ b/src/cvcuda/priv/OpMedianBlur.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpMinAreaRect.cpp b/src/cvcuda/priv/OpMinAreaRect.cpp index bf7dc782f..7cfe54fef 100644 --- a/src/cvcuda/priv/OpMinAreaRect.cpp +++ b/src/cvcuda/priv/OpMinAreaRect.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpMinMaxLoc.cu b/src/cvcuda/priv/OpMinMaxLoc.cu index 60107e85d..984ba2926 100644 --- a/src/cvcuda/priv/OpMinMaxLoc.cu +++ b/src/cvcuda/priv/OpMinMaxLoc.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,17 +17,17 @@ #include "OpMinMaxLoc.hpp" +#include +#include +#include +#include #include #include #include #include #include -#include -#include -#include -#include -#include -#include +#include +#include #include diff --git a/src/cvcuda/priv/OpMorphology.cpp b/src/cvcuda/priv/OpMorphology.cpp index fa547ce38..5c7c585cc 100644 --- a/src/cvcuda/priv/OpMorphology.cpp +++ b/src/cvcuda/priv/OpMorphology.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpNonMaximumSuppression.cu b/src/cvcuda/priv/OpNonMaximumSuppression.cu index d21f77d43..7d1ae495e 100644 --- a/src/cvcuda/priv/OpNonMaximumSuppression.cu +++ b/src/cvcuda/priv/OpNonMaximumSuppression.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,17 +17,17 @@ #include "OpNonMaximumSuppression.hpp" +#include +#include +#include +#include +#include #include #include #include #include -#include -#include -#include -#include -#include -#include -#include +#include +#include namespace cuda = nvcv::cuda; namespace util = nvcv::util; diff --git a/src/cvcuda/priv/OpNormalize.cpp b/src/cvcuda/priv/OpNormalize.cpp index 34f46fa22..5a9e5c211 100644 --- a/src/cvcuda/priv/OpNormalize.cpp +++ b/src/cvcuda/priv/OpNormalize.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpOSD.cpp b/src/cvcuda/priv/OpOSD.cpp index 249bb148f..46f212de0 100644 --- a/src/cvcuda/priv/OpOSD.cpp +++ b/src/cvcuda/priv/OpOSD.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpPadAndStack.cpp b/src/cvcuda/priv/OpPadAndStack.cpp index eea5d9b08..0abf0ae89 100644 --- a/src/cvcuda/priv/OpPadAndStack.cpp +++ b/src/cvcuda/priv/OpPadAndStack.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpPairwiseMatcher.cu b/src/cvcuda/priv/OpPairwiseMatcher.cu index 676ec8657..9e6af5f6a 100644 --- a/src/cvcuda/priv/OpPairwiseMatcher.cu +++ b/src/cvcuda/priv/OpPairwiseMatcher.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,12 +18,12 @@ #include "Assert.h" #include "OpPairwiseMatcher.hpp" +#include +#include +#include #include -#include -#include -#include -#include -#include +#include +#include #include diff --git a/src/cvcuda/priv/OpPillowResize.cpp b/src/cvcuda/priv/OpPillowResize.cpp index a72fa7f9a..0625eb74f 100644 --- a/src/cvcuda/priv/OpPillowResize.cpp +++ b/src/cvcuda/priv/OpPillowResize.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpRandomResizedCrop.cpp b/src/cvcuda/priv/OpRandomResizedCrop.cpp index f155962eb..fb1cb41a7 100644 --- a/src/cvcuda/priv/OpRandomResizedCrop.cpp +++ b/src/cvcuda/priv/OpRandomResizedCrop.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpReformat.cpp b/src/cvcuda/priv/OpReformat.cpp index 946ad971a..74b38b55a 100644 --- a/src/cvcuda/priv/OpReformat.cpp +++ b/src/cvcuda/priv/OpReformat.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpRemap.cu b/src/cvcuda/priv/OpRemap.cu index 36488589e..e56b8a981 100644 --- a/src/cvcuda/priv/OpRemap.cu +++ b/src/cvcuda/priv/OpRemap.cu @@ -17,18 +17,18 @@ #include "OpRemap.hpp" +#include +#include +#include +#include +#include +#include #include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include namespace cuda = nvcv::cuda; namespace util = nvcv::util; diff --git a/src/cvcuda/priv/OpResize.cpp b/src/cvcuda/priv/OpResize.cpp index f136f7414..4abc21e7e 100644 --- a/src/cvcuda/priv/OpResize.cpp +++ b/src/cvcuda/priv/OpResize.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpResize.cu b/src/cvcuda/priv/OpResize.cu index 6f18839ef..42193c87d 100644 --- a/src/cvcuda/priv/OpResize.cu +++ b/src/cvcuda/priv/OpResize.cu @@ -17,19 +17,19 @@ #include "OpResize.hpp" +#include +#include +#include +#include +#include +#include +#include #include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include namespace { @@ -38,7 +38,12 @@ namespace util = nvcv::util; // Destination pack type given the source and destination type T template -using DPT = std::conditional_t; +using DPT = std::conditional_t == 3, uint3, uint4>; + +// Row alignment mask to determine if data pointer is aligned for vectorized write: +// note that in CUDA, uint3 has 4-byte (uint) alignment. +template +constexpr uint MSK = (sizeof(DPT) == sizeof(uint3) ? sizeof(uint) : sizeof(DPT)) - 1; // Number of items in x written by each thread template @@ -46,11 +51,19 @@ constexpr int NIX = sizeof(DPT) / sizeof(T); // Write a pack of N elements of type T as a different pack type DPT template -__device__ void WritePack(T &u, const T (&v)[NIX]) +__device__ __forceinline__ +void WritePack(T &u, const T (&v)[NIX]) { reinterpret_cast &>(u) = reinterpret_cast &>(v); } +// Check if destination row pointer is aligned for vector writes. +template +__device__ __forceinline__ +bool CheckRowAlign(T *row) { + return (static_cast(reinterpret_cast(row)) & MSK) == 0; +} + // Nearest --------------------------------------------------------------------- template @@ -86,7 +99,14 @@ inline __device__ void NearestInterpolatePack(T *dstRow, SrcWrapper src, int3 iS } } - WritePack(dstRow[dstCoordX], dstPack); + if (CheckRowAlign(dstRow)) // Branch is the same for all threads in warp. + WritePack(dstRow[dstCoordX], dstPack); // If row is aligned, write vector pack; + else { + T *dstPtr = dstRow + dstCoordX; // otherwise, write individual elements. +#pragma unroll + for (uint i = 0; i < NIX; ++i) dstPtr[i] = dstPack[i]; + } + // writePack(dstRow + dstCoordX, dstPack); } else { @@ -155,8 +175,8 @@ inline __device__ void LinearReadPack(SrcWrapper src, T (&srcPack)[4], int3 iSrc } template -inline __device__ T LinearInterpolatePack(T *dstRow, SrcWrapper src, int3 iSrcCoord, float srcCoordX, int srcSizeX, - int dstCoordX, int dstSizeX, float scaleRatioX, float2 w) +inline __device__ void LinearInterpolatePack(T *dstRow, SrcWrapper src, int3 iSrcCoord, float srcCoordX, int srcSizeX, + int dstCoordX, int dstSizeX, float scaleRatioX, float2 w) { float sx; int iPrevCoordX; @@ -171,8 +191,7 @@ inline __device__ T LinearInterpolatePack(T *dstRow, SrcWrapper src, int3 iSrcCo sx = srcCoordX + x * scaleRatioX; iSrcCoord.x = floor(sx); - w.x = sx - iSrcCoord.x; - w.x = (iSrcCoord.x < 0 || iSrcCoord.x >= srcSizeX - 1) ? 0 : w.x; + w.x = ((iSrcCoord.x < 0) ? 0 : ((iSrcCoord.x > srcSizeX - 2) ? 1 : sx - iSrcCoord.x)); iSrcCoord.x = cuda::max(0, cuda::min(iSrcCoord.x, srcSizeX - 2)); @@ -215,7 +234,14 @@ inline __device__ T LinearInterpolatePack(T *dstRow, SrcWrapper src, int3 iSrcCo } } - WritePack(dstRow[dstCoordX], dstPack); + if (CheckRowAlign(dstRow)) // Branch is the same for all threads in warp. 
+ WritePack(dstRow[dstCoordX], dstPack); // If row is aligned, write vector pack; + else { + T *dstPtr = dstRow + dstCoordX; // otherwise, write individual elements. +#pragma unroll + for (uint i = 0; i < NIX; ++i) dstPtr[i] = dstPack[i]; + } + // writePack(dstRow + dstCoordX, dstPack, reinterpret_cast(dstRow) & DstMask) == 0); } else { @@ -227,8 +253,7 @@ inline __device__ T LinearInterpolatePack(T *dstRow, SrcWrapper src, int3 iSrcCo sx = srcCoordX + x * scaleRatioX; iSrcCoord.x = floor(sx); - w.x = sx - iSrcCoord.x; - w.x = (iSrcCoord.x < 0 || iSrcCoord.x >= srcSizeX - 1) ? 0 : w.x; + w.x = ((iSrcCoord.x < 0) ? 0 : ((iSrcCoord.x > srcSizeX - 2) ? 1 : sx - iSrcCoord.x)); iSrcCoord.x = cuda::max(0, cuda::min(iSrcCoord.x, srcSizeX - 2)); @@ -291,7 +316,8 @@ __global__ void LinearResize(SrcWrapper src, DstWrapper dst, int2 srcSize, int2 int3 iSrcCoord{0, (int)floor(srcCoord.y), dstCoord.z}; float2 w; - w.y = srcCoord.y - iSrcCoord.y; + + w.y = ((iSrcCoord.y < 0) ? 0 : ((iSrcCoord.y > srcSize.y - 2) ? 1 : srcCoord.y - iSrcCoord.y)); iSrcCoord.y = cuda::max(0, cuda::min(iSrcCoord.y, srcSize.y - 2)); diff --git a/src/cvcuda/priv/OpResize.hpp b/src/cvcuda/priv/OpResize.hpp index eaebaf932..6f066e0a3 100644 --- a/src/cvcuda/priv/OpResize.hpp +++ b/src/cvcuda/priv/OpResize.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/cvcuda/priv/OpResizeCropConvertReformat.cu b/src/cvcuda/priv/OpResizeCropConvertReformat.cu index 571c2d63f..be37bc8fb 100644 --- a/src/cvcuda/priv/OpResizeCropConvertReformat.cu +++ b/src/cvcuda/priv/OpResizeCropConvertReformat.cu @@ -19,17 +19,17 @@ #include "legacy/CvCudaLegacy.h" #include "legacy/CvCudaLegacyHelpers.hpp" +#include +#include +#include +#include +#include #include #include #include #include -#include -#include -#include -#include -#include -#include -#include +#include +#include #include // for numeric_limits #include @@ -192,8 +192,8 @@ __global__ void resizeCrop_bilinear(DstMap dst, SrcWrapper src, const int src_w, scale * cuda::SaturateCast((1-fy) * ((1-fx) * ptr0[0] + ptr0[sx1] * fx) + fy * ((1-fx) * ptr1[0] + ptr1[sx1] * fx)) + offset); else - dst(blockIdx.z, dst_y, dst_x, scale * (1-fy) * ((1-fx) * ptr0[0] + ptr0[sx1] * fx) - + fy * ((1-fx) * ptr1[0] + ptr1[sx1] * fx) + offset); + dst(blockIdx.z, dst_y, dst_x, scale * ((1-fy) * ((1-fx) * ptr0[0] + ptr0[sx1] * fx) + + fy * ((1-fx) * ptr1[0] + ptr1[sx1] * fx)) + offset); } } // resizeCrop_bilinear @@ -272,8 +272,8 @@ __global__ void resizeCrop_bilinear_varShape(DstMap dst, SrcWrapper src, scale * cuda::SaturateCast((1-fy) * ((1-fx) * ptr0[0] + ptr0[sx1] * fx) + fy * ((1-fx) * ptr1[0] + ptr1[sx1] * fx)) + offset); else - dst(blockIdx.z, dst_y, dst_x, scale * (1-fy) * ((1-fx) * ptr0[0] + ptr0[sx1] * fx) - + fy * ((1-fx) * ptr1[0] + ptr1[sx1] * fx) + offset); + dst(blockIdx.z, dst_y, dst_x, scale * ((1-fy) * ((1-fx) * ptr0[0] + ptr0[sx1] * fx) + + fy * ((1-fx) * ptr1[0] + ptr1[sx1] * fx)) + offset); } } // resizeCrop_bilinear_varShape diff --git a/src/cvcuda/priv/OpRotate.cpp b/src/cvcuda/priv/OpRotate.cpp index 19b859c38..f7ec89a40 100644 --- a/src/cvcuda/priv/OpRotate.cpp +++ b/src/cvcuda/priv/OpRotate.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & 
AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpSIFT.cu b/src/cvcuda/priv/OpSIFT.cu index ec988b1e2..4ebffdc12 100644 --- a/src/cvcuda/priv/OpSIFT.cu +++ b/src/cvcuda/priv/OpSIFT.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,18 +17,18 @@ #include "OpSIFT.hpp" +#include +#include +#include +#include +#include +#include #include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include #include diff --git a/src/cvcuda/priv/OpStack.cpp b/src/cvcuda/priv/OpStack.cpp index 3b1707a78..a2a59b3b1 100644 --- a/src/cvcuda/priv/OpStack.cpp +++ b/src/cvcuda/priv/OpStack.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,7 +20,7 @@ #include "nvcv/TensorDataAccess.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpThreshold.cpp b/src/cvcuda/priv/OpThreshold.cpp index 94c95a191..959adc28c 100644 --- a/src/cvcuda/priv/OpThreshold.cpp +++ b/src/cvcuda/priv/OpThreshold.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpWarpAffine.cpp b/src/cvcuda/priv/OpWarpAffine.cpp index 7691700ac..96e2564cd 100644 --- a/src/cvcuda/priv/OpWarpAffine.cpp +++ b/src/cvcuda/priv/OpWarpAffine.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/OpWarpPerspective.cpp b/src/cvcuda/priv/OpWarpPerspective.cpp index f2f3bf542..92e784aa4 100644 --- a/src/cvcuda/priv/OpWarpPerspective.cpp +++ b/src/cvcuda/priv/OpWarpPerspective.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
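The OpResize.cu hunk above guards its vectorized stores with a row-alignment check: a wide pack store is issued only when the destination pointer is aligned for the pack type, otherwise the kernel falls back to element-wise writes. A self-contained sketch of that pattern, with illustrative names (StorePack, Pack, kN) rather than the ones used in the patch:

    #include <cstdint>
    #include <cstring>
    #include <cuda_runtime.h>

    // Store kN elements of type T as one vectorized pack write when the destination
    // pointer is suitably aligned, otherwise fall back to scalar stores.
    template<typename T, typename Pack, int kN = sizeof(Pack) / sizeof(T)>
    __device__ __forceinline__ void StorePack(T *dst, const T (&v)[kN])
    {
        // In CUDA, uint3 only guarantees 4-byte (uint) alignment, so the mask is derived
        // from uint in that case; for the other pack types it is the pack size itself.
        constexpr uintptr_t mask =
            (sizeof(Pack) == sizeof(uint3) ? sizeof(unsigned int) : sizeof(Pack)) - 1;

        if ((reinterpret_cast<uintptr_t>(dst) & mask) == 0)
        {
            Pack p;
            memcpy(&p, v, sizeof(Pack));        // gather the pack from the per-thread array
            *reinterpret_cast<Pack *>(dst) = p; // one wide, vectorized store
        }
        else
        {
    #pragma unroll
            for (int i = 0; i < kN; ++i) dst[i] = v[i]; // misaligned row: scalar stores
        }
    }

As the in-code comment in the patch notes, the branch evaluates the same way for all threads in a warp, so it does not introduce divergence. The same hunk also rewrites the linear interpolation weights: rather than zeroing a weight whenever its tap index falls outside the valid range, w.x and w.y are now clamped to 0 at the low border and to 1 at the high border, matching the clamped tap coordinates. Relatedly, the resizeCrop_bilinear hunks add parentheses so that scale multiplies the whole bilinear sum, scale * ((1-fy)*top + fy*bottom) + offset, instead of only the (1-fy) term; for example, with scale = 2, offset = 10, fy = 0.5 and x-interpolated row values top = 4 and bottom = 8, the old expression evaluates to 2*0.5*4 + 0.5*8 + 10 = 18 while the corrected one gives 2*(0.5*4 + 0.5*8) + 10 = 22.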
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "legacy/CvCudaLegacyHelpers.hpp" #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/SymbolVersioning.hpp b/src/cvcuda/priv/SymbolVersioning.hpp index e8e76695d..3dd87bd9d 100644 --- a/src/cvcuda/priv/SymbolVersioning.hpp +++ b/src/cvcuda/priv/SymbolVersioning.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ #ifndef CVCUDA_PRIV_SYMBOLVERSIONING_HPP #define CVCUDA_PRIV_SYMBOLVERSIONING_HPP -#include +#include #define CVCUDA_DEFINE_API(...) NVCV_PROJ_DEFINE_API(CVCUDA, __VA_ARGS__) #define CVCUDA_DEFINE_OLD_API(...) NVCV_PROJ_DEFINE_OLD_API(CVCUDA, __VA_ARGS__) diff --git a/src/cvcuda/priv/Version.hpp b/src/cvcuda/priv/Version.hpp index 86d8e1653..bb1af55f8 100644 --- a/src/cvcuda/priv/Version.hpp +++ b/src/cvcuda/priv/Version.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #define CVCUDA_PRIV_VERSION_HPP #include -#include +#include namespace cvcuda::priv { diff --git a/src/cvcuda/priv/legacy/CMakeLists.txt b/src/cvcuda/priv/legacy/CMakeLists.txt index d05db4a9e..4a265c8ea 100644 --- a/src/cvcuda/priv/legacy/CMakeLists.txt +++ b/src/cvcuda/priv/legacy/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/cvcuda/priv/legacy/CvCudaLegacy.h b/src/cvcuda/priv/legacy/CvCudaLegacy.h index 657e608f1..fd9ad2515 100644 --- a/src/cvcuda/priv/legacy/CvCudaLegacy.h +++ b/src/cvcuda/priv/legacy/CvCudaLegacy.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/cvcuda/priv/legacy/CvCudaUtils.cuh b/src/cvcuda/priv/legacy/CvCudaUtils.cuh index a0588ed43..bd937d20c 100644 --- a/src/cvcuda/priv/legacy/CvCudaUtils.cuh +++ b/src/cvcuda/priv/legacy/CvCudaUtils.cuh @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,26 +18,26 @@ #ifndef CV_CUDA_UTILS_CUH #define CV_CUDA_UTILS_CUH +#include // for BorderVarShapeWrap, etc. +#include // for BorderWrap, etc. +#include // for DropCast, etc. +#include // for ImageBatchVarShapeWrap, etc. 
+#include // for InterpolationVarShapeWrap, etc. +#include // for InterpolationWrap, etc. +#include // for math operators +#include // for sqrt, etc. +#include // for SaturateCast, etc. +#include // for StaticCast, etc. +#include // for TensorWrap, etc. +#include // for BaseType, etc. +#include // for Vector, etc. #include #include #include // for ImageDataStridedCuda, etc. #include // for TensorDataStridedCuda, etc. #include -#include // for BorderVarShapeWrap, etc. -#include // for BorderWrap, etc. -#include // for DropCast, etc. -#include // for ImageBatchVarShapeWrap, etc. -#include // for InterpolationVarShapeWrap, etc. -#include // for InterpolationWrap, etc. -#include // for math operators -#include // for sqrt, etc. -#include // for SaturateCast, etc. -#include // for StaticCast, etc. -#include // for TensorWrap, etc. -#include // for BaseType, etc. -#include // for Vector, etc. -#include // for NVCV_ASSERT, etc. -#include // for NVCV_CHECK_LOG, etc. +#include // for NVCV_ASSERT, etc. +#include // for NVCV_CHECK_LOG, etc. #include #include diff --git a/src/cvcuda/priv/legacy/adaptive_threshold.cu b/src/cvcuda/priv/legacy/adaptive_threshold.cu index 455404655..8756cef3b 100644 --- a/src/cvcuda/priv/legacy/adaptive_threshold.cu +++ b/src/cvcuda/priv/legacy/adaptive_threshold.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/adaptive_threshold_var_shape.cu b/src/cvcuda/priv/legacy/adaptive_threshold_var_shape.cu index 092af389e..09f7d00c8 100644 --- a/src/cvcuda/priv/legacy/adaptive_threshold_var_shape.cu +++ b/src/cvcuda/priv/legacy/adaptive_threshold_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/bilateral_filter.cu b/src/cvcuda/priv/legacy/bilateral_filter.cu index 0bf2e8338..84838bc02 100644 --- a/src/cvcuda/priv/legacy/bilateral_filter.cu +++ b/src/cvcuda/priv/legacy/bilateral_filter.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 @@ -25,7 +25,7 @@ #include "CvCudaUtils.cuh" -#include +#include using namespace nvcv::legacy::cuda_op; using namespace nvcv::legacy::helpers; diff --git a/src/cvcuda/priv/legacy/box_blur.cu b/src/cvcuda/priv/legacy/box_blur.cu index 5dda7ff0a..c6be07144 100644 --- a/src/cvcuda/priv/legacy/box_blur.cu +++ b/src/cvcuda/priv/legacy/box_blur.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/calc_hist.cu b/src/cvcuda/priv/legacy/calc_hist.cu index 02a6ef931..4cceabeb1 100644 --- a/src/cvcuda/priv/legacy/calc_hist.cu +++ b/src/cvcuda/priv/legacy/calc_hist.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 @@ -23,8 +23,8 @@ #include "CvCudaUtils.cuh" -#include -#include +#include +#include using namespace nvcv::legacy::helpers; using namespace nvcv::legacy::cuda_op; diff --git a/src/cvcuda/priv/legacy/center_crop.cu b/src/cvcuda/priv/legacy/center_crop.cu index 668a80d6a..9e9ab9f8f 100644 --- a/src/cvcuda/priv/legacy/center_crop.cu +++ b/src/cvcuda/priv/legacy/center_crop.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/channel_reorder_var_shape.cu b/src/cvcuda/priv/legacy/channel_reorder_var_shape.cu index af21bd1a9..c2c386747 100644 --- a/src/cvcuda/priv/legacy/channel_reorder_var_shape.cu +++ b/src/cvcuda/priv/legacy/channel_reorder_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 @@ -24,7 +24,7 @@ #include "CvCudaUtils.cuh" -#include +#include #define BLOCK 32 diff --git a/src/cvcuda/priv/legacy/composite.cu b/src/cvcuda/priv/legacy/composite.cu index 4679c1a11..7434e40ae 100644 --- a/src/cvcuda/priv/legacy/composite.cu +++ b/src/cvcuda/priv/legacy/composite.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/composite_var_shape.cu b/src/cvcuda/priv/legacy/composite_var_shape.cu index c4ce78a64..9aac5c948 100644 --- a/src/cvcuda/priv/legacy/composite_var_shape.cu +++ b/src/cvcuda/priv/legacy/composite_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/convert_to.cu b/src/cvcuda/priv/legacy/convert_to.cu index 607934a89..358a2d1f7 100644 --- a/src/cvcuda/priv/legacy/convert_to.cu +++ b/src/cvcuda/priv/legacy/convert_to.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 @@ -23,9 +23,9 @@ #include "CvCudaUtils.cuh" +#include #include #include -#include #include #include diff --git a/src/cvcuda/priv/legacy/copy_make_border.cu b/src/cvcuda/priv/legacy/copy_make_border.cu index 15e3153a6..07d1cc771 100644 --- a/src/cvcuda/priv/legacy/copy_make_border.cu +++ b/src/cvcuda/priv/legacy/copy_make_border.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/copy_make_border_var_shape.cu b/src/cvcuda/priv/legacy/copy_make_border_var_shape.cu index c0323fdf3..ecb624e87 100644 --- a/src/cvcuda/priv/legacy/copy_make_border_var_shape.cu +++ b/src/cvcuda/priv/legacy/copy_make_border_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/custom_crop.cu b/src/cvcuda/priv/legacy/custom_crop.cu index 14d0aea44..6190af5fe 100644 --- a/src/cvcuda/priv/legacy/custom_crop.cu +++ b/src/cvcuda/priv/legacy/custom_crop.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/cvt_color.cu b/src/cvcuda/priv/legacy/cvt_color.cu index 2abb235f1..66b4d6728 100644 --- a/src/cvcuda/priv/legacy/cvt_color.cu +++ b/src/cvcuda/priv/legacy/cvt_color.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 @@ -73,250 +73,346 @@ static constexpr int ITUR_BT_601_CBU = 460324; static constexpr int ITUR_BT_601_CGV = -385875; static constexpr int ITUR_BT_601_CBV = -74448; -#define CV_DESCALE(x, n) (((x) + (1 << ((n)-1))) >> (n)) +#define CV_DESCALE(x, n) (((x) + (1 << ((n) - 1))) >> (n)) #define BLOCK 32 namespace nvcv::legacy::cuda_op { -template -__global__ void rgb_to_bgr_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int sch, int dch, int bidx) +template +struct CvtKernelPolicy { - int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - if (dst_x >= dstSize.x || dst_y >= dstSize.y) - return; - const int batch_idx = get_batch_idx(); + static_assert(BlockWidth_ % 32 == 0); + static constexpr int BlockWidth = BlockWidth_; + static constexpr int BlockHeight = BlockHeight_; + static constexpr int BlockSize = BlockWidth * BlockHeight; + static constexpr int RowsPerThread = RowsPerThread_; + static constexpr int TileWidth = BlockWidth; + static constexpr int TileHeight = BlockHeight * RowsPerThread; + static constexpr int ThreadRowStride = BlockHeight; +}; + +template +__device__ __forceinline__ void load3_nhwc(const nvcv::cuda::Tensor3DWrap &src, EltT &C0, EltT &C1, + EltT &C2, int batch_idx, int x, int y) +{ + SrcT vec = *src.ptr(batch_idx, y, x); + C0 = vec.x; + C1 = vec.y; + C2 = vec.z; +} - T b = *src.ptr(batch_idx, dst_y, dst_x, bidx); - T g = *src.ptr(batch_idx, dst_y, dst_x, 1); - T r = *src.ptr(batch_idx, dst_y, dst_x, bidx ^ 2); +template +__device__ __forceinline__ void store3_nhwc(const nvcv::cuda::Tensor3DWrap &dst, EltT C0, EltT C1, + EltT C2, int batch_idx, int x, int y) +{ + DstT vec; + vec.x = C0; + vec.y = C1; + vec.z = C2; + *dst.ptr(batch_idx, y, x) = vec; +} - *dst.ptr(batch_idx, dst_y, dst_x, 0) = b; - *dst.ptr(batch_idx, dst_y, dst_x, 1) = g; - *dst.ptr(batch_idx, dst_y, dst_x, 2) = r; +template +__device__ __forceinline__ void load_bgra_nhwc(const nvcv::cuda::Tensor3DWrap &src, EltT &B, + EltT &G, EltT &R, EltT &A, int batch_idx, int x, int y, int bidx) +{ + SrcT vec = *src.ptr(batch_idx, y, x); + B = bidx == 0 ? vec.x : vec.z; + G = vec.y; + R = bidx == 0 ? vec.z : vec.x; + if constexpr (nvcv::cuda::NumComponents == 4) + { + A = vec.w; + } + else + { + A = std::is_floating_point_v ? EltT{1} : cuda::TypeTraits::max; + } +} - if (dch == 4) +template +__device__ __forceinline__ void store_bgra_nhwc(const nvcv::cuda::Tensor3DWrap &dst, EltT B, EltT G, + EltT R, EltT A, int batch_idx, int x, int y, int bidx) +{ + DstT vec; + vec.x = bidx == 0 ? B : R; + vec.y = G; + vec.z = bidx == 0 ? R : B; + if constexpr (nvcv::cuda::NumComponents == 4) { - T al = sch == 4 ? 
*src.ptr(batch_idx, dst_y, dst_x, 3) : cuda::TypeTraits::max; - *dst.ptr(batch_idx, dst_y, dst_x, 3) = al; + vec.w = A; } + *dst.ptr(batch_idx, y, x) = vec; } -template -__global__ void gray_to_bgr_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int dch) +template +__device__ __forceinline__ void color_conversion_common(LoadOpT load_op, ConvOpT conv_op, StoreOpT store_op, int2 size) { - int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - if (dst_x >= dstSize.x || dst_y >= dstSize.y) - return; + const int x = blockIdx.x * Policy::TileWidth + threadIdx.x; + const int y0 = blockIdx.y * Policy::TileHeight + threadIdx.y; const int batch_idx = get_batch_idx(); + if (x >= size.x) + { + return; + } - T g = *src.ptr(batch_idx, dst_y, dst_x, 0); + /* Branchless efficient path for inner blocks. */ + const bool is_inner = y0 + Policy::TileHeight <= size.y; + if (is_inner) + { + EltT r_in[Policy::RowsPerThread][N_IN]; + EltT r_out[Policy::RowsPerThread][N_OUT]; - *dst.ptr(batch_idx, dst_y, dst_x, 0) = g; - *dst.ptr(batch_idx, dst_y, dst_x, 1) = g; - *dst.ptr(batch_idx, dst_y, dst_x, 2) = g; - if (dch == 4) +#pragma unroll + for (int i = 0; i < Policy::RowsPerThread; i++) + { + const int y = y0 + Policy::ThreadRowStride * i; + load_op(r_in[i], batch_idx, x, y); + } +#pragma unroll + for (int i = 0; i < Policy::RowsPerThread; i++) + { + conv_op(r_in[i], r_out[i]); + } +#pragma unroll + for (int i = 0; i < Policy::RowsPerThread; i++) + { + const int y = y0 + Policy::ThreadRowStride * i; + store_op(r_out[i], batch_idx, x, y); + } + } + else { - *dst.ptr(batch_idx, dst_y, dst_x, 3) = g; + int y = y0; + for (int i = 0; i < Policy::RowsPerThread && y < size.y; i++) + { + EltT r_in[N_IN]; + EltT r_out[N_OUT]; + + load_op(r_in, batch_idx, x, y); + conv_op(r_in, r_out); + store_op(r_out, batch_idx, x, y); + + y += Policy::ThreadRowStride; + } } } -template -__global__ void bgr_to_gray_char_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int bidx) +template +__global__ __launch_bounds__(Policy::BlockSize) void rgb_to_bgr_nhwc( + const nvcv::cuda::Tensor3DWrap src, const nvcv::cuda::Tensor3DWrap dst, + int2 dstSize, int bidx) { - int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - if (dst_x >= dstSize.x || dst_y >= dstSize.y) - return; - const int batch_idx = get_batch_idx(); - int b = *src.ptr(batch_idx, dst_y, dst_x, bidx); - int g = *src.ptr(batch_idx, dst_y, dst_x, 1); - int r = *src.ptr(batch_idx, dst_y, dst_x, bidx ^ 2); - - T gray = (T)CV_DESCALE(b * BY15 + g * GY15 + r * RY15, gray_shift); - *dst.ptr(batch_idx, dst_y, dst_x, 0) = gray; + using EltT = nvcv::cuda::BaseType; + color_conversion_common( + [&src, bidx] __device__(EltT(&r_in)[4], int batch_idx, int x, int y) + { load_bgra_nhwc(src, r_in[0], r_in[1], r_in[2], r_in[3], batch_idx, x, y, bidx); }, + [] __device__(const EltT(&r_in)[4], EltT(&r_out)[4]) + { +#pragma unroll + for (int i = 0; i < 4; i++) + { + r_out[i] = r_in[i]; + } + }, + [&dst] __device__(const EltT(&r_out)[4], int batch_idx, int x, int y) + { store_bgra_nhwc(dst, r_out[0], r_out[1], r_out[2], r_out[3], batch_idx, x, y, 0); }, dstSize); } -template -__global__ void bgr_to_gray_float_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int bidx) +template +__global__ __launch_bounds__(Policy::BlockSize) void gray_to_bgr_nhwc( + const nvcv::cuda::Tensor3DWrap src, const nvcv::cuda::Tensor3DWrap dst, + int2 dstSize) { - int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - int 
dst_y = blockIdx.y * blockDim.y + threadIdx.y; - if (dst_x >= dstSize.x || dst_y >= dstSize.y) - return; - const int batch_idx = get_batch_idx(); - T b = *src.ptr(batch_idx, dst_y, dst_x, bidx); - T g = *src.ptr(batch_idx, dst_y, dst_x, 1); - T r = *src.ptr(batch_idx, dst_y, dst_x, bidx ^ 2); + using EltT = nvcv::cuda::BaseType; + color_conversion_common( + [&src] __device__(EltT(&r_gray)[1], int batch_idx, int x, int y) { r_gray[0] = *src.ptr(batch_idx, y, x); }, + [] __device__(const EltT(&r_gray)[1], EltT(&r_BGRA)[4]) + { +#pragma unroll + for (int i = 0; i < 4; i++) + { + r_BGRA[i] = r_gray[0]; + } + }, + [&dst] __device__(const EltT(&r_BGRA)[4], int batch_idx, int x, int y) + { store_bgra_nhwc(dst, r_BGRA[0], r_BGRA[1], r_BGRA[2], r_BGRA[3], batch_idx, x, y, 0); }, dstSize); +} - T gray = (T)(b * B2YF + g * G2YF + r * R2YF); - *dst.ptr(batch_idx, dst_y, dst_x, 0) = gray; +template +__global__ __launch_bounds__(Policy::BlockSize) void bgr_to_gray_nhwc( + const nvcv::cuda::Tensor3DWrap src, const nvcv::cuda::Tensor3DWrap dst, + int2 dstSize, int bidx) +{ + using EltT = nvcv::cuda::BaseType; + color_conversion_common( + [&src, bidx] __device__(EltT(&r_BGR)[3], int batch_idx, int x, int y) + { + EltT A; + load_bgra_nhwc(src, r_BGR[0], r_BGR[1], r_BGR[2], A, batch_idx, x, y, bidx); + }, + [] __device__(const EltT(&r_BGR)[3], EltT(&r_gray)[1]) + { + if constexpr (std::is_integral_v) + { + r_gray[0] + = (EltT)CV_DESCALE((int)r_BGR[0] * BY15 + (int)r_BGR[1] * GY15 + (int)r_BGR[2] * RY15, gray_shift); + } + else + { + r_gray[0] = (EltT)(r_BGR[0] * B2YF + r_BGR[1] * G2YF + r_BGR[2] * R2YF); + } + }, + [&dst] __device__(const EltT(&r_gray)[1], int batch_idx, int x, int y) + { *dst.ptr(batch_idx, y, x) = r_gray[0]; }, dstSize); } -template -__global__ void bgr_to_yuv_char_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int bidx) +template +__device__ __forceinline__ void bgr_to_yuv_int(T B_, T G_, T R_, T &Y_, T &Cb_, T &Cr_) { - int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - if (dst_x >= dstSize.x || dst_y >= dstSize.y) - return; - const int batch_idx = get_batch_idx(); - int B = *src.ptr(batch_idx, dst_y, dst_x, bidx); - int G = *src.ptr(batch_idx, dst_y, dst_x, 1); - int R = *src.ptr(batch_idx, dst_y, dst_x, bidx ^ 2); - - int C0 = R2Y, C1 = G2Y, C2 = B2Y, C3 = R2VI, C4 = B2UI; - int delta = ((T)(cuda::TypeTraits::max / 2 + 1)) * (1 << yuv_shift); - int Y = CV_DESCALE(R * C0 + G * C1 + B * C2, yuv_shift); - int Cr = CV_DESCALE((R - Y) * C3 + delta, yuv_shift); - int Cb = CV_DESCALE((B - Y) * C4 + delta, yuv_shift); - - *dst.ptr(batch_idx, dst_y, dst_x, 0) = cuda::SaturateCast(Y); - *dst.ptr(batch_idx, dst_y, dst_x, 1) = cuda::SaturateCast(Cb); - *dst.ptr(batch_idx, dst_y, dst_x, 2) = cuda::SaturateCast(Cr); + constexpr int C0 = R2Y, C1 = G2Y, C2 = B2Y, C3 = R2VI, C4 = B2UI; + constexpr int delta = ((T)(cuda::TypeTraits::max / 2 + 1)) * (1 << yuv_shift); + + const int B = B_, G = G_, R = R_; + + const int Y = CV_DESCALE(R * C0 + G * C1 + B * C2, yuv_shift); + const int Cr = CV_DESCALE((R - Y) * C3 + delta, yuv_shift); + const int Cb = CV_DESCALE((B - Y) * C4 + delta, yuv_shift); + + Y_ = cuda::SaturateCast(Y); + Cb_ = cuda::SaturateCast(Cb); + Cr_ = cuda::SaturateCast(Cr); } -template -__global__ void bgr_to_yuv_float_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int bidx) +__device__ __forceinline__ void bgr_to_yuv_float(float B, float G, float R, float &Y, float &Cb, float &Cr) { - int dst_x = blockIdx.x * blockDim.x + 
threadIdx.x; - int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - if (dst_x >= dstSize.x || dst_y >= dstSize.y) - return; - const int batch_idx = get_batch_idx(); - T B = *src.ptr(batch_idx, dst_y, dst_x, bidx); - T G = *src.ptr(batch_idx, dst_y, dst_x, 1); - T R = *src.ptr(batch_idx, dst_y, dst_x, bidx ^ 2); - - T C0 = R2YF, C1 = G2YF, C2 = B2YF, C3 = R2VF, C4 = B2UF; - T delta = 0.5f; - T Y = R * C0 + G * C1 + B * C2; - T Cr = (R - Y) * C3 + delta; - T Cb = (B - Y) * C4 + delta; - *dst.ptr(batch_idx, dst_y, dst_x, 0) = Y; - *dst.ptr(batch_idx, dst_y, dst_x, 1) = Cb; - *dst.ptr(batch_idx, dst_y, dst_x, 2) = Cr; + constexpr float C0 = R2YF, C1 = G2YF, C2 = B2YF, C3 = R2VF, C4 = B2UF; + constexpr float delta = 0.5f; + + Y = R * C0 + G * C1 + B * C2; + Cr = (R - Y) * C3 + delta; + Cb = (B - Y) * C4 + delta; } -template -__global__ void yuv_to_bgr_char_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int bidx) +template +__global__ __launch_bounds__(Policy::BlockSize) void bgr_to_yuv_nhwc( + const nvcv::cuda::Tensor3DWrap src, const nvcv::cuda::Tensor3DWrap dst, + int2 dstSize, int bidx) { - int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - if (dst_x >= dstSize.x || dst_y >= dstSize.y) - return; - const int batch_idx = get_batch_idx(); - T Y = *src.ptr(batch_idx, dst_y, dst_x, 0); - T Cb = *src.ptr(batch_idx, dst_y, dst_x, 1); - T Cr = *src.ptr(batch_idx, dst_y, dst_x, 2); - - int C0 = V2RI, C1 = V2GI, C2 = U2GI, C3 = U2BI; - int delta = ((T)(cuda::TypeTraits::max / 2 + 1)); - int b = Y + CV_DESCALE((Cb - delta) * C3, yuv_shift); - int g = Y + CV_DESCALE((Cb - delta) * C2 + (Cr - delta) * C1, yuv_shift); - int r = Y + CV_DESCALE((Cr - delta) * C0, yuv_shift); - - *dst.ptr(batch_idx, dst_y, dst_x, bidx) = cuda::SaturateCast(b); - *dst.ptr(batch_idx, dst_y, dst_x, 1) = cuda::SaturateCast(g); - *dst.ptr(batch_idx, dst_y, dst_x, bidx ^ 2) = cuda::SaturateCast(r); + using EltT = nvcv::cuda::BaseType; + color_conversion_common( + [&src, bidx] __device__(EltT(&r_BGR)[3], int batch_idx, int x, int y) + { + EltT A; + load_bgra_nhwc(src, r_BGR[0], r_BGR[1], r_BGR[2], A, batch_idx, x, y, bidx); + }, + [] __device__(const EltT(&r_BGR)[3], EltT(&r_YCbCr)[3]) + { + if constexpr (std::is_integral_v) + { + bgr_to_yuv_int(r_BGR[0], r_BGR[1], r_BGR[2], r_YCbCr[0], r_YCbCr[1], r_YCbCr[2]); + } + else + { + bgr_to_yuv_float(r_BGR[0], r_BGR[1], r_BGR[2], r_YCbCr[0], r_YCbCr[1], r_YCbCr[2]); + } + }, + [&dst] __device__(const EltT(&r_YCbCr)[3], int batch_idx, int x, int y) + { store3_nhwc(dst, r_YCbCr[0], r_YCbCr[1], r_YCbCr[2], batch_idx, x, y); }, dstSize); } -template -__global__ void yuv_to_bgr_float_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int bidx) +template +__device__ __forceinline__ void yuv_to_bgr_int(T Y_, T Cb_, T Cr_, T &B_, T &G_, T &R_) { - int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - if (dst_x >= dstSize.x || dst_y >= dstSize.y) - return; - const int batch_idx = get_batch_idx(); - T Y = *src.ptr(batch_idx, dst_y, dst_x, 0); - T Cb = *src.ptr(batch_idx, dst_y, dst_x, 1); - T Cr = *src.ptr(batch_idx, dst_y, dst_x, 2); + constexpr int C0 = V2RI, C1 = V2GI, C2 = U2GI, C3 = U2BI; + constexpr int delta = ((T)(cuda::TypeTraits::max / 2 + 1)); - T C0 = V2RF, C1 = V2GF, C2 = U2GF, C3 = U2BF; - T delta = 0.5f; - T b = Y + (Cb - delta) * C3; - T g = Y + (Cb - delta) * C2 + (Cr - delta) * C1; - T r = Y + (Cr - delta) * C0; + const int Y = Y_, Cb = Cb_, Cr = Cr_; + const int B = Y + 
CV_DESCALE((Cb - delta) * C3, yuv_shift); + const int G = Y + CV_DESCALE((Cb - delta) * C2 + (Cr - delta) * C1, yuv_shift); + const int R = Y + CV_DESCALE((Cr - delta) * C0, yuv_shift); - *dst.ptr(batch_idx, dst_y, dst_x, bidx) = b; - *dst.ptr(batch_idx, dst_y, dst_x, 1) = g; - *dst.ptr(batch_idx, dst_y, dst_x, bidx ^ 2) = r; + B_ = cuda::SaturateCast(B); + G_ = cuda::SaturateCast(G); + R_ = cuda::SaturateCast(R); } -template -__global__ void bgr_to_hsv_char_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int bidx, bool isFullRange) +__device__ __forceinline__ void yuv_to_bgr_float(float Y, float Cb, float Cr, float &B, float &G, float &R) { - int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - if (dst_x >= dstSize.x || dst_y >= dstSize.y) - return; - const int batch_idx = get_batch_idx(); + constexpr float C0 = V2RF, C1 = V2GF, C2 = U2GF, C3 = U2BF; + constexpr float delta = 0.5f; + + B = Y + (Cb - delta) * C3; + G = Y + (Cb - delta) * C2 + (Cr - delta) * C1; + R = Y + (Cr - delta) * C0; +} + +template +__global__ __launch_bounds__(Policy::BlockSize) void yuv_to_bgr_nhwc( + const nvcv::cuda::Tensor3DWrap src, const nvcv::cuda::Tensor3DWrap dst, + int2 dstSize, int bidx) +{ + using EltT = nvcv::cuda::BaseType; + color_conversion_common( + [&src] __device__(EltT(&r_YCbCr)[3], int batch_idx, int x, int y) + { load3_nhwc(src, r_YCbCr[0], r_YCbCr[1], r_YCbCr[2], batch_idx, x, y); }, + [] __device__(const EltT(&r_YCbCr)[3], EltT(&r_BGR)[3]) + { + if constexpr (std::is_integral_v) + { + yuv_to_bgr_int(r_YCbCr[0], r_YCbCr[1], r_YCbCr[2], r_BGR[0], r_BGR[1], r_BGR[2]); + } + else + { + yuv_to_bgr_float(r_YCbCr[0], r_YCbCr[1], r_YCbCr[2], r_BGR[0], r_BGR[1], r_BGR[2]); + } + }, + [&dst, bidx] __device__(const EltT(&r_BGR)[3], int batch_idx, int x, int y) + { store_bgra_nhwc(dst, r_BGR[0], r_BGR[1], r_BGR[2], EltT{0}, batch_idx, x, y, bidx); }, dstSize); +} - int b = *src.ptr(batch_idx, dst_y, dst_x, bidx); - int g = *src.ptr(batch_idx, dst_y, dst_x, 1); - int r = *src.ptr(batch_idx, dst_y, dst_x, bidx ^ 2); - int hrange = isFullRange ? 256 : 180; - int hr = hrange; +__device__ __forceinline__ void bgr_to_hsv_uchar(uchar b8, uchar g8, uchar r8, uchar &h8, uchar &s8, uchar &v8, + bool isFullRange) +{ + const int hrange = isFullRange ? 256 : 180; const int hsv_shift = 12; - int h, s, v = b; - int vmin = b; - int vr, vg; - - v = cuda::max(v, g); - v = cuda::max(v, r); - vmin = cuda::min(vmin, g); - vmin = cuda::min(vmin, r); - - unsigned char diff = cuda::SaturateCast(v - vmin); - vr = v == r ? -1 : 0; - vg = v == g ? -1 : 0; - - int hdiv_table = diff == 0 ? 0 : cuda::SaturateCast((hrange << hsv_shift) / (6. * diff)); - int sdiv_table = v == 0 ? 0 : cuda::SaturateCast((255 << hsv_shift) / (1. * v)); - s = (diff * sdiv_table + (1 << (hsv_shift - 1))) >> hsv_shift; - h = (vr & (g - b)) + (~vr & ((vg & (b - r + 2 * diff)) + ((~vg) & (r - g + 4 * diff)))); - h = (h * hdiv_table + (1 << (hsv_shift - 1))) >> hsv_shift; - h += h < 0 ? hr : 0; - - *dst.ptr(batch_idx, dst_y, dst_x, 0) = cuda::SaturateCast(h); - *dst.ptr(batch_idx, dst_y, dst_x, 1) = (unsigned char)s; - *dst.ptr(batch_idx, dst_y, dst_x, 2) = (unsigned char)v; + + const int b = (int)b8; + const int g = (int)g8; + const int r = (int)r8; + + const int vmin = cuda::min(b, cuda::min(g, r)); + const int v = cuda::max(b, cuda::max(g, r)); + + const int diff = v - vmin; + const int vr = v == r ? -1 : 0; + const int vg = v == g ? -1 : 0; + + const int hdiv_table = diff == 0 ? 
0 : cuda::SaturateCast((hrange << hsv_shift) / (6.f * diff)); + const int sdiv_table = v == 0 ? 0 : cuda::SaturateCast((255 << hsv_shift) / (float)v); + const int s = (diff * sdiv_table + (1 << (hsv_shift - 1))) >> hsv_shift; + int h = (vr & (g - b)) + (~vr & ((vg & (b - r + 2 * diff)) + ((~vg) & (r - g + 4 * diff)))); + h = (h * hdiv_table + (1 << (hsv_shift - 1))) >> hsv_shift; + h += h < 0 ? hrange : 0; + + h8 = cuda::SaturateCast(h); + s8 = (unsigned char)s; + v8 = (unsigned char)v; } -template -__global__ void bgr_to_hsv_float_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int bidx) +__device__ __forceinline__ void bgr_to_hsv_float(float b, float g, float r, float &h, float &s, float &v, + bool isFullRange) { - int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - if (dst_x >= dstSize.x || dst_y >= dstSize.y) - return; - const int batch_idx = get_batch_idx(); + constexpr float hrange = 360.f; + constexpr float hscale = hrange * (1.f / 360.f); - float b = *src.ptr(batch_idx, dst_y, dst_x, bidx); - float g = *src.ptr(batch_idx, dst_y, dst_x, 1); - float r = *src.ptr(batch_idx, dst_y, dst_x, bidx ^ 2); - float h, s, v; - float hrange = 360.0; - float hscale = hrange * (1.f / 360.f); - - float vmin, diff; - - v = vmin = r; - if (v < g) - v = g; - if (v < b) - v = b; - if (vmin > g) - vmin = g; - if (vmin > b) - vmin = b; - - diff = v - vmin; - s = diff / (float)(fabs(v) + FLT_EPSILON); - diff = (float)(60. / (diff + FLT_EPSILON)); + float vmin = min(r, min(g, b)); + v = max(r, max(g, b)); + + float diff = v - vmin; + s = diff / (fabs(v) + FLT_EPSILON); + diff = 60.f / (diff + FLT_EPSILON); if (v == r) h = (g - b) * diff; else if (v == g) @@ -324,107 +420,116 @@ __global__ void bgr_to_hsv_float_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSi else h = (r - g) * diff + 240.f; - if (h < 0) + if (h < 0.f) h += 360.f; - *dst.ptr(batch_idx, dst_y, dst_x, 0) = h * hscale; - *dst.ptr(batch_idx, dst_y, dst_x, 1) = s; - *dst.ptr(batch_idx, dst_y, dst_x, 2) = v; + h = h * hscale; } -__device__ inline void HSV2RGB_native(float h, float s, float v, float &b, float &g, float &r, const float hscale) +template +__global__ __launch_bounds__(Policy::BlockSize) void bgr_to_hsv_nhwc( + const nvcv::cuda::Tensor3DWrap src, const nvcv::cuda::Tensor3DWrap dst, + int2 dstSize, int bidx, bool isFullRange) { - if (s == 0) + using EltT = nvcv::cuda::BaseType; + color_conversion_common( + [&src, bidx] __device__(EltT(&r_BGR)[3], int batch_idx, int x, int y) + { + EltT A; + load_bgra_nhwc(src, r_BGR[0], r_BGR[1], r_BGR[2], A, batch_idx, x, y, bidx); + }, + [isFullRange] __device__(const EltT(&r_BGR)[3], EltT(&r_HSV)[3]) + { + if constexpr (std::is_integral_v) + { + bgr_to_hsv_uchar(r_BGR[0], r_BGR[1], r_BGR[2], r_HSV[0], r_HSV[1], r_HSV[2], isFullRange); + } + else + { + bgr_to_hsv_float(r_BGR[0], r_BGR[1], r_BGR[2], r_HSV[0], r_HSV[1], r_HSV[2], isFullRange); + } + }, + [&dst] __device__(const EltT(&r_HSV)[3], int batch_idx, int x, int y) + { store3_nhwc(dst, r_HSV[0], r_HSV[1], r_HSV[2], batch_idx, x, y); }, dstSize); +} + +template +__device__ __forceinline__ T select4_reg(const T (&tab)[4], int idx) +{ + // Random access in a register array of size 4, with 6 instructions. + // The compiler was generating 10 instructions for tab[idx]. + T out; + out = idx == 1 ? tab[1] : tab[0]; + out = idx == 2 ? tab[2] : out; + out = idx == 3 ? 
tab[3] : out; + return out; +} + +__device__ __forceinline__ void hsv_to_bgr_float(float h, float s, float v, float &b, float &g, float &r, + const float hscale) +{ + if (s == 0.f) b = g = r = v; else { - static const int sector_data[][3] = { - {1, 3, 0}, - {1, 0, 2}, - {3, 0, 1}, - {0, 2, 1}, - {0, 1, 3}, - {2, 1, 0} - }; - float tab[4]; - int sector; h *= hscale; - h = fmod(h, 6.f); - sector = (int)floor(h); - h -= sector; - if ((unsigned)sector >= 6u) - { - sector = 0; - h = 0.f; - } + int hi = (int)h; + int sector = hi % 6; + h -= hi; + float tab[4]; tab[0] = v; tab[1] = v * (1.f - s); tab[2] = v * (1.f - s * h); tab[3] = v * (1.f - s * (1.f - h)); - b = tab[sector_data[sector][0]]; - g = tab[sector_data[sector][1]]; - r = tab[sector_data[sector][2]]; + constexpr int32_t sector_lut_b = 0x00200311; + constexpr int32_t sector_lut_g = 0x00112003; + constexpr int32_t sector_lut_r = 0x00031120; + const int sector_data_b = (sector_lut_b >> (4 * sector)) & 0xf; + const int sector_data_g = (sector_lut_g >> (4 * sector)) & 0xf; + const int sector_data_r = (sector_lut_r >> (4 * sector)) & 0xf; + b = select4_reg(tab, sector_data_b); + g = select4_reg(tab, sector_data_g); + r = select4_reg(tab, sector_data_r); } } -template -__global__ void hsv_to_bgr_char_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int bidx, int dcn, bool isFullRange) -{ - int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - if (dst_x >= dstSize.x || dst_y >= dstSize.y) - return; - const int batch_idx = get_batch_idx(); - - float h = *src.ptr(batch_idx, dst_y, dst_x, 0); - float s = *src.ptr(batch_idx, dst_y, dst_x, 1) * (1.0f / 255.0f); - float v = *src.ptr(batch_idx, dst_y, dst_x, 2) * (1.0f / 255.0f); - - float hrange = isFullRange ? 
255 : 180; - unsigned char alpha = cuda::TypeTraits::max; - float hs = 6.f / hrange; - - float b, g, r; - HSV2RGB_native(h, s, v, b, g, r, hs); - - *dst.ptr(batch_idx, dst_y, dst_x, bidx) = cuda::SaturateCast(b * 255.0f); - *dst.ptr(batch_idx, dst_y, dst_x, 1) = cuda::SaturateCast(g * 255.0f); - *dst.ptr(batch_idx, dst_y, dst_x, bidx ^ 2) = cuda::SaturateCast(r * 255.0f); - if (dcn == 4) - *dst.ptr(batch_idx, dst_y, dst_x, 3) = alpha; -} - -template -__global__ void hsv_to_bgr_float_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int bidx, int dcn) +template +__global__ __launch_bounds__(Policy::BlockSize) void hsv_to_bgr_nhwc( + const nvcv::cuda::Tensor3DWrap src, const nvcv::cuda::Tensor3DWrap dst, + int2 dstSize, int bidx, bool isFullRange) { - int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - if (dst_x >= dstSize.x || dst_y >= dstSize.y) - return; - const int batch_idx = get_batch_idx(); - - float h = *src.ptr(batch_idx, dst_y, dst_x, 0); - float s = *src.ptr(batch_idx, dst_y, dst_x, 1); - float v = *src.ptr(batch_idx, dst_y, dst_x, 2); - - float hrange = 360.0; - float alpha = 1.f; - float hs = 6.f / hrange; - - float b, g, r; - HSV2RGB_native(h, s, v, b, g, r, hs); - - *dst.ptr(batch_idx, dst_y, dst_x, bidx) = b; - *dst.ptr(batch_idx, dst_y, dst_x, 1) = g; - *dst.ptr(batch_idx, dst_y, dst_x, bidx ^ 2) = r; - if (dcn == 4) - *dst.ptr(batch_idx, dst_y, dst_x, 3) = alpha; + using EltT = nvcv::cuda::BaseType; + color_conversion_common( + [&src] __device__(EltT(&r_HSV)[3], int batch_idx, int x, int y) + { load3_nhwc(src, r_HSV[0], r_HSV[1], r_HSV[2], batch_idx, x, y); }, + [isFullRange] __device__(const EltT(&r_HSV)[3], EltT(&r_BGR)[3]) + { + if constexpr (std::is_same_v) + { + const float hs = isFullRange ? (6.f / 255.f) : (6.f / 180.f); + float Bf, Gf, Rf; + hsv_to_bgr_float((float)r_HSV[0], r_HSV[1] / 255.f, r_HSV[2] / 255.f, Bf, Gf, Rf, hs); + r_BGR[0] = cuda::SaturateCast(Bf * 255.f); + r_BGR[1] = cuda::SaturateCast(Gf * 255.f); + r_BGR[2] = cuda::SaturateCast(Rf * 255.f); + } + else + { + constexpr float hs = 6.f / 360.f; + hsv_to_bgr_float(r_HSV[0], r_HSV[1], r_HSV[2], r_BGR[0], r_BGR[1], r_BGR[2], hs); + } + }, + [&dst, bidx] __device__(const EltT(&r_BGR)[3], int batch_idx, int x, int y) + { + constexpr EltT alpha = std::is_floating_point_v ? 
EltT{1} : cuda::TypeTraits::max; + store_bgra_nhwc(dst, r_BGR[0], r_BGR[1], r_BGR[2], alpha, batch_idx, x, y, bidx); + }, + dstSize); } -__device__ __forceinline__ void yuv42xxp_to_bgr_kernel(const int &Y, const int &U, const int &V, uchar &r, uchar &g, - uchar &b) +__device__ __forceinline__ void yuv42xxp_to_bgr(const int &Y, const int &U, const int &V, uchar &r, uchar &g, uchar &b) { //R = 1.164(Y - 16) + 1.596(V - 128) //G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) @@ -446,8 +551,8 @@ __device__ __forceinline__ void yuv42xxp_to_bgr_kernel(const int &Y, const int & b = cuda::SaturateCast(CV_DESCALE((yy + C4 * uu), yuv4xx_shift)); } -__device__ __forceinline__ void bgr_to_yuv42xxp_kernel(const uchar &r, const uchar &g, const uchar &b, uchar &Y, - uchar &U, uchar &V) +__device__ __forceinline__ void bgr_to_yuv42xxp(const uchar &r, const uchar &g, const uchar &b, uchar &Y, uchar &U, + uchar &V) { const int shifted16 = (16 << ITUR_BT_601_SHIFT); const int halfShift = (1 << (ITUR_BT_601_SHIFT - 1)); @@ -463,119 +568,102 @@ __device__ __forceinline__ void bgr_to_yuv42xxp_kernel(const uchar &r, const uch V = cuda::SaturateCast(vv >> ITUR_BT_601_SHIFT); } -template -__global__ void bgr_to_yuv420p_char_nhwc(SrcWrapper src, DstWrapper dst, int2 srcSize, int scn, int bidx, int uidx) +template +__device__ __forceinline__ void store_yuv420(const nvcv::cuda::Tensor4DWrap &dst, EltT Y, EltT U, EltT V, + int2 srcSize, int batch_idx, int x, int y, int uidx) { - int src_x = blockIdx.x * blockDim.x + threadIdx.x; - int src_y = blockIdx.y * blockDim.y + threadIdx.y; - if (src_x >= srcSize.x || src_y >= srcSize.y) - return; - const int batch_idx = get_batch_idx(); - int plane_y_step = srcSize.y * srcSize.x; - int plane_uv_step = plane_y_step / 4; - int uv_x = (src_y % 4 < 2) ? src_x / 2 : (src_x / 2 + srcSize.x / 2); - - uchar b = static_cast(*src.ptr(batch_idx, src_y, src_x, bidx)); - uchar g = static_cast(*src.ptr(batch_idx, src_y, src_x, 1)); - uchar r = static_cast(*src.ptr(batch_idx, src_y, src_x, bidx ^ 2)); - // Ignore gray channel if input is RGBA - - uchar Y{0}, U{0}, V{0}; - bgr_to_yuv42xxp_kernel(r, g, b, Y, U, V); + if constexpr (IsSemiPlanar) + { + const int uv_x = (x % 2 == 0) ? x : (x - 1); - *dst.ptr(batch_idx, src_y, src_x, 0) = Y; - if (src_y % 2 == 0 && src_x % 2 == 0) + *dst.ptr(batch_idx, y, x, 0) = Y; + if (y % 2 == 0 && x % 2 == 0) + { + *dst.ptr(batch_idx, srcSize.y + y / 2, uv_x + uidx) = U; + *dst.ptr(batch_idx, srcSize.y + y / 2, uv_x + (1 - uidx)) = V; + } + } + else { - *dst.ptr(batch_idx, srcSize.y + src_y / 4, uv_x + plane_uv_step * uidx) = U; - *dst.ptr(batch_idx, srcSize.y + src_y / 4, uv_x + plane_uv_step * (1 - uidx)) = V; + const int plane_y_step = srcSize.y * srcSize.x; + const int plane_uv_step = plane_y_step / 4; + const int uv_x = (y % 4 < 2) ? 
x / 2 : (x / 2 + srcSize.x / 2); + + *dst.ptr(batch_idx, y, x, 0) = Y; + if (y % 2 == 0 && x % 2 == 0) + { + *dst.ptr(batch_idx, srcSize.y + y / 4, uv_x + plane_uv_step * uidx) = U; + *dst.ptr(batch_idx, srcSize.y + y / 4, uv_x + plane_uv_step * (1 - uidx)) = V; + } } } -template -__global__ void bgr_to_yuv420sp_char_nhwc(SrcWrapper src, DstWrapper dst, int2 srcSize, int scn, int bidx, int uidx) +template +__global__ void bgr_to_yuv420_char_nhwc(const nvcv::cuda::Tensor3DWrap src, + const nvcv::cuda::Tensor4DWrap dst, int2 srcSize, int bidx, + int uidx) { - int src_x = blockIdx.x * blockDim.x + threadIdx.x; - int src_y = blockIdx.y * blockDim.y + threadIdx.y; - if (src_x >= srcSize.x || src_y >= srcSize.y) - return; - const int batch_idx = get_batch_idx(); - int uv_x = (src_x % 2 == 0) ? src_x : (src_x - 1); - - uchar b = static_cast(*src.ptr(batch_idx, src_y, src_x, bidx)); - uchar g = static_cast(*src.ptr(batch_idx, src_y, src_x, 1)); - uchar r = static_cast(*src.ptr(batch_idx, src_y, src_x, bidx ^ 2)); - // Ignore gray channel if input is RGBA - - uchar Y{0}, U{0}, V{0}; - bgr_to_yuv42xxp_kernel(r, g, b, Y, U, V); - - *dst.ptr(batch_idx, src_y, src_x, 0) = Y; - if (src_y % 2 == 0 && src_x % 2 == 0) - { - *dst.ptr(batch_idx, srcSize.y + src_y / 2, uv_x + uidx) = U; - *dst.ptr(batch_idx, srcSize.y + src_y / 2, uv_x + (1 - uidx)) = V; - } + static_assert(std::is_same_v, EltT>); + color_conversion_common( + [&src, bidx] __device__(EltT(&r_BGR)[3], int batch_idx, int x, int y) + { + EltT A; + load_bgra_nhwc(src, r_BGR[0], r_BGR[1], r_BGR[2], A, batch_idx, x, y, bidx); + }, + [] __device__(const EltT(&r_BGR)[3], EltT(&r_YUV)[3]) + { bgr_to_yuv42xxp(r_BGR[0], r_BGR[1], r_BGR[2], r_YUV[0], r_YUV[1], r_YUV[2]); }, + [&dst, uidx, srcSize] __device__(const EltT(&r_YUV)[3], int batch_idx, int x, int y) + { store_yuv420(dst, r_YUV[0], r_YUV[1], r_YUV[2], srcSize, batch_idx, x, y, uidx); }, srcSize); } -template -__global__ void yuv420sp_to_bgr_char_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int dcn, int bidx, int uidx) +template +__device__ __forceinline__ void load_yuv420(const nvcv::cuda::Tensor4DWrap &src, EltT &Y, EltT &U, + EltT &V, int2 dstSize, int batch_idx, int x, int y, int uidx) { - int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - if (dst_x >= dstSize.x || dst_y >= dstSize.y) - return; - const int batch_idx = get_batch_idx(); - int uv_x = (dst_x % 2 == 0) ? dst_x : (dst_x - 1); - - T Y = *src.ptr(batch_idx, dst_y, dst_x, 0); - T U = *src.ptr(batch_idx, dstSize.y + dst_y / 2, uv_x + uidx); - T V = *src.ptr(batch_idx, dstSize.y + dst_y / 2, uv_x + 1 - uidx); - - uchar r{0}, g{0}, b{0}, a{0xff}; - yuv42xxp_to_bgr_kernel(int(Y), int(U), int(V), r, g, b); + if constexpr (IsSemiPlanar) + { + const int uv_x = (x % 2 == 0) ? x : (x - 1); - *dst.ptr(batch_idx, dst_y, dst_x, bidx) = b; - *dst.ptr(batch_idx, dst_y, dst_x, 1) = g; - *dst.ptr(batch_idx, dst_y, dst_x, bidx ^ 2) = r; - if (dcn == 4) + Y = *src.ptr(batch_idx, y, x, 0); + U = *src.ptr(batch_idx, dstSize.y + y / 2, uv_x + uidx); + V = *src.ptr(batch_idx, dstSize.y + y / 2, uv_x + 1 - uidx); + } + else { - *dst.ptr(batch_idx, dst_y, dst_x, 3) = a; + const int plane_y_step = dstSize.y * dstSize.x; + const int plane_uv_step = plane_y_step / 4; + const int uv_x = (y % 4 < 2) ? 
x / 2 : (x / 2 + dstSize.x / 2); + + Y = *src.ptr(batch_idx, y, x, 0); + U = *src.ptr(batch_idx, dstSize.y + y / 4, uv_x + plane_uv_step * uidx); + V = *src.ptr(batch_idx, dstSize.y + y / 4, uv_x + plane_uv_step * (1 - uidx)); } } -template -__global__ void yuv420p_to_bgr_char_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int dcn, int bidx, int uidx) +template +__global__ __launch_bounds__(Policy::BlockSize) void yuv420_to_bgr_char_nhwc( + const nvcv::cuda::Tensor4DWrap src, const nvcv::cuda::Tensor3DWrap dst, + int2 dstSize, int bidx, int uidx) { - int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - if (dst_x >= dstSize.x || dst_y >= dstSize.y) - return; - - const int batch_idx = get_batch_idx(); - int plane_y_step = dstSize.y * dstSize.x; - int plane_uv_step = plane_y_step / 4; - int uv_x = (dst_y % 4 < 2) ? dst_x / 2 : (dst_x / 2 + dstSize.x / 2); - - T Y = *src.ptr(batch_idx, dst_y, dst_x, 0); - T U = *src.ptr(batch_idx, dstSize.y + dst_y / 4, uv_x + plane_uv_step * uidx); - T V = *src.ptr(batch_idx, dstSize.y + dst_y / 4, uv_x + plane_uv_step * (1 - uidx)); - - uchar r{0}, g{0}, b{0}, a{0xff}; - yuv42xxp_to_bgr_kernel(int(Y), int(U), int(V), r, g, b); - - *dst.ptr(batch_idx, dst_y, dst_x, bidx) = b; - *dst.ptr(batch_idx, dst_y, dst_x, 1) = g; - *dst.ptr(batch_idx, dst_y, dst_x, bidx ^ 2) = r; - if (dcn == 4) - { - *dst.ptr(batch_idx, dst_y, dst_x, 3) = a; - } + static_assert(std::is_same_v, EltT>); + color_conversion_common( + [&src, uidx, dstSize] __device__(EltT(&r_YUV)[3], int batch_idx, int x, int y) + { load_yuv420(src, r_YUV[0], r_YUV[1], r_YUV[2], dstSize, batch_idx, x, y, uidx); }, + [] __device__(const EltT(&r_YUV)[3], EltT(&r_BGR)[3]) + { + yuv42xxp_to_bgr(static_cast(r_YUV[0]), static_cast(r_YUV[1]), static_cast(r_YUV[2]), + r_BGR[0], r_BGR[1], r_BGR[2]); + }, + [&dst, bidx] __device__(const EltT(&r_BGR)[3], int batch_idx, int x, int y) + { store_bgra_nhwc(dst, r_BGR[0], r_BGR[1], r_BGR[2], EltT{0xffu}, batch_idx, x, y, bidx); }, dstSize); } -template +template __global__ void yuv422_to_bgr_char_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int dcn, int bidx, int yidx, int uidx) { + using T = typename SrcWrapper::ValueType; + int dst_x = blockIdx.x * blockDim.x + threadIdx.x; int dst_y = blockIdx.y * blockDim.y + threadIdx.y; if (dst_x >= dstSize.x || dst_y >= dstSize.y) @@ -588,7 +676,7 @@ __global__ void yuv422_to_bgr_char_nhwc(SrcWrapper src, DstWrapper dst, int2 dst T V = *src.ptr(batch_idx, dst_y, uv_x, (1 - yidx) + uidx ^ 2); uchar r{0}, g{0}, b{0}, a{0xff}; - yuv42xxp_to_bgr_kernel(int(Y), int(U), int(V), r, g, b); + yuv42xxp_to_bgr(int(Y), int(U), int(V), r, g, b); *dst.ptr(batch_idx, dst_y, dst_x, bidx) = b; *dst.ptr(batch_idx, dst_y, dst_x, 1) = g; @@ -600,27 +688,34 @@ __global__ void yuv422_to_bgr_char_nhwc(SrcWrapper src, DstWrapper dst, int2 dst } template -__global__ void yuv420_to_gray_char_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize) +__global__ void yuv422_to_gray_char_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int yidx) { int dst_x = blockIdx.x * blockDim.x + threadIdx.x; int dst_y = blockIdx.y * blockDim.y + threadIdx.y; if (dst_x >= dstSize.x || dst_y >= dstSize.y) return; const int batch_idx = get_batch_idx(); - T Y = *src.ptr(batch_idx, dst_y, dst_x, 0); + T Y = *src.ptr(batch_idx, dst_y, dst_x, yidx); *dst.ptr(batch_idx, dst_y, dst_x, 0) = Y; } -template -__global__ void yuv422_to_gray_char_nhwc(SrcWrapper src, DstWrapper dst, int2 dstSize, int yidx) +template +inline 
ErrorCode Launch_BGR_to_RGB(const TensorDataStridedCuda &inData, const TensorDataStridedCuda &outData, + NVCVColorConversionCode code, cuda_op::DataShape shape, int bidx, + cudaStream_t stream) { - int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - if (dst_x >= dstSize.x || dst_y >= dstSize.y) - return; - const int batch_idx = get_batch_idx(); - T Y = *src.ptr(batch_idx, dst_y, dst_x, yidx); - *dst.ptr(batch_idx, dst_y, dst_x, 0) = Y; + using Policy = CvtKernelPolicy<32, 4, 4>; + + dim3 blockSize(Policy::BlockWidth, Policy::BlockHeight); + dim3 gridSize(divUp(shape.W, Policy::TileWidth), divUp(shape.H, Policy::TileHeight), shape.N); + int2 dstSize{shape.W, shape.H}; + + auto srcWrap = cuda::CreateTensorWrapNHW(inData); + auto dstWrap = cuda::CreateTensorWrapNHW(outData); + rgb_to_bgr_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx); + checkKernelErrors(); + + return ErrorCode::SUCCESS; } inline ErrorCode BGR_to_RGB(const TensorDataStridedCuda &inData, const TensorDataStridedCuda &outData, @@ -662,57 +757,55 @@ inline ErrorCode BGR_to_RGB(const TensorDataStridedCuda &inData, const TensorDat return ErrorCode::INVALID_DATA_SHAPE; } - dim3 blockSize(BLOCK, BLOCK / 4, 1); - dim3 gridSize(divUp(inputShape.W, blockSize.x), divUp(inputShape.H, blockSize.y), inputShape.N); - - int2 dstSize{outputShape.W, outputShape.H}; +#define CVCUDA_BGR2RGB_IF(SCH, DCH, SRC_T, DST_T) \ + if (sch == SCH && dch == DCH) \ + return Launch_BGR_to_RGB(inData, outData, code, inputShape, bidx, stream) switch (inDataType) { +#define CVCUDA_BGR2RGB_CASE(T3, T4) \ + CVCUDA_BGR2RGB_IF(3, 3, T3, T3); \ + else CVCUDA_BGR2RGB_IF(3, 4, T3, T4); \ + else CVCUDA_BGR2RGB_IF(4, 3, T4, T3); \ + else CVCUDA_BGR2RGB_IF(4, 4, T4, T4); \ + else return ErrorCode::INVALID_DATA_SHAPE + case kCV_8U: case kCV_8S: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - rgb_to_bgr_nhwc<<>>(srcWrap, dstWrap, dstSize, sch, dch, bidx); - checkKernelErrors(); - } - break; + CVCUDA_BGR2RGB_CASE(uchar3, uchar4); case kCV_16U: case kCV_16F: case kCV_16S: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - rgb_to_bgr_nhwc<<>>(srcWrap, dstWrap, dstSize, sch, dch, bidx); - checkKernelErrors(); - } - break; + CVCUDA_BGR2RGB_CASE(ushort3, ushort4); case kCV_32S: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - rgb_to_bgr_nhwc<<>>(srcWrap, dstWrap, dstSize, sch, dch, bidx); - checkKernelErrors(); - } - break; + CVCUDA_BGR2RGB_CASE(int3, int4); case kCV_32F: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - rgb_to_bgr_nhwc<<>>(srcWrap, dstWrap, dstSize, sch, dch, bidx); - checkKernelErrors(); - } - break; + CVCUDA_BGR2RGB_CASE(float3, float4); case kCV_64F: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - rgb_to_bgr_nhwc<<>>(srcWrap, dstWrap, dstSize, sch, dch, bidx); - checkKernelErrors(); - } - break; + CVCUDA_BGR2RGB_CASE(double3, double4); + +#undef CVCUDA_BGR2RGB_CASE } +#undef CVCUDA_BGR2RGB_IF + return ErrorCode::SUCCESS; +} + +template +inline ErrorCode Launch_GRAY_to_BGR(const TensorDataStridedCuda &inData, const TensorDataStridedCuda &outData, + cuda_op::DataShape shape, cudaStream_t stream) +{ + using Policy = CvtKernelPolicy<32, 4, 8>; + + dim3 blockSize(Policy::BlockWidth, Policy::BlockHeight); 
+ dim3 gridSize(divUp(shape.W, Policy::TileWidth), divUp(shape.H, Policy::TileHeight), shape.N); + + int2 dstSize{shape.W, shape.H}; + + auto srcWrap = cuda::CreateTensorWrapNHW(inData); + auto dstWrap = cuda::CreateTensorWrapNHW(outData); + gray_to_bgr_nhwc<<>>(srcWrap, dstWrap, dstSize); + checkKernelErrors(); + return ErrorCode::SUCCESS; } @@ -750,57 +843,53 @@ inline ErrorCode GRAY_to_BGR(const TensorDataStridedCuda &inData, const TensorDa return ErrorCode::INVALID_DATA_SHAPE; } - dim3 blockSize(BLOCK, BLOCK / 4, 1); - dim3 gridSize(divUp(inputShape.W, blockSize.x), divUp(inputShape.H, blockSize.y), inputShape.N); - - int2 dstSize{outputShape.W, outputShape.H}; +#define CVCUDA_GRAY2BGR_IF(DCH, SRC_T, DST_T) \ + if (dch == DCH) \ + return Launch_GRAY_to_BGR(inData, outData, inputShape, stream) switch (inDataType) { +#define CVCUDA_GRAY2BGR_CASE(T, T3, T4) \ + CVCUDA_GRAY2BGR_IF(3, T, T3); \ + else CVCUDA_GRAY2BGR_IF(4, T, T4); \ + else return ErrorCode::INVALID_DATA_SHAPE + case kCV_8U: case kCV_8S: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - gray_to_bgr_nhwc<<>>(srcWrap, dstWrap, dstSize, dch); - checkKernelErrors(); - } - break; + CVCUDA_GRAY2BGR_CASE(uchar, uchar3, uchar4); case kCV_16U: case kCV_16F: case kCV_16S: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - gray_to_bgr_nhwc<<>>(srcWrap, dstWrap, dstSize, dch); - checkKernelErrors(); - } - break; + CVCUDA_GRAY2BGR_CASE(ushort, ushort3, ushort4); case kCV_32S: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - gray_to_bgr_nhwc<<>>(srcWrap, dstWrap, dstSize, dch); - checkKernelErrors(); - } - break; + CVCUDA_GRAY2BGR_CASE(int, int3, int4); case kCV_32F: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - gray_to_bgr_nhwc<<>>(srcWrap, dstWrap, dstSize, dch); - checkKernelErrors(); - } - break; + CVCUDA_GRAY2BGR_CASE(float, float3, float4); case kCV_64F: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - gray_to_bgr_nhwc<<>>(srcWrap, dstWrap, dstSize, dch); - checkKernelErrors(); - } - break; + CVCUDA_GRAY2BGR_CASE(double, double3, double4); + +#undef CVCUDA_GRAY2BGR_CASE } +#undef CVCUDA_GRAY2BGR_IF + return ErrorCode::SUCCESS; +} + +template +inline ErrorCode Launch_BGR_to_GRAY(const TensorDataStridedCuda &inData, const TensorDataStridedCuda &outData, + cuda_op::DataShape shape, int bidx, cudaStream_t stream) +{ + using Policy = CvtKernelPolicy<32, 4, 4>; + + dim3 blockSize(Policy::BlockWidth, Policy::BlockHeight); + dim3 gridSize(divUp(shape.W, Policy::TileWidth), divUp(shape.H, Policy::TileHeight), shape.N); + + int2 dstSize{shape.W, shape.H}; + + auto srcWrap = cuda::CreateTensorWrapNHW(inData); + auto dstWrap = cuda::CreateTensorWrapNHW(outData); + bgr_to_gray_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx); + checkKernelErrors(); + return ErrorCode::SUCCESS; } @@ -839,41 +928,49 @@ inline ErrorCode BGR_to_GRAY(const TensorDataStridedCuda &inData, const TensorDa return ErrorCode::INVALID_DATA_SHAPE; } - dim3 blockSize(BLOCK, BLOCK / 4, 1); - dim3 gridSize(divUp(inputShape.W, blockSize.x), divUp(inputShape.H, blockSize.y), inputShape.N); - - int2 dstSize{outputShape.W, outputShape.H}; +#define CVCUDA_BGR2GRAY_IF(SCH, SRC_T, DST_T) \ + if (sch == SCH) \ + return Launch_BGR_to_GRAY(inData, outData, inputShape, bidx, stream) 
switch (inDataType) { +#define CVCUDA_BGR2GRAY_CASE(T, T3, T4) \ + CVCUDA_BGR2GRAY_IF(3, T3, T); \ + else CVCUDA_BGR2GRAY_IF(4, T4, T); \ + else return ErrorCode::INVALID_DATA_SHAPE + case kCV_8U: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - bgr_to_gray_char_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx); - checkKernelErrors(); - } - break; + CVCUDA_BGR2GRAY_CASE(uchar, uchar3, uchar4); case kCV_16U: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - bgr_to_gray_char_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx); - checkKernelErrors(); - } - break; + CVCUDA_BGR2GRAY_CASE(ushort, ushort3, ushort4); case kCV_32F: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - bgr_to_gray_float_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx); - checkKernelErrors(); - } - break; + CVCUDA_BGR2GRAY_CASE(float, float3, float4); default: LOG_ERROR("Unsupported DataType " << inDataType); return ErrorCode::INVALID_DATA_TYPE; + +#undef CVCUDA_BGR2GRAY_CASE } +#undef CVCUDA_BGR2GRAY_IF + return ErrorCode::SUCCESS; +} + +template +inline ErrorCode Launch_BGR_to_YUV(const TensorDataStridedCuda &inData, const TensorDataStridedCuda &outData, + cuda_op::DataShape shape, int bidx, cudaStream_t stream) +{ + using Policy = CvtKernelPolicy<32, 4, 4>; + + dim3 blockSize(Policy::BlockWidth, Policy::BlockHeight); + dim3 gridSize(divUp(shape.W, Policy::TileWidth), divUp(shape.H, Policy::TileHeight), shape.N); + + int2 dstSize{shape.W, shape.H}; + + auto srcWrap = cuda::CreateTensorWrapNHW(inData); + auto dstWrap = cuda::CreateTensorWrapNHW(outData); + bgr_to_yuv_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx); + checkKernelErrors(); + return ErrorCode::SUCCESS; } @@ -910,44 +1007,44 @@ inline ErrorCode BGR_to_YUV(const TensorDataStridedCuda &inData, const TensorDat return ErrorCode::INVALID_DATA_SHAPE; } - dim3 blockSize(BLOCK, BLOCK / 4, 1); - dim3 gridSize(divUp(inputShape.W, blockSize.x), divUp(inputShape.H, blockSize.y), inputShape.N); - - int2 dstSize{outputShape.W, outputShape.H}; - switch (inDataType) { +#define CVCUDA_BGR2YUV_CASE(T3) return Launch_BGR_to_YUV(inData, outData, inputShape, bidx, stream) + case kCV_8U: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - bgr_to_yuv_char_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx); - checkKernelErrors(); - } - break; + CVCUDA_BGR2YUV_CASE(uchar3); case kCV_16U: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - bgr_to_yuv_char_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx); - checkKernelErrors(); - } - break; + CVCUDA_BGR2YUV_CASE(ushort3); case kCV_32F: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - bgr_to_yuv_float_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx); - checkKernelErrors(); - } - break; + CVCUDA_BGR2YUV_CASE(float3); default: LOG_ERROR("Unsupported DataType " << inDataType); return ErrorCode::INVALID_DATA_TYPE; + +#undef CVCUDA_BGR2YUV_CASE } return ErrorCode::SUCCESS; } +template +inline ErrorCode Launch_YUV_to_BGR(const TensorDataStridedCuda &inData, const TensorDataStridedCuda &outData, + cuda_op::DataShape shape, int bidx, cudaStream_t stream) +{ + using Policy = CvtKernelPolicy<32, 4, 4>; + + dim3 blockSize(Policy::BlockWidth, Policy::BlockHeight); + dim3 gridSize(divUp(shape.W, Policy::TileWidth), divUp(shape.H, 
Policy::TileHeight), shape.N); + + int2 dstSize{shape.W, shape.H}; + + auto srcWrap = cuda::CreateTensorWrapNHW(inData); + auto dstWrap = cuda::CreateTensorWrapNHW(outData); + yuv_to_bgr_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx); + checkKernelErrors(); + + return ErrorCode::SUCCESS; +} + inline ErrorCode YUV_to_BGR(const TensorDataStridedCuda &inData, const TensorDataStridedCuda &outData, NVCVColorConversionCode code, cudaStream_t stream) { @@ -981,41 +1078,51 @@ inline ErrorCode YUV_to_BGR(const TensorDataStridedCuda &inData, const TensorDat return ErrorCode::INVALID_DATA_SHAPE; } - dim3 blockSize(BLOCK, BLOCK / 4, 1); - dim3 gridSize(divUp(inputShape.W, blockSize.x), divUp(inputShape.H, blockSize.y), inputShape.N); - - int2 dstSize{outputShape.W, outputShape.H}; - switch (inDataType) { +#define CVCUDA_YUV2BGR_CASE(T3) return Launch_YUV_to_BGR(inData, outData, inputShape, bidx, stream) + case kCV_8U: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - yuv_to_bgr_char_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx); - checkKernelErrors(); - } - break; + CVCUDA_YUV2BGR_CASE(uchar3); case kCV_16U: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - yuv_to_bgr_char_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx); - checkKernelErrors(); - } - break; + CVCUDA_YUV2BGR_CASE(ushort3); case kCV_32F: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - yuv_to_bgr_float_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx); - checkKernelErrors(); - } - break; + CVCUDA_YUV2BGR_CASE(float3); default: LOG_ERROR("Unsupported DataType " << inDataType); return ErrorCode::INVALID_DATA_TYPE; + +#undef CVCUDA_YUV2BGR_CASE + } + return ErrorCode::SUCCESS; +} + +template +inline ErrorCode Launch_BGR_to_HSV(const TensorDataStridedCuda &inData, const TensorDataStridedCuda &outData, + cuda_op::DataShape shape, int bidx, bool isFullRange, bool strides_64b, + cudaStream_t stream) +{ + using Policy = CvtKernelPolicy<32, 4, 4>; + + dim3 blockSize(Policy::BlockWidth, Policy::BlockHeight); + dim3 gridSize(divUp(shape.W, Policy::TileWidth), divUp(shape.H, Policy::TileHeight), shape.N); + + int2 dstSize{shape.W, shape.H}; + + if (strides_64b) + { + auto srcWrap = cuda::CreateTensorWrapNHW(inData); + auto dstWrap = cuda::CreateTensorWrapNHW(outData); + bgr_to_hsv_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx, isFullRange); + } + else + { + auto srcWrap = cuda::CreateTensorWrapNHW(inData); + auto dstWrap = cuda::CreateTensorWrapNHW(outData); + bgr_to_hsv_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx, isFullRange); } + checkKernelErrors(); + return ErrorCode::SUCCESS; } @@ -1053,36 +1160,57 @@ inline ErrorCode BGR_to_HSV(const TensorDataStridedCuda &inData, const TensorDat return ErrorCode::INVALID_DATA_SHAPE; } - dim3 blockSize(BLOCK, BLOCK / 4, 1); - dim3 gridSize(divUp(inputShape.W, blockSize.x), divUp(inputShape.H, blockSize.y), inputShape.N); - - int2 dstSize{outputShape.W, outputShape.H}; + const bool strides_64b = std::max(inAccess->sampleStride() * inAccess->numSamples(), + outAccess->sampleStride() * outAccess->numSamples()) + > nvcv::cuda::TypeTraits::max; switch (inDataType) { +#define CVCUDA_BGR2HSV_CASE(T3) \ + return Launch_BGR_to_HSV(inData, outData, inputShape, bidx, isFullRange, strides_64b, stream) + case kCV_8U: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - bgr_to_hsv_char_nhwc<<>>(srcWrap, 
dstWrap, dstSize, bidx, isFullRange); - checkKernelErrors(); - } - break; + CVCUDA_BGR2HSV_CASE(uchar3); case kCV_32F: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - bgr_to_hsv_float_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx); - checkKernelErrors(); - } - break; + CVCUDA_BGR2HSV_CASE(float3); default: LOG_ERROR("Unsupported DataType " << inDataType); return ErrorCode::INVALID_DATA_TYPE; + +#undef CVCUDA_BGR2HSV_CASE } return ErrorCode::SUCCESS; } +template +inline ErrorCode Launch_HSV_to_BGR(const TensorDataStridedCuda &inData, const TensorDataStridedCuda &outData, + cuda_op::DataShape shape, int bidx, bool isFullRange, bool strides_64b, + cudaStream_t stream) +{ + using Policy = CvtKernelPolicy<32, 4, 4>; + + dim3 blockSize(Policy::BlockWidth, Policy::BlockHeight); + dim3 gridSize(divUp(shape.W, Policy::TileWidth), divUp(shape.H, Policy::TileHeight), shape.N); + + int2 dstSize{shape.W, shape.H}; + + if (strides_64b) + { + auto srcWrap = cuda::CreateTensorWrapNHW(inData); + auto dstWrap = cuda::CreateTensorWrapNHW(outData); + hsv_to_bgr_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx, isFullRange); + } + else + { + auto srcWrap = cuda::CreateTensorWrapNHW(inData); + auto dstWrap = cuda::CreateTensorWrapNHW(outData); + hsv_to_bgr_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx, isFullRange); + } + checkKernelErrors(); + + return ErrorCode::SUCCESS; +} + inline ErrorCode HSV_to_BGR(const TensorDataStridedCuda &inData, const TensorDataStridedCuda &outData, NVCVColorConversionCode code, cudaStream_t stream) { @@ -1122,34 +1250,64 @@ inline ErrorCode HSV_to_BGR(const TensorDataStridedCuda &inData, const TensorDat return ErrorCode::INVALID_DATA_SHAPE; } - dim3 blockSize(BLOCK, BLOCK / 4, 1); - dim3 gridSize(divUp(inputShape.W, blockSize.x), divUp(inputShape.H, blockSize.y), inputShape.N); - - int2 dstSize{outputShape.W, outputShape.H}; - int dcn = outputShape.C; + const int dcn = outputShape.C; + const bool strides_64b = std::max(inAccess->sampleStride() * inAccess->numSamples(), + outAccess->sampleStride() * outAccess->numSamples()) + > nvcv::cuda::TypeTraits::max; switch (inDataType) { +#define CVCUDA_HSV2BGR_CASE(T3, T4) \ + if (dcn == 3) \ + return Launch_HSV_to_BGR(inData, outData, inputShape, bidx, isFullRange, strides_64b, stream); \ + else \ + return Launch_HSV_to_BGR(inData, outData, inputShape, bidx, isFullRange, strides_64b, stream) + case kCV_8U: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - hsv_to_bgr_char_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx, dcn, isFullRange); - checkKernelErrors(); - } - break; + CVCUDA_HSV2BGR_CASE(uchar3, uchar4); case kCV_32F: - { - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); - hsv_to_bgr_float_nhwc<<>>(srcWrap, dstWrap, dstSize, bidx, dcn); - checkKernelErrors(); - } - break; + CVCUDA_HSV2BGR_CASE(float3, float4); default: LOG_ERROR("Unsupported DataType " << inDataType); return ErrorCode::INVALID_DATA_TYPE; + +#undef CVCUDA_HSV2BGR_CASE + } + return ErrorCode::SUCCESS; +} + +template +inline ErrorCode Launch_YUV420xp_to_BGR(const TensorDataStridedCuda &inData, const TensorDataStridedCuda &outData, + cuda_op::DataShape shape, int bidx, int uidx, bool strides_64b, + cudaStream_t stream) +{ + using Policy = CvtKernelPolicy<32, 4, 4>; + + dim3 blockSize(Policy::BlockWidth, Policy::BlockHeight); + dim3 gridSize(divUp(shape.W, Policy::TileWidth), divUp(shape.H, Policy::TileHeight), 
shape.N); + + int2 dstSize{shape.W, shape.H}; + + if (strides_64b) + { + // YUV420 input: 4D tensor with scalar type. + auto srcWrap = cuda::CreateTensorWrapNHWC(inData); + // BGR output: 3D tensor with vector type. + auto dstWrap = cuda::CreateTensorWrapNHW(outData); + yuv420_to_bgr_char_nhwc + <<>>(srcWrap, dstWrap, dstSize, bidx, uidx); } + else + { + // YUV420 input: 4D tensor with scalar type. + auto srcWrap = cuda::CreateTensorWrapNHWC(inData); + // BGR output: 3D tensor with vector type. + auto dstWrap = cuda::CreateTensorWrapNHW(outData); + yuv420_to_bgr_char_nhwc + <<>>(srcWrap, dstWrap, dstSize, bidx, uidx); + } + checkKernelErrors(); + return ErrorCode::SUCCESS; } @@ -1182,7 +1340,8 @@ inline ErrorCode YUV420xp_to_BGR(const TensorDataStridedCuda &inData, const Tens cuda_op::DataType outDataType = helpers::GetLegacyDataType(outData.dtype()); cuda_op::DataShape outputShape = helpers::GetLegacyDataShape(outAccess->infoShape()); - if (outputShape.C != 3 && outputShape.C != 4) + if ((code != NVCV_COLOR_YUV2GRAY_420 && outputShape.C != 3 && outputShape.C != 4) + || (code == NVCV_COLOR_YUV2GRAY_420 && outputShape.C != 1)) { LOG_ERROR("Invalid output channel number " << outputShape.C); return ErrorCode::INVALID_DATA_SHAPE; @@ -1212,24 +1371,16 @@ inline ErrorCode YUV420xp_to_BGR(const TensorDataStridedCuda &inData, const Tens return ErrorCode::INVALID_DATA_SHAPE; } - dim3 blockSize(BLOCK, BLOCK / 1, 1); - dim3 gridSize(divUp(rgb_width, blockSize.x), divUp(rgb_height, blockSize.y), inputShape.N); - - int2 dstSize{outputShape.W, outputShape.H}; - int dcn = outputShape.C; - - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); + const int dcn = outputShape.C; + const bool strides_64b = std::max(inAccess->sampleStride() * inAccess->numSamples(), + outAccess->sampleStride() * outAccess->numSamples()) + > nvcv::cuda::TypeTraits::max; switch (code) { case NVCV_COLOR_YUV2GRAY_420: { - /* Method 1 */ - // yuv420_to_gray_char_nhwc<<>>(srcWrap, dstWrap, dstSize); - // checkKernelErrors(); - - /* Method 2 (Better performance, but only works with fixed input shapes) */ + /* Better performance than a kernel, but only works with fixed input shapes */ int dpitch = static_cast(outAccess->sampleStride()); int spitch = static_cast(inAccess->sampleStride()); int cpy_width = static_cast(outAccess->sampleStride()); @@ -1247,11 +1398,16 @@ inline ErrorCode YUV420xp_to_BGR(const TensorDataStridedCuda &inData, const Tens case NVCV_COLOR_YUV2RGB_NV21: case NVCV_COLOR_YUV2RGBA_NV12: case NVCV_COLOR_YUV2RGBA_NV21: - { - yuv420sp_to_bgr_char_nhwc<<>>(srcWrap, dstWrap, dstSize, dcn, bidx, uidx); - checkKernelErrors(); - } - break; + if (dcn == 3) + { + return Launch_YUV420xp_to_BGR(inData, outData, outputShape, bidx, uidx, strides_64b, + stream); + } + else + { + return Launch_YUV420xp_to_BGR(inData, outData, outputShape, bidx, uidx, strides_64b, + stream); + } case NVCV_COLOR_YUV2BGR_YV12: case NVCV_COLOR_YUV2BGR_IYUV: case NVCV_COLOR_YUV2BGRA_YV12: @@ -1260,11 +1416,16 @@ inline ErrorCode YUV420xp_to_BGR(const TensorDataStridedCuda &inData, const Tens case NVCV_COLOR_YUV2RGB_IYUV: case NVCV_COLOR_YUV2RGBA_YV12: case NVCV_COLOR_YUV2RGBA_IYUV: - { - yuv420p_to_bgr_char_nhwc<<>>(srcWrap, dstWrap, dstSize, dcn, bidx, uidx); - checkKernelErrors(); - } - break; + if (dcn == 3) + { + return Launch_YUV420xp_to_BGR(inData, outData, outputShape, bidx, uidx, strides_64b, + stream); + } + else + { + return Launch_YUV420xp_to_BGR(inData, outData, outputShape, bidx, uidx, 
strides_64b, + stream); + } default: LOG_ERROR("Unsupported conversion code " << code); return ErrorCode::INVALID_PARAMETER; @@ -1370,34 +1531,38 @@ inline ErrorCode YUV422_to_BGR(const TensorDataStridedCuda &inData, const Tensor return ErrorCode::SUCCESS; } -template -inline static void bgr_to_yuv420p_launcher(SrcWrapper srcWrap, DstWrapper dstWrap, DataShape inputShape, int bidx, - int uidx, cudaStream_t stream) +template +inline ErrorCode Launch_BGR_to_YUV420xp(const TensorDataStridedCuda &inData, const TensorDataStridedCuda &outData, + DataShape inputShape, int bidx, int uidx, bool strides_64b, cudaStream_t stream) { + using Policy = CvtKernelPolicy<32, 4, 4>; + int2 srcSize{inputShape.W, inputShape.H}; - // method 1 - dim3 blockSize(BLOCK, BLOCK / 1, 1); - dim3 gridSize(divUp(inputShape.W, blockSize.x), divUp(inputShape.H, blockSize.y), inputShape.N); - bgr_to_yuv420p_char_nhwc<<>>(srcWrap, dstWrap, srcSize, inputShape.C, bidx, uidx); - checkKernelErrors(); - // method 2 (TODO) - // NPP -} + dim3 blockSize(Policy::BlockWidth, Policy::BlockHeight); + dim3 gridSize(divUp(inputShape.W, Policy::TileWidth), divUp(inputShape.H, Policy::TileHeight), inputShape.N); -template -inline static void bgr_to_yuv420sp_launcher(SrcWrapper srcWrap, DstWrapper dstWrap, DataShape inputShape, int bidx, - int uidx, cudaStream_t stream) -{ - int2 srcSize{inputShape.W, inputShape.H}; - // method 1 - dim3 blockSize(BLOCK, BLOCK / 1, 1); - dim3 gridSize(divUp(inputShape.W, blockSize.x), divUp(inputShape.H, blockSize.y), inputShape.N); - bgr_to_yuv420sp_char_nhwc<<>>(srcWrap, dstWrap, srcSize, inputShape.C, bidx, uidx); + if (strides_64b) + { + // BGR input: 3D tensor with vector type. + auto srcWrap = cuda::CreateTensorWrapNHW(inData); + // YUV420 output: 4D tensor with scalar type. + auto dstWrap = cuda::CreateTensorWrapNHWC(outData); + bgr_to_yuv420_char_nhwc + <<>>(srcWrap, dstWrap, srcSize, bidx, uidx); + } + else + { + // BGR input: 3D tensor with vector type. + auto srcWrap = cuda::CreateTensorWrapNHW(inData); + // YUV420 output: 4D tensor with scalar type. 
+ auto dstWrap = cuda::CreateTensorWrapNHWC(outData); + bgr_to_yuv420_char_nhwc + <<>>(srcWrap, dstWrap, srcSize, bidx, uidx); + } checkKernelErrors(); - // method 2 (TODO) - // NPP + return ErrorCode::SUCCESS; } inline ErrorCode BGR_to_YUV420xp(const TensorDataStridedCuda &inData, const TensorDataStridedCuda &outData, @@ -1454,10 +1619,9 @@ inline ErrorCode BGR_to_YUV420xp(const TensorDataStridedCuda &inData, const Tens return ErrorCode::INVALID_DATA_SHAPE; } - // BGR input - auto srcWrap = cuda::CreateTensorWrapNHWC(inData); - // YUV420 output - auto dstWrap = cuda::CreateTensorWrapNHWC(outData); + const bool strides_64b = std::max(inAccess->sampleStride() * inAccess->numSamples(), + outAccess->sampleStride() * outAccess->numSamples()) + > nvcv::cuda::TypeTraits::max; switch (code) { @@ -1469,11 +1633,16 @@ inline ErrorCode BGR_to_YUV420xp(const TensorDataStridedCuda &inData, const Tens case NVCV_COLOR_RGB2YUV_NV21: case NVCV_COLOR_RGBA2YUV_NV12: case NVCV_COLOR_RGBA2YUV_NV21: - { - bgr_to_yuv420sp_launcher(srcWrap, dstWrap, inputShape, bidx, uidx, stream); - checkKernelErrors(); - } - break; + if (inputShape.C == 3) + { + return Launch_BGR_to_YUV420xp(inData, outData, inputShape, bidx, uidx, strides_64b, + stream); + } + else + { + return Launch_BGR_to_YUV420xp(inData, outData, inputShape, bidx, uidx, strides_64b, + stream); + } case NVCV_COLOR_BGR2YUV_YV12: case NVCV_COLOR_BGR2YUV_IYUV: case NVCV_COLOR_BGRA2YUV_YV12: @@ -1483,8 +1652,16 @@ inline ErrorCode BGR_to_YUV420xp(const TensorDataStridedCuda &inData, const Tens case NVCV_COLOR_RGBA2YUV_YV12: case NVCV_COLOR_RGBA2YUV_IYUV: { - bgr_to_yuv420p_launcher(srcWrap, dstWrap, inputShape, bidx, uidx, stream); - checkKernelErrors(); + if (inputShape.C == 3) + { + return Launch_BGR_to_YUV420xp(inData, outData, inputShape, bidx, uidx, strides_64b, + stream); + } + else + { + return Launch_BGR_to_YUV420xp(inData, outData, inputShape, bidx, uidx, strides_64b, + stream); + } } break; default: diff --git a/src/cvcuda/priv/legacy/cvt_color_var_shape.cu b/src/cvcuda/priv/legacy/cvt_color_var_shape.cu index 8113fe43a..d40fa847c 100644 --- a/src/cvcuda/priv/legacy/cvt_color_var_shape.cu +++ b/src/cvcuda/priv/legacy/cvt_color_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/erase.cu b/src/cvcuda/priv/legacy/erase.cu index 336440a8e..226b4d462 100644 --- a/src/cvcuda/priv/legacy/erase.cu +++ b/src/cvcuda/priv/legacy/erase.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/erase_var_shape.cu b/src/cvcuda/priv/legacy/erase_var_shape.cu index e08c405c8..528f7084d 100644 --- a/src/cvcuda/priv/legacy/erase_var_shape.cu +++ b/src/cvcuda/priv/legacy/erase_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
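[Editor's sketch — not part of the patch] The Launch_* helpers introduced in the cvt_color.cu hunk above all share one dispatch idea: test whether any sample's byte offset can exceed the 32-bit range (the strides_64b check) and instantiate the tensor wraps, and therefore the kernel, with either a 32-bit or a 64-bit stride type. The snippet below is a minimal standalone illustration of that pattern under assumed names; WrapNHW and launchConvert are stand-ins, not the CV-CUDA wrapper API, and the int32_t cutoff mirrors the strides_64b expression in the hunk. The presumed benefit is narrower index arithmetic (and lower register pressure) on the common small-tensor path.

// Sketch only: pick a 32-bit stride specialization when every sample offset
// fits in int32_t, otherwise fall back to 64-bit strides.
#include <cstdint>
#include <limits>

template<typename StrideT>
struct WrapNHW // stand-in for the NHW tensor wrap used in the patch
{
    unsigned char *base;
    StrideT        sampleStride, rowStride;
};

template<typename StrideT>
void launchConvert(unsigned char *base, int64_t sampleStride, int64_t rowStride)
{
    WrapNHW<StrideT> wrap{base, static_cast<StrideT>(sampleStride),
                          static_cast<StrideT>(rowStride)};
    // the <<<gridSize, blockSize, 0, stream>>> kernel launch would go here
    (void)wrap;
}

void launch(unsigned char *base, int64_t sampleStride, int64_t rowStride, int numSamples)
{
    const bool strides64b = sampleStride * numSamples
                          > std::numeric_limits<int32_t>::max();
    if (strides64b)
        launchConvert<int64_t>(base, sampleStride, rowStride);
    else
        launchConvert<int32_t>(base, sampleStride, rowStride);
}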
* * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/filter.cu b/src/cvcuda/priv/legacy/filter.cu index 643b5dbe4..0487d3f10 100644 --- a/src/cvcuda/priv/legacy/filter.cu +++ b/src/cvcuda/priv/legacy/filter.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 @@ -26,7 +26,7 @@ #include "CvCudaUtils.cuh" #include "filter_utils.cuh" -#include +#include using namespace nvcv::legacy::cuda_op; using namespace nvcv::legacy::helpers; diff --git a/src/cvcuda/priv/legacy/filter_utils.cu b/src/cvcuda/priv/legacy/filter_utils.cu index 5a88b24b4..8d0636476 100644 --- a/src/cvcuda/priv/legacy/filter_utils.cu +++ b/src/cvcuda/priv/legacy/filter_utils.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,11 +17,11 @@ #include "../Assert.h" -#include // for DropCast, etc. -#include // for math operators -#include // for sqrt, etc. -#include // for SaturateCast, etc. -#include // for TensorWrap, etc. +#include // for DropCast, etc. +#include // for math operators +#include // for sqrt, etc. +#include // for SaturateCast, etc. +#include // for TensorWrap, etc. namespace nvcv::legacy::cuda_op { diff --git a/src/cvcuda/priv/legacy/filter_utils.cuh b/src/cvcuda/priv/legacy/filter_utils.cuh index a6248dc44..48776ca89 100644 --- a/src/cvcuda/priv/legacy/filter_utils.cuh +++ b/src/cvcuda/priv/legacy/filter_utils.cuh @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ #ifndef FILTER_UTILS_CUH #define FILTER_UTILS_CUH -#include // for TensorWrap, etc. +#include // for TensorWrap, etc. namespace nvcv::legacy::cuda_op { diff --git a/src/cvcuda/priv/legacy/filter_var_shape.cu b/src/cvcuda/priv/legacy/filter_var_shape.cu index 9be8f0434..4cea24125 100644 --- a/src/cvcuda/priv/legacy/filter_var_shape.cu +++ b/src/cvcuda/priv/legacy/filter_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/flip.cu b/src/cvcuda/priv/legacy/flip.cu index f7fded805..7d37f325e 100644 --- a/src/cvcuda/priv/legacy/flip.cu +++ b/src/cvcuda/priv/legacy/flip.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
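[Editor's sketch — not part of the patch] The NVCV_COLOR_YUV2GRAY_420 branch earlier in the cvt_color.cu hunk keeps a pitched-copy path instead of a kernel: in a 420 layout the Y plane is already the grayscale result, so only the first H rows of the W x (3H/2) buffer need to be copied. The helper below is a self-contained sketch of that technique with assumed buffer names and sizes rather than the operator's tensor accessors.

// Sketch: extract the Y plane of an NV12/I420 image as a grayscale image.
// srcYUV holds W x (3H/2) bytes per sample; dstGray holds W x H bytes.
#include <cuda_runtime.h>

cudaError_t copyLumaAsGray(unsigned char *dstGray, size_t dstPitch,
                           const unsigned char *srcYUV, size_t srcPitch,
                           int width, int height, cudaStream_t stream)
{
    // Only the first 'height' rows (the Y plane) are copied; the chroma rows
    // below them are ignored.
    return cudaMemcpy2DAsync(dstGray, dstPitch, srcYUV, srcPitch,
                             static_cast<size_t>(width), static_cast<size_t>(height),
                             cudaMemcpyDeviceToDevice, stream);
}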
* * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/flip_or_copy_var_shape.cu b/src/cvcuda/priv/legacy/flip_or_copy_var_shape.cu index 4cf143b65..5a7b929ef 100644 --- a/src/cvcuda/priv/legacy/flip_or_copy_var_shape.cu +++ b/src/cvcuda/priv/legacy/flip_or_copy_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/gaussian_noise.cu b/src/cvcuda/priv/legacy/gaussian_noise.cu index 2af397b5b..91b097e32 100644 --- a/src/cvcuda/priv/legacy/gaussian_noise.cu +++ b/src/cvcuda/priv/legacy/gaussian_noise.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/gaussian_noise_var_shape.cu b/src/cvcuda/priv/legacy/gaussian_noise_var_shape.cu index 526e4ad84..7ae517ad4 100644 --- a/src/cvcuda/priv/legacy/gaussian_noise_var_shape.cu +++ b/src/cvcuda/priv/legacy/gaussian_noise_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/histogram_eq.cu b/src/cvcuda/priv/legacy/histogram_eq.cu index 89ce26f05..ca497c7cf 100644 --- a/src/cvcuda/priv/legacy/histogram_eq.cu +++ b/src/cvcuda/priv/legacy/histogram_eq.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/histogram_eq_var_shape.cu b/src/cvcuda/priv/legacy/histogram_eq_var_shape.cu index 3d81c13fc..e75499dd1 100644 --- a/src/cvcuda/priv/legacy/histogram_eq_var_shape.cu +++ b/src/cvcuda/priv/legacy/histogram_eq_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 @@ -24,7 +24,7 @@ #include "CvCudaUtils.cuh" //#include // for CVCUDA_NORMALIZE_SCALE_IS_STDDEV, etc. -#include +#include using namespace nvcv::legacy::cuda_op; using namespace nvcv::legacy::helpers; diff --git a/src/cvcuda/priv/legacy/inpaint.cu b/src/cvcuda/priv/legacy/inpaint.cu index 19c77e64f..91cabcd46 100644 --- a/src/cvcuda/priv/legacy/inpaint.cu +++ b/src/cvcuda/priv/legacy/inpaint.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 @@ -26,7 +26,7 @@ #include "inpaint_utils.cuh" #include "reduce_kernel_utils.cuh" -#include +#include using namespace nvcv::legacy::helpers; diff --git a/src/cvcuda/priv/legacy/inpaint_var_shape.cu b/src/cvcuda/priv/legacy/inpaint_var_shape.cu index b4304323e..71ae34dae 100644 --- a/src/cvcuda/priv/legacy/inpaint_var_shape.cu +++ b/src/cvcuda/priv/legacy/inpaint_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/joint_bilateral_filter.cu b/src/cvcuda/priv/legacy/joint_bilateral_filter.cu index 226d9182a..e5458b415 100644 --- a/src/cvcuda/priv/legacy/joint_bilateral_filter.cu +++ b/src/cvcuda/priv/legacy/joint_bilateral_filter.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/median_blur.cu b/src/cvcuda/priv/legacy/median_blur.cu index 9ee0540bc..bf9b26b18 100644 --- a/src/cvcuda/priv/legacy/median_blur.cu +++ b/src/cvcuda/priv/legacy/median_blur.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/median_blur_var_shape.cu b/src/cvcuda/priv/legacy/median_blur_var_shape.cu index f73216373..6bc55d8c1 100644 --- a/src/cvcuda/priv/legacy/median_blur_var_shape.cu +++ b/src/cvcuda/priv/legacy/median_blur_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/morphology.cu b/src/cvcuda/priv/legacy/morphology.cu index 4f4074265..2c7744788 100644 --- a/src/cvcuda/priv/legacy/morphology.cu +++ b/src/cvcuda/priv/legacy/morphology.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 @@ -23,8 +23,8 @@ #include "CvCudaUtils.cuh" -#include -#include +#include +#include using namespace nvcv::legacy::helpers; using namespace nvcv::legacy::cuda_op; diff --git a/src/cvcuda/priv/legacy/morphology_var_shape.cu b/src/cvcuda/priv/legacy/morphology_var_shape.cu index 2792412fe..9edfa9077 100644 --- a/src/cvcuda/priv/legacy/morphology_var_shape.cu +++ b/src/cvcuda/priv/legacy/morphology_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 @@ -23,8 +23,8 @@ #include "CvCudaUtils.cuh" -#include -#include +#include +#include using namespace nvcv::legacy::helpers; using namespace nvcv::legacy::cuda_op; diff --git a/src/cvcuda/priv/legacy/normalize.cu b/src/cvcuda/priv/legacy/normalize.cu index d9498996e..5a4f1c580 100644 --- a/src/cvcuda/priv/legacy/normalize.cu +++ b/src/cvcuda/priv/legacy/normalize.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 @@ -23,8 +23,8 @@ #include "CvCudaUtils.cuh" -#include // for CVCUDA_NORMALIZE_SCALE_IS_STDDEV, etc. -#include // for TypeTraits +#include // for CVCUDA_NORMALIZE_SCALE_IS_STDDEV, etc. +#include // for TypeTraits using namespace nvcv::legacy::cuda_op; using namespace nvcv::legacy::helpers; diff --git a/src/cvcuda/priv/legacy/normalize_var_shape.cu b/src/cvcuda/priv/legacy/normalize_var_shape.cu index d42b98ffb..f635c9895 100644 --- a/src/cvcuda/priv/legacy/normalize_var_shape.cu +++ b/src/cvcuda/priv/legacy/normalize_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 @@ -24,7 +24,7 @@ #include "CvCudaUtils.cuh" #include // for CVCUDA_NORMALIZE_SCALE_IS_STDDEV, etc. -#include +#include namespace nvcv::legacy::cuda_op { diff --git a/src/cvcuda/priv/legacy/osd.cu b/src/cvcuda/priv/legacy/osd.cu index 81c0fcc04..b61199544 100644 --- a/src/cvcuda/priv/legacy/osd.cu +++ b/src/cvcuda/priv/legacy/osd.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/pad_and_stack.cu b/src/cvcuda/priv/legacy/pad_and_stack.cu index 98641b9c3..8a613a1f5 100644 --- a/src/cvcuda/priv/legacy/pad_and_stack.cu +++ b/src/cvcuda/priv/legacy/pad_and_stack.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/pillow_resize.cu b/src/cvcuda/priv/legacy/pillow_resize.cu index 6c05ef187..9cbd12c31 100644 --- a/src/cvcuda/priv/legacy/pillow_resize.cu +++ b/src/cvcuda/priv/legacy/pillow_resize.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/random_resized_crop.cu b/src/cvcuda/priv/legacy/random_resized_crop.cu index af270b46e..8280595d3 100644 --- a/src/cvcuda/priv/legacy/random_resized_crop.cu +++ b/src/cvcuda/priv/legacy/random_resized_crop.cu @@ -23,7 +23,7 @@ #include "CvCudaUtils.cuh" -#include +#include #include #include @@ -109,7 +109,8 @@ __global__ void resize_linear_v1(const SrcWrapper src, DstWrapper dst, int2 srcS //y coordinate float fy = (float)((dst_y + 0.5f) * scale_y - 0.5f + top); int sy = cuda::round(fy); - fy -= sy; + + fy = ((sy < 0) ? 0 : ((sy > height - 2) ? 1 : fy - sy)); sy = cuda::max(0, cuda::min(sy, height - 2)); //row pointers @@ -119,8 +120,8 @@ __global__ void resize_linear_v1(const SrcWrapper src, DstWrapper dst, int2 srcS { //compute source data position and weight for [x0] components float fx = (float)((dst_x + 0.5f) * scale_x - 0.5f + left); int sx = cuda::round(fx); - fx -= sx; - fx *= ((sx >= 0) && (sx < width - 1)); + + fx = ((sx < 0) ? 0 : ((sx > width - 2) ? 1 : fx - sx)); sx = cuda::max(0, cuda::min(sx, width - 2)); *dst.ptr(batch_idx, dst_y, dst_x) diff --git a/src/cvcuda/priv/legacy/random_resized_crop_var_shape.cu b/src/cvcuda/priv/legacy/random_resized_crop_var_shape.cu index 0c65b0be4..dbd3a49e8 100644 --- a/src/cvcuda/priv/legacy/random_resized_crop_var_shape.cu +++ b/src/cvcuda/priv/legacy/random_resized_crop_var_shape.cu @@ -23,7 +23,7 @@ #include "CvCudaUtils.cuh" -#include +#include #include #include @@ -113,7 +113,8 @@ __global__ void resize_linear_v1(const SrcWrapper src, DstWrapper dst, const int //y coordinate float fy = (float)((dst_y + 0.5f) * scale_y - 0.5f + top); int sy = cuda::round(fy); - fy -= sy; + + fy = ((sy < 0) ? 0 : ((sy > height - 2) ? 1 : fy - sy)); sy = cuda::max(0, cuda::min(sy, height - 2)); //row pointers @@ -123,8 +124,8 @@ __global__ void resize_linear_v1(const SrcWrapper src, DstWrapper dst, const int { //compute source data position and weight for [x0] components float fx = (float)((dst_x + 0.5f) * scale_x - 0.5f + left); int sx = cuda::round(fx); - fx -= sx; - fx *= ((sx >= 0) && (sx < width - 1)); + + fx = ((sx < 0) ? 0 : ((sx > width - 2) ? 1 : fx - sx)); sx = cuda::max(0, cuda::min(sx, width - 2)); *dst.ptr(batch_idx, dst_y, dst_x) diff --git a/src/cvcuda/priv/legacy/reformat.cu b/src/cvcuda/priv/legacy/reformat.cu index a37f81471..6817a61f8 100644 --- a/src/cvcuda/priv/legacy/reformat.cu +++ b/src/cvcuda/priv/legacy/reformat.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/resize.cu b/src/cvcuda/priv/legacy/resize.cu deleted file mode 100644 index 4c0996181..000000000 --- a/src/cvcuda/priv/legacy/resize.cu +++ /dev/null @@ -1,367 +0,0 @@ -/* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
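[Editor's sketch — not part of the patch] The fy/fx changes in the two random_resized_crop kernels above (and again in resize_var_shape.cu further down) replace the old "zero the weight when the rounded coordinate is out of range" trick with an explicit saturation: the fractional weight becomes 0 at the low edge and 1 at the high edge, so the clamped neighbouring sample is still blended in instead of being dropped. Written out as a small host-side helper, with illustrative names rather than the kernel's:

// Mirrors the updated weight computation: when the rounded source index
// steps outside the valid two-tap window [0, size-2], pin the weight of the
// second tap to 0 (low edge) or 1 (high edge) instead of zeroing it.
#include <algorithm>
#include <cmath>

struct Tap
{
    int   s; // index of the first of the two source samples
    float f; // weight of the second sample, in [0, 1]
};

Tap bilinearTap(float srcCoord, int size)
{
    int   s = static_cast<int>(std::lround(srcCoord));
    float f = (s < 0) ? 0.0f : ((s > size - 2) ? 1.0f : srcCoord - s);
    s       = std::max(0, std::min(s, size - 2));
    return {s, f};
}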
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//$$$ replace these with the new (non-legacy) nvcv approach - -#include "CvCudaLegacy.h" -#include "CvCudaLegacyHelpers.hpp" - -#include "CvCudaUtils.cuh" - -#include - -using namespace nvcv::legacy::cuda_op; -using namespace nvcv::legacy::helpers; - -namespace nvcv::legacy::cuda_op { - -//private internal API - -#define MAX_BUFFER_BYTES 128 //multiple of 4 for word-aligned read, multiple of 16 for cacheline alignment (float4) -#define MAX_BUFFER_WORDS (MAX_BUFFER_BYTES / 4) //extra bytes for cache alignment - -#define LEGACY_BICUBIC_MATH //apparently the legacy code has an abs() that needs to be matched - -// Replaced below 15 to 0 due to a reported regression -#define CACHE_MEMORY_ALIGNMENT 0 //this is 'M' for _cacheAlignedBufferedRead - -//legal values for CACHE_MEMORY_ALIGNMENT are: -// 31: 256-bit alignment -// 15: 128-bit alignment <-- should be ideal for Ampere -// 7: 64-bit alignment -// 3: 32-bit alignment (word) -// 0: disable buffering -template -inline const __device__ ValueType *_cacheAlignedBufferedRead(SrcWrapper srcImage, int width, uint *pReadBuffer, - uint nReadBufferWordsMax, int nBatch, int nYPos, - int nXPosMin, int nXPosMax) -{ - const ValueType *lineStartPtr = srcImage.ptr(nBatch, nYPos); //do not access prior to this address - const ValueType *pixSrcPtr = &lineStartPtr[nXPosMin]; - if (M == 0) - return pixSrcPtr; //return GMEM pointer instead - else - { - uint *memSrcPtr = (uint *)(((size_t)pixSrcPtr) & (~M)); //(M+1) byte alignment - const ValueType *pixBeyondPtr = &lineStartPtr[nXPosMax + 1]; - const int functionalWidth = ((size_t)pixBeyondPtr + M) & (~M) - ((size_t)lineStartPtr); - const int nWordsToRead = (((size_t)pixBeyondPtr + M) & (~M) - (size_t)memSrcPtr) / 4; - - if (((size_t)memSrcPtr < (size_t)lineStartPtr) || (width * sizeof(ValueType) < functionalWidth) - || (nWordsToRead > nReadBufferWordsMax)) - return pixSrcPtr; //return GMEM pointer instead if running off the image - else - { //copy out source data, aligned based upon M (31, 15, 7, 3) - const int skew = ((size_t)pixSrcPtr) & M; //byte offset for nXPosMin - int i = 0; - if (M >= 31) //256-bit align, 32 bytes at a time - for (; i < nWordsToRead; i += 8) *((double4 *)(&pReadBuffer[i])) = *((double4 *)(&memSrcPtr[i])); - if (M == 15) //128-bit align, 16 bytes at a time - for (; i < nWordsToRead; i += 4) *((float4 *)(&pReadBuffer[i])) = *((float4 *)(&memSrcPtr[i])); - if (M == 7) //64-bit align, 8 bytes at a time - for (; i < nWordsToRead; i += 2) *((float2 *)(&pReadBuffer[i])) = *((float2 *)(&memSrcPtr[i])); - //32-bit align, 4 bytes at a time - for (; i < nWordsToRead; ++i) pReadBuffer[i] = memSrcPtr[i]; - - return (const ValueType *)(((size_t)pReadBuffer) + skew); //buffered pixel data - } - } -} //_cacheAlignedBufferedRead - -//******************** NN = Nearest Neighbor - -template -__global__ void resize_NN(SrcWrapper src, DstWrapper dst, int2 srcSize, int2 dstSize, const float scale_x, - const float scale_y) -{ - const int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - const int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - const int batch_idx = get_batch_idx(); - int 
out_height = dstSize.y, out_width = dstSize.x; - - if ((dst_x < out_width) && (dst_y < out_height)) - { //generic copy pixel to pixel - const int sx = cuda::min(cuda::round(dst_x * scale_x), srcSize.x - 1); - const int sy = cuda::min(cuda::round(dst_y * scale_y), srcSize.y - 1); - *dst.ptr(batch_idx, dst_y, dst_x) = *src.ptr(batch_idx, sy, sx); - } -} //resize_NN - -//******************** Bilinear - -template -__global__ void resize_bilinear(SrcWrapper src, DstWrapper dst, int2 srcSize, int2 dstSize, const float scale_x, - const float scale_y) -{ - const int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - const int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - const int batch_idx = get_batch_idx(); - int height = srcSize.y, width = srcSize.x, out_height = dstSize.y, out_width = dstSize.x; - - if ((dst_x < out_width) && (dst_y < out_height)) - { - //float space for weighted addition - using work_type = cuda::ConvertBaseTypeTo; - - //y coordinate - float fy = (float)((dst_y + 0.5f) * scale_y - 0.5f); - int sy = cuda::round(fy); - fy -= sy; - sy = cuda::max(0, cuda::min(sy, height - 2)); - - //row pointers - const T *aPtr = src.ptr(batch_idx, sy, 0); //start of upper row - const T *bPtr = src.ptr(batch_idx, sy + 1, 0); //start of lower row - - { //compute source data position and weight for [x0] components - float fx = (float)((dst_x + 0.5f) * scale_x - 0.5f); - int sx = cuda::round(fx); - fx -= sx; - fx *= ((sx >= 0) && (sx < width - 1)); - sx = cuda::max(0, cuda::min(sx, width - 2)); - - *dst.ptr(batch_idx, dst_y, dst_x) - = cuda::SaturateCast((1.0f - fx) * (aPtr[sx] * (1.0f - fy) + bPtr[sx] * fy) - + fx * (aPtr[sx + 1] * (1.0f - fy) + bPtr[sx + 1] * fy)); - } - } -} //resize_bilinear - -//******************** Bicubic - -template -__global__ void resize_bicubic(SrcWrapper src, DstWrapper dst, int2 srcSize, int2 dstSize, const float scale_x, - const float scale_y) -{ //optimized for aligned read - const int dst_x = blockIdx.x * blockDim.x + threadIdx.x; - const int dst_y = blockIdx.y * blockDim.y + threadIdx.y; - const int batch_idx = get_batch_idx(); - int height = srcSize.y, width = srcSize.x, out_height = dstSize.y, out_width = dstSize.x; - - if ((dst_x < out_width) & (dst_y < out_height)) - { - //float space for weighted addition - using work_type = cuda::ConvertBaseTypeTo; - - uint readBuffer[MAX_BUFFER_WORDS]; - - //y coordinate - float fy = (float)((dst_y + 0.5f) * scale_y - 0.5f); - int sy = cuda::round(fy); - fy -= sy; - sy = cuda::max(1, cuda::min(sy, height - 3)); - - const float A = -0.75f; - - float cY[4]; - cY[0] = ((A * (fy + 1) - 5 * A) * (fy + 1) + 8 * A) * (fy + 1) - 4 * A; - cY[1] = ((A + 2) * fy - (A + 3)) * fy * fy + 1; - cY[2] = ((A + 2) * (1 - fy) - (A + 3)) * (1 - fy) * (1 - fy) + 1; - cY[3] = 1.f - cY[0] - cY[1] - cY[2]; - - work_type accum = cuda::SetAll(0); - - float fx = (float)((dst_x + 0.5f) * scale_x - 0.5f); - int sx = cuda::round(fx); - fx -= sx; - fx *= ((sx >= 1) && (sx < width - 3)); - sx = cuda::max(1, cuda::min(sx, width - 3)); - - float cX[4]; - cX[0] = ((A * (fx + 1.0f) - 5.0f * A) * (fx + 1.0f) + 8.0f * A) * (fx + 1.0f) - 4.0f * A; - cX[1] = ((A + 2.0f) * fx - (A + 3.0f)) * fx * fx + 1.0f; - cX[2] = ((A + 2.0f) * (1.0f - fx) - (A + 3.0f)) * (1.0f - fx) * (1.0f - fx) + 1.0f; - cX[3] = 1.0f - cX[0] - cX[1] - cX[2]; -#pragma unroll - for (int row = 0; row < 4; ++row) - { - //1 - load each sub row from sx-1 to sx+3 inclusive, aligned - //const T * aPtr = src.ptr(batch_idx, sy + row - 1, sx-1); - const T *aPtr = _cacheAlignedBufferedRead( - src, 
srcSize.x, readBuffer, MAX_BUFFER_WORDS, batch_idx, sy + row - 1, sx - 1, sx + 2); - - //2 - do a pixel's partial on this row - accum += cY[row] * (cX[0] * aPtr[0] + cX[1] * aPtr[1] + cX[2] * aPtr[2] + cX[3] * aPtr[3]); - } //for row -#ifndef LEGACY_BICUBIC_MATH - //correct math - *dst.ptr(batch_idx, dst_y, dst_x) = cuda::SaturateCast(accum); -#else - //abs() needed to match legacy operator. - *dst.ptr(batch_idx, dst_y, dst_x) = cuda::SaturateCast(cuda::abs(accum)); -#endif - } -} //resize_bicubic - -template -__global__ void resize_area_ocv_align(SrcWrapper src, DstWrapper dst, int2 dstSize) -{ - const int x = blockDim.x * blockIdx.x + threadIdx.x; - const int y = blockDim.y * blockIdx.y + threadIdx.y; - const int batch_idx = get_batch_idx(); - int out_height = dstSize.y, out_width = dstSize.x; - - if (x >= out_width || y >= out_height) - return; - - const int3 coord{x, y, batch_idx}; - - dst[coord] = src[cuda::StaticCast(coord)]; -} - -template -void resize(const TensorDataStridedCuda &inData, const TensorDataStridedCuda &outData, - NVCVInterpolationType interpolation, cudaStream_t stream) - -{ - auto inAccess = TensorDataAccessStridedImagePlanar::Create(inData); - NVCV_ASSERT(inAccess); - - auto outAccess = TensorDataAccessStridedImagePlanar::Create(outData); - NVCV_ASSERT(outAccess); - - const int batch_size = inAccess->numSamples(); - const int in_width = inAccess->numCols(); - const int in_height = inAccess->numRows(); - const int out_width = outAccess->numCols(); - const int out_height = outAccess->numRows(); - - float scale_x = ((float)in_width) / out_width; - float scale_y = ((float)in_height) / out_height; - - int2 srcSize{in_width, in_height}; - int2 dstSize{out_width, out_height}; - - auto src = cuda::CreateTensorWrapNHW(inData); - auto dst = cuda::CreateTensorWrapNHW(outData); - - const int THREADS_PER_BLOCK = 256; //256? 64? - const int BLOCK_WIDTH = 8; //as in 32x4 or 32x8. 
16x8 and 16x16 are also viable - - const dim3 blockSize(BLOCK_WIDTH, THREADS_PER_BLOCK / BLOCK_WIDTH, 1); - const dim3 gridSize(divUp(out_width, blockSize.x), divUp(out_height, blockSize.y), batch_size); - - //Note: resize is fundamentally a gather memory operation, with a little bit of compute - // our goals are to (a) maximize throughput, and (b) minimize occupancy for the same performance - - switch (interpolation) - { - case NVCV_INTERP_NEAREST: - resize_NN<<>>(src, dst, srcSize, dstSize, scale_x, scale_y); - break; - - case NVCV_INTERP_LINEAR: - resize_bilinear<<>>(src, dst, srcSize, dstSize, scale_x, scale_y); - break; - - case NVCV_INTERP_CUBIC: - resize_bicubic<<>>(src, dst, srcSize, dstSize, scale_x, scale_y); - break; - - case NVCV_INTERP_AREA: - { - auto src = cuda::CreateInterpolationWrapNHW(inData, T{}, - scale_x, scale_y); - auto dst = cuda::CreateTensorWrapNHW(outData); - - resize_area_ocv_align<<>>(src, dst, dstSize); - } - break; - - default: - //$$$ need to throw or log an error here - break; - } //switch - - checkKernelErrors(); -#ifdef CUDA_DEBUG_LOG - checkCudaErrors(cudaStreamSynchronize(stream)); - checkCudaErrors(cudaGetLastError()); -#endif -} //resize - -ErrorCode Resize::infer(const TensorDataStridedCuda &inData, const TensorDataStridedCuda &outData, - const NVCVInterpolationType interpolation, cudaStream_t stream) -{ - DataFormat input_format = GetLegacyDataFormat(inData.layout()); - DataFormat output_format = GetLegacyDataFormat(outData.layout()); - - if (input_format != output_format) - { - LOG_ERROR("Invalid DataFormat between input (" << input_format << ") and output (" << output_format << ")"); - return ErrorCode::INVALID_DATA_FORMAT; - } - - DataFormat format = input_format; - - if (!(format == kNHWC || format == kHWC)) - { - LOG_ERROR("Invalid DataFormat " << format); - return ErrorCode::INVALID_DATA_FORMAT; - } - - auto inAccess = TensorDataAccessStridedImagePlanar::Create(inData); - NVCV_ASSERT(inAccess); - - cuda_op::DataType data_type = GetLegacyDataType(inData.dtype()); - cuda_op::DataShape input_shape = GetLegacyDataShape(inAccess->infoShape()); - - int channels = input_shape.C; - - if (channels > 4) - { - LOG_ERROR("Invalid channel number " << channels); - return ErrorCode::INVALID_DATA_SHAPE; - } - - if (!(data_type == kCV_8U || data_type == kCV_16U || data_type == kCV_16S || data_type == kCV_32F)) - { - LOG_ERROR("Invalid DataType " << data_type); - return ErrorCode::INVALID_DATA_TYPE; - } - - typedef void (*func_t)(const TensorDataStridedCuda &inData, const TensorDataStridedCuda &outData, - const NVCVInterpolationType interpolation, cudaStream_t stream); - - static const func_t funcs[6][4] = { - { resize, 0 /*resize*/, resize, resize}, - {0 /*resize*/, 0 /*resize*/, 0 /*resize*/, 0 /*resize*/}, - { resize, 0 /*resize*/, resize, resize}, - { resize, 0 /*resize*/, resize, resize}, - { 0 /*resize*/, 0 /*resize*/, 0 /*resize*/, 0 /*resize*/}, - { resize, 0 /*resize*/, resize, resize} - }; - - //note: schar1,3,4 should all work... 
- - if (interpolation == NVCV_INTERP_NEAREST || interpolation == NVCV_INTERP_LINEAR - || interpolation == NVCV_INTERP_CUBIC || interpolation == NVCV_INTERP_AREA) - { - const func_t func = funcs[data_type][channels - 1]; - NVCV_ASSERT(func != 0); - - func(inData, outData, interpolation, stream); - } - else - { - LOG_ERROR("Invalid interpolation " << interpolation); - return ErrorCode::INVALID_PARAMETER; - } - return SUCCESS; -} //Resize::infer - -} // namespace nvcv::legacy::cuda_op diff --git a/src/cvcuda/priv/legacy/resize_var_shape.cu b/src/cvcuda/priv/legacy/resize_var_shape.cu index bec72da1f..f0d7a2930 100644 --- a/src/cvcuda/priv/legacy/resize_var_shape.cu +++ b/src/cvcuda/priv/legacy/resize_var_shape.cu @@ -25,8 +25,8 @@ #include "CvCudaUtils.cuh" -#include -#include +#include +#include using namespace nvcv::legacy::cuda_op; using namespace nvcv::legacy::helpers; @@ -136,7 +136,8 @@ __global__ void resize_bilinear(cuda::ImageBatchVarShapeWrap src, cuda: //y coordinate float fy = (float)((dst_y + 0.5f) * scale_y - 0.5f); int sy = cuda::round(fy); - fy -= sy; + + fy = ((sy < 0) ? 0 : ((sy > height - 2) ? 1 : fy - sy)); sy = cuda::max(0, cuda::min(sy, height - 2)); //row pointers @@ -146,8 +147,8 @@ __global__ void resize_bilinear(cuda::ImageBatchVarShapeWrap src, cuda: { //compute source data position and weight for [x0] components float fx = (float)((dst_x + 0.5f) * scale_x - 0.5f); int sx = cuda::round(fx); - fx -= sx; - fx *= ((sx >= 0) && (sx < width - 1)); + + fx = ((sx < 0) ? 0 : ((sx > width - 2) ? 1 : fx - sx)); sx = cuda::max(0, cuda::min(sx, width - 2)); *dst.ptr(batch_idx, dst_y, dst_x) diff --git a/src/cvcuda/priv/legacy/rotate.cu b/src/cvcuda/priv/legacy/rotate.cu index 90bb893fc..2360c36b0 100644 --- a/src/cvcuda/priv/legacy/rotate.cu +++ b/src/cvcuda/priv/legacy/rotate.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/rotate_var_shape.cu b/src/cvcuda/priv/legacy/rotate_var_shape.cu index 3ac80828a..75a2f4c95 100644 --- a/src/cvcuda/priv/legacy/rotate_var_shape.cu +++ b/src/cvcuda/priv/legacy/rotate_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/threshold.cu b/src/cvcuda/priv/legacy/threshold.cu index 443fa6c26..c002d0121 100644 --- a/src/cvcuda/priv/legacy/threshold.cu +++ b/src/cvcuda/priv/legacy/threshold.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/threshold_util.cu b/src/cvcuda/priv/legacy/threshold_util.cu index 6956d124c..cb50f560a 100644 --- a/src/cvcuda/priv/legacy/threshold_util.cu +++ b/src/cvcuda/priv/legacy/threshold_util.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/threshold_util.cuh b/src/cvcuda/priv/legacy/threshold_util.cuh index 374be7bf4..6e34739cc 100644 --- a/src/cvcuda/priv/legacy/threshold_util.cuh +++ b/src/cvcuda/priv/legacy/threshold_util.cuh @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/threshold_var_shape.cu b/src/cvcuda/priv/legacy/threshold_var_shape.cu index 737278876..e0c865101 100644 --- a/src/cvcuda/priv/legacy/threshold_var_shape.cu +++ b/src/cvcuda/priv/legacy/threshold_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/priv/legacy/warp.cu b/src/cvcuda/priv/legacy/warp.cu index 2c89e2c2b..b12a86cd8 100644 --- a/src/cvcuda/priv/legacy/warp.cu +++ b/src/cvcuda/priv/legacy/warp.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/cvcuda/priv/legacy/warp_var_shape.cu b/src/cvcuda/priv/legacy/warp_var_shape.cu index 58ecb0a26..cc8148a27 100644 --- a/src/cvcuda/priv/legacy/warp_var_shape.cu +++ b/src/cvcuda/priv/legacy/warp_var_shape.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 diff --git a/src/cvcuda/util/CMakeLists.txt b/src/cvcuda/util/CMakeLists.txt new file mode 100644 index 000000000..346ab73b4 --- /dev/null +++ b/src/cvcuda/util/CMakeLists.txt @@ -0,0 +1,29 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +find_package(CUDAToolkit REQUIRED) + +add_library(cvcuda_util STATIC + Event.cpp + Stream.cpp + StreamId.cpp +) + +target_link_libraries(cvcuda_util + PUBLIC + nvcv_util + CUDA::cudart_static + -lrt +) diff --git a/src/util/Event.cpp b/src/cvcuda/util/Event.cpp similarity index 93% rename from src/util/Event.cpp rename to src/cvcuda/util/Event.cpp index 65ca3123b..db5cbb82e 100644 --- a/src/util/Event.cpp +++ b/src/cvcuda/util/Event.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,9 +17,8 @@ #include "Event.hpp" -#include "CheckError.hpp" - #include +#include namespace nvcv::util { diff --git a/src/util/Event.hpp b/src/cvcuda/util/Event.hpp similarity index 96% rename from src/util/Event.hpp rename to src/cvcuda/util/Event.hpp index 64e8adc5d..c9fc724b0 100644 --- a/src/util/Event.hpp +++ b/src/cvcuda/util/Event.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/PerStreamCache.hpp b/src/cvcuda/util/PerStreamCache.hpp similarity index 97% rename from src/util/PerStreamCache.hpp rename to src/cvcuda/util/PerStreamCache.hpp index ce7bf460b..3d4edf1a7 100644 --- a/src/util/PerStreamCache.hpp +++ b/src/cvcuda/util/PerStreamCache.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,11 +18,12 @@ #ifndef NVCV_UTIL_PER_STREAM_CACHE_HPP #define NVCV_UTIL_PER_STREAM_CACHE_HPP -#include "CheckError.hpp" #include "Event.hpp" #include "SimpleCache.hpp" #include "StreamId.hpp" +#include + #include #include #include diff --git a/src/util/PerStreamCacheImpl.hpp b/src/cvcuda/util/PerStreamCacheImpl.hpp similarity index 98% rename from src/util/PerStreamCacheImpl.hpp rename to src/cvcuda/util/PerStreamCacheImpl.hpp index 8a1525629..dd4ab194f 100644 --- a/src/util/PerStreamCacheImpl.hpp +++ b/src/cvcuda/util/PerStreamCacheImpl.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/SimpleCache.hpp b/src/cvcuda/util/SimpleCache.hpp similarity index 97% rename from src/util/SimpleCache.hpp rename to src/cvcuda/util/SimpleCache.hpp index f9091c5af..4434033dc 100644 --- a/src/util/SimpleCache.hpp +++ b/src/cvcuda/util/SimpleCache.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/Stream.cpp b/src/cvcuda/util/Stream.cpp similarity index 95% rename from src/util/Stream.cpp rename to src/cvcuda/util/Stream.cpp index a2aaab123..76338d33f 100644 --- a/src/util/Stream.cpp +++ b/src/cvcuda/util/Stream.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,10 +17,9 @@ #include "Stream.hpp" -#include "CheckError.hpp" - #include #include +#include namespace nvcv::util { diff --git a/src/util/Stream.hpp b/src/cvcuda/util/Stream.hpp similarity index 96% rename from src/util/Stream.hpp rename to src/cvcuda/util/Stream.hpp index 44399a3b2..5bfd3f748 100644 --- a/src/util/Stream.hpp +++ b/src/cvcuda/util/Stream.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/StreamId.cpp b/src/cvcuda/util/StreamId.cpp similarity index 97% rename from src/util/StreamId.cpp rename to src/cvcuda/util/StreamId.cpp index 27fc0f7e8..baff3f54c 100644 --- a/src/util/StreamId.cpp +++ b/src/cvcuda/util/StreamId.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/StreamId.hpp b/src/cvcuda/util/StreamId.hpp similarity index 93% rename from src/util/StreamId.hpp rename to src/cvcuda/util/StreamId.hpp index dbed99d9e..0f014dafc 100644 --- a/src/util/StreamId.hpp +++ b/src/cvcuda/util/StreamId.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/UniqueHandle.hpp b/src/cvcuda/util/UniqueHandle.hpp similarity index 98% rename from src/util/UniqueHandle.hpp rename to src/cvcuda/util/UniqueHandle.hpp index dcfb4b105..5a2392007 100644 --- a/src/util/UniqueHandle.hpp +++ b/src/cvcuda/util/UniqueHandle.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv/CMakeLists.txt b/src/nvcv/CMakeLists.txt new file mode 100644 index 000000000..2f964dacd --- /dev/null +++ b/src/nvcv/CMakeLists.txt @@ -0,0 +1,40 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.20.1) + +project(nvcv + LANGUAGES C CXX + VERSION 0.10.0 + DESCRIPTION "NVCV is NVIDIA Computer Vision library" +) + +# Used when creating special builds +set(PROJECT_VERSION_SUFFIX "-beta") + +option(NVCV_ENABLE_INSTALL "Enables creation of NVCV installers using cpack" ON) + +# Configure build tree ====================== + +list(PREPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") + +include(ConfigVersion) +include(ConfigBuildTree) + +# NVCV currently supports only shared build +set(CMAKE_POSITION_INDEPENDENT_CODE on) + +add_subdirectory(util) +add_subdirectory(src) diff --git a/src/nvcv/cmake/ConfigBuildTree.cmake b/src/nvcv/cmake/ConfigBuildTree.cmake new file mode 100644 index 000000000..ab8082ba2 --- /dev/null +++ b/src/nvcv/cmake/ConfigBuildTree.cmake @@ -0,0 +1,100 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set(CMAKE_DEBUG_POSTFIX "_d") + +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE Release) +endif() + +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) + +include(GNUInstallDirs) + +set(CMAKE_INSTALL_LIBDIR "lib/${CMAKE_LIBRARY_ARCHITECTURE}") +set(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}) + +# Executables try to find libnvvpi library relative to themselves. +set(CMAKE_BUILD_RPATH_USE_ORIGIN true) + +# Whether assert dumps expose code +if(CMAKE_BUILD_TYPE STREQUAL "Release") + set(DEFAULT_EXPOSE_CODE OFF) +else() + set(DEFAULT_EXPOSE_CODE ON) +endif() + +if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") + set(PLATFORM_IS_ARM64 ON) +else() + set(PLATFORM_IS_ARM64 OFF) +endif() + +include(CMakeDependentOption) + +option(EXPOSE_CODE "Expose in resulting binaries parts of our code" ${DEFAULT_EXPOSE_CODE}) +option(WARNINGS_AS_ERRORS "Treat compilation warnings as errors" OFF) +cmake_dependent_option(ENABLE_TEGRA "Enable tegra support" ON "PLATFORM_IS_ARM64" OFF) +cmake_dependent_option(ENABLE_COMPAT_OLD_GLIBC "Generates binaries that work with old distros, with old glibc" ON "NOT ENABLE_TEGRA" OFF) + +# Needed to get cuda version +find_package(CUDAToolkit REQUIRED) + +# Are we inside a git repo and it has submodules enabled? 
+if(EXISTS ${CMAKE_SOURCE_DIR}/.git AND EXISTS ${CMAKE_SOURCE_DIR}/.gitmodules) + if(NOT EXISTS ${CMAKE_SOURCE_DIR}/.git/modules) + message(FATAL_ERROR "git submodules not initialized. Did you forget to run 'git submodule update --init'?") + endif() +endif() + +if(UNIX) + set(NVCV_SYSTEM_NAME "${CMAKE_SYSTEM_PROCESSOR}-linux") +else() + message(FATAL_ERROR "Architecture not supported") +endif() + +set(NVCV_BUILD_SUFFIX "cuda${CUDAToolkit_VERSION_MAJOR}-${NVCV_SYSTEM_NAME}") + +function(setup_dso target version) + string(REGEX MATCHALL "[0-9]+" version_list "${version}") + list(GET version_list 0 VERSION_MAJOR) + list(GET version_list 1 VERSION_MINOR) + list(GET version_list 2 VERSION_PATCH) + + set_target_properties(${target} PROPERTIES + VERSION "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}" + SOVERSION "${VERSION_MAJOR}" + ) + + # Reduce executable size ========================== + + # Configure the library linker to remove unused code + target_link_options(${target} PRIVATE -Wl,--exclude-libs,ALL -Wl,--no-undefined -Wl,--gc-sections -Wl,--as-needed) + # Put each function and it's data into separate linker sections + target_compile_options(${target} PRIVATE -ffunction-sections -fdata-sections) + + # Link with static C/C++ libs ========================== + target_link_libraries(${target} PRIVATE + -static-libstdc++ + -static-libgcc + ) + + # Configure symbol visibility --------------------------------------------- + set_target_properties(${target} PROPERTIES VISIBILITY_INLINES_HIDDEN on + C_VISIBILITY_PRESET hidden + CXX_VISIBILITY_PRESET hidden + CUDA_VISIBILITY_PRESET hidden) +endfunction() diff --git a/src/nvcv/cmake/ConfigVersion.cmake b/src/nvcv/cmake/ConfigVersion.cmake new file mode 100644 index 000000000..9392261b8 --- /dev/null +++ b/src/nvcv/cmake/ConfigVersion.cmake @@ -0,0 +1,109 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
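[Editor's sketch — not part of the patch] setup_dso() above combines section GC, static libstdc++/libgcc linking, and hidden-by-default symbol visibility. With the hidden visibility presets, nothing leaves the shared library unless it is explicitly marked for export (and later admitted by the generated version script). The fragment below is a generic illustration of what that implies for a source file; MYLIB_PUBLIC is a placeholder, not the export macro this code base actually uses.

// With CXX_VISIBILITY_PRESET hidden, symbols default to local; a public
// entry point must opt in explicitly.
#if defined(__GNUC__)
#    define MYLIB_PUBLIC __attribute__((visibility("default")))
#else
#    define MYLIB_PUBLIC
#endif

MYLIB_PUBLIC int mylibApiVersion();          // exported from the DSO

static int internalDetail() { return 1000; } // stays library-internal

int mylibApiVersion()
{
    return internalDetail();
}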
+ +# We must run the following at "include" time, not at function call time, +# to find the path to this module rather than the path to a calling list file +get_filename_component(config_version_script_path ${CMAKE_CURRENT_LIST_FILE} PATH) + +set(PROJECT_VERSION "${PROJECT_VERSION}${PROJECT_VERSION_SUFFIX}") + +function(configure_version target LIBPREFIX incpath VERSION_FULL) + string(TOUPPER "${target}" TARGET) + + string(REGEX MATCH "-(.*)$" version_suffix "${VERSION_FULL}") + set(VERSION_SUFFIX ${CMAKE_MATCH_1}) + + string(REGEX MATCHALL "[0-9]+" version_list "${VERSION_FULL}") + list(GET version_list 0 VERSION_MAJOR) + list(GET version_list 1 VERSION_MINOR) + list(GET version_list 2 VERSION_PATCH) + + list(LENGTH version_list num_version_components) + + if(num_version_components EQUAL 3) + set(VERSION_TWEAK 0) + elseif(num_version_components EQUAL 4) + list(GET version_list 3 VERSION_TWEAK) + else() + message(FATAL_ERROR "Version must have either 3 or 4 components") + endif() + + math(EXPR VERSION_API_CODE "${VERSION_MAJOR}*100 + ${VERSION_MINOR}") + + string(REPLACE "-" "_" tmp ${VERSION_FULL}) + set(VERSION_BUILD "${tmp}-${NVCV_BUILD_SUFFIX}") + + configure_file(${config_version_script_path}/VersionDef.h.in include/${incpath}/VersionDef.h @ONLY ESCAPE_QUOTES) + configure_file(${config_version_script_path}/VersionUtils.h.in include/${incpath}/detail/VersionUtils.h @ONLY ESCAPE_QUOTES) + + set(${LIBPREFIX}_VERSION_FULL ${VERSION_FULL} CACHE INTERNAL "${TARGET} full version") + set(${LIBPREFIX}_VERSION_MAJOR ${VERSION_MAJOR} CACHE INTERNAL "${TARGET} major version") + set(${LIBPREFIX}_VERSION_MINOR ${VERSION_MINOR} CACHE INTERNAL "${TARGET} minor version") + set(${LIBPREFIX}_VERSION_PATCH ${VERSION_PATCH} CACHE INTERNAL "${TARGET} patch version") + set(${LIBPREFIX}_VERSION_TWEAK ${VERSION_TWEAK} CACHE INTERNAL "${TARGET} tweak version") + set(${LIBPREFIX}_VERSION_SUFFIX ${VERSION_SUFFIX} CACHE INTERNAL "${TARGET} version suffix") + set(${LIBPREFIX}_VERSION_API ${VERSION_MAJOR}.${VERSION_MINOR} CACHE INTERNAL "${TARGET} API version") + set(${LIBPREFIX}_VERSION_API_CODE ${VERSION_API_CODE} CACHE INTERNAL "${TARGET} API code") + set(${LIBPREFIX}_VERSION_BUILD ${VERSION_BUILD} CACHE INTERNAL "${TARGET} build version") + + # So that the generated headers are found + target_include_directories(${target} + PUBLIC + $ + ) + + if(NVCV_ENABLE_INSTALL) + install(FILES ${CMAKE_CURRENT_BINARY_DIR}/include/${incpath}/VersionDef.h + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/${incpath} + COMPONENT dev) + install(FILES ${CMAKE_CURRENT_BINARY_DIR}/include/${incpath}/detail/VersionUtils.h + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/${incpath}/detail + COMPONENT dev) + endif() +endfunction() + +function(configure_symbol_versioning dso_target VERPREFIX input_targets) + # Create exports file for symbol versioning --------------------------------- + set(EXPORTS_OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/exports.ldscript") + target_link_libraries(${dso_target} + PRIVATE + -Wl,--version-script ${EXPORTS_OUTPUT} + ) + set(ALL_SOURCES "") + foreach(tgt ${input_targets}) + get_target_property(tgt_sources ${tgt} SOURCES) + get_target_property(tgt_srcdir ${tgt} SOURCE_DIR) + + foreach(src ${tgt_sources}) + if(${src} MATCHES "^/") # absolute paths? 
+ list(APPEND ALL_SOURCES ${src}) + else() + list(APPEND ALL_SOURCES ${tgt_srcdir}/${src}) + endif() + endforeach() + endforeach() + + set(GEN_EXPORTS_SCRIPT "${config_version_script_path}/CreateExportsFile.cmake") + + add_custom_command(OUTPUT ${EXPORTS_OUTPUT} + COMMAND ${CMAKE_COMMAND} -DSOURCES="${ALL_SOURCES}" + -DVERPREFIX=${VERPREFIX} + -DOUTPUT=${EXPORTS_OUTPUT} + -P "${GEN_EXPORTS_SCRIPT}" + DEPENDS ${GEN_EXPORTS_SCRIPT} ${ALL_SOURCES}) + + add_custom_target(create_${dso_target}_exports_file DEPENDS ${EXPORTS_OUTPUT}) + add_dependencies(${dso_target} create_${dso_target}_exports_file) +endfunction() diff --git a/src/nvcv/cmake/CreateExportsFile.cmake b/src/nvcv/cmake/CreateExportsFile.cmake new file mode 100644 index 000000000..50b4b1a3d --- /dev/null +++ b/src/nvcv/cmake/CreateExportsFile.cmake @@ -0,0 +1,86 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if(NOT OUTPUT) + message(FATAL_ERROR "No output exports file specified") +endif() + +if(NOT SOURCES) + message(FATAL_ERROR "No source files specified") +endif() + +if(NOT VERPREFIX) + message(FATAL_ERROR "No version prefix specified") +endif() + +string(REPLACE " " ";" SOURCES ${SOURCES}) + +# Create an empty file +file(WRITE ${OUTPUT} "") + +set(all_versions "") + +foreach(src ${SOURCES}) + file(STRINGS ${src} funcdef_list REGEX "_DEFINE_API.*") + + foreach(func_def ${funcdef_list}) + if(func_def MATCHES "^[A-Z_]+_DEFINE_API\\(+([^,]+),([^,]+),[^,]+,([^,]+).*$") + string(STRIP "${CMAKE_MATCH_1}" ver_major) + string(STRIP "${CMAKE_MATCH_2}" ver_minor) + string(STRIP "${CMAKE_MATCH_3}" func) + list(APPEND all_versions ${ver_major}.${ver_minor}) + list(APPEND funcs_${ver_major}_${ver_minor} ${func}) + else() + message(FATAL_ERROR "I don't understand ${func_def}") + endif() + endforeach() +endforeach() + +list(SORT all_versions COMPARE NATURAL) +list(REMOVE_DUPLICATES all_versions) + +if(all_versions) + set(prev_version "") + foreach(ver ${all_versions}) + if(ver MATCHES "([0-9]+)\\.([0-9]+)") + set(ver_major ${CMAKE_MATCH_1}) + set(ver_minor ${CMAKE_MATCH_2}) + + file(APPEND ${OUTPUT} "${VERPREFIX}_${ver} {\nglobal:\n") + + if(NOT funcs_${ver_major}_${ver_minor}) + message(FATAL_ERROR "funcs_${ver_major}_${ver_minor} must not be empty") + endif() + + list(SORT funcs_${ver_major}_${ver_minor}) + + foreach(func ${funcs_${ver_major}_${ver_minor}}) + file(APPEND ${OUTPUT} " ${func};\n") + endforeach() + + if(prev_version) + file(APPEND ${OUTPUT} "} ${VERPREFIX}_${prev_version};\n\n") + else() + file(APPEND ${OUTPUT} "local: *;\n};\n\n") + endif() + + set(prev_version ${ver}) + else() + message(FATAL_ERROR "I don't version ${ver}") + endif() + endforeach() +else() + file(APPEND ${OUTPUT} "${VERPREFIX} {\nlocal: *;\n};\n") +endif() diff --git a/src/nvcv/cmake/VersionDef.h.in b/src/nvcv/cmake/VersionDef.h.in new file mode 100644 index 000000000..0c8eb76b4 --- /dev/null +++ 
b/src/nvcv/cmake/VersionDef.h.in @@ -0,0 +1,200 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file VersionDef.h + * + * Functions and structures for handling @LIBPREFIX@ library version. + */ + +#ifndef @LIBPREFIX@_VERSIONDEF_H +#define @LIBPREFIX@_VERSIONDEF_H + +#include <stdint.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Declarations of entities to handle @LIBPREFIX@ versioning. + * + * These utilities allow querying the @LIBPREFIX@ header and library versions and + * properly handle @LIBPREFIX@ forward- or backward-compatibility. + * + * @defgroup @LIBPREFIX@_CPP_UTIL_VERSION Versioning + * @{ + */ + +/** Make a @LIBPREFIX@ version identifier with four components. + * @param[in] major,minor,patch,tweak Version components to be converted to a number. + * @returns The numeric version representation. + */ +#define @LIBPREFIX@_MAKE_VERSION4(major, minor, patch, tweak) \ + ((uint32_t)((major)*1000000 + (minor)*10000 + (patch)*100 + (tweak))) + +/** Make a @LIBPREFIX@ version identifier with three components. + * + * The tweak version component is considered to be 0. + * + * @param[in] major,minor,patch Version components to be converted to a number. + * @returns The numeric version representation. + */ +#define @LIBPREFIX@_MAKE_VERSION3(major, minor, patch) \ + @LIBPREFIX@_MAKE_VERSION4(major, minor, patch, 0) + +/** Make a @LIBPREFIX@ version identifier with two components. + * + * The patch and tweak version components are considered to be 0. + * + * @param[in] major,minor Version components to be converted to a number. + * @returns The numeric version representation. + */ +#define @LIBPREFIX@_MAKE_VERSION2(major, minor) \ + @LIBPREFIX@_MAKE_VERSION4(major, minor, 0, 0) + +/** Make a @LIBPREFIX@ version identifier with one component. + * + * The minor, patch and tweak version components are considered to be 0. + * + * @param[in] major Major version component to be converted to a number. + * @returns The numeric version representation. + */ +#define @LIBPREFIX@_MAKE_VERSION1(major) \ + @LIBPREFIX@_MAKE_VERSION4(major, 0, 0, 0) + +/** Assemble an integer version from its components. + * This makes it easy to conditionally compile code for different @LIBPREFIX@ versions, e.g.: + * \code + * #if @LIBPREFIX@_VERSION < @LIBPREFIX@_MAKE_VERSION(1,0,0) + * // code that runs on versions prior to 1.0.0 + * #else + * // code that runs on versions after that, including 1.0.0 + * #endif + * \endcode + * + * @param[in] major Major version component, mandatory. + * @param[in] minor Minor version component. If omitted, it's considered to be 0. + * @param[in] patch Patch version component. If omitted, it's considered to be 0. + * @param[in] tweak Tweak version component. If omitted, it's considered to be 0. + * @returns The numeric version representation. 
+ */ +#if NVCV_DOXYGEN +# define @LIBPREFIX@_MAKE_VERSION(major,minor,patch,tweak) +#else +#define @LIBPREFIX@_DETAIL_GET_MACRO(_1,_2,_3,_4,NAME,...) NAME +#define @LIBPREFIX@_MAKE_VERSION(...) \ + @LIBPREFIX@_DETAIL_GET_MACRO(__VA_ARGS__, @LIBPREFIX@_MAKE_VERSION4, @LIBPREFIX@_MAKE_VERSION3, @LIBPREFIX@_MAKE_VERSION2, @LIBPREFIX@_MAKE_VERSION1)(__VA_ARGS__) +#endif + +/** Major version number component. + * This is incremented every time there's a incompatible ABI change. + * In the special case of major version 0, compatibility between minor versions + * is not guaranteed. + */ +#define @LIBPREFIX@_VERSION_MAJOR @VERSION_MAJOR@ + +/** Minor version number component. + * This is incremented every time there's a new feature added to @LIBPREFIX@ that + * doesn't break backward compatibility. This number is reset to zero when + * major version changes. + */ +#define @LIBPREFIX@_VERSION_MINOR @VERSION_MINOR@ + +/** Patch version number component. + * This is incremented every time a bug is fixed, but no new functionality is added + * to the library. This number is reset to zero when minor version changes. + */ +#define @LIBPREFIX@_VERSION_PATCH @VERSION_PATCH@ + +/** Tweak version number component. + * Incremented for packaging or documentation updates, etc. The library itself isn't updated. + * Gets reset to zero when patch version changes. + */ +#define @LIBPREFIX@_VERSION_TWEAK @VERSION_TWEAK@ + +/** Version suffix. + * String appended to version number to designate special builds. + */ +#define @LIBPREFIX@_VERSION_SUFFIX "@VERSION_SUFFIX@" + +/** @LIBPREFIX@ library version. + * It's an integer value computed from `MAJOR*1000000 + MINOR*10000 + PATCH*100 + TWEAK`. + * Integer versions can be compared, recent versions are greater than older ones. + */ +#define @LIBPREFIX@_VERSION @LIBPREFIX@_MAKE_VERSION(@LIBPREFIX@_VERSION_MAJOR, @LIBPREFIX@_VERSION_MINOR, @LIBPREFIX@_VERSION_PATCH, @LIBPREFIX@_VERSION_TWEAK) + +/** @LIBPREFIX@ library version number represented as a string. */ +#define @LIBPREFIX@_VERSION_STRING "@VERSION_FULL@" + +/** Selected API version to use. + * This macro selects which of the supported APIs the code will use. + * + * By default this equals to the highest supported API, corresponding to the current major and + * minor versions of the library. + * + * User can override the version by defining this macro before including @LIBPREFIX@ headers. + */ +#if NVCV_DOXYGEN +# define @LIBPREFIX@_VERSION_API +#else +#ifdef @LIBPREFIX@_VERSION_API +# if @LIBPREFIX@_VERSION_API < @LIBPREFIX@_MAKE_VERSION(@LIBPREFIX@_VERSION_MAJOR) || \ + @LIBPREFIX@_VERSION_API > @LIBPREFIX@_MAKE_VERSION(@LIBPREFIX@_VERSION_MAJOR, @LIBPREFIX@_VERSION_MINOR) +# error Selected @LIBPREFIX@ API version not supported. +# endif +#else +# define @LIBPREFIX@_VERSION_API @LIBPREFIX@_MAKE_VERSION(@LIBPREFIX@_VERSION_MAJOR, @LIBPREFIX@_VERSION_MINOR) +#endif +#endif + +/** Conditionally enable code when selected API version is exactly given version. + * + * @param[in] major,minor API version that will be considered. + */ +#define @LIBPREFIX@_VERSION_API_IS(major,minor) \ + (@LIBPREFIX@_MAKE_VERSION(major,minor) == @LIBPREFIX@_VERSION_API) + +/** Conditionally enable code when selected API version is at least given version. + * + * @param[in] major,minor Minimum API version that will be considered. 
+ */ +#define @LIBPREFIX@_VERSION_API_AT_LEAST(major,minor) \ + (@LIBPREFIX@_MAKE_VERSION(major,minor) <= @LIBPREFIX@_VERSION_API) + +/** Conditionally enable code when selected API version is at most given version. + * + * @param[in] major,minor Maximum API version that will be considered. + */ +#define @LIBPREFIX@_VERSION_API_AT_MOST(major,minor) \ + (@LIBPREFIX@_MAKE_VERSION(major,minor) >= @LIBPREFIX@_VERSION_API) + +/** Conditionally enable code when selected API version is between two versions. + * + * @param[in] min_major,min_minor Minimum API version that will be considered. + * @param[in] max_major,max_minor Maximum API version that will be considered. + */ +#define @LIBPREFIX@_VERSION_API_IN_RANGE(min_major,min_minor,max_major,max_minor) \ + (@LIBPREFIX@_VERSION_API_AT_LEAST(min_major, min_minor) && @LIBPREFIX@_VERSION_API_AT_MOST(max_major, max_minor)) + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif // @LIBPREFIX@_VERSION_H diff --git a/src/nvcv/cmake/VersionUtils.h.in b/src/nvcv/cmake/VersionUtils.h.in new file mode 100644 index 000000000..23cd00152 --- /dev/null +++ b/src/nvcv/cmake/VersionUtils.h.in @@ -0,0 +1,24 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef @LIBPREFIX@_DETAIL_VERSIONUTILS_H +#define @LIBPREFIX@_DETAIL_VERSIONUTILS_H + +/** For internal use only. */ +#define @LIBPREFIX@_COMMIT "@REPO_COMMIT@" + +#endif // @LIBPREFIX@_DETAIL_VERSIONUTILS_H diff --git a/src/nvcv_types/Allocator.cpp b/src/nvcv/src/Allocator.cpp similarity index 98% rename from src/nvcv_types/Allocator.cpp rename to src/nvcv/src/Allocator.cpp index 223eaf455..fdc4f12ed 100644 --- a/src/nvcv_types/Allocator.cpp +++ b/src/nvcv/src/Allocator.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,8 +24,8 @@ #include "priv/TLS.hpp" #include -#include -#include +#include +#include #include diff --git a/src/nvcv_types/Array.cpp b/src/nvcv/src/Array.cpp similarity index 99% rename from src/nvcv_types/Array.cpp rename to src/nvcv/src/Array.cpp index be7d98a27..f316d3667 100644 --- a/src/nvcv_types/Array.cpp +++ b/src/nvcv/src/Array.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
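
As an illustration of the version macros generated from VersionDef.h.in above, a minimal consumer-side sketch in C; it assumes the template is configured with LIBPREFIX set to NVCV (consistent with the NVCV_ prefixes used elsewhere in the tree) and that the generated header is installed as <nvcv/VersionDef.h>, both of which are assumptions rather than facts stated by this hunk:

    /* Minimal sketch; LIBPREFIX == NVCV and the install path are assumptions. */
    #include <stdio.h>
    #include <nvcv/VersionDef.h>

    int main(void)
    {
        /* NVCV_VERSION encodes MAJOR*1000000 + MINOR*10000 + PATCH*100 + TWEAK,
         * so 0.10.0 is 100000 and releases order by plain integer comparison. */
        if (NVCV_VERSION >= NVCV_MAKE_VERSION(0, 10))
        {
            printf("NVCV headers: %s (numeric %u)\n",
                   NVCV_VERSION_STRING, (unsigned)NVCV_VERSION);
        }
        return 0;
    }

The @LIBPREFIX@_VERSION_API_* helpers use the same numeric scheme to pin code to a particular API level at build time.
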
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/CMakeLists.txt b/src/nvcv/src/CMakeLists.txt similarity index 62% rename from src/nvcv_types/CMakeLists.txt rename to src/nvcv/src/CMakeLists.txt index 2d5617046..5253b21d3 100644 --- a/src/nvcv_types/CMakeLists.txt +++ b/src/nvcv/src/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -60,43 +60,45 @@ target_include_directories(nvcv_types_headers INTERFACE ${CMAKE_CURRENT_SOURCE_D # Installer -install(TARGETS nvcv_types - EXPORT nvcv_types - COMPONENT lib - LIBRARY NAMELINK_COMPONENT dev) +if(NVCV_ENABLE_INSTALL) + install(TARGETS nvcv_types + EXPORT nvcv_types + COMPONENT lib + LIBRARY NAMELINK_COMPONENT dev) -install(DIRECTORY include/nvcv - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} - COMPONENT dev - FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp" PATTERN "*.inc") + install(DIRECTORY include/nvcv + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + COMPONENT dev + FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp" PATTERN "*.inc") -include(CMakePackageConfigHelpers) -write_basic_package_version_file(nvcv_types-config-version.cmake - COMPATIBILITY SameMajorVersion) + include(CMakePackageConfigHelpers) + write_basic_package_version_file(nvcv_types-config-version.cmake + COMPATIBILITY SameMajorVersion) -if(UNIX) - install(EXPORT nvcv_types - DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/nvcv_types" - FILE nvcv_types-config.cmake - COMPONENT dev) + if(UNIX) + install(EXPORT nvcv_types + DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/nvcv_types" + FILE nvcv_types-config.cmake + COMPONENT dev) # WAR for https://gitlab.kitware.com/cmake/cmake/-/issues/23563 - install(CODE "set(CVCUDA_CONFIG_PATH \"\$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}/cmake/nvcv_types/nvcv_types-config.cmake\")" + install(CODE "set(NVCV_CONFIG_PATH \"\$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}/cmake/nvcv_types/nvcv_types-config.cmake\")" COMPONENT dev) install(CODE [=[ - file(READ "${CVCUDA_CONFIG_PATH}" contents) + file(READ "${NVCV_CONFIG_PATH}" contents) string(REPLACE "get_filename_component(_IMPORT_PREFIX \"\${CMAKE_CURRENT_LIST_FILE}\" PATH)" [[ get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" REALPATH) ]] contents "${contents}") - file(WRITE "${CVCUDA_CONFIG_PATH}" "${contents}") + file(WRITE "${NVCV_CONFIG_PATH}" "${contents}") ]=] COMPONENT dev) - install(FILES ${CMAKE_CURRENT_BINARY_DIR}/nvcv_types-config-version.cmake - DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/nvcv_types" - COMPONENT dev) + install(FILES ${CMAKE_CURRENT_BINARY_DIR}/nvcv_types-config-version.cmake + DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/nvcv_types" + COMPONENT dev) + endif() endif() diff --git a/src/nvcv_types/ColorSpec.cpp b/src/nvcv/src/ColorSpec.cpp similarity index 99% rename from src/nvcv_types/ColorSpec.cpp rename to src/nvcv/src/ColorSpec.cpp index 0c2810b1d..69f3e3929 100644 --- a/src/nvcv_types/ColorSpec.cpp +++ b/src/nvcv/src/ColorSpec.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,7 +23,7 @@ #include "priv/TLS.hpp" #include -#include +#include #include diff --git a/src/nvcv_types/Config.cpp b/src/nvcv/src/Config.cpp similarity index 97% rename from src/nvcv_types/Config.cpp rename to src/nvcv/src/Config.cpp index c9c4b0358..c8397bb3a 100644 --- a/src/nvcv_types/Config.cpp +++ b/src/nvcv/src/Config.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/DataLayout.cpp b/src/nvcv/src/DataLayout.cpp similarity index 98% rename from src/nvcv_types/DataLayout.cpp rename to src/nvcv/src/DataLayout.cpp index 06179f893..4c0982418 100644 --- a/src/nvcv_types/DataLayout.cpp +++ b/src/nvcv/src/DataLayout.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,7 +23,7 @@ #include #include -#include +#include #include diff --git a/src/nvcv_types/DataType.cpp b/src/nvcv/src/DataType.cpp similarity index 97% rename from src/nvcv_types/DataType.cpp rename to src/nvcv/src/DataType.cpp index fd07843e6..eabe48960 100644 --- a/src/nvcv_types/DataType.cpp +++ b/src/nvcv/src/DataType.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,8 +23,8 @@ #include "priv/TLS.hpp" #include -#include -#include +#include +#include #include diff --git a/src/nvcv_types/Image.cpp b/src/nvcv/src/Image.cpp similarity index 99% rename from src/nvcv_types/Image.cpp rename to src/nvcv/src/Image.cpp index 24a106c15..3ec564c38 100644 --- a/src/nvcv_types/Image.cpp +++ b/src/nvcv/src/Image.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/ImageBatch.cpp b/src/nvcv/src/ImageBatch.cpp similarity index 99% rename from src/nvcv_types/ImageBatch.cpp rename to src/nvcv/src/ImageBatch.cpp index 0de96b234..0403a875a 100644 --- a/src/nvcv_types/ImageBatch.cpp +++ b/src/nvcv/src/ImageBatch.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/ImageFormat.cpp b/src/nvcv/src/ImageFormat.cpp similarity index 99% rename from src/nvcv_types/ImageFormat.cpp rename to src/nvcv/src/ImageFormat.cpp index 3344d616e..e0a7b45fa 100644 --- a/src/nvcv_types/ImageFormat.cpp +++ b/src/nvcv/src/ImageFormat.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,7 +23,7 @@ #include "priv/SymbolVersioning.hpp" #include "priv/TLS.hpp" -#include +#include #include diff --git a/src/nvcv_types/Requirements.cpp b/src/nvcv/src/Requirements.cpp similarity index 96% rename from src/nvcv_types/Requirements.cpp rename to src/nvcv/src/Requirements.cpp index f3a449f29..c1cf1558c 100644 --- a/src/nvcv_types/Requirements.cpp +++ b/src/nvcv/src/Requirements.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/Status.cpp b/src/nvcv/src/Status.cpp similarity index 94% rename from src/nvcv_types/Status.cpp rename to src/nvcv/src/Status.cpp index 6621a89b0..e462db456 100644 --- a/src/nvcv_types/Status.cpp +++ b/src/nvcv/src/Status.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "priv/SymbolVersioning.hpp" #include -#include +#include namespace priv = nvcv::priv; diff --git a/src/nvcv_types/Tensor.cpp b/src/nvcv/src/Tensor.cpp similarity index 99% rename from src/nvcv_types/Tensor.cpp rename to src/nvcv/src/Tensor.cpp index 146b4d3cd..b741ed4ee 100644 --- a/src/nvcv_types/Tensor.cpp +++ b/src/nvcv/src/Tensor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/TensorBatch.cpp b/src/nvcv/src/TensorBatch.cpp similarity index 99% rename from src/nvcv_types/TensorBatch.cpp rename to src/nvcv/src/TensorBatch.cpp index a8f608d0d..024f745b8 100644 --- a/src/nvcv_types/TensorBatch.cpp +++ b/src/nvcv/src/TensorBatch.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/TensorLayout.cpp b/src/nvcv/src/TensorLayout.cpp similarity index 96% rename from src/nvcv_types/TensorLayout.cpp rename to src/nvcv/src/TensorLayout.cpp index a75ba8c03..ea0144a4c 100644 --- a/src/nvcv_types/TensorLayout.cpp +++ b/src/nvcv/src/TensorLayout.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/TensorShape.cpp b/src/nvcv/src/TensorShape.cpp similarity index 90% rename from src/nvcv_types/TensorShape.cpp rename to src/nvcv/src/TensorShape.cpp index 28c8256b9..37e185edd 100644 --- a/src/nvcv_types/TensorShape.cpp +++ b/src/nvcv/src/TensorShape.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/Version.cpp b/src/nvcv/src/Version.cpp similarity index 87% rename from src/nvcv_types/Version.cpp rename to src/nvcv/src/Version.cpp index fc00f9543..17c0fff54 100644 --- a/src/nvcv_types/Version.cpp +++ b/src/nvcv/src/Version.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Array.h b/src/nvcv/src/include/nvcv/Array.h similarity index 99% rename from src/nvcv_types/include/nvcv/Array.h rename to src/nvcv/src/include/nvcv/Array.h index 077e6f095..b47f4d873 100644 --- a/src/nvcv_types/include/nvcv/Array.h +++ b/src/nvcv/src/include/nvcv/Array.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Array.hpp b/src/nvcv/src/include/nvcv/Array.hpp similarity index 97% rename from src/nvcv_types/include/nvcv/Array.hpp rename to src/nvcv/src/include/nvcv/Array.hpp index 81b99675b..719f2f067 100644 --- a/src/nvcv_types/include/nvcv/Array.hpp +++ b/src/nvcv/src/include/nvcv/Array.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/ArrayData.h b/src/nvcv/src/include/nvcv/ArrayData.h similarity index 97% rename from src/nvcv_types/include/nvcv/ArrayData.h rename to src/nvcv/src/include/nvcv/ArrayData.h index f9caa4ef7..1c11c2b2b 100644 --- a/src/nvcv_types/include/nvcv/ArrayData.h +++ b/src/nvcv/src/include/nvcv/ArrayData.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/ArrayData.hpp b/src/nvcv/src/include/nvcv/ArrayData.hpp similarity index 96% rename from src/nvcv_types/include/nvcv/ArrayData.hpp rename to src/nvcv/src/include/nvcv/ArrayData.hpp index b20ea5eec..5a1494094 100644 --- a/src/nvcv_types/include/nvcv/ArrayData.hpp +++ b/src/nvcv/src/include/nvcv/ArrayData.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/ArrayDataAccess.hpp b/src/nvcv/src/include/nvcv/ArrayDataAccess.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/ArrayDataAccess.hpp rename to src/nvcv/src/include/nvcv/ArrayDataAccess.hpp index c21a0f14e..6450cbfba 100644 --- a/src/nvcv_types/include/nvcv/ArrayDataAccess.hpp +++ b/src/nvcv/src/include/nvcv/ArrayDataAccess.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/BorderType.h b/src/nvcv/src/include/nvcv/BorderType.h similarity index 94% rename from src/nvcv_types/include/nvcv/BorderType.h rename to src/nvcv/src/include/nvcv/BorderType.h index ce09409f4..deb181551 100644 --- a/src/nvcv_types/include/nvcv/BorderType.h +++ b/src/nvcv/src/include/nvcv/BorderType.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Casts.hpp b/src/nvcv/src/include/nvcv/Casts.hpp similarity index 97% rename from src/nvcv_types/include/nvcv/Casts.hpp rename to src/nvcv/src/include/nvcv/Casts.hpp index c7ae80a10..ede4f1c7c 100644 --- a/src/nvcv_types/include/nvcv/Casts.hpp +++ b/src/nvcv/src/include/nvcv/Casts.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/ColorSpec.h b/src/nvcv/src/include/nvcv/ColorSpec.h similarity index 99% rename from src/nvcv_types/include/nvcv/ColorSpec.h rename to src/nvcv/src/include/nvcv/ColorSpec.h index 95a67d4c4..4ce49b034 100644 --- a/src/nvcv_types/include/nvcv/ColorSpec.h +++ b/src/nvcv/src/include/nvcv/ColorSpec.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/ColorSpec.hpp b/src/nvcv/src/include/nvcv/ColorSpec.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/ColorSpec.hpp rename to src/nvcv/src/include/nvcv/ColorSpec.hpp index c748cb618..08de090fa 100644 --- a/src/nvcv_types/include/nvcv/ColorSpec.hpp +++ b/src/nvcv/src/include/nvcv/ColorSpec.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Config.h b/src/nvcv/src/include/nvcv/Config.h similarity index 98% rename from src/nvcv_types/include/nvcv/Config.h rename to src/nvcv/src/include/nvcv/Config.h index f5d64d32b..da91ca17b 100644 --- a/src/nvcv_types/include/nvcv/Config.h +++ b/src/nvcv/src/include/nvcv/Config.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Config.hpp b/src/nvcv/src/include/nvcv/Config.hpp similarity index 97% rename from src/nvcv_types/include/nvcv/Config.hpp rename to src/nvcv/src/include/nvcv/Config.hpp index 45ba7495e..b53f1609b 100644 --- a/src/nvcv_types/include/nvcv/Config.hpp +++ b/src/nvcv/src/include/nvcv/Config.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/CoreResource.hpp b/src/nvcv/src/include/nvcv/CoreResource.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/CoreResource.hpp rename to src/nvcv/src/include/nvcv/CoreResource.hpp index 55e2a9f3e..1c7d1ac6a 100644 --- a/src/nvcv_types/include/nvcv/CoreResource.hpp +++ b/src/nvcv/src/include/nvcv/CoreResource.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/DataLayout.h b/src/nvcv/src/include/nvcv/DataLayout.h similarity index 99% rename from src/nvcv_types/include/nvcv/DataLayout.h rename to src/nvcv/src/include/nvcv/DataLayout.h index ffeb626a6..c6951c0c7 100644 --- a/src/nvcv_types/include/nvcv/DataLayout.h +++ b/src/nvcv/src/include/nvcv/DataLayout.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/DataLayout.hpp b/src/nvcv/src/include/nvcv/DataLayout.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/DataLayout.hpp rename to src/nvcv/src/include/nvcv/DataLayout.hpp index 0fed0d75f..b7dcc48c1 100644 --- a/src/nvcv_types/include/nvcv/DataLayout.hpp +++ b/src/nvcv/src/include/nvcv/DataLayout.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/DataType.h b/src/nvcv/src/include/nvcv/DataType.h similarity index 99% rename from src/nvcv_types/include/nvcv/DataType.h rename to src/nvcv/src/include/nvcv/DataType.h index bc0c87847..189d6be5e 100644 --- a/src/nvcv_types/include/nvcv/DataType.h +++ b/src/nvcv/src/include/nvcv/DataType.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -44,7 +44,7 @@ typedef struct * Data types defines the geometry of value elements, i.e., pixels in a image plane without taking into account what the value represents. * For example, a \ref NVCV_IMAGE_FORMAT_NV12 is composed of 2 planes, each one with the following data types: * + \ref NVCV_DATA_TYPE_U8 representing pixels as 8-bit unsigned values. - * + \ref NVCV_DATA_TYPE_2U8 representing pixels as two interleaved 32-bit floating-point values. + * + \ref NVCV_DATA_TYPE_2U8 representing pixels as two interleaved 8-bit unsigned values. * * @defgroup NVCV_C_CORE_DATATYPE Data types * @{ diff --git a/src/nvcv_types/include/nvcv/DataType.hpp b/src/nvcv/src/include/nvcv/DataType.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/DataType.hpp rename to src/nvcv/src/include/nvcv/DataType.hpp index a1dd76856..68d85ebe8 100644 --- a/src/nvcv_types/include/nvcv/DataType.hpp +++ b/src/nvcv/src/include/nvcv/DataType.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Exception.hpp b/src/nvcv/src/include/nvcv/Exception.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/Exception.hpp rename to src/nvcv/src/include/nvcv/Exception.hpp index 96a80dfe2..9f729ec76 100644 --- a/src/nvcv_types/include/nvcv/Exception.hpp +++ b/src/nvcv/src/include/nvcv/Exception.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Export.h b/src/nvcv/src/include/nvcv/Export.h similarity index 91% rename from src/nvcv_types/include/nvcv/Export.h rename to src/nvcv/src/include/nvcv/Export.h index 7ec01547e..611bcf4f7 100644 --- a/src/nvcv_types/include/nvcv/Export.h +++ b/src/nvcv/src/include/nvcv/Export.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Fwd.h b/src/nvcv/src/include/nvcv/Fwd.h similarity index 94% rename from src/nvcv_types/include/nvcv/Fwd.h rename to src/nvcv/src/include/nvcv/Fwd.h index c9702cb1f..8d7a350ca 100644 --- a/src/nvcv_types/include/nvcv/Fwd.h +++ b/src/nvcv/src/include/nvcv/Fwd.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Fwd.hpp b/src/nvcv/src/include/nvcv/Fwd.hpp similarity index 95% rename from src/nvcv_types/include/nvcv/Fwd.hpp rename to src/nvcv/src/include/nvcv/Fwd.hpp index bf8fa6908..47695fd8a 100644 --- a/src/nvcv_types/include/nvcv/Fwd.hpp +++ b/src/nvcv/src/include/nvcv/Fwd.hpp @@ -1,6 +1,6 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/HandleWrapper.hpp b/src/nvcv/src/include/nvcv/HandleWrapper.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/HandleWrapper.hpp rename to src/nvcv/src/include/nvcv/HandleWrapper.hpp index 67208d167..faf8dc2b5 100644 --- a/src/nvcv_types/include/nvcv/HandleWrapper.hpp +++ b/src/nvcv/src/include/nvcv/HandleWrapper.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Image.h b/src/nvcv/src/include/nvcv/Image.h similarity index 99% rename from src/nvcv_types/include/nvcv/Image.h rename to src/nvcv/src/include/nvcv/Image.h index b2b3c47a8..92a7f3783 100644 --- a/src/nvcv_types/include/nvcv/Image.h +++ b/src/nvcv/src/include/nvcv/Image.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Image.hpp b/src/nvcv/src/include/nvcv/Image.hpp similarity index 97% rename from src/nvcv_types/include/nvcv/Image.hpp rename to src/nvcv/src/include/nvcv/Image.hpp index 29b17db75..65d67fcab 100644 --- a/src/nvcv_types/include/nvcv/Image.hpp +++ b/src/nvcv/src/include/nvcv/Image.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -125,7 +125,7 @@ class Image : public CoreResource }; // ImageWrapData definition ------------------------------------- -// Image that wraps an image data allocated outside cv-cuda +// Image that wraps an image data allocated outside NVCV using ImageDataCleanupFunc = void(const ImageData &); diff --git a/src/nvcv_types/include/nvcv/ImageBatch.h b/src/nvcv/src/include/nvcv/ImageBatch.h similarity index 99% rename from src/nvcv_types/include/nvcv/ImageBatch.h rename to src/nvcv/src/include/nvcv/ImageBatch.h index 3673b25be..14d655c13 100644 --- a/src/nvcv_types/include/nvcv/ImageBatch.h +++ b/src/nvcv/src/include/nvcv/ImageBatch.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/ImageBatch.hpp b/src/nvcv/src/include/nvcv/ImageBatch.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/ImageBatch.hpp rename to src/nvcv/src/include/nvcv/ImageBatch.hpp index 55904891a..6d4e2c03f 100644 --- a/src/nvcv_types/include/nvcv/ImageBatch.hpp +++ b/src/nvcv/src/include/nvcv/ImageBatch.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/ImageBatchData.h b/src/nvcv/src/include/nvcv/ImageBatchData.h similarity index 98% rename from src/nvcv_types/include/nvcv/ImageBatchData.h rename to src/nvcv/src/include/nvcv/ImageBatchData.h index 20245a470..6ac31108d 100644 --- a/src/nvcv_types/include/nvcv/ImageBatchData.h +++ b/src/nvcv/src/include/nvcv/ImageBatchData.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/ImageBatchData.hpp b/src/nvcv/src/include/nvcv/ImageBatchData.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/ImageBatchData.hpp rename to src/nvcv/src/include/nvcv/ImageBatchData.hpp index c1bdd19fc..9ad06bfeb 100644 --- a/src/nvcv_types/include/nvcv/ImageBatchData.hpp +++ b/src/nvcv/src/include/nvcv/ImageBatchData.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/ImageData.h b/src/nvcv/src/include/nvcv/ImageData.h similarity index 98% rename from src/nvcv_types/include/nvcv/ImageData.h rename to src/nvcv/src/include/nvcv/ImageData.h index c34fb226b..5d06fb196 100644 --- a/src/nvcv_types/include/nvcv/ImageData.h +++ b/src/nvcv/src/include/nvcv/ImageData.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/ImageData.hpp b/src/nvcv/src/include/nvcv/ImageData.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/ImageData.hpp rename to src/nvcv/src/include/nvcv/ImageData.hpp index c7938bee3..0a5e9670c 100644 --- a/src/nvcv_types/include/nvcv/ImageData.hpp +++ b/src/nvcv/src/include/nvcv/ImageData.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/ImageFormat.h b/src/nvcv/src/include/nvcv/ImageFormat.h similarity index 99% rename from src/nvcv_types/include/nvcv/ImageFormat.h rename to src/nvcv/src/include/nvcv/ImageFormat.h index bd33a38ef..acb058b0a 100644 --- a/src/nvcv_types/include/nvcv/ImageFormat.h +++ b/src/nvcv/src/include/nvcv/ImageFormat.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/ImageFormat.hpp b/src/nvcv/src/include/nvcv/ImageFormat.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/ImageFormat.hpp rename to src/nvcv/src/include/nvcv/ImageFormat.hpp index 5f39f273f..a0fbc114d 100644 --- a/src/nvcv_types/include/nvcv/ImageFormat.hpp +++ b/src/nvcv/src/include/nvcv/ImageFormat.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Optional.hpp b/src/nvcv/src/include/nvcv/Optional.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/Optional.hpp rename to src/nvcv/src/include/nvcv/Optional.hpp index 475f9c2e4..5017fa552 100644 --- a/src/nvcv_types/include/nvcv/Optional.hpp +++ b/src/nvcv/src/include/nvcv/Optional.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Rect.h b/src/nvcv/src/include/nvcv/Rect.h similarity index 90% rename from src/nvcv_types/include/nvcv/Rect.h rename to src/nvcv/src/include/nvcv/Rect.h index 33fa4c1c9..69bcc1066 100644 --- a/src/nvcv_types/include/nvcv/Rect.h +++ b/src/nvcv/src/include/nvcv/Rect.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Shape.hpp b/src/nvcv/src/include/nvcv/Shape.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/Shape.hpp rename to src/nvcv/src/include/nvcv/Shape.hpp index c07898f92..f8daf8369 100644 --- a/src/nvcv_types/include/nvcv/Shape.hpp +++ b/src/nvcv/src/include/nvcv/Shape.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Size.h b/src/nvcv/src/include/nvcv/Size.h similarity index 89% rename from src/nvcv_types/include/nvcv/Size.h rename to src/nvcv/src/include/nvcv/Size.h index fe6db0062..4c3e5d551 100644 --- a/src/nvcv_types/include/nvcv/Size.h +++ b/src/nvcv/src/include/nvcv/Size.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Size.hpp b/src/nvcv/src/include/nvcv/Size.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/Size.hpp rename to src/nvcv/src/include/nvcv/Size.hpp index cce55e048..f857d59b9 100644 --- a/src/nvcv_types/include/nvcv/Size.hpp +++ b/src/nvcv/src/include/nvcv/Size.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Status.h b/src/nvcv/src/include/nvcv/Status.h similarity index 87% rename from src/nvcv_types/include/nvcv/Status.h rename to src/nvcv/src/include/nvcv/Status.h index 8971efdc5..6adb15547 100644 --- a/src/nvcv_types/include/nvcv/Status.h +++ b/src/nvcv/src/include/nvcv/Status.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -82,17 +82,17 @@ typedef enum NVCV_PUBLIC const char *nvcvStatusGetName(NVCVStatus code); /** - * @brief Returns and resets the error status of the last CV-CUDA function call that failed in current thread. + * @brief Returns and resets the error status of the last NVCV function call that failed in current thread. * * A new call to this function will return \ref NVCV_SUCCESS, as the thread-specific * status was reset. This operation doesn't affect the statuses in other threads. * - * @returns The status of the last CV-CUDA function call that failed in current thread. + * @returns The status of the last NVCV function call that failed in current thread. */ NVCV_PUBLIC NVCVStatus nvcvGetLastError(); /** - * @brief Returns and resets the error status code and message of the last CV-CUDA function call that failed in current thread. + * @brief Returns and resets the error status code and message of the last NVCV function call that failed in current thread. * * A new call to this function will return \ref NVCV_SUCCESS, as the thread-specific * status was reset. This operation doesn't affect the status in other threads. @@ -106,21 +106,21 @@ NVCV_PUBLIC NVCVStatus nvcvGetLastError(); * @param[in] lenBuffer Size in bytes of msgBuffer. * + If less than zero, \p lenBuffer is assumed to be 0. * - * @returns The status of the last CV-CUDA function call that failed in current thread. + * @returns The status of the last NVCV function call that failed in current thread. */ NVCV_PUBLIC NVCVStatus nvcvGetLastErrorMessage(char *msgBuffer, int32_t lenBuffer); /** - * @brief Returns the error status of the last CV-CUDA function call that failed in current thread. + * @brief Returns the error status of the last NVCV function call that failed in current thread. * * The internal status code and message of current thread won't be reset. * - * @returns The status of the last CV-CUDA function call that failed in current thread. + * @returns The status of the last NVCV function call that failed in current thread. 
*/ NVCV_PUBLIC NVCVStatus nvcvPeekAtLastError(); /** - * @brief Returns the status code and message of the last CV-CUDA function call that failed in current thread. + * @brief Returns the status code and message of the last NVCV function call that failed in current thread. * * The internal status code and message of current thread won't be reset. * @@ -133,7 +133,7 @@ NVCV_PUBLIC NVCVStatus nvcvPeekAtLastError(); * @param[in] lenBuffer Size in bytes of msgBuffer. * + If less than zero, lenBuffer is assumed to be 0. * - * @returns The status of the last CV-CUDA function call that failed in current thread. + * @returns The status of the last NVCV function call that failed in current thread. */ NVCV_PUBLIC NVCVStatus nvcvPeekAtLastErrorMessage(char *msgBuffer, int32_t lenBuffer); diff --git a/src/nvcv_types/include/nvcv/Status.hpp b/src/nvcv/src/include/nvcv/Status.hpp similarity index 97% rename from src/nvcv_types/include/nvcv/Status.hpp rename to src/nvcv/src/include/nvcv/Status.hpp index cdac456ae..f681ff29f 100644 --- a/src/nvcv_types/include/nvcv/Status.hpp +++ b/src/nvcv/src/include/nvcv/Status.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Tensor.h b/src/nvcv/src/include/nvcv/Tensor.h similarity index 99% rename from src/nvcv_types/include/nvcv/Tensor.h rename to src/nvcv/src/include/nvcv/Tensor.h index fef89e670..7ae1b4b46 100644 --- a/src/nvcv_types/include/nvcv/Tensor.h +++ b/src/nvcv/src/include/nvcv/Tensor.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Tensor.hpp b/src/nvcv/src/include/nvcv/Tensor.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/Tensor.hpp rename to src/nvcv/src/include/nvcv/Tensor.hpp index acb7e5b9b..b2b955044 100644 --- a/src/nvcv_types/include/nvcv/Tensor.hpp +++ b/src/nvcv/src/include/nvcv/Tensor.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/TensorBatch.h b/src/nvcv/src/include/nvcv/TensorBatch.h similarity index 99% rename from src/nvcv_types/include/nvcv/TensorBatch.h rename to src/nvcv/src/include/nvcv/TensorBatch.h index 597f8dc1c..c552acab0 100644 --- a/src/nvcv_types/include/nvcv/TensorBatch.h +++ b/src/nvcv/src/include/nvcv/TensorBatch.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/TensorBatch.hpp b/src/nvcv/src/include/nvcv/TensorBatch.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/TensorBatch.hpp rename to src/nvcv/src/include/nvcv/TensorBatch.hpp index 4aee9e148..636c2a191 100644 --- a/src/nvcv_types/include/nvcv/TensorBatch.hpp +++ b/src/nvcv/src/include/nvcv/TensorBatch.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/TensorBatchData.h b/src/nvcv/src/include/nvcv/TensorBatchData.h similarity index 93% rename from src/nvcv_types/include/nvcv/TensorBatchData.h rename to src/nvcv/src/include/nvcv/TensorBatchData.h index 9b980e7cc..a86a6cfb5 100644 --- a/src/nvcv_types/include/nvcv/TensorBatchData.h +++ b/src/nvcv/src/include/nvcv/TensorBatchData.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/TensorBatchData.hpp b/src/nvcv/src/include/nvcv/TensorBatchData.hpp similarity index 97% rename from src/nvcv_types/include/nvcv/TensorBatchData.hpp rename to src/nvcv/src/include/nvcv/TensorBatchData.hpp index b9c425d1c..a601372b3 100644 --- a/src/nvcv_types/include/nvcv/TensorBatchData.hpp +++ b/src/nvcv/src/include/nvcv/TensorBatchData.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/TensorData.h b/src/nvcv/src/include/nvcv/TensorData.h similarity index 95% rename from src/nvcv_types/include/nvcv/TensorData.h rename to src/nvcv/src/include/nvcv/TensorData.h index d2120eabe..51bbce6ba 100644 --- a/src/nvcv_types/include/nvcv/TensorData.h +++ b/src/nvcv/src/include/nvcv/TensorData.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/TensorData.hpp b/src/nvcv/src/include/nvcv/TensorData.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/TensorData.hpp rename to src/nvcv/src/include/nvcv/TensorData.hpp index 26d9cbfe8..ded1562cc 100644 --- a/src/nvcv_types/include/nvcv/TensorData.hpp +++ b/src/nvcv/src/include/nvcv/TensorData.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/TensorDataAccess.hpp b/src/nvcv/src/include/nvcv/TensorDataAccess.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/TensorDataAccess.hpp rename to src/nvcv/src/include/nvcv/TensorDataAccess.hpp index 23de915b7..9371d6878 100644 --- a/src/nvcv_types/include/nvcv/TensorDataAccess.hpp +++ b/src/nvcv/src/include/nvcv/TensorDataAccess.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/TensorLayout.h b/src/nvcv/src/include/nvcv/TensorLayout.h similarity index 99% rename from src/nvcv_types/include/nvcv/TensorLayout.h rename to src/nvcv/src/include/nvcv/TensorLayout.h index cc16acea0..eaa8b2c7c 100644 --- a/src/nvcv_types/include/nvcv/TensorLayout.h +++ b/src/nvcv/src/include/nvcv/TensorLayout.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/TensorLayout.hpp b/src/nvcv/src/include/nvcv/TensorLayout.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/TensorLayout.hpp rename to src/nvcv/src/include/nvcv/TensorLayout.hpp index d5c36a8aa..ad6639daa 100644 --- a/src/nvcv_types/include/nvcv/TensorLayout.hpp +++ b/src/nvcv/src/include/nvcv/TensorLayout.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/TensorLayoutDef.inc b/src/nvcv/src/include/nvcv/TensorLayoutDef.inc similarity index 93% rename from src/nvcv_types/include/nvcv/TensorLayoutDef.inc rename to src/nvcv/src/include/nvcv/TensorLayoutDef.inc index 5b8251a52..adae2e0fe 100644 --- a/src/nvcv_types/include/nvcv/TensorLayoutDef.inc +++ b/src/nvcv/src/include/nvcv/TensorLayoutDef.inc @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/TensorLayoutInfo.hpp b/src/nvcv/src/include/nvcv/TensorLayoutInfo.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/TensorLayoutInfo.hpp rename to src/nvcv/src/include/nvcv/TensorLayoutInfo.hpp index 202c6edb0..b213ee8b0 100644 --- a/src/nvcv_types/include/nvcv/TensorLayoutInfo.hpp +++ b/src/nvcv/src/include/nvcv/TensorLayoutInfo.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/TensorShape.h b/src/nvcv/src/include/nvcv/TensorShape.h similarity index 94% rename from src/nvcv_types/include/nvcv/TensorShape.h rename to src/nvcv/src/include/nvcv/TensorShape.h index 622315d15..aae14564f 100644 --- a/src/nvcv_types/include/nvcv/TensorShape.h +++ b/src/nvcv/src/include/nvcv/TensorShape.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/TensorShape.hpp b/src/nvcv/src/include/nvcv/TensorShape.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/TensorShape.hpp rename to src/nvcv/src/include/nvcv/TensorShape.hpp index 998b88083..3bf3d6fc1 100644 --- a/src/nvcv_types/include/nvcv/TensorShape.hpp +++ b/src/nvcv/src/include/nvcv/TensorShape.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/TensorShapeInfo.hpp b/src/nvcv/src/include/nvcv/TensorShapeInfo.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/TensorShapeInfo.hpp rename to src/nvcv/src/include/nvcv/TensorShapeInfo.hpp index 1ec0d42fa..75ef9148f 100644 --- a/src/nvcv_types/include/nvcv/TensorShapeInfo.hpp +++ b/src/nvcv/src/include/nvcv/TensorShapeInfo.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/Version.h b/src/nvcv/src/include/nvcv/Version.h similarity index 92% rename from src/nvcv_types/include/nvcv/Version.h rename to src/nvcv/src/include/nvcv/Version.h index fc9dc71de..68cb740c2 100644 --- a/src/nvcv_types/include/nvcv/Version.h +++ b/src/nvcv/src/include/nvcv/Version.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/alloc/Allocator.h b/src/nvcv/src/include/nvcv/alloc/Allocator.h similarity index 99% rename from src/nvcv_types/include/nvcv/alloc/Allocator.h rename to src/nvcv/src/include/nvcv/alloc/Allocator.h index c6343f25a..67c67f950 100644 --- a/src/nvcv_types/include/nvcv/alloc/Allocator.h +++ b/src/nvcv/src/include/nvcv/alloc/Allocator.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/alloc/Allocator.hpp b/src/nvcv/src/include/nvcv/alloc/Allocator.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/alloc/Allocator.hpp rename to src/nvcv/src/include/nvcv/alloc/Allocator.hpp index 8c725183b..d5a388059 100644 --- a/src/nvcv_types/include/nvcv/alloc/Allocator.hpp +++ b/src/nvcv/src/include/nvcv/alloc/Allocator.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/alloc/AllocatorImpl.hpp b/src/nvcv/src/include/nvcv/alloc/AllocatorImpl.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/alloc/AllocatorImpl.hpp rename to src/nvcv/src/include/nvcv/alloc/AllocatorImpl.hpp index bb9411094..ca439be92 100644 --- a/src/nvcv_types/include/nvcv/alloc/AllocatorImpl.hpp +++ b/src/nvcv/src/include/nvcv/alloc/AllocatorImpl.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/alloc/Fwd.h b/src/nvcv/src/include/nvcv/alloc/Fwd.h similarity index 93% rename from src/nvcv_types/include/nvcv/alloc/Fwd.h rename to src/nvcv/src/include/nvcv/alloc/Fwd.h index 9b2a78f4a..3920eec1d 100644 --- a/src/nvcv_types/include/nvcv/alloc/Fwd.h +++ b/src/nvcv/src/include/nvcv/alloc/Fwd.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/alloc/Fwd.hpp b/src/nvcv/src/include/nvcv/alloc/Fwd.hpp similarity index 88% rename from src/nvcv_types/include/nvcv/alloc/Fwd.hpp rename to src/nvcv/src/include/nvcv/alloc/Fwd.hpp index bac592f1b..2261d8da7 100644 --- a/src/nvcv_types/include/nvcv/alloc/Fwd.hpp +++ b/src/nvcv/src/include/nvcv/alloc/Fwd.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/alloc/Requirements.h b/src/nvcv/src/include/nvcv/alloc/Requirements.h similarity index 97% rename from src/nvcv_types/include/nvcv/alloc/Requirements.h rename to src/nvcv/src/include/nvcv/alloc/Requirements.h index b1a7fbf07..80664628f 100644 --- a/src/nvcv_types/include/nvcv/alloc/Requirements.h +++ b/src/nvcv/src/include/nvcv/alloc/Requirements.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/alloc/Requirements.hpp b/src/nvcv/src/include/nvcv/alloc/Requirements.hpp similarity index 97% rename from src/nvcv_types/include/nvcv/alloc/Requirements.hpp rename to src/nvcv/src/include/nvcv/alloc/Requirements.hpp index 7232fe693..2f6ab1493 100644 --- a/src/nvcv_types/include/nvcv/alloc/Requirements.hpp +++ b/src/nvcv/src/include/nvcv/alloc/Requirements.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/Align.hpp b/src/nvcv/src/include/nvcv/detail/Align.hpp similarity index 97% rename from src/nvcv_types/include/nvcv/detail/Align.hpp rename to src/nvcv/src/include/nvcv/detail/Align.hpp index 11e4d8142..4b8dd584b 100644 --- a/src/nvcv_types/include/nvcv/detail/Align.hpp +++ b/src/nvcv/src/include/nvcv/detail/Align.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/ArrayDataImpl.hpp b/src/nvcv/src/include/nvcv/detail/ArrayDataImpl.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/detail/ArrayDataImpl.hpp rename to src/nvcv/src/include/nvcv/detail/ArrayDataImpl.hpp index f13177b31..38d85496b 100644 --- a/src/nvcv_types/include/nvcv/detail/ArrayDataImpl.hpp +++ b/src/nvcv/src/include/nvcv/detail/ArrayDataImpl.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/ArrayImpl.hpp b/src/nvcv/src/include/nvcv/detail/ArrayImpl.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/detail/ArrayImpl.hpp rename to src/nvcv/src/include/nvcv/detail/ArrayImpl.hpp index 65775a479..037277b1b 100644 --- a/src/nvcv_types/include/nvcv/detail/ArrayImpl.hpp +++ b/src/nvcv/src/include/nvcv/detail/ArrayImpl.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/BaseFromMember.hpp b/src/nvcv/src/include/nvcv/detail/BaseFromMember.hpp similarity index 95% rename from src/nvcv_types/include/nvcv/detail/BaseFromMember.hpp rename to src/nvcv/src/include/nvcv/detail/BaseFromMember.hpp index 32fc1a79a..199f3bb91 100644 --- a/src/nvcv_types/include/nvcv/detail/BaseFromMember.hpp +++ b/src/nvcv/src/include/nvcv/detail/BaseFromMember.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/Callback.hpp b/src/nvcv/src/include/nvcv/detail/Callback.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/detail/Callback.hpp rename to src/nvcv/src/include/nvcv/detail/Callback.hpp index cce2bad78..f431d7a67 100644 --- a/src/nvcv_types/include/nvcv/detail/Callback.hpp +++ b/src/nvcv/src/include/nvcv/detail/Callback.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/CastsImpl.hpp b/src/nvcv/src/include/nvcv/detail/CastsImpl.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/detail/CastsImpl.hpp rename to src/nvcv/src/include/nvcv/detail/CastsImpl.hpp index a6ef77256..a87d37293 100644 --- a/src/nvcv_types/include/nvcv/detail/CastsImpl.hpp +++ b/src/nvcv/src/include/nvcv/detail/CastsImpl.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/CheckError.hpp b/src/nvcv/src/include/nvcv/detail/CheckError.hpp similarity index 93% rename from src/nvcv_types/include/nvcv/detail/CheckError.hpp rename to src/nvcv/src/include/nvcv/detail/CheckError.hpp index 15da0a29c..12993b097 100644 --- a/src/nvcv_types/include/nvcv/detail/CheckError.hpp +++ b/src/nvcv/src/include/nvcv/detail/CheckError.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/CompilerUtils.h b/src/nvcv/src/include/nvcv/detail/CompilerUtils.h similarity index 94% rename from src/nvcv_types/include/nvcv/detail/CompilerUtils.h rename to src/nvcv/src/include/nvcv/detail/CompilerUtils.h index 7b8eab073..0c7f9bfee 100644 --- a/src/nvcv_types/include/nvcv/detail/CompilerUtils.h +++ b/src/nvcv/src/include/nvcv/detail/CompilerUtils.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/Concepts.hpp b/src/nvcv/src/include/nvcv/detail/Concepts.hpp similarity index 90% rename from src/nvcv_types/include/nvcv/detail/Concepts.hpp rename to src/nvcv/src/include/nvcv/detail/Concepts.hpp index c25b6500f..a2c676d1b 100644 --- a/src/nvcv_types/include/nvcv/detail/Concepts.hpp +++ b/src/nvcv/src/include/nvcv/detail/Concepts.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/CudaFwd.h b/src/nvcv/src/include/nvcv/detail/CudaFwd.h similarity index 85% rename from src/nvcv_types/include/nvcv/detail/CudaFwd.h rename to src/nvcv/src/include/nvcv/detail/CudaFwd.h index 821a924e6..79dac8dee 100644 --- a/src/nvcv_types/include/nvcv/detail/CudaFwd.h +++ b/src/nvcv/src/include/nvcv/detail/CudaFwd.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual diff --git a/src/nvcv_types/include/nvcv/detail/FormatUtils.h b/src/nvcv/src/include/nvcv/detail/FormatUtils.h similarity index 99% rename from src/nvcv_types/include/nvcv/detail/FormatUtils.h rename to src/nvcv/src/include/nvcv/detail/FormatUtils.h index c357a6f84..e8396e1c2 100644 --- a/src/nvcv_types/include/nvcv/detail/FormatUtils.h +++ b/src/nvcv/src/include/nvcv/detail/FormatUtils.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/ImageBatchDataImpl.hpp b/src/nvcv/src/include/nvcv/detail/ImageBatchDataImpl.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/detail/ImageBatchDataImpl.hpp rename to src/nvcv/src/include/nvcv/detail/ImageBatchDataImpl.hpp index b50f3e851..0b90d2e44 100644 --- a/src/nvcv_types/include/nvcv/detail/ImageBatchDataImpl.hpp +++ b/src/nvcv/src/include/nvcv/detail/ImageBatchDataImpl.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/ImageBatchImpl.hpp b/src/nvcv/src/include/nvcv/detail/ImageBatchImpl.hpp similarity index 99% rename from src/nvcv_types/include/nvcv/detail/ImageBatchImpl.hpp rename to src/nvcv/src/include/nvcv/detail/ImageBatchImpl.hpp index 2be423c21..10a952811 100644 --- a/src/nvcv_types/include/nvcv/detail/ImageBatchImpl.hpp +++ b/src/nvcv/src/include/nvcv/detail/ImageBatchImpl.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/ImageDataImpl.hpp b/src/nvcv/src/include/nvcv/detail/ImageDataImpl.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/detail/ImageDataImpl.hpp rename to src/nvcv/src/include/nvcv/detail/ImageDataImpl.hpp index c5001807c..512367438 100644 --- a/src/nvcv_types/include/nvcv/detail/ImageDataImpl.hpp +++ b/src/nvcv/src/include/nvcv/detail/ImageDataImpl.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/ImageImpl.hpp b/src/nvcv/src/include/nvcv/detail/ImageImpl.hpp similarity index 97% rename from src/nvcv_types/include/nvcv/detail/ImageImpl.hpp rename to src/nvcv/src/include/nvcv/detail/ImageImpl.hpp index efd09de47..babd6ebda 100644 --- a/src/nvcv_types/include/nvcv/detail/ImageImpl.hpp +++ b/src/nvcv/src/include/nvcv/detail/ImageImpl.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/InPlace.hpp b/src/nvcv/src/include/nvcv/detail/InPlace.hpp similarity index 88% rename from src/nvcv_types/include/nvcv/detail/InPlace.hpp rename to src/nvcv/src/include/nvcv/detail/InPlace.hpp index 03d6a0ace..4362d7cdb 100644 --- a/src/nvcv_types/include/nvcv/detail/InPlace.hpp +++ b/src/nvcv/src/include/nvcv/detail/InPlace.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/IndexSequence.hpp b/src/nvcv/src/include/nvcv/detail/IndexSequence.hpp similarity index 92% rename from src/nvcv_types/include/nvcv/detail/IndexSequence.hpp rename to src/nvcv/src/include/nvcv/detail/IndexSequence.hpp index cb7ab0e76..22bf79d05 100644 --- a/src/nvcv_types/include/nvcv/detail/IndexSequence.hpp +++ b/src/nvcv/src/include/nvcv/detail/IndexSequence.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/TensorBatchImpl.hpp b/src/nvcv/src/include/nvcv/detail/TensorBatchImpl.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/detail/TensorBatchImpl.hpp rename to src/nvcv/src/include/nvcv/detail/TensorBatchImpl.hpp index 1d028a656..2be99edf0 100644 --- a/src/nvcv_types/include/nvcv/detail/TensorBatchImpl.hpp +++ b/src/nvcv/src/include/nvcv/detail/TensorBatchImpl.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/TensorDataImpl.hpp b/src/nvcv/src/include/nvcv/detail/TensorDataImpl.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/detail/TensorDataImpl.hpp rename to src/nvcv/src/include/nvcv/detail/TensorDataImpl.hpp index 3e42f273e..895b7567e 100644 --- a/src/nvcv_types/include/nvcv/detail/TensorDataImpl.hpp +++ b/src/nvcv/src/include/nvcv/detail/TensorDataImpl.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/TensorImpl.hpp b/src/nvcv/src/include/nvcv/detail/TensorImpl.hpp similarity index 98% rename from src/nvcv_types/include/nvcv/detail/TensorImpl.hpp rename to src/nvcv/src/include/nvcv/detail/TensorImpl.hpp index f4ceab2ae..804a9b03a 100644 --- a/src/nvcv_types/include/nvcv/detail/TensorImpl.hpp +++ b/src/nvcv/src/include/nvcv/detail/TensorImpl.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/include/nvcv/detail/TypeTraits.hpp b/src/nvcv/src/include/nvcv/detail/TypeTraits.hpp similarity index 97% rename from src/nvcv_types/include/nvcv/detail/TypeTraits.hpp rename to src/nvcv/src/include/nvcv/detail/TypeTraits.hpp index d30d70906..f8bb6fcf9 100644 --- a/src/nvcv_types/include/nvcv/detail/TypeTraits.hpp +++ b/src/nvcv/src/include/nvcv/detail/TypeTraits.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/AllocatorManager.hpp b/src/nvcv/src/priv/AllocatorManager.hpp similarity index 94% rename from src/nvcv_types/priv/AllocatorManager.hpp rename to src/nvcv/src/priv/AllocatorManager.hpp index 1b0c9a365..344e08694 100644 --- a/src/nvcv_types/priv/AllocatorManager.hpp +++ b/src/nvcv/src/priv/AllocatorManager.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/Array.cpp b/src/nvcv/src/priv/Array.cpp similarity index 97% rename from src/nvcv_types/priv/Array.cpp rename to src/nvcv/src/priv/Array.cpp index 2eb02a118..cd21d5359 100644 --- a/src/nvcv_types/priv/Array.cpp +++ b/src/nvcv/src/priv/Array.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,9 +24,9 @@ #include #include -#include -#include -#include +#include +#include +#include #include #include diff --git a/src/nvcv_types/priv/Array.hpp b/src/nvcv/src/priv/Array.hpp similarity index 96% rename from src/nvcv_types/priv/Array.hpp rename to src/nvcv/src/priv/Array.hpp index 19c76abd8..b868e0d41 100644 --- a/src/nvcv_types/priv/Array.hpp +++ b/src/nvcv/src/priv/Array.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/ArrayManager.hpp b/src/nvcv/src/priv/ArrayManager.hpp similarity index 94% rename from src/nvcv_types/priv/ArrayManager.hpp rename to src/nvcv/src/priv/ArrayManager.hpp index 973034dc8..ff43488c5 100644 --- a/src/nvcv_types/priv/ArrayManager.hpp +++ b/src/nvcv/src/priv/ArrayManager.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/ArrayWrapData.cpp b/src/nvcv/src/priv/ArrayWrapData.cpp similarity index 95% rename from src/nvcv_types/priv/ArrayWrapData.cpp rename to src/nvcv/src/priv/ArrayWrapData.cpp index 23b8d1c7e..bbc36383f 100644 --- a/src/nvcv_types/priv/ArrayWrapData.cpp +++ b/src/nvcv/src/priv/ArrayWrapData.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,8 +22,8 @@ #include "Requirements.hpp" #include -#include -#include +#include +#include #include #include diff --git a/src/nvcv_types/priv/ArrayWrapData.hpp b/src/nvcv/src/priv/ArrayWrapData.hpp similarity index 96% rename from src/nvcv_types/priv/ArrayWrapData.hpp rename to src/nvcv/src/priv/ArrayWrapData.hpp index 2b15565fa..2489da680 100644 --- a/src/nvcv_types/priv/ArrayWrapData.hpp +++ b/src/nvcv/src/priv/ArrayWrapData.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/Bitfield.hpp b/src/nvcv/src/priv/Bitfield.hpp similarity index 91% rename from src/nvcv_types/priv/Bitfield.hpp rename to src/nvcv/src/priv/Bitfield.hpp index 8ce634c31..3fb644dad 100644 --- a/src/nvcv_types/priv/Bitfield.hpp +++ b/src/nvcv/src/priv/Bitfield.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/CMakeLists.txt b/src/nvcv/src/priv/CMakeLists.txt similarity index 92% rename from src/nvcv_types/priv/CMakeLists.txt rename to src/nvcv/src/priv/CMakeLists.txt index 049f173ef..b75ebe546 100644 --- a/src/nvcv_types/priv/CMakeLists.txt +++ b/src/nvcv/src/priv/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +find_package(CUDAToolkit REQUIRED) + add_library(nvcv_types_priv STATIC Context.cpp TLS.cpp @@ -41,7 +43,7 @@ add_library(nvcv_types_priv STATIC target_include_directories(nvcv_types_priv PUBLIC - ../.. + ../../.. 
) target_compile_definitions(nvcv_types_priv PUBLIC -DNVCV_EXPORTING=1) diff --git a/src/nvcv_types/priv/ColorFormat.cpp b/src/nvcv/src/priv/ColorFormat.cpp similarity index 91% rename from src/nvcv_types/priv/ColorFormat.cpp rename to src/nvcv/src/priv/ColorFormat.cpp index 685a82f5f..12b5dfa35 100644 --- a/src/nvcv_types/priv/ColorFormat.cpp +++ b/src/nvcv/src/priv/ColorFormat.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/ColorFormat.hpp b/src/nvcv/src/priv/ColorFormat.hpp similarity index 91% rename from src/nvcv_types/priv/ColorFormat.hpp rename to src/nvcv/src/priv/ColorFormat.hpp index 210204ce1..4d14cb0e9 100644 --- a/src/nvcv_types/priv/ColorFormat.hpp +++ b/src/nvcv/src/priv/ColorFormat.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/ColorSpec.cpp b/src/nvcv/src/priv/ColorSpec.cpp similarity index 99% rename from src/nvcv_types/priv/ColorSpec.cpp rename to src/nvcv/src/priv/ColorSpec.cpp index 778257957..6dfda07dd 100644 --- a/src/nvcv_types/priv/ColorSpec.cpp +++ b/src/nvcv/src/priv/ColorSpec.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "Exception.hpp" #include "TLS.hpp" -#include +#include #include diff --git a/src/nvcv_types/priv/ColorSpec.hpp b/src/nvcv/src/priv/ColorSpec.hpp similarity index 97% rename from src/nvcv_types/priv/ColorSpec.hpp rename to src/nvcv/src/priv/ColorSpec.hpp index d0ac6f5cc..cbe45b4ff 100644 --- a/src/nvcv_types/priv/ColorSpec.hpp +++ b/src/nvcv/src/priv/ColorSpec.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/Context.cpp b/src/nvcv/src/priv/Context.cpp similarity index 94% rename from src/nvcv_types/priv/Context.cpp rename to src/nvcv/src/priv/Context.cpp index c1505716b..41d3dcb75 100644 --- a/src/nvcv_types/priv/Context.cpp +++ b/src/nvcv/src/priv/Context.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #include "HandleManagerImpl.hpp" -#include +#include namespace nvcv::priv { diff --git a/src/nvcv_types/priv/Context.hpp b/src/nvcv/src/priv/Context.hpp similarity index 96% rename from src/nvcv_types/priv/Context.hpp rename to src/nvcv/src/priv/Context.hpp index b963a0ef8..d1be14ace 100644 --- a/src/nvcv_types/priv/Context.hpp +++ b/src/nvcv/src/priv/Context.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/CustomAllocator.cpp b/src/nvcv/src/priv/CustomAllocator.cpp similarity index 98% rename from src/nvcv_types/priv/CustomAllocator.cpp rename to src/nvcv/src/priv/CustomAllocator.cpp index c4d839ffb..54bfde07f 100644 --- a/src/nvcv_types/priv/CustomAllocator.cpp +++ b/src/nvcv/src/priv/CustomAllocator.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include #include -#include +#include #include #include // for aligned_alloc diff --git a/src/nvcv_types/priv/CustomAllocator.hpp b/src/nvcv/src/priv/CustomAllocator.hpp similarity index 96% rename from src/nvcv_types/priv/CustomAllocator.hpp rename to src/nvcv/src/priv/CustomAllocator.hpp index 362020915..1295714ed 100644 --- a/src/nvcv_types/priv/CustomAllocator.hpp +++ b/src/nvcv/src/priv/CustomAllocator.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/DataLayout.cpp b/src/nvcv/src/priv/DataLayout.cpp similarity index 99% rename from src/nvcv_types/priv/DataLayout.cpp rename to src/nvcv/src/priv/DataLayout.cpp index 56c35ed7a..2d736f8fa 100644 --- a/src/nvcv_types/priv/DataLayout.cpp +++ b/src/nvcv/src/priv/DataLayout.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,9 +21,9 @@ #include "Exception.hpp" #include "TLS.hpp" -#include -#include -#include +#include +#include +#include #include diff --git a/src/nvcv_types/priv/DataLayout.hpp b/src/nvcv/src/priv/DataLayout.hpp similarity index 98% rename from src/nvcv_types/priv/DataLayout.hpp rename to src/nvcv/src/priv/DataLayout.hpp index da7d09932..3bdbe4f98 100644 --- a/src/nvcv_types/priv/DataLayout.hpp +++ b/src/nvcv/src/priv/DataLayout.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/DataType.cpp b/src/nvcv/src/priv/DataType.cpp similarity index 97% rename from src/nvcv_types/priv/DataType.cpp rename to src/nvcv/src/priv/DataType.cpp index c24ebafe7..0c2299bc9 100644 --- a/src/nvcv_types/priv/DataType.cpp +++ b/src/nvcv/src/priv/DataType.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,8 +21,8 @@ #include "Exception.hpp" #include "ImageFormat.hpp" -#include -#include +#include +#include #include diff --git a/src/nvcv_types/priv/DataType.hpp b/src/nvcv/src/priv/DataType.hpp similarity index 95% rename from src/nvcv_types/priv/DataType.hpp rename to src/nvcv/src/priv/DataType.hpp index 2fc36a784..4ad8b8c41 100644 --- a/src/nvcv_types/priv/DataType.hpp +++ b/src/nvcv/src/priv/DataType.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/DefaultAllocator.cpp b/src/nvcv/src/priv/DefaultAllocator.cpp similarity index 97% rename from src/nvcv_types/priv/DefaultAllocator.cpp rename to src/nvcv/src/priv/DefaultAllocator.cpp index 2b364fe19..df9a2c921 100644 --- a/src/nvcv_types/priv/DefaultAllocator.cpp +++ b/src/nvcv/src/priv/DefaultAllocator.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include // for aligned_alloc diff --git a/src/nvcv_types/priv/DefaultAllocator.hpp b/src/nvcv/src/priv/DefaultAllocator.hpp similarity index 95% rename from src/nvcv_types/priv/DefaultAllocator.hpp rename to src/nvcv/src/priv/DefaultAllocator.hpp index ad1f7f5e8..36405e19c 100644 --- a/src/nvcv_types/priv/DefaultAllocator.hpp +++ b/src/nvcv/src/priv/DefaultAllocator.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/Exception.cpp b/src/nvcv/src/priv/Exception.cpp similarity index 93% rename from src/nvcv_types/priv/Exception.cpp rename to src/nvcv/src/priv/Exception.cpp index acbba8a8f..b7addfbf8 100644 --- a/src/nvcv_types/priv/Exception.cpp +++ b/src/nvcv/src/priv/Exception.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #include "Status.hpp" -#include +#include #include diff --git a/src/nvcv_types/priv/Exception.hpp b/src/nvcv/src/priv/Exception.hpp similarity index 96% rename from src/nvcv_types/priv/Exception.hpp rename to src/nvcv/src/priv/Exception.hpp index ace6a70d9..6c7f12e9a 100644 --- a/src/nvcv_types/priv/Exception.hpp +++ b/src/nvcv/src/priv/Exception.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/HandleManager.hpp b/src/nvcv/src/priv/HandleManager.hpp similarity index 97% rename from src/nvcv_types/priv/HandleManager.hpp rename to src/nvcv/src/priv/HandleManager.hpp index efef20706..c5c0b4aba 100644 --- a/src/nvcv_types/priv/HandleManager.hpp +++ b/src/nvcv/src/priv/HandleManager.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,8 +20,8 @@ #include #include -#include -#include +#include +#include #include #include diff --git a/src/nvcv_types/priv/HandleManagerImpl.hpp b/src/nvcv/src/priv/HandleManagerImpl.hpp similarity index 99% rename from src/nvcv_types/priv/HandleManagerImpl.hpp rename to src/nvcv/src/priv/HandleManagerImpl.hpp index 6a123a9e6..66c0e41db 100644 --- a/src/nvcv_types/priv/HandleManagerImpl.hpp +++ b/src/nvcv/src/priv/HandleManagerImpl.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/HandleTraits.hpp b/src/nvcv/src/priv/HandleTraits.hpp similarity index 93% rename from src/nvcv_types/priv/HandleTraits.hpp rename to src/nvcv/src/priv/HandleTraits.hpp index cba0036ad..663b1d36d 100644 --- a/src/nvcv_types/priv/HandleTraits.hpp +++ b/src/nvcv/src/priv/HandleTraits.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/IAllocator.cpp b/src/nvcv/src/priv/IAllocator.cpp similarity index 97% rename from src/nvcv_types/priv/IAllocator.cpp rename to src/nvcv/src/priv/IAllocator.cpp index dbd7f03e4..205ced716 100644 --- a/src/nvcv_types/priv/IAllocator.cpp +++ b/src/nvcv/src/priv/IAllocator.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,7 +20,7 @@ #include "AllocatorManager.hpp" #include "IContext.hpp" -#include +#include namespace nvcv::priv { diff --git a/src/nvcv_types/priv/IAllocator.hpp b/src/nvcv/src/priv/IAllocator.hpp similarity index 97% rename from src/nvcv_types/priv/IAllocator.hpp rename to src/nvcv/src/priv/IAllocator.hpp index 681df2d27..5e72a9a3e 100644 --- a/src/nvcv_types/priv/IAllocator.hpp +++ b/src/nvcv/src/priv/IAllocator.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/IArray.hpp b/src/nvcv/src/priv/IArray.hpp similarity index 96% rename from src/nvcv_types/priv/IArray.hpp rename to src/nvcv/src/priv/IArray.hpp index 0b9127bdd..e89d41b1d 100644 --- a/src/nvcv_types/priv/IArray.hpp +++ b/src/nvcv/src/priv/IArray.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/IContext.hpp b/src/nvcv/src/priv/IContext.hpp similarity index 96% rename from src/nvcv_types/priv/IContext.hpp rename to src/nvcv/src/priv/IContext.hpp index 309d77a62..db20f5532 100644 --- a/src/nvcv_types/priv/IContext.hpp +++ b/src/nvcv/src/priv/IContext.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/ICoreObject.hpp b/src/nvcv/src/priv/ICoreObject.hpp similarity index 98% rename from src/nvcv_types/priv/ICoreObject.hpp rename to src/nvcv/src/priv/ICoreObject.hpp index 4f76a8653..22edbd6fb 100644 --- a/src/nvcv_types/priv/ICoreObject.hpp +++ b/src/nvcv/src/priv/ICoreObject.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/IImage.hpp b/src/nvcv/src/priv/IImage.hpp similarity index 95% rename from src/nvcv_types/priv/IImage.hpp rename to src/nvcv/src/priv/IImage.hpp index 3302ff79b..286fef24f 100644 --- a/src/nvcv_types/priv/IImage.hpp +++ b/src/nvcv/src/priv/IImage.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/IImageBatch.hpp b/src/nvcv/src/priv/IImageBatch.hpp similarity index 96% rename from src/nvcv_types/priv/IImageBatch.hpp rename to src/nvcv/src/priv/IImageBatch.hpp index 8cc54c2e8..be78d77c1 100644 --- a/src/nvcv_types/priv/IImageBatch.hpp +++ b/src/nvcv/src/priv/IImageBatch.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/ITensor.hpp b/src/nvcv/src/priv/ITensor.hpp similarity index 95% rename from src/nvcv_types/priv/ITensor.hpp rename to src/nvcv/src/priv/ITensor.hpp index 0116c5396..1da8a8028 100644 --- a/src/nvcv_types/priv/ITensor.hpp +++ b/src/nvcv/src/priv/ITensor.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/ITensorBatch.hpp b/src/nvcv/src/priv/ITensorBatch.hpp similarity index 96% rename from src/nvcv_types/priv/ITensorBatch.hpp rename to src/nvcv/src/priv/ITensorBatch.hpp index 14be328b6..dda279b17 100644 --- a/src/nvcv_types/priv/ITensorBatch.hpp +++ b/src/nvcv/src/priv/ITensorBatch.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/Image.cpp b/src/nvcv/src/priv/Image.cpp similarity index 98% rename from src/nvcv_types/priv/Image.cpp rename to src/nvcv/src/priv/Image.cpp index 2ec1703fb..9e4b937e0 100644 --- a/src/nvcv_types/priv/Image.cpp +++ b/src/nvcv/src/priv/Image.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,8 +23,8 @@ #include "Requirements.hpp" #include -#include -#include +#include +#include #include #include diff --git a/src/nvcv_types/priv/Image.hpp b/src/nvcv/src/priv/Image.hpp similarity index 97% rename from src/nvcv_types/priv/Image.hpp rename to src/nvcv/src/priv/Image.hpp index 51c22dd62..a5b497ac4 100644 --- a/src/nvcv_types/priv/Image.hpp +++ b/src/nvcv/src/priv/Image.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/ImageBatchManager.hpp b/src/nvcv/src/priv/ImageBatchManager.hpp similarity index 94% rename from src/nvcv_types/priv/ImageBatchManager.hpp rename to src/nvcv/src/priv/ImageBatchManager.hpp index 53a79bd41..0fd06c484 100644 --- a/src/nvcv_types/priv/ImageBatchManager.hpp +++ b/src/nvcv/src/priv/ImageBatchManager.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/ImageBatchVarShape.cpp b/src/nvcv/src/priv/ImageBatchVarShape.cpp similarity index 99% rename from src/nvcv_types/priv/ImageBatchVarShape.cpp rename to src/nvcv/src/priv/ImageBatchVarShape.cpp index a2f9a5868..67a449f14 100644 --- a/src/nvcv_types/priv/ImageBatchVarShape.cpp +++ b/src/nvcv/src/priv/ImageBatchVarShape.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,8 +25,8 @@ #include "Requirements.hpp" #include -#include -#include +#include +#include #include #include diff --git a/src/nvcv_types/priv/ImageBatchVarShape.hpp b/src/nvcv/src/priv/ImageBatchVarShape.hpp similarity index 97% rename from src/nvcv_types/priv/ImageBatchVarShape.hpp rename to src/nvcv/src/priv/ImageBatchVarShape.hpp index d962672b3..3bbb803b9 100644 --- a/src/nvcv_types/priv/ImageBatchVarShape.hpp +++ b/src/nvcv/src/priv/ImageBatchVarShape.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/ImageFormat.cpp b/src/nvcv/src/priv/ImageFormat.cpp similarity index 99% rename from src/nvcv_types/priv/ImageFormat.cpp rename to src/nvcv/src/priv/ImageFormat.cpp index b90056a4b..307fdbd10 100644 --- a/src/nvcv_types/priv/ImageFormat.cpp +++ b/src/nvcv/src/priv/ImageFormat.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,7 +24,7 @@ #include "Exception.hpp" #include -#include +#include #include #include diff --git a/src/nvcv_types/priv/ImageFormat.hpp b/src/nvcv/src/priv/ImageFormat.hpp similarity index 98% rename from src/nvcv_types/priv/ImageFormat.hpp rename to src/nvcv/src/priv/ImageFormat.hpp index 6ea2e12c8..1019ddaeb 100644 --- a/src/nvcv_types/priv/ImageFormat.hpp +++ b/src/nvcv/src/priv/ImageFormat.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,7 +24,7 @@ #include "Size.hpp" #include -#include +#include #include #include diff --git a/src/nvcv_types/priv/ImageManager.hpp b/src/nvcv/src/priv/ImageManager.hpp similarity index 94% rename from src/nvcv_types/priv/ImageManager.hpp rename to src/nvcv/src/priv/ImageManager.hpp index 4dee534e2..9fefbb754 100644 --- a/src/nvcv_types/priv/ImageManager.hpp +++ b/src/nvcv/src/priv/ImageManager.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/LockFreeStack.hpp b/src/nvcv/src/priv/LockFreeStack.hpp similarity index 98% rename from src/nvcv_types/priv/LockFreeStack.hpp rename to src/nvcv/src/priv/LockFreeStack.hpp index 81419c9ac..a1681e1b0 100644 --- a/src/nvcv_types/priv/LockFreeStack.hpp +++ b/src/nvcv/src/priv/LockFreeStack.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/Requirements.cpp b/src/nvcv/src/priv/Requirements.cpp similarity index 96% rename from src/nvcv_types/priv/Requirements.cpp rename to src/nvcv/src/priv/Requirements.cpp index 43da2f653..adcb6fbc2 100644 --- a/src/nvcv_types/priv/Requirements.cpp +++ b/src/nvcv/src/priv/Requirements.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #include "Exception.hpp" -#include +#include #include #include diff --git a/src/nvcv_types/priv/Requirements.hpp b/src/nvcv/src/priv/Requirements.hpp similarity index 91% rename from src/nvcv_types/priv/Requirements.hpp rename to src/nvcv/src/priv/Requirements.hpp index 8b26fa76d..7bb725ef1 100644 --- a/src/nvcv_types/priv/Requirements.hpp +++ b/src/nvcv/src/priv/Requirements.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/SharedCoreObj.hpp b/src/nvcv/src/priv/SharedCoreObj.hpp similarity index 97% rename from src/nvcv_types/priv/SharedCoreObj.hpp rename to src/nvcv/src/priv/SharedCoreObj.hpp index ccd79e041..405e19467 100644 --- a/src/nvcv_types/priv/SharedCoreObj.hpp +++ b/src/nvcv/src/priv/SharedCoreObj.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/Size.hpp b/src/nvcv/src/priv/Size.hpp similarity index 85% rename from src/nvcv_types/priv/Size.hpp rename to src/nvcv/src/priv/Size.hpp index 83a1ab419..fee1ab100 100644 --- a/src/nvcv_types/priv/Size.hpp +++ b/src/nvcv/src/priv/Size.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ #ifndef NVCV_CORE_PRIV_SIZE_HPP #define NVCV_CORE_PRIV_SIZE_HPP -#include +#include namespace nvcv::priv { diff --git a/src/nvcv_types/priv/Status.cpp b/src/nvcv/src/priv/Status.cpp similarity index 96% rename from src/nvcv_types/priv/Status.cpp rename to src/nvcv/src/priv/Status.cpp index 4675aba86..1a19b27ae 100644 --- a/src/nvcv_types/priv/Status.cpp +++ b/src/nvcv/src/priv/Status.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "TLS.hpp" #include -#include +#include #include diff --git a/src/nvcv_types/priv/Status.hpp b/src/nvcv/src/priv/Status.hpp similarity index 93% rename from src/nvcv_types/priv/Status.hpp rename to src/nvcv/src/priv/Status.hpp index fdbd28bfd..0765bd824 100644 --- a/src/nvcv_types/priv/Status.hpp +++ b/src/nvcv/src/priv/Status.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/SymbolVersioning.hpp b/src/nvcv/src/priv/SymbolVersioning.hpp similarity index 85% rename from src/nvcv_types/priv/SymbolVersioning.hpp rename to src/nvcv/src/priv/SymbolVersioning.hpp index 9b75bd458..9fee0b972 100644 --- a/src/nvcv_types/priv/SymbolVersioning.hpp +++ b/src/nvcv/src/priv/SymbolVersioning.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ #ifndef NVCV_CORE_PRIV_SYMBOLVERSIONING_HPP #define NVCV_CORE_PRIV_SYMBOLVERSIONING_HPP -#include +#include #define NVCV_DEFINE_API(...) NVCV_PROJ_DEFINE_API(NVCV, __VA_ARGS__) #define NVCV_DEFINE_OLD_API(...) NVCV_PROJ_DEFINE_OLD_API(NVCV, __VA_ARGS__) diff --git a/src/nvcv_types/priv/TLS.cpp b/src/nvcv/src/priv/TLS.cpp similarity index 87% rename from src/nvcv_types/priv/TLS.cpp rename to src/nvcv/src/priv/TLS.cpp index cfe66234c..a3df90527 100644 --- a/src/nvcv_types/priv/TLS.cpp +++ b/src/nvcv/src/priv/TLS.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/TLS.hpp b/src/nvcv/src/priv/TLS.hpp similarity index 96% rename from src/nvcv_types/priv/TLS.hpp rename to src/nvcv/src/priv/TLS.hpp index 2e53cba6f..7b4bb12c2 100644 --- a/src/nvcv_types/priv/TLS.hpp +++ b/src/nvcv/src/priv/TLS.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/Tensor.cpp b/src/nvcv/src/priv/Tensor.cpp similarity index 98% rename from src/nvcv_types/priv/Tensor.cpp rename to src/nvcv/src/priv/Tensor.cpp index 0c49daed9..b681b654d 100644 --- a/src/nvcv_types/priv/Tensor.cpp +++ b/src/nvcv/src/priv/Tensor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,9 +26,9 @@ #include "TensorShape.hpp" #include -#include -#include -#include +#include +#include +#include #include #include diff --git a/src/nvcv_types/priv/Tensor.hpp b/src/nvcv/src/priv/Tensor.hpp similarity index 96% rename from src/nvcv_types/priv/Tensor.hpp rename to src/nvcv/src/priv/Tensor.hpp index 9d6dc52b7..753c678ae 100644 --- a/src/nvcv_types/priv/Tensor.hpp +++ b/src/nvcv/src/priv/Tensor.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/TensorBatch.cpp b/src/nvcv/src/priv/TensorBatch.cpp similarity index 98% rename from src/nvcv_types/priv/TensorBatch.cpp rename to src/nvcv/src/priv/TensorBatch.cpp index 356bdbe62..3c5b75887 100644 --- a/src/nvcv_types/priv/TensorBatch.cpp +++ b/src/nvcv/src/priv/TensorBatch.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,8 +20,8 @@ #include "Requirements.hpp" #include "TensorBatchManager.hpp" -#include -#include +#include +#include namespace nvcv::priv { diff --git a/src/nvcv_types/priv/TensorBatch.hpp b/src/nvcv/src/priv/TensorBatch.hpp similarity index 98% rename from src/nvcv_types/priv/TensorBatch.hpp rename to src/nvcv/src/priv/TensorBatch.hpp index 16a41f537..9dd075962 100644 --- a/src/nvcv_types/priv/TensorBatch.hpp +++ b/src/nvcv/src/priv/TensorBatch.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/TensorBatchManager.hpp b/src/nvcv/src/priv/TensorBatchManager.hpp similarity index 94% rename from src/nvcv_types/priv/TensorBatchManager.hpp rename to src/nvcv/src/priv/TensorBatchManager.hpp index d9082a317..2a93667cd 100644 --- a/src/nvcv_types/priv/TensorBatchManager.hpp +++ b/src/nvcv/src/priv/TensorBatchManager.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/TensorData.cpp b/src/nvcv/src/priv/TensorData.cpp similarity index 99% rename from src/nvcv_types/priv/TensorData.cpp rename to src/nvcv/src/priv/TensorData.cpp index 68edc8cec..579ff6e91 100644 --- a/src/nvcv_types/priv/TensorData.cpp +++ b/src/nvcv/src/priv/TensorData.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/TensorData.hpp b/src/nvcv/src/priv/TensorData.hpp similarity index 94% rename from src/nvcv_types/priv/TensorData.hpp rename to src/nvcv/src/priv/TensorData.hpp index 6ad859151..a227ac19a 100644 --- a/src/nvcv_types/priv/TensorData.hpp +++ b/src/nvcv/src/priv/TensorData.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/TensorLayout.cpp b/src/nvcv/src/priv/TensorLayout.cpp similarity index 96% rename from src/nvcv_types/priv/TensorLayout.cpp rename to src/nvcv/src/priv/TensorLayout.cpp index a4bd3ce7f..b4760fe94 100644 --- a/src/nvcv_types/priv/TensorLayout.cpp +++ b/src/nvcv/src/priv/TensorLayout.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #include "Exception.hpp" -#include +#include namespace nvcv::priv { diff --git a/src/nvcv_types/priv/TensorLayout.hpp b/src/nvcv/src/priv/TensorLayout.hpp similarity index 93% rename from src/nvcv_types/priv/TensorLayout.hpp rename to src/nvcv/src/priv/TensorLayout.hpp index 16ab20b1f..a765acf9a 100644 --- a/src/nvcv_types/priv/TensorLayout.hpp +++ b/src/nvcv/src/priv/TensorLayout.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/TensorManager.hpp b/src/nvcv/src/priv/TensorManager.hpp similarity index 94% rename from src/nvcv_types/priv/TensorManager.hpp rename to src/nvcv/src/priv/TensorManager.hpp index 85a9e0c79..27b48dfa4 100644 --- a/src/nvcv_types/priv/TensorManager.hpp +++ b/src/nvcv/src/priv/TensorManager.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/TensorShape.cpp b/src/nvcv/src/priv/TensorShape.cpp similarity index 93% rename from src/nvcv_types/priv/TensorShape.cpp rename to src/nvcv/src/priv/TensorShape.cpp index 0bad54a0c..298cf3911 100644 --- a/src/nvcv_types/priv/TensorShape.cpp +++ b/src/nvcv/src/priv/TensorShape.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/TensorShape.hpp b/src/nvcv/src/priv/TensorShape.hpp similarity index 86% rename from src/nvcv_types/priv/TensorShape.hpp rename to src/nvcv/src/priv/TensorShape.hpp index 318df9789..ed6d17991 100644 --- a/src/nvcv_types/priv/TensorShape.hpp +++ b/src/nvcv/src/priv/TensorShape.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #define NVCV_CORE_PRIV_TENSORSHAPE_HPP #include -#include +#include namespace nvcv::priv { diff --git a/src/nvcv_types/priv/TensorWrapDataStrided.cpp b/src/nvcv/src/priv/TensorWrapDataStrided.cpp similarity index 96% rename from src/nvcv_types/priv/TensorWrapDataStrided.cpp rename to src/nvcv/src/priv/TensorWrapDataStrided.cpp index ef77b7c62..c199668d5 100644 --- a/src/nvcv_types/priv/TensorWrapDataStrided.cpp +++ b/src/nvcv/src/priv/TensorWrapDataStrided.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,8 +24,8 @@ #include "TensorLayout.hpp" #include -#include -#include +#include +#include #include #include diff --git a/src/nvcv_types/priv/TensorWrapDataStrided.hpp b/src/nvcv/src/priv/TensorWrapDataStrided.hpp similarity index 95% rename from src/nvcv_types/priv/TensorWrapDataStrided.hpp rename to src/nvcv/src/priv/TensorWrapDataStrided.hpp index 0987b258d..039871875 100644 --- a/src/nvcv_types/priv/TensorWrapDataStrided.hpp +++ b/src/nvcv/src/priv/TensorWrapDataStrided.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/nvcv_types/priv/Version.hpp b/src/nvcv/src/priv/Version.hpp similarity index 86% rename from src/nvcv_types/priv/Version.hpp rename to src/nvcv/src/priv/Version.hpp index d64af14a9..60ef87242 100644 --- a/src/nvcv_types/priv/Version.hpp +++ b/src/nvcv/src/priv/Version.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #define NVCV_CORE_PRIV_VERSION_HPP #include -#include +#include namespace nvcv::priv { diff --git a/src/util/Algorithm.hpp b/src/nvcv/util/Algorithm.hpp similarity index 90% rename from src/util/Algorithm.hpp rename to src/nvcv/util/Algorithm.hpp index 58784e195..fc562f079 100644 --- a/src/util/Algorithm.hpp +++ b/src/nvcv/util/Algorithm.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/Assert.cpp b/src/nvcv/util/Assert.cpp similarity index 90% rename from src/util/Assert.cpp rename to src/nvcv/util/Assert.cpp index 9cc074b1f..977c2436b 100644 --- a/src/util/Assert.cpp +++ b/src/nvcv/util/Assert.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/Assert.h b/src/nvcv/util/Assert.h similarity index 96% rename from src/util/Assert.h rename to src/nvcv/util/Assert.h index c2628673d..908b087fa 100644 --- a/src/util/Assert.h +++ b/src/nvcv/util/Assert.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/CMakeLists.txt b/src/nvcv/util/CMakeLists.txt similarity index 95% rename from src/util/CMakeLists.txt rename to src/nvcv/util/CMakeLists.txt index a889a14a6..748cc9843 100644 --- a/src/util/CMakeLists.txt +++ b/src/nvcv/util/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,8 +15,6 @@ find_package(CUDAToolkit REQUIRED) -set(CMAKE_POSITION_INDEPENDENT_CODE on) - # nvcv_util_sanitizer --------------------------------- add_library(nvcv_util_sanitizer SanitizerOptions.c @@ -116,15 +114,15 @@ if(ENABLE_COMPAT_OLD_GLIBC) # Compat.c needs Compat.h get_source_file_property(COMPAT_INCDIRS ${CMAKE_CURRENT_BINARY_DIR}/Compat.c INCLUDE_DIRECTORIES) if(COMPAT_INCDIRS) - list(APPEND COMPAT_INCDIRS ${CMAKE_CURRENT_SOURCE_DIR}/..) + list(APPEND COMPAT_INCDIRS ${CMAKE_CURRENT_SOURCE_DIR}/../..) else() - set(COMPAT_INCDIRS ${CMAKE_CURRENT_SOURCE_DIR}/..) + set(COMPAT_INCDIRS ${CMAKE_CURRENT_SOURCE_DIR}/../..) endif() set_source_files_properties(${CMAKE_CURRENT_BINARY_DIR}/Compat.c PROPERTIES INCLUDE_DIRECTORIES "${COMPAT_INCDIRS}") target_include_directories(nvcv_util_compat INTERFACE - ${CMAKE_CURRENT_SOURCE_DIR}/.. + ${CMAKE_CURRENT_SOURCE_DIR}/../.. ) # glibc-2.17 came split into different libraries. @@ -174,7 +172,7 @@ if(LTO_ENABLED AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPIL target_compile_options(nvcv_util_symver INTERFACE -fno-lto) endif() target_include_directories(nvcv_util_symver - INTERFACE .. + INTERFACE ../.. ) # nvcv_util --------------------------------- @@ -183,17 +181,11 @@ add_library(nvcv_util CheckError.cpp String.cpp Version.cpp - TensorDataUtils.cpp - Event.cpp - Stream.cpp - StreamId.cpp ) target_include_directories(nvcv_util INTERFACE - .. - PUBLIC - ../core/include + ../.. ) target_link_libraries(nvcv_util diff --git a/src/util/CheckError.cpp b/src/nvcv/util/CheckError.cpp similarity index 97% rename from src/util/CheckError.cpp rename to src/nvcv/util/CheckError.cpp index 70de3765c..08a9f7e23 100644 --- a/src/util/CheckError.cpp +++ b/src/nvcv/util/CheckError.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/CheckError.hpp b/src/nvcv/util/CheckError.hpp similarity index 98% rename from src/util/CheckError.hpp rename to src/nvcv/util/CheckError.hpp index a7d80dd6e..b24c5c770 100644 --- a/src/util/CheckError.hpp +++ b/src/nvcv/util/CheckError.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,7 +28,7 @@ #include #if NVCV_EXPORTING -# include +# include #else # include #endif diff --git a/src/util/Compat.c.in b/src/nvcv/util/Compat.c.in similarity index 99% rename from src/util/Compat.c.in rename to src/nvcv/util/Compat.c.in index dd008720d..0d94335ff 100644 --- a/src/util/Compat.c.in +++ b/src/nvcv/util/Compat.c.in @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ #define _GNU_SOURCE -#include +#include #include #include #include diff --git a/src/util/Compat.cpp b/src/nvcv/util/Compat.cpp similarity index 98% rename from src/util/Compat.cpp rename to src/nvcv/util/Compat.cpp index 77f8f046e..e018475a6 100644 --- a/src/util/Compat.cpp +++ b/src/nvcv/util/Compat.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/Compat.h b/src/nvcv/util/Compat.h similarity index 93% rename from src/util/Compat.h rename to src/nvcv/util/Compat.h index b114aee9d..27d50b3cc 100644 --- a/src/util/Compat.h +++ b/src/nvcv/util/Compat.h @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/Compiler.hpp b/src/nvcv/util/Compiler.hpp similarity index 95% rename from src/util/Compiler.hpp rename to src/nvcv/util/Compiler.hpp index 85dea82f1..ef4a0a1f6 100644 --- a/src/util/Compiler.hpp +++ b/src/nvcv/util/Compiler.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/Math.hpp b/src/nvcv/util/Math.hpp similarity index 97% rename from src/util/Math.hpp rename to src/nvcv/util/Math.hpp index c558a8ee3..b708fcef1 100644 --- a/src/util/Math.hpp +++ b/src/nvcv/util/Math.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/Metaprogramming.hpp b/src/nvcv/util/Metaprogramming.hpp similarity index 88% rename from src/util/Metaprogramming.hpp rename to src/nvcv/util/Metaprogramming.hpp index bc905045b..0f679a270 100644 --- a/src/util/Metaprogramming.hpp +++ b/src/nvcv/util/Metaprogramming.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/Ranges.hpp b/src/nvcv/util/Ranges.hpp similarity index 95% rename from src/util/Ranges.hpp rename to src/nvcv/util/Ranges.hpp index cb8766f50..f094181be 100644 --- a/src/util/Ranges.hpp +++ b/src/nvcv/util/Ranges.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/SanitizerOptions.c b/src/nvcv/util/SanitizerOptions.c similarity index 97% rename from src/util/SanitizerOptions.c rename to src/nvcv/util/SanitizerOptions.c index 0c8d80bc7..2696c9821 100644 --- a/src/util/SanitizerOptions.c +++ b/src/nvcv/util/SanitizerOptions.c @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/Size.hpp b/src/nvcv/util/Size.hpp similarity index 90% rename from src/util/Size.hpp rename to src/nvcv/util/Size.hpp index d3ec261bb..ee88af225 100644 --- a/src/util/Size.hpp +++ b/src/nvcv/util/Size.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/StaticVector.hpp b/src/nvcv/util/StaticVector.hpp similarity index 99% rename from src/util/StaticVector.hpp rename to src/nvcv/util/StaticVector.hpp index faae3d29f..57c046eb6 100644 --- a/src/util/StaticVector.hpp +++ b/src/nvcv/util/StaticVector.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/String.cpp b/src/nvcv/util/String.cpp similarity index 98% rename from src/util/String.cpp rename to src/nvcv/util/String.cpp index d9788e757..2eb5b6aff 100644 --- a/src/util/String.cpp +++ b/src/nvcv/util/String.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/String.hpp b/src/nvcv/util/String.hpp similarity index 94% rename from src/util/String.hpp rename to src/nvcv/util/String.hpp index e35462691..24bbbb1d4 100644 --- a/src/util/String.hpp +++ b/src/nvcv/util/String.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/SymbolVersioning.hpp b/src/nvcv/util/SymbolVersioning.hpp similarity index 93% rename from src/util/SymbolVersioning.hpp rename to src/nvcv/util/SymbolVersioning.hpp index cac6cd28e..48ca24f5b 100644 --- a/src/util/SymbolVersioning.hpp +++ b/src/nvcv/util/SymbolVersioning.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* * SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES * SPDX-License-Identifier: Apache-2.0 @@ -30,7 +30,7 @@ At first, all public functions are defined like this: implementation 1; } -If we need to add a new version of nvcvFooCreate, we update cv-cuda code like this: +If we need to add a new version of nvcvFooCreate, we update NVCV code like this: Header: #if NVCV_API_VERSION == 100 @@ -53,7 +53,7 @@ If we need to add a new version of nvcvFooCreate, we update cv-cuda code like th } If user defines NVCV_API_VERSION == 100, he will use the first definition and linker will -link to the cvcuda-1.0 API. If nothing is defined, he will use the most recent definition. +link to the nvcv_types-1.0 API. If nothing is defined, he will use the most recent definition. Users using dlopen to retrieve our functions will have to use dlvsym to specify what version to get. Regular dlsym will always pick the most recent one. diff --git a/src/util/Version.cpp b/src/nvcv/util/Version.cpp similarity index 89% rename from src/util/Version.cpp rename to src/nvcv/util/Version.cpp index 6823a0ea8..4f682a8fc 100644 --- a/src/util/Version.cpp +++ b/src/nvcv/util/Version.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/Version.hpp b/src/nvcv/util/Version.hpp similarity index 97% rename from src/util/Version.hpp rename to src/nvcv/util/Version.hpp index 8f68caf81..35f7484fe 100644 --- a/src/util/Version.hpp +++ b/src/nvcv/util/Version.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual diff --git a/src/util/compat_symbols.txt b/src/nvcv/util/compat_symbols.txt similarity index 97% rename from src/util/compat_symbols.txt rename to src/nvcv/util/compat_symbols.txt index 9efe16923..a4044aed7 100644 --- a/src/util/compat_symbols.txt +++ b/src/nvcv/util/compat_symbols.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/stubs/.gitattributes b/src/nvcv/util/stubs/.gitattributes similarity index 85% rename from src/util/stubs/.gitattributes rename to src/nvcv/util/stubs/.gitattributes index a216adb50..6c4e71657 100644 --- a/src/util/stubs/.gitattributes +++ b/src/nvcv/util/stubs/.gitattributes @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/stubs/libdl-2.17_stub.so b/src/nvcv/util/stubs/libdl-2.17_stub.so similarity index 100% rename from src/util/stubs/libdl-2.17_stub.so rename to src/nvcv/util/stubs/libdl-2.17_stub.so diff --git a/src/util/stubs/libpthread-2.17_stub.so b/src/nvcv/util/stubs/libpthread-2.17_stub.so similarity index 100% rename from src/util/stubs/libpthread-2.17_stub.so rename to src/nvcv/util/stubs/libpthread-2.17_stub.so diff --git a/src/util/stubs/librt-2.17_stub.so b/src/nvcv/util/stubs/librt-2.17_stub.so similarity index 100% rename from src/util/stubs/librt-2.17_stub.so rename to src/nvcv/util/stubs/librt-2.17_stub.so diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 2fb4c84d7..e6d06d960 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -54,7 +54,13 @@ if(UNIX) add_test(NAME "${TESTNAME}" COMMAND "${TESTCMD}") + # If an external target is being added to the tests, it should not be considered as a regular target, + # avoiding linking it against libraries and using install targets with it if(TARGET ${TESTNAME}) + get_target_property(EXTERNAL_TARGET ${TESTNAME} IMPORTED) + endif() + + if(TARGET ${TESTNAME} AND NOT EXTERNAL_TARGET) target_link_libraries(${TESTNAME} PRIVATE nvcv_test_main) endif() @@ -66,7 +72,7 @@ if(UNIX) file(APPEND "${TESTS_DRIVER}" "run ${TESTNAME} ${TESTGROUP}\n") - if(TARGET ${TESTNAME}) + if(TARGET ${TESTNAME} AND NOT EXTERNAL_TARGET) install(TARGETS ${TESTNAME} DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT tests) diff --git a/tests/common/CMakeLists.txt b/tests/common/CMakeLists.txt index 0913bb788..d7912c3de 100644 --- a/tests/common/CMakeLists.txt +++ b/tests/common/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,6 +28,7 @@ find_package(ZLIB REQUIRED) add_library(nvcv_test_common STATIC Printers.cpp HashMD5.cpp + TensorDataUtils.cpp ) target_include_directories(nvcv_test_common @@ -47,9 +48,10 @@ target_link_libraries(nvcv_test_common nvcv_types_headers nvcv_util nvcv_util_compat + cvcuda_headers PRIVATE OpenSSL::Crypto - ZLIB::ZLIB + ZLIB::ZLIB ) # nvcv_test_common_system ============================== diff --git a/tests/common/CheckStatus.hpp b/tests/common/CheckStatus.hpp index 2374e3593..902da811f 100644 --- a/tests/common/CheckStatus.hpp +++ b/tests/common/CheckStatus.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #include #if NVCV_EXPORTING -# include +# include #else # include #endif diff --git a/tests/common/HashMD5.cpp b/tests/common/HashMD5.cpp index 0f6e3a88f..1d2a9dff8 100644 --- a/tests/common/HashMD5.cpp +++ b/tests/common/HashMD5.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,8 @@ #include "HashMD5.hpp" +#include #include -#include #include diff --git a/tests/common/HashMD5.hpp b/tests/common/HashMD5.hpp index 6bab7cd8d..3e6796e16 100644 --- a/tests/common/HashMD5.hpp +++ b/tests/common/HashMD5.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ #ifndef NVCV_TEST_COMMON_HASHMD5_HPP #define NVCV_TEST_COMMON_HASHMD5_HPP -#include +#include #include #include diff --git a/tests/common/InterpUtils.hpp b/tests/common/InterpUtils.hpp index 956956e13..746ea447e 100644 --- a/tests/common/InterpUtils.hpp +++ b/tests/common/InterpUtils.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,11 +18,11 @@ #ifndef NVCV_TESTS_COMMON_INTERPUTILS_HPP #define NVCV_TESTS_COMMON_INTERPUTILS_HPP -#include // for test::IsInside, etc. -#include // for NVCVInterpolationType, etc. -#include // for operator +, etc. -#include // for cuda::round, etc. -#include // for cuda::SaturateCast, etc. +#include // for test::IsInside, etc. +#include // for NVCVInterpolationType, etc. +#include // for operator +, etc. +#include // for cuda::round, etc. +#include // for cuda::SaturateCast, etc. #include diff --git a/tests/common/Printers.cpp b/tests/common/Printers.cpp index c3476fd98..0c5fdef12 100644 --- a/tests/common/Printers.cpp +++ b/tests/common/Printers.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ #include "Printers.hpp" -#include +#include #include diff --git a/tests/common/Printers.hpp b/tests/common/Printers.hpp index f686f0caa..fb4583d61 100644 --- a/tests/common/Printers.hpp +++ b/tests/common/Printers.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,9 +23,9 @@ #include #if NVCV_EXPORTING -# include -# include -# include +# include +# include +# include #else # include # include diff --git a/src/util/TensorDataUtils.cpp b/tests/common/TensorDataUtils.cpp similarity index 99% rename from src/util/TensorDataUtils.cpp rename to tests/common/TensorDataUtils.cpp index 862f62aa0..a5486a138 100644 --- a/src/util/TensorDataUtils.cpp +++ b/tests/common/TensorDataUtils.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/util/TensorDataUtils.hpp b/tests/common/TensorDataUtils.hpp similarity index 99% rename from src/util/TensorDataUtils.hpp rename to tests/common/TensorDataUtils.hpp index fb641a076..ac468a4ac 100644 --- a/src/util/TensorDataUtils.hpp +++ b/tests/common/TensorDataUtils.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,11 +19,11 @@ #define NVCV_TEST_COMMON_TENSOR_DATA_UTILS_HPP #include +#include +#include +#include #include #include -#include -#include -#include #include #include diff --git a/tests/common/TypeList.hpp b/tests/common/TypeList.hpp index ae89f2dfa..0b0729ca9 100644 --- a/tests/common/TypeList.hpp +++ b/tests/common/TypeList.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #include #include -#include +#include // Utilities for creating parameters for typed tests on GoogleTest // We support both typed and (constexpr) value parameters. In order to work them diff --git a/tests/cvcuda/python/cvcuda_test_python.in b/tests/cvcuda/python/cvcuda_test_python.in index 8b3e1bcd9..30592ca13 100755 --- a/tests/cvcuda/python/cvcuda_test_python.in +++ b/tests/cvcuda/python/cvcuda_test_python.in @@ -1,6 +1,6 @@ #!/bin/bash -e # -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/cvcuda/python/cvcuda_util.py b/tests/cvcuda/python/cvcuda_util.py index dcdf55f3d..b3ad73587 100644 --- a/tests/cvcuda/python/cvcuda_util.py +++ b/tests/cvcuda/python/cvcuda_util.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -197,7 +197,7 @@ def create_tensor(shape, dtype, layout, max_random=None, rng=None, transform_dis layout (string): Tensor layout (e.g. NC, HWC, NHWC) max_random (number or tuple or list): Maximum random value rng (numpy random Generator): To fill tensor with random values - transform_dist (function): To transform random values (e.g. MAKE_ODD) + transform_dist (function): To transform random values (e.g. dist_odd) Returns: nvcv.Tensor: The created tensor diff --git a/tests/cvcuda/python/test_cache.py b/tests/cvcuda/python/test_cache.py new file mode 100644 index 000000000..aabd5bcba --- /dev/null +++ b/tests/cvcuda/python/test_cache.py @@ -0,0 +1,95 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import cvcuda +import nvcv +import gc +import numpy as np +import torch +import sys + +# TODO: These tests technically belong to nvcv, but since it doesn't expose any +# operator (or anything that we could submit to a stream), we need to add this +# to cvcuda instead. Ideally nvcv should allows us to write proper stream tests +# using only the facilities it provides. Maybe a "noop" operator, or some mocks, +# maybe container copy, etc. + + +def test_clear_cache_inside_op(): + tensor = nvcv.Tensor((100, 1500, 1500, 3), nvcv.Type.U8, nvcv.TensorLayout.NHWC) + map = nvcv.Tensor((100, 1500, 1500, 2), nvcv.Type.F32, nvcv.TensorLayout.NHWC) + with cvcuda.Stream(): + out = cvcuda.remap(tensor, map) + nvcv.clear_cache() + del tensor + del map + del out + gc.collect() + + +def test_gcbag_is_being_emptied(): + # Make sure there's no work scheduled on the stream, it's all ours. + workstream = nvcv.cuda.Stream() + + # In order to test if the GCBag was really emptied, + + # we create a torch tensor, + ttensor = torch.as_tensor(np.ndarray([100, 1500, 1500, 3], np.uint8), device="cuda") + # keep track of its initial refcount. + orig_ttensor_refcount = sys.getrefcount(ttensor) + # and wrap it in a nvcv tensor 'cvwrapper' + cvwrapper = nvcv.as_tensor(ttensor, nvcv.TensorLayout.NHWC) + + # We can then indirectly tell if 'cvwrapper' was destroyed by + # monitoring 'ttensor's refcount. + # This works because we know 'cvwrapper' holds a reference to + # 'ttensor', as proved by the following assert: + wrapped_ttensor_refcount = sys.getrefcount(ttensor) + assert wrapped_ttensor_refcount > orig_ttensor_refcount + + # We need now to make sure cvwrapper is in the GCBag. + # For that, we need to use it in operator + with workstream: + cvcuda.median_blur(cvwrapper, [3, 3], stream=workstream) + # And make sure it finishes. + workstream.sync() + # Make sure the auxiliary stream has finished extending cvwrapper's lifetime + nvcv.cuda.internal.syncAuxStream() + + # cvwrapper being referenced by others shouldn't change ttensor's refcount. 
+ assert sys.getrefcount(ttensor) == wrapped_ttensor_refcount + + # Now remove cvwrapper from the cache by clearing it. + nvcv.clear_cache() + + # We can now release it from python side. We can't track its lifetime + # directly anymore. + del cvwrapper + + # But we know indirectly that it is still alive + assert sys.getrefcount(ttensor) == wrapped_ttensor_refcount + + # To finally destroy cvwrapper, we empty the GCBag by executing a + # cvcuda operator, any would do. + with workstream: + cvcuda.median_blur( + nvcv.Tensor((3, 64, 32, 3), nvcv.Type.U8, nvcv.TensorLayout.NHWC), [3, 3] + ) + workstream.sync() + nvcv.cuda.internal.syncAuxStream() + + # Lo and behold, cvwrapper is no more. + # The wrapped tensor torch has the same refcount it had when we've created it. + assert sys.getrefcount(ttensor) == orig_ttensor_refcount diff --git a/tests/cvcuda/python/test_multi_stream.py b/tests/cvcuda/python/test_multi_stream.py index 24a2bf833..ab425ea99 100644 --- a/tests/cvcuda/python/test_multi_stream.py +++ b/tests/cvcuda/python/test_multi_stream.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/cvcuda/python/test_opcvtcolor.py b/tests/cvcuda/python/test_opcvtcolor.py index 9c0c04131..482ad0928 100644 --- a/tests/cvcuda/python/test_opcvtcolor.py +++ b/tests/cvcuda/python/test_opcvtcolor.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -50,6 +50,16 @@ cvcuda.ColorConversion.YUV2RGB, ((1, 61, 62, 3), np.uint8, "NHWC"), ), + ( + ((60, 62, 1), np.uint8, "HWC"), + cvcuda.ColorConversion.YUV2RGB_NV12, + ((40, 62, 3), np.uint8, "HWC"), + ), + ( + ((2, 40, 62, 3), np.uint8, "NHWC"), + cvcuda.ColorConversion.BGR2YUV_NV21, + ((2, 60, 62, 1), np.uint8, "NHWC"), + ), ], ) def test_op_cvtcolor(input_args, code, output_args): @@ -68,7 +78,6 @@ def test_op_cvtcolor(input_args, code, output_args): stream=stream, ) assert tmp is output - assert output.shape[:-1] == input.shape[:-1] @t.mark.parametrize( diff --git a/tests/cvcuda/python/test_opfindhomography.py b/tests/cvcuda/python/test_opfindhomography.py index bbc57e9aa..059be9e7f 100644 --- a/tests/cvcuda/python/test_opfindhomography.py +++ b/tests/cvcuda/python/test_opfindhomography.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/cvcuda/python/test_oplabel.py b/tests/cvcuda/python/test_oplabel.py index ec61fe236..19e04dd27 100644 --- a/tests/cvcuda/python/test_oplabel.py +++ b/tests/cvcuda/python/test_oplabel.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
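
The NV12/NV21 cases added to test_opcvtcolor.py above are also why the blanket shape assert (output.shape[:-1] == input.shape[:-1]) was dropped: semi-planar 4:2:0 formats pack a full-height Y plane plus a half-height interleaved UV plane into one single-channel tensor, so the packed YUV tensor and the RGB tensor deliberately have different heights. A minimal sketch of that shape relation, assuming standard 2x2 chroma subsampling (illustrative only):

def nv12_shape_for_rgb(rgb_h, rgb_w):
    # NV12/NV21 store rgb_h rows of Y followed by rgb_h // 2 rows of interleaved U/V,
    # so the packed single-channel tensor is 1.5x taller than the RGB image.
    assert rgb_h % 2 == 0 and rgb_w % 2 == 0  # 4:2:0 needs even dimensions
    return (rgb_h * 3 // 2, rgb_w, 1)

assert nv12_shape_for_rgb(40, 62) == (60, 62, 1)  # matches the YUV2RGB_NV12 case above
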
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/cvcuda/python/test_opresize.py b/tests/cvcuda/python/test_opresize.py index 8e3135277..ab269e1ab 100644 --- a/tests/cvcuda/python/test_opresize.py +++ b/tests/cvcuda/python/test_opresize.py @@ -14,6 +14,7 @@ # limitations under the License. import cvcuda +import torch import pytest as t import numpy as np import cvcuda_util as util @@ -37,7 +38,10 @@ (132, 15, 4), cvcuda.Interp.CUBIC, ), - (((16, 23, 1), np.uint8, "HWC"), (132, 15, 1), None), + (((37, 19, 1), np.uint8 , "HWC"), (113, 47, 1), None), + (((37, 19, 3), np.uint8 , "HWC"), (113, 47, 3), cvcuda.Interp.NEAREST), + (((37, 19, 1), np.single, "HWC"), (113, 47, 1), None), + (((37, 19, 3), np.single, "HWC"), (113, 47, 3), None), ], ) def test_op_resize(input_args, out_shape, interp): @@ -66,6 +70,7 @@ def test_op_resize(input_args, out_shape, interp): out = cvcuda.resize(src=input, shape=out_shape, stream=stream) else: out = cvcuda.resize(src=input, shape=out_shape, interp=interp, stream=stream) + stream.sync() assert out.layout == input.layout assert out.shape == out_shape assert out.dtype == input.dtype @@ -80,6 +85,38 @@ def test_op_resize(input_args, out_shape, interp): assert out.dtype == input.dtype + +@t.mark.parametrize( + "in_shape,out_shape,data_type,interp", + [ + ((5, 720, 406, 3), (5, 360, 203, 3), torch.uint8, cvcuda.Interp.NEAREST), # noqa + ((5, 720, 406, 3), (5, 360, 203, 3), torch.uint8, cvcuda.Interp.LINEAR), # noqa + ((5, 720, 406, 3), (5, 360, 203, 3), torch.uint8, cvcuda.Interp.CUBIC), # noqa + ((3, 23, 23, 4), (3, 42, 42, 4), torch.uint8, cvcuda.Interp.LINEAR), # noqa + ((3, 23, 53, 1), (3, 132, 23, 1), torch.uint8, cvcuda.Interp.LINEAR), # noqa + ((3, 23, 53, 1), (3, 132, 23, 1), torch.float, cvcuda.Interp.LINEAR), # noqa + ((1, 37, 19, 1), (1, 113, 47, 1), torch.uint8, cvcuda.Interp.LINEAR), # noqa + ((1, 37, 19, 3), (1, 113, 47, 3), torch.uint8, cvcuda.Interp.NEAREST), # noqa + ((1, 37, 19, 1), (1, 113, 47, 1), torch.float, cvcuda.Interp.LINEAR), # noqa + ((1, 37, 19, 3), (1, 113, 47, 3), torch.float, cvcuda.Interp.LINEAR), # noqa + ], +) +def test_op_resize_packed_torch_tensor(in_shape, out_shape, data_type, interp): + stream = cvcuda.Stream() + + input = torch.empty( in_shape, dtype=data_type, device=f"cuda:0") + output = torch.empty(out_shape, dtype=data_type, device=f"cuda:0") + + src = cvcuda.as_tensor( input.cuda(0), "NHWC") + dst = cvcuda.as_tensor(output.cuda(0), "NHWC") + tmp = cvcuda.resize_into(dst, src, interp, stream=stream) + stream.sync() + + assert tmp is dst + assert dst.layout == src.layout + assert dst.shape == out_shape + assert dst.dtype == src.dtype + @t.mark.parametrize( "inSize, outSize, interp", [((123, 321), (321, 123), cvcuda.Interp.LINEAR), ((123, 321), (321, 123), None)], diff --git a/tests/cvcuda/python/test_stream.py b/tests/cvcuda/python/test_stream.py new file mode 100644 index 000000000..2e283508e --- /dev/null +++ b/tests/cvcuda/python/test_stream.py @@ -0,0 +1,36 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
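
The new test_op_resize_packed_torch_tensor above contrasts the two calling conventions of the resize operator: cvcuda.resize allocates the output tensor internally, while cvcuda.resize_into writes into a tensor the caller already owns, such as torch memory wrapped with cvcuda.as_tensor; in both cases the stream is synchronized before results are inspected on the host. A condensed sketch of the pattern, with illustrative shapes:

import cvcuda
import torch

stream = cvcuda.Stream()
src = cvcuda.as_tensor(torch.zeros((1, 64, 64, 3), dtype=torch.uint8, device="cuda"), "NHWC")

# resize allocates and returns a new output tensor
out = cvcuda.resize(src=src, shape=(1, 32, 32, 3), interp=cvcuda.Interp.LINEAR, stream=stream)

# resize_into reuses a caller-provided tensor (here, wrapped torch memory)
dst = cvcuda.as_tensor(torch.empty((1, 32, 32, 3), dtype=torch.uint8, device="cuda"), "NHWC")
cvcuda.resize_into(dst, src, cvcuda.Interp.LINEAR, stream=stream)
stream.sync()
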
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import cvcuda +import nvcv +import torch + +# TODO: These tests technically belong to nvcv, but since it doesn't expose any +# operator (or anything that we could submit to a stream), we need to add this +# to cvcuda instead. Ideally nvcv should allows us to write proper stream tests +# using only the facilities it provides. Maybe a "noop" operator, or some mocks, +# maybe container copy, etc. + + +def test_stream_gcbag_vs_streamsync_race_condition(): + inputImage = torch.randint(0, 256, (100, 1500, 1500, 3), dtype=torch.uint8).cuda() + nvcvInputTensor = nvcv.as_tensor(inputImage, "NHWC") + inputmap = torch.randint(0, 256, (100, 1500, 1500, 2), dtype=torch.float).cuda() + nvcvInputMap = nvcv.as_tensor(inputmap, "NHWC") + + cvcuda_stream = cvcuda.Stream() + with cvcuda_stream: + nvcvResizeTensor = cvcuda.remap(nvcvInputTensor, nvcvInputMap) + del nvcvResizeTensor diff --git a/tests/cvcuda/system/CMakeLists.txt b/tests/cvcuda/system/CMakeLists.txt index 0b3375c3d..fcf9a455c 100644 --- a/tests/cvcuda/system/CMakeLists.txt +++ b/tests/cvcuda/system/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -114,6 +114,8 @@ add_header_compat_test(TARGET cvcuda_test_capi_header_compat # Gather C++ headers file(GLOB_RECURSE CXXAPI_HEADERS RELATIVE "${CVCUDA_SOURCE_DIR}/include" CONFIGURE_DEPENDS "${CVCUDA_SOURCE_DIR}/include/*.hpp") +# remove optools files, they are c++17 +list(FILTER CXXAPI_HEADERS EXCLUDE REGEX "cuda_tools/") add_header_compat_test(TARGET cvcuda_test_cxxapi_header_compat SOURCE TestAPI.cpp diff --git a/tests/cvcuda/system/ConvUtils.cpp b/tests/cvcuda/system/ConvUtils.cpp index 5cc7e4add..aeb9796b0 100644 --- a/tests/cvcuda/system/ConvUtils.cpp +++ b/tests/cvcuda/system/ConvUtils.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,12 +17,12 @@ #include "ConvUtils.hpp" -#include // for SaturateCast, etc. -#include // for operator *, etc. -#include // for min/max -#include // for SaturateCast, etc. -#include // for BaseType, etc. -#include // for NVCV_ASSERT, etc. +#include // for SaturateCast, etc. +#include // for operator *, etc. +#include // for min/max +#include // for SaturateCast, etc. +#include // for BaseType, etc. +#include // for NVCV_ASSERT, etc. namespace nvcv::test { diff --git a/tests/cvcuda/system/FlipUtils.cpp b/tests/cvcuda/system/FlipUtils.cpp index 30834090b..863338656 100644 --- a/tests/cvcuda/system/FlipUtils.cpp +++ b/tests/cvcuda/system/FlipUtils.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,11 +17,11 @@ #include "FlipUtils.hpp" -#include // for SaturateCast, etc. -#include // for operator *, etc. -#include // for SaturateCast, etc. -#include // for BaseType, etc. -#include // for NVCV_ASSERT, etc. +#include // for SaturateCast, etc. +#include // for operator *, etc. +#include // for SaturateCast, etc. +#include // for BaseType, etc. +#include // for NVCV_ASSERT, etc. namespace nvcv::test { diff --git a/tests/cvcuda/system/ResizeUtils.cpp b/tests/cvcuda/system/ResizeUtils.cpp index 45c04ece5..0685443c8 100644 --- a/tests/cvcuda/system/ResizeUtils.cpp +++ b/tests/cvcuda/system/ResizeUtils.cpp @@ -17,12 +17,12 @@ #include "ResizeUtils.hpp" -#include // for SaturateCast, etc. -#include // for operator *, etc. -#include // for ROUND, etc -#include // for SaturateCast, etc. -#include // for BaseType, etc. -#include // for NVCV_ASSERT, etc. +#include // for SaturateCast, etc. +#include // for operator *, etc. +#include // for ROUND, etc +#include // for SaturateCast, etc. +#include // for BaseType, etc. +#include // for NVCV_ASSERT, etc. #include @@ -233,10 +233,8 @@ void resizedCrop(std::vector &dst, int dstStep, nvcv::Size2D dstSize, const s int sy = std::floor(fy); int sx = std::floor(fx); - fy -= sy; - fx -= sx; - - fx = (sx < 0 || sx >= srcSize.w - 1) ? 0 : fx; + fy = ((sy < 0) ? 0 : ((sy > srcSize.h - 2) ? 1 : fy - sy)); + fx = ((sx < 0) ? 0 : ((sx > srcSize.w - 2) ? 1 : fx - sx)); sy = std::max(0, std::min(sy, srcSize.h - 2)); sx = std::max(0, std::min(sx, srcSize.w - 2)); diff --git a/tests/cvcuda/system/ResizeUtils.hpp b/tests/cvcuda/system/ResizeUtils.hpp index 960830481..a0cc47801 100644 --- a/tests/cvcuda/system/ResizeUtils.hpp +++ b/tests/cvcuda/system/ResizeUtils.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/cvcuda/system/TestOpAdaptiveThreshold.cpp b/tests/cvcuda/system/TestOpAdaptiveThreshold.cpp index a54d341d2..d363aeed9 100644 --- a/tests/cvcuda/system/TestOpAdaptiveThreshold.cpp +++ b/tests/cvcuda/system/TestOpAdaptiveThreshold.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,16 +19,16 @@ #include "Definitions.hpp" #include +#include #include #include +#include +#include +#include #include #include #include #include -#include -#include -#include -#include #include diff --git a/tests/cvcuda/system/TestOpAdvCvtColor.cpp b/tests/cvcuda/system/TestOpAdvCvtColor.cpp index c046df782..66e7bff76 100644 --- a/tests/cvcuda/system/TestOpAdvCvtColor.cpp +++ b/tests/cvcuda/system/TestOpAdvCvtColor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,15 +17,15 @@ #include "Definitions.hpp" +#include #include #include +#include +#include #include #include #include #include -#include -#include -#include #include #include diff --git a/tests/cvcuda/system/TestOpAverageBlur.cpp b/tests/cvcuda/system/TestOpAverageBlur.cpp index 9358fa0f0..2ec8d1781 100644 --- a/tests/cvcuda/system/TestOpAverageBlur.cpp +++ b/tests/cvcuda/system/TestOpAverageBlur.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,14 +18,14 @@ #include "ConvUtils.hpp" #include "Definitions.hpp" +#include #include #include +#include #include #include #include #include -#include -#include #include diff --git a/tests/cvcuda/system/TestOpBilateralFilter.cpp b/tests/cvcuda/system/TestOpBilateralFilter.cpp index 10ef192b2..3063228a7 100644 --- a/tests/cvcuda/system/TestOpBilateralFilter.cpp +++ b/tests/cvcuda/system/TestOpBilateralFilter.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,13 +17,13 @@ #include "Definitions.hpp" +#include #include #include #include #include #include #include -#include #include #include @@ -96,9 +96,9 @@ static void CPUBilateralFilter(uint8_t *pIn, uint8_t *pOut, int columns, int row std::vector numerators(channels, 0.0f); float denominator = 0.0f; std::vector centers{static_cast(pIn[j * rowStride + k * channels]), - static_cast(pIn[j * rowStride + k * channels + 1]), - static_cast(pIn[j * rowStride + k * channels + 2]), - static_cast(pIn[j * rowStride + k * channels + 3])}; + channels > 1 ? static_cast(pIn[j * rowStride + k * channels + 1]) : 0, + channels > 2 ? static_cast(pIn[j * rowStride + k * channels + 2]) : 0, + channels > 3 ? static_cast(pIn[j * rowStride + k * channels + 3]) : 0}; for (int y = j - radius; y <= j + radius; y++) { diff --git a/tests/cvcuda/system/TestOpBndBox.cpp b/tests/cvcuda/system/TestOpBndBox.cpp index e895347bf..3236465cd 100644 --- a/tests/cvcuda/system/TestOpBndBox.cpp +++ b/tests/cvcuda/system/TestOpBndBox.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,13 +19,13 @@ #include "OsdUtils.cuh" +#include #include #include #include #include #include #include -#include #include #include diff --git a/tests/cvcuda/system/TestOpBoxBlur.cpp b/tests/cvcuda/system/TestOpBoxBlur.cpp index e3efd566d..814140d2f 100644 --- a/tests/cvcuda/system/TestOpBoxBlur.cpp +++ b/tests/cvcuda/system/TestOpBoxBlur.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,13 +19,13 @@ #include "OsdUtils.cuh" +#include #include #include #include #include #include #include -#include #include #include diff --git a/tests/cvcuda/system/TestOpBrightnessContrast.cpp b/tests/cvcuda/system/TestOpBrightnessContrast.cpp index 2b5bb9797..00c5b1b8e 100644 --- a/tests/cvcuda/system/TestOpBrightnessContrast.cpp +++ b/tests/cvcuda/system/TestOpBrightnessContrast.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,16 +16,16 @@ */ #include +#include #include #include +#include +#include +#include #include #include #include #include -#include -#include -#include -#include #include #include diff --git a/tests/cvcuda/system/TestOpCenterCrop.cpp b/tests/cvcuda/system/TestOpCenterCrop.cpp index b83f66c65..c59ec192b 100644 --- a/tests/cvcuda/system/TestOpCenterCrop.cpp +++ b/tests/cvcuda/system/TestOpCenterCrop.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,12 +17,12 @@ #include "Definitions.hpp" +#include #include #include #include #include #include -#include #include #include diff --git a/tests/cvcuda/system/TestOpChannelReorder.cpp b/tests/cvcuda/system/TestOpChannelReorder.cpp index 011491bb4..37caacc0e 100644 --- a/tests/cvcuda/system/TestOpChannelReorder.cpp +++ b/tests/cvcuda/system/TestOpChannelReorder.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,13 +17,13 @@ #include "Definitions.hpp" +#include #include #include +#include #include #include #include -#include -#include namespace test = nvcv::test; diff --git a/tests/cvcuda/system/TestOpColorTwist.cpp b/tests/cvcuda/system/TestOpColorTwist.cpp index ac0c1cd76..a4eae8fb3 100644 --- a/tests/cvcuda/system/TestOpColorTwist.cpp +++ b/tests/cvcuda/system/TestOpColorTwist.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,17 +16,17 @@ */ #include +#include #include #include +#include +#include +#include +#include #include #include #include #include -#include -#include -#include -#include -#include #include #include diff --git a/tests/cvcuda/system/TestOpConv2D.cpp b/tests/cvcuda/system/TestOpConv2D.cpp index 7efea05f9..ff3a5c7cf 100644 --- a/tests/cvcuda/system/TestOpConv2D.cpp +++ b/tests/cvcuda/system/TestOpConv2D.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,11 +20,11 @@ #include #include +#include #include #include #include #include -#include #include diff --git a/tests/cvcuda/system/TestOpConvertTo.cpp b/tests/cvcuda/system/TestOpConvertTo.cpp index 4913c05da..9228ca61d 100644 --- a/tests/cvcuda/system/TestOpConvertTo.cpp +++ b/tests/cvcuda/system/TestOpConvertTo.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,13 +17,13 @@ #include "Definitions.hpp" +#include #include #include +#include #include #include #include -#include -#include #include #include diff --git a/tests/cvcuda/system/TestOpCropFlipNormalizeReformat.cpp b/tests/cvcuda/system/TestOpCropFlipNormalizeReformat.cpp index 01dbc44ee..cf9bc44c8 100644 --- a/tests/cvcuda/system/TestOpCropFlipNormalizeReformat.cpp +++ b/tests/cvcuda/system/TestOpCropFlipNormalizeReformat.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,17 +19,17 @@ #include #include +#include #include #include #include #include +#include #include #include #include #include #include -#include -#include #include #include diff --git a/tests/cvcuda/system/TestOpCustomCrop.cpp b/tests/cvcuda/system/TestOpCustomCrop.cpp index fe51cd543..dd7516c1e 100644 --- a/tests/cvcuda/system/TestOpCustomCrop.cpp +++ b/tests/cvcuda/system/TestOpCustomCrop.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,12 +17,12 @@ #include "Definitions.hpp" +#include #include #include #include #include #include -#include #include #include diff --git a/tests/cvcuda/system/TestOpCvtColor.cpp b/tests/cvcuda/system/TestOpCvtColor.cpp index 833cfa92a..31e5c682b 100644 --- a/tests/cvcuda/system/TestOpCvtColor.cpp +++ b/tests/cvcuda/system/TestOpCvtColor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,14 +18,14 @@ #include "ConvUtils.hpp" #include "Definitions.hpp" +#include #include #include +#include #include #include #include #include -#include -#include #include diff --git a/tests/cvcuda/system/TestOpErase.cpp b/tests/cvcuda/system/TestOpErase.cpp index ca249ea0d..ea4372cf5 100644 --- a/tests/cvcuda/system/TestOpErase.cpp +++ b/tests/cvcuda/system/TestOpErase.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,13 +17,13 @@ #include "Definitions.hpp" +#include #include #include #include #include #include #include -#include #include #include diff --git a/tests/cvcuda/system/TestOpFindHomography.cpp b/tests/cvcuda/system/TestOpFindHomography.cpp index a0ef4fb8e..186e6e1d7 100644 --- a/tests/cvcuda/system/TestOpFindHomography.cpp +++ b/tests/cvcuda/system/TestOpFindHomography.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,19 +15,19 @@ * limitations under the License. */ +#include #include #include +#include +#include +#include +#include +#include #include #include #include #include -#include -#include -#include -#include -#include -#include -#include +#include #include #include diff --git a/tests/cvcuda/system/TestOpFlip.cpp b/tests/cvcuda/system/TestOpFlip.cpp index d51b2b922..9e0524da9 100644 --- a/tests/cvcuda/system/TestOpFlip.cpp +++ b/tests/cvcuda/system/TestOpFlip.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,14 +18,14 @@ #include "Definitions.hpp" #include "FlipUtils.hpp" +#include #include #include +#include #include #include #include #include -#include -#include #include diff --git a/tests/cvcuda/system/TestOpGammaContrast.cpp b/tests/cvcuda/system/TestOpGammaContrast.cpp index 155291a54..004a212c9 100644 --- a/tests/cvcuda/system/TestOpGammaContrast.cpp +++ b/tests/cvcuda/system/TestOpGammaContrast.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,12 +20,12 @@ #include #include +#include +#include #include #include #include #include -#include -#include #include diff --git a/tests/cvcuda/system/TestOpGaussian.cpp b/tests/cvcuda/system/TestOpGaussian.cpp index 8486752fd..876377797 100644 --- a/tests/cvcuda/system/TestOpGaussian.cpp +++ b/tests/cvcuda/system/TestOpGaussian.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,14 +18,14 @@ #include "ConvUtils.hpp" #include "Definitions.hpp" +#include #include #include +#include #include #include #include #include -#include -#include #include diff --git a/tests/cvcuda/system/TestOpGaussianNoise.cpp b/tests/cvcuda/system/TestOpGaussianNoise.cpp index a8fe5419b..9a10a770c 100644 --- a/tests/cvcuda/system/TestOpGaussianNoise.cpp +++ b/tests/cvcuda/system/TestOpGaussianNoise.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,15 +20,15 @@ #include "GaussianNoiseUtils.cuh" #include +#include #include #include +#include +#include #include #include #include #include -#include -#include -#include #include #include diff --git a/tests/cvcuda/system/TestOpHQResize.cpp b/tests/cvcuda/system/TestOpHQResize.cpp index b78d4ab22..c5953b528 100644 --- a/tests/cvcuda/system/TestOpHQResize.cpp +++ b/tests/cvcuda/system/TestOpHQResize.cpp @@ -16,18 +16,18 @@ */ #include +#include #include #include +#include +#include +#include #include #include #include #include #include -#include -#include -#include -#include -#include +#include #include #include diff --git a/tests/cvcuda/system/TestOpHistogram.cpp b/tests/cvcuda/system/TestOpHistogram.cpp index 533134d1a..f0c22b3e7 100644 --- a/tests/cvcuda/system/TestOpHistogram.cpp +++ b/tests/cvcuda/system/TestOpHistogram.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,12 +17,12 @@ #include "Definitions.hpp" +#include #include #include #include #include #include -#include #include #include diff --git a/tests/cvcuda/system/TestOpHistogramEq.cpp b/tests/cvcuda/system/TestOpHistogramEq.cpp index f917534d7..a57f68199 100644 --- a/tests/cvcuda/system/TestOpHistogramEq.cpp +++ b/tests/cvcuda/system/TestOpHistogramEq.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,13 +17,13 @@ #include "Definitions.hpp" +#include #include #include +#include #include #include #include -#include -#include #include #include diff --git a/tests/cvcuda/system/TestOpInpaint.cpp b/tests/cvcuda/system/TestOpInpaint.cpp index 2400df8a0..a3d10bdc3 100644 --- a/tests/cvcuda/system/TestOpInpaint.cpp +++ b/tests/cvcuda/system/TestOpInpaint.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,13 +17,13 @@ #include "Definitions.hpp" +#include #include #include #include #include #include #include -#include #include #include diff --git a/tests/cvcuda/system/TestOpJointBilateralFilter.cpp b/tests/cvcuda/system/TestOpJointBilateralFilter.cpp index 2be0529bf..53967bd62 100644 --- a/tests/cvcuda/system/TestOpJointBilateralFilter.cpp +++ b/tests/cvcuda/system/TestOpJointBilateralFilter.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,13 +17,13 @@ #include "Definitions.hpp" +#include #include #include #include #include #include #include -#include #include #include @@ -103,10 +103,11 @@ static void CPUJointBilateralFilter(uint8_t *pIn, uint8_t *pInColor, uint8_t *pO { std::vector numerators(channels, 0.0f); float denominator = 0; - std::vector centerColors{static_cast(pInColor[j * rowStride + k * channels]), - static_cast(pInColor[j * rowStride + k * channels + 1]), - static_cast(pInColor[j * rowStride + k * channels + 2]), - static_cast(pInColor[j * rowStride + k * channels + 3])}; + std::vector centerColors{ + static_cast(pInColor[j * rowStride + k * channels]), + channels > 1 ? static_cast(pInColor[j * rowStride + k * channels + 1]) : 0, + channels > 2 ? static_cast(pInColor[j * rowStride + k * channels + 2]) : 0, + channels > 3 ? static_cast(pInColor[j * rowStride + k * channels + 3]) : 0}; for (int y = j - radius; y <= j + radius; y++) { diff --git a/tests/cvcuda/system/TestOpLabel.cpp b/tests/cvcuda/system/TestOpLabel.cpp index 3439a5905..f6904cb42 100644 --- a/tests/cvcuda/system/TestOpLabel.cpp +++ b/tests/cvcuda/system/TestOpLabel.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,14 +17,14 @@ #include "Definitions.hpp" +#include #include #include +#include +#include +#include #include #include -#include -#include -#include -#include #include #include diff --git a/tests/cvcuda/system/TestOpLaplacian.cpp b/tests/cvcuda/system/TestOpLaplacian.cpp index f6cf0b67e..b0d3c6a72 100644 --- a/tests/cvcuda/system/TestOpLaplacian.cpp +++ b/tests/cvcuda/system/TestOpLaplacian.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,14 +18,14 @@ #include "ConvUtils.hpp" #include "Definitions.hpp" +#include #include #include +#include #include #include #include #include -#include -#include #include diff --git a/tests/cvcuda/system/TestOpMedianBlur.cpp b/tests/cvcuda/system/TestOpMedianBlur.cpp index aaaa331db..ada597e06 100644 --- a/tests/cvcuda/system/TestOpMedianBlur.cpp +++ b/tests/cvcuda/system/TestOpMedianBlur.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/cvcuda/system/TestOpMinAreaRect.cpp b/tests/cvcuda/system/TestOpMinAreaRect.cpp index 1fa0c2e2e..65d3a2740 100644 --- a/tests/cvcuda/system/TestOpMinAreaRect.cpp +++ b/tests/cvcuda/system/TestOpMinAreaRect.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,12 +17,12 @@ #include "Definitions.hpp" +#include #include #include #include #include #include -#include #include #include diff --git a/tests/cvcuda/system/TestOpMinMaxLoc.cpp b/tests/cvcuda/system/TestOpMinMaxLoc.cpp index 5ef26c342..8cf101957 100644 --- a/tests/cvcuda/system/TestOpMinMaxLoc.cpp +++ b/tests/cvcuda/system/TestOpMinMaxLoc.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,16 +18,16 @@ #include "Definitions.hpp" #include +#include #include #include +#include +#include #include #include #include #include #include -#include -#include -#include #include #include diff --git a/tests/cvcuda/system/TestOpMorphology.cpp b/tests/cvcuda/system/TestOpMorphology.cpp index ce4c45460..8219b87b9 100644 --- a/tests/cvcuda/system/TestOpMorphology.cpp +++ b/tests/cvcuda/system/TestOpMorphology.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,14 +18,14 @@ #include "ConvUtils.hpp" #include "Definitions.hpp" +#include #include #include +#include #include #include #include #include -#include -#include #include diff --git a/tests/cvcuda/system/TestOpNonMaximumSuppression.cpp b/tests/cvcuda/system/TestOpNonMaximumSuppression.cpp index 9a93afbd2..849f01ad3 100644 --- a/tests/cvcuda/system/TestOpNonMaximumSuppression.cpp +++ b/tests/cvcuda/system/TestOpNonMaximumSuppression.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,15 +15,15 @@ * limitations under the License. */ +#include #include #include +#include +#include +#include +#include #include #include -#include -#include -#include -#include -#include #include #include diff --git a/tests/cvcuda/system/TestOpNormalize.cpp b/tests/cvcuda/system/TestOpNormalize.cpp index e381dc401..3275cd097 100644 --- a/tests/cvcuda/system/TestOpNormalize.cpp +++ b/tests/cvcuda/system/TestOpNormalize.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,13 +17,13 @@ #include "Definitions.hpp" +#include #include #include #include #include #include #include -#include #include #include diff --git a/tests/cvcuda/system/TestOpOSD.cpp b/tests/cvcuda/system/TestOpOSD.cpp index d401408f8..620a8448b 100644 --- a/tests/cvcuda/system/TestOpOSD.cpp +++ b/tests/cvcuda/system/TestOpOSD.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,13 +19,13 @@ #include "OsdUtils.cuh" +#include #include #include #include #include #include #include -#include #include #include diff --git a/tests/cvcuda/system/TestOpPairwiseMatcher.cpp b/tests/cvcuda/system/TestOpPairwiseMatcher.cpp index c27424616..594981186 100644 --- a/tests/cvcuda/system/TestOpPairwiseMatcher.cpp +++ b/tests/cvcuda/system/TestOpPairwiseMatcher.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,10 +17,10 @@ #include "Definitions.hpp" +#include #include #include -#include -#include +#include #include #include diff --git a/tests/cvcuda/system/TestOpPillowResize.cpp b/tests/cvcuda/system/TestOpPillowResize.cpp index 4cc2c46f2..02592e04a 100644 --- a/tests/cvcuda/system/TestOpPillowResize.cpp +++ b/tests/cvcuda/system/TestOpPillowResize.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -1232,4 +1232,6 @@ TEST(OpPillowResize, invalidGetWorkSpace) EXPECT_EQ(NVCV_ERROR_INVALID_ARGUMENT, cvcudaPillowResizeVarShapeGetWorkspaceRequirements(pillowResizeHandle, 1, inputSizesWH, outputSizesWH, NVCV_IMAGE_FORMAT_U8, nullptr)); + + nvcvOperatorDestroy(pillowResizeHandle); } diff --git a/tests/cvcuda/system/TestOpRandomResizedCrop.cpp b/tests/cvcuda/system/TestOpRandomResizedCrop.cpp index e73223a15..83608e215 100644 --- a/tests/cvcuda/system/TestOpRandomResizedCrop.cpp +++ b/tests/cvcuda/system/TestOpRandomResizedCrop.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,15 +19,15 @@ #include "ResizeUtils.hpp" #include +#include #include #include +#include +#include #include #include #include #include -#include -#include -#include #include #include diff --git a/tests/cvcuda/system/TestOpReformat.cpp b/tests/cvcuda/system/TestOpReformat.cpp index 9821ff3b6..4ba3cff41 100644 --- a/tests/cvcuda/system/TestOpReformat.cpp +++ b/tests/cvcuda/system/TestOpReformat.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,13 +17,13 @@ #include "Definitions.hpp" +#include #include #include +#include #include #include #include -#include -#include #include #include diff --git a/tests/cvcuda/system/TestOpRemap.cpp b/tests/cvcuda/system/TestOpRemap.cpp index 7ce6f8750..6d9638c5f 100644 --- a/tests/cvcuda/system/TestOpRemap.cpp +++ b/tests/cvcuda/system/TestOpRemap.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,16 +16,16 @@ */ #include +#include #include #include +#include +#include +#include #include #include #include #include -#include -#include -#include -#include #include #include diff --git a/tests/cvcuda/system/TestOpResize.cpp b/tests/cvcuda/system/TestOpResize.cpp index eef44e285..870e39123 100644 --- a/tests/cvcuda/system/TestOpResize.cpp +++ b/tests/cvcuda/system/TestOpResize.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,15 +19,15 @@ #include "ResizeUtils.hpp" #include +#include #include #include +#include +#include #include #include #include #include -#include -#include -#include #include #include diff --git a/tests/cvcuda/system/TestOpResizeCropConvertReformat.cpp b/tests/cvcuda/system/TestOpResizeCropConvertReformat.cpp index f2fe9ccee..8bd3ae20f 100644 --- a/tests/cvcuda/system/TestOpResizeCropConvertReformat.cpp +++ b/tests/cvcuda/system/TestOpResizeCropConvertReformat.cpp @@ -18,6 +18,7 @@ #include "Definitions.hpp" #include +#include #include #include #include @@ -29,7 +30,6 @@ #include #include #include -#include #include #include diff --git a/tests/cvcuda/system/TestOpRotate.cpp b/tests/cvcuda/system/TestOpRotate.cpp index 24d52b273..898a5dc2a 100644 --- a/tests/cvcuda/system/TestOpRotate.cpp +++ b/tests/cvcuda/system/TestOpRotate.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,12 +19,12 @@ #include #include +#include +#include #include #include #include #include -#include -#include #include #include diff --git a/tests/cvcuda/system/TestOpSIFT.cpp b/tests/cvcuda/system/TestOpSIFT.cpp index 1cde253dc..74845c495 100644 --- a/tests/cvcuda/system/TestOpSIFT.cpp +++ b/tests/cvcuda/system/TestOpSIFT.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,14 +19,14 @@ #include "Definitions.hpp" #include +#include #include #include +#include +#include +#include #include #include -#include -#include -#include -#include #include #include diff --git a/tests/cvcuda/system/TestOpStack.cpp b/tests/cvcuda/system/TestOpStack.cpp index b8f2bff26..87a902369 100644 --- a/tests/cvcuda/system/TestOpStack.cpp +++ b/tests/cvcuda/system/TestOpStack.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,12 +17,12 @@ #include "Definitions.hpp" +#include #include #include #include #include #include -#include #include #include diff --git a/tests/cvcuda/system/TestOpThreshold.cpp b/tests/cvcuda/system/TestOpThreshold.cpp index a495173c6..d34e1ddd0 100644 --- a/tests/cvcuda/system/TestOpThreshold.cpp +++ b/tests/cvcuda/system/TestOpThreshold.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,13 +17,13 @@ #include "Definitions.hpp" +#include #include #include #include #include #include #include -#include #include #include diff --git a/tests/cvcuda/system/TestOpWarpAffine.cpp b/tests/cvcuda/system/TestOpWarpAffine.cpp index 367e48e93..4e4e50db6 100644 --- a/tests/cvcuda/system/TestOpWarpAffine.cpp +++ b/tests/cvcuda/system/TestOpWarpAffine.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,11 +20,11 @@ #include #include #include +#include #include #include #include #include -#include #include #include diff --git a/tests/cvcuda/system/TestOpWarpPerspective.cpp b/tests/cvcuda/system/TestOpWarpPerspective.cpp index 04c6d647e..e9765aebe 100644 --- a/tests/cvcuda/system/TestOpWarpPerspective.cpp +++ b/tests/cvcuda/system/TestOpWarpPerspective.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,12 +20,12 @@ #include #include #include +#include +#include #include #include #include #include -#include -#include #include #include diff --git a/tests/cvcuda/unit/CMakeLists.txt b/tests/cvcuda/unit/CMakeLists.txt index 53e5aba17..e46d58267 100644 --- a/tests/cvcuda/unit/CMakeLists.txt +++ b/tests/cvcuda/unit/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,6 +16,9 @@ add_executable(cvcuda_test_unit TestWorkspaceAllocator.cpp TestWorkspaceEstimator.cpp + TestStreamId.cpp + TestSimpleCache.cpp + TestPerStreamCache.cpp ) target_compile_definitions(cvcuda_test_unit @@ -29,6 +32,8 @@ target_link_libraries(cvcuda_test_unit nvcv_util nvcv_test_common cvcuda_priv + cvcuda_util + cuda ) nvcv_add_test(cvcuda_test_unit cvcuda) diff --git a/tests/nvcv_types/unit/TestPerStreamCache.cpp b/tests/cvcuda/unit/TestPerStreamCache.cpp similarity index 98% rename from tests/nvcv_types/unit/TestPerStreamCache.cpp rename to tests/cvcuda/unit/TestPerStreamCache.cpp index 8f066426f..33416663f 100644 --- a/tests/nvcv_types/unit/TestPerStreamCache.cpp +++ b/tests/cvcuda/unit/TestPerStreamCache.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,9 +17,9 @@
 
 #include "Definitions.hpp"
 
-#include
-#include
-#include
+#include
+#include
+#include
 
 #include
 
diff --git a/tests/nvcv_types/unit/TestSimpleCache.cpp b/tests/cvcuda/unit/TestSimpleCache.cpp
similarity index 93%
rename from tests/nvcv_types/unit/TestSimpleCache.cpp
rename to tests/cvcuda/unit/TestSimpleCache.cpp
index 4b970d25f..387ed1227 100644
--- a/tests/nvcv_types/unit/TestSimpleCache.cpp
+++ b/tests/cvcuda/unit/TestSimpleCache.cpp
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,7 +17,7 @@
 
 #include "Definitions.hpp"
 
-#include
+#include
 
 namespace {
 struct Payload
diff --git a/tests/nvcv_types/unit/TestStreamId.cpp b/tests/cvcuda/unit/TestStreamId.cpp
similarity index 96%
rename from tests/nvcv_types/unit/TestStreamId.cpp
rename to tests/cvcuda/unit/TestStreamId.cpp
index b5108bbf3..f12f182dd 100644
--- a/tests/nvcv_types/unit/TestStreamId.cpp
+++ b/tests/cvcuda/unit/TestStreamId.cpp
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -19,7 +19,7 @@
 
 #include
 #include
-#include
+#include
 
 #include
 
diff --git a/tests/nvcv_types/CMakeLists.txt b/tests/nvcv_types/CMakeLists.txt
index ba7d50a22..3a0221a21 100644
--- a/tests/nvcv_types/CMakeLists.txt
+++ b/tests/nvcv_types/CMakeLists.txt
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 # SPDX-License-Identifier: Apache-2.0
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -31,3 +31,38 @@ if(BUILD_PYTHON)
     # System tests for nvcv python
     add_subdirectory(python)
 endif()
+
+# Test NVCV can be used standalone
+
+# The idea is to copy the NVCV root source dir outside of the CVCUDA tree prior to building it, to make sure there
+# are no hard dependencies between NVCV and CVCUDA. The NVCV standalone test is then built as an external project
+# to ensure it does not take advantage of the CVCUDA build. Installation is disabled on the NVCV standalone build
+# so it does not interfere with CVCUDA's installation of NVCV. After NVCV standalone is built, its executable is
+# imported here so that it can be added as an NVCV test.
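+#
+# In short, the block below (all names taken from the code that follows): copy src/nvcv
+# into the build tree, build it with ExternalProject_Add and NVCV_ENABLE_INSTALL=OFF,
+# then import the resulting nvcv_test_standalone executable and register it with
+# nvcv_add_test.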
+ +include(ExternalProject) + +file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/../../src/nvcv DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/standalone/) + +ExternalProject_Add( + nvcv_standalone + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/standalone + BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/standalone + INSTALL_COMMAND "" + CMAKE_ARGS + -DCMAKE_RUNTIME_OUTPUT_DIRECTORY=${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + -DNVCV_DIR=${CMAKE_CURRENT_BINARY_DIR}/standalone/nvcv + -DNVCV_ENABLE_INSTALL=OFF + -DEXPOSE_CODE=OFF + -DWARNINGS_AS_ERRORS=${WARNINGS_AS_ERRORS} + -DENABLE_TEGRA=${ENABLE_TEGRA} + -DENABLE_COMPAT_OLD_GLIBC=${ENABLE_COMPAT_OLD_GLIBC} +) + +add_executable(nvcv_test_standalone IMPORTED) +set_target_properties(nvcv_test_standalone PROPERTIES + IMPORTED_LOCATION ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/nvcv_test_standalone +) +add_dependencies(nvcv_test_standalone nvcv_standalone) + +nvcv_add_test(${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/nvcv_test_standalone nvcv) diff --git a/tests/nvcv_types/cudatools_system/CMakeLists.txt b/tests/nvcv_types/cudatools_system/CMakeLists.txt index 3779482ec..8b735e15a 100644 --- a/tests/nvcv_types/cudatools_system/CMakeLists.txt +++ b/tests/nvcv_types/cudatools_system/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/nvcv_types/cudatools_system/DeviceAtomics.cu b/tests/nvcv_types/cudatools_system/DeviceAtomics.cu index f4335163e..1b3c30810 100644 --- a/tests/nvcv_types/cudatools_system/DeviceAtomics.cu +++ b/tests/nvcv_types/cudatools_system/DeviceAtomics.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,8 @@ #include "DeviceAtomics.hpp" // to test in the device -#include // for EXPECT_EQ, etc. -#include // the object of this test +#include // the object of this test +#include // for EXPECT_EQ, etc. namespace cuda = nvcv::cuda; diff --git a/tests/nvcv_types/cudatools_system/DeviceBorderVarShapeWrap.cu b/tests/nvcv_types/cudatools_system/DeviceBorderVarShapeWrap.cu index add116391..56566be40 100644 --- a/tests/nvcv_types/cudatools_system/DeviceBorderVarShapeWrap.cu +++ b/tests/nvcv_types/cudatools_system/DeviceBorderVarShapeWrap.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,11 +17,11 @@ #include "DeviceBorderVarShapeWrap.hpp" // to test in the device -#include // for EXPECT_EQ, etc. -#include // the object of this test -#include // for ImageBatchVarShape, etc. -#include // for operator *, etc. -#include // for StaticCast, etc. +#include // the object of this test +#include // for ImageBatchVarShape, etc. +#include // for operator *, etc. +#include // for StaticCast, etc. +#include // for EXPECT_EQ, etc. 
namespace cuda = nvcv::cuda; diff --git a/tests/nvcv_types/cudatools_system/DeviceBorderWrap.cu b/tests/nvcv_types/cudatools_system/DeviceBorderWrap.cu index 8f1f02948..53fb0bebf 100644 --- a/tests/nvcv_types/cudatools_system/DeviceBorderWrap.cu +++ b/tests/nvcv_types/cudatools_system/DeviceBorderWrap.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,12 +17,12 @@ #include "DeviceBorderWrap.hpp" // to test in the device -#include // for EXPECT_EQ, etc. -#include // the object of this test -#include // for FullTensorWrap, etc. -#include // for operator *, etc. -#include // for StaticCast, etc. -#include // for Tensor3DWrap, etc. +#include // the object of this test +#include // for FullTensorWrap, etc. +#include // for operator *, etc. +#include // for StaticCast, etc. +#include // for Tensor3DWrap, etc. +#include // for EXPECT_EQ, etc. namespace cuda = nvcv::cuda; diff --git a/tests/nvcv_types/cudatools_system/DeviceFullTensorWrap.cu b/tests/nvcv_types/cudatools_system/DeviceFullTensorWrap.cu index b813b86fe..4e32fdf47 100644 --- a/tests/nvcv_types/cudatools_system/DeviceFullTensorWrap.cu +++ b/tests/nvcv_types/cudatools_system/DeviceFullTensorWrap.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,11 +17,11 @@ #include "DeviceFullTensorWrap.hpp" // to test in the device -#include // for EXPECT_EQ, etc. -#include // for DropCast, etc. -#include // for operator == to allow EXPECT_EQ -#include // for StaticCast, etc. -#include // the object of this test +#include // for DropCast, etc. +#include // for operator == to allow EXPECT_EQ +#include // for StaticCast, etc. +#include // the object of this test +#include // for EXPECT_EQ, etc. namespace cuda = nvcv::cuda; diff --git a/tests/nvcv_types/cudatools_system/DeviceFullTensorWrap.hpp b/tests/nvcv_types/cudatools_system/DeviceFullTensorWrap.hpp index 431b0c4eb..1eb784edc 100644 --- a/tests/nvcv_types/cudatools_system/DeviceFullTensorWrap.hpp +++ b/tests/nvcv_types/cudatools_system/DeviceFullTensorWrap.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,8 +20,8 @@ #include "DeviceTensorWrap.hpp" // for Array, etc. -#include // for int2, etc. -#include // the object of this test +#include // for int2, etc. 
+#include // the object of this test template void DeviceUseFullTensorWrap(const InputType &); diff --git a/tests/nvcv_types/cudatools_system/DeviceImageBatchVarShapeWrap.cu b/tests/nvcv_types/cudatools_system/DeviceImageBatchVarShapeWrap.cu index a749c1be0..001687278 100644 --- a/tests/nvcv_types/cudatools_system/DeviceImageBatchVarShapeWrap.cu +++ b/tests/nvcv_types/cudatools_system/DeviceImageBatchVarShapeWrap.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,9 +17,9 @@ #include "DeviceImageBatchVarShapeWrap.hpp" -#include // for EXPECT_EQ, etc. -#include // for operator == to allow EXPECT_EQ -#include // for StaticCast, etc. +#include // for operator == to allow EXPECT_EQ +#include // for StaticCast, etc. +#include // for EXPECT_EQ, etc. namespace cuda = nvcv::cuda; diff --git a/tests/nvcv_types/cudatools_system/DeviceImageBatchVarShapeWrap.hpp b/tests/nvcv_types/cudatools_system/DeviceImageBatchVarShapeWrap.hpp index 87720037e..b54d5e94b 100644 --- a/tests/nvcv_types/cudatools_system/DeviceImageBatchVarShapeWrap.hpp +++ b/tests/nvcv_types/cudatools_system/DeviceImageBatchVarShapeWrap.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,8 +18,8 @@ #ifndef NVCV_TESTS_DEVICE_IMAGE_BATCH_VAR_SHAPE_WRAP_HPP #define NVCV_TESTS_DEVICE_IMAGE_BATCH_VAR_SHAPE_WRAP_HPP -#include // for int3, etc. -#include // the object of this test +#include // for int3, etc. +#include // the object of this test template void DeviceSetTwos(DstWrapper &, int3, cudaStream_t &); diff --git a/tests/nvcv_types/cudatools_system/DeviceInterpolationVarShapeWrap.cu b/tests/nvcv_types/cudatools_system/DeviceInterpolationVarShapeWrap.cu index edc725ab7..dfbf97b90 100644 --- a/tests/nvcv_types/cudatools_system/DeviceInterpolationVarShapeWrap.cu +++ b/tests/nvcv_types/cudatools_system/DeviceInterpolationVarShapeWrap.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,10 +17,10 @@ #include "DeviceInterpolationVarShapeWrap.hpp" // to test in the device -#include // for EXPECT_EQ, etc. -#include // the object of this test -#include // for operator *, etc. -#include // for StaticCast, etc. +#include // the object of this test +#include // for operator *, etc. +#include // for StaticCast, etc. +#include // for EXPECT_EQ, etc. namespace cuda = nvcv::cuda; diff --git a/tests/nvcv_types/cudatools_system/DeviceInterpolationWrap.cu b/tests/nvcv_types/cudatools_system/DeviceInterpolationWrap.cu index b9d189131..5d8897410 100644 --- a/tests/nvcv_types/cudatools_system/DeviceInterpolationWrap.cu +++ b/tests/nvcv_types/cudatools_system/DeviceInterpolationWrap.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,11 +17,11 @@ #include "DeviceInterpolationWrap.hpp" // to test in the device -#include // for EXPECT_EQ, etc. -#include // for DropCast, etc. -#include // the object of this test -#include // for operator *, etc. -#include // for StaticCast, etc. +#include // for DropCast, etc. +#include // the object of this test +#include // for operator *, etc. +#include // for StaticCast, etc. +#include // for EXPECT_EQ, etc. namespace cuda = nvcv::cuda; diff --git a/tests/nvcv_types/cudatools_system/DeviceMathWrappers.hpp b/tests/nvcv_types/cudatools_system/DeviceMathWrappers.hpp index 09a9f50fd..f7b505cd6 100644 --- a/tests/nvcv_types/cudatools_system/DeviceMathWrappers.hpp +++ b/tests/nvcv_types/cudatools_system/DeviceMathWrappers.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ #ifndef NVCV_TESTS_DEVICE_MATH_WRAPPERS_HPP #define NVCV_TESTS_DEVICE_MATH_WRAPPERS_HPP -#include // the object of this test +#include // the object of this test namespace cuda = nvcv::cuda; diff --git a/tests/nvcv_types/cudatools_system/DeviceSaturateCast.cu b/tests/nvcv_types/cudatools_system/DeviceSaturateCast.cu index 3431bbb16..38c9b849d 100644 --- a/tests/nvcv_types/cudatools_system/DeviceSaturateCast.cu +++ b/tests/nvcv_types/cudatools_system/DeviceSaturateCast.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,8 @@ #include "DeviceSaturateCast.hpp" // to test in the device -#include // for EXPECT_EQ, etc. -#include // the object of this test +#include // the object of this test +#include // for EXPECT_EQ, etc. namespace cuda = nvcv::cuda; diff --git a/tests/nvcv_types/cudatools_system/DeviceTensorBatchWrap.cu b/tests/nvcv_types/cudatools_system/DeviceTensorBatchWrap.cu index fa1a3b1f1..d411f4b65 100644 --- a/tests/nvcv_types/cudatools_system/DeviceTensorBatchWrap.cu +++ b/tests/nvcv_types/cudatools_system/DeviceTensorBatchWrap.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,10 +17,10 @@ #include "DeviceTensorBatchWrap.hpp" -#include // for EXPECT_EQ, etc. -#include // for operator == to allow EXPECT_EQ -#include // for StaticCast, etc. -#include +#include // for operator == to allow EXPECT_EQ +#include // for StaticCast, etc. +#include +#include // for EXPECT_EQ, etc. 
namespace cuda = nvcv::cuda; diff --git a/tests/nvcv_types/cudatools_system/DeviceTensorBatchWrap.hpp b/tests/nvcv_types/cudatools_system/DeviceTensorBatchWrap.hpp index 2f98460b3..005c7e92b 100644 --- a/tests/nvcv_types/cudatools_system/DeviceTensorBatchWrap.hpp +++ b/tests/nvcv_types/cudatools_system/DeviceTensorBatchWrap.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,8 +15,8 @@ * limitations under the License. */ +#include #include -#include namespace cuda = nvcv::cuda; diff --git a/tests/nvcv_types/cudatools_system/DeviceTensorWrap.cu b/tests/nvcv_types/cudatools_system/DeviceTensorWrap.cu index 9a2c1d4fe..afff2b95d 100644 --- a/tests/nvcv_types/cudatools_system/DeviceTensorWrap.cu +++ b/tests/nvcv_types/cudatools_system/DeviceTensorWrap.cu @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,11 +17,11 @@ #include "DeviceTensorWrap.hpp" // to test in the device -#include // for EXPECT_EQ, etc. -#include // for DropCast, etc. -#include // for operator == to allow EXPECT_EQ -#include // for StaticCast, etc. -#include // the object of this test +#include // for DropCast, etc. +#include // for operator == to allow EXPECT_EQ +#include // for StaticCast, etc. +#include // the object of this test +#include // for EXPECT_EQ, etc. namespace cuda = nvcv::cuda; diff --git a/tests/nvcv_types/cudatools_system/DeviceTensorWrap.hpp b/tests/nvcv_types/cudatools_system/DeviceTensorWrap.hpp index 436a160de..53672db04 100644 --- a/tests/nvcv_types/cudatools_system/DeviceTensorWrap.hpp +++ b/tests/nvcv_types/cudatools_system/DeviceTensorWrap.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,9 +18,9 @@ #ifndef NVCV_TESTS_DEVICE_TENSOR_WRAP_HPP #define NVCV_TESTS_DEVICE_TENSOR_WRAP_HPP -#include // for int2, etc. -#include // the object of this test -#include // for MakeType, etc. +#include // for int2, etc. +#include // the object of this test +#include // for MakeType, etc. #include // for std::array, etc. diff --git a/tests/nvcv_types/cudatools_system/TestArrayWrap.cpp b/tests/nvcv_types/cudatools_system/TestArrayWrap.cpp index 216c2d1b4..0035b61d5 100644 --- a/tests/nvcv_types/cudatools_system/TestArrayWrap.cpp +++ b/tests/nvcv_types/cudatools_system/TestArrayWrap.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,10 +20,10 @@ #include // for NVCV_INSTANTIATE_TEST_SUITE_P, etc. #include // for NVCV_TYPED_TEST_SUITE, etc. 
#include // for StringLiteral +#include +#include #include #include -#include -#include #include diff --git a/tests/nvcv_types/cudatools_system/TestAtomics.cpp b/tests/nvcv_types/cudatools_system/TestAtomics.cpp index 4aae472ea..9df6be26a 100644 --- a/tests/nvcv_types/cudatools_system/TestAtomics.cpp +++ b/tests/nvcv_types/cudatools_system/TestAtomics.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,9 +17,9 @@ #include "DeviceAtomics.hpp" // to test in the device -#include // for NVCV_TYPED_TEST_SUITE, etc. -#include // the object of this test -#include // for operator == to allow EXPECT_EQ +#include // for NVCV_TYPED_TEST_SUITE, etc. +#include // the object of this test +#include // for operator == to allow EXPECT_EQ #include // for std::iota diff --git a/tests/nvcv_types/cudatools_system/TestBorderVarShapeWrap.cpp b/tests/nvcv_types/cudatools_system/TestBorderVarShapeWrap.cpp index 42e468629..fd9f642f3 100644 --- a/tests/nvcv_types/cudatools_system/TestBorderVarShapeWrap.cpp +++ b/tests/nvcv_types/cudatools_system/TestBorderVarShapeWrap.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,14 +17,14 @@ #include "DeviceBorderVarShapeWrap.hpp" // to test in the device -#include // for test::IsInside, etc. -#include // for stream operator, etc. -#include // for NVCV_TYPED_TEST_SUITE, etc. -#include // for Image, etc. -#include // for ImageBatchVarShape, etc. -#include // the object of this test -#include // for ImageBatchVarShapeWrap, etc. -#include // for operator == to allow EXPECT_EQ +#include // for test::IsInside, etc. +#include // for stream operator, etc. +#include // for NVCV_TYPED_TEST_SUITE, etc. +#include // the object of this test +#include // for ImageBatchVarShapeWrap, etc. +#include // for operator == to allow EXPECT_EQ +#include // for Image, etc. +#include // for ImageBatchVarShape, etc. #include #include diff --git a/tests/nvcv_types/cudatools_system/TestBorderWrap.cpp b/tests/nvcv_types/cudatools_system/TestBorderWrap.cpp index 1c207187f..b4d53107a 100644 --- a/tests/nvcv_types/cudatools_system/TestBorderWrap.cpp +++ b/tests/nvcv_types/cudatools_system/TestBorderWrap.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,15 +17,15 @@ #include "DeviceBorderWrap.hpp" // to test in the device -#include // for test::ReplicateBorderIndex, etc. -#include // for stream operator, etc. -#include // for NVCV_TYPED_TEST_SUITE, etc. -#include // for Tensor, etc. -#include // for TensorDataAccessStridedImagePlanar, etc. -#include // the object of this test -#include // for operator == to allow EXPECT_EQ -#include // for Tensor3DWrap, etc. -#include // for nvcv::util::CreateTensor, etc. 
+#include // for test::ReplicateBorderIndex, etc. +#include // for stream operator, etc. +#include // for nvcv::util::CreateTensor, etc. +#include // for NVCV_TYPED_TEST_SUITE, etc. +#include // the object of this test +#include // for operator == to allow EXPECT_EQ +#include // for Tensor3DWrap, etc. +#include // for Tensor, etc. +#include // for TensorDataAccessStridedImagePlanar, etc. #include #include diff --git a/tests/nvcv_types/cudatools_system/TestDropCast.cpp b/tests/nvcv_types/cudatools_system/TestDropCast.cpp index 7cc495455..4d0354bb2 100644 --- a/tests/nvcv_types/cudatools_system/TestDropCast.cpp +++ b/tests/nvcv_types/cudatools_system/TestDropCast.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,8 +15,8 @@ * limitations under the License. */ -#include // for NVCV_TYPED_TEST_SUITE, etc. -#include // the object of this test +#include // for NVCV_TYPED_TEST_SUITE, etc. +#include // the object of this test namespace t = ::testing; namespace cuda = nvcv::cuda; diff --git a/tests/nvcv_types/cudatools_system/TestFullTensorWrap.cpp b/tests/nvcv_types/cudatools_system/TestFullTensorWrap.cpp index c542055c7..354fb5a17 100644 --- a/tests/nvcv_types/cudatools_system/TestFullTensorWrap.cpp +++ b/tests/nvcv_types/cudatools_system/TestFullTensorWrap.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,12 +18,12 @@ #include "DeviceBorderWrap.hpp" // to test in device #include "DeviceFullTensorWrap.hpp" // to test in device -#include // for test::IsInside, etc. -#include // for NVCV_TYPED_TEST_SUITE, etc. -#include // for Tensor, etc. -#include // for TensorDataAccessStridedImagePlanar, etc. -#include // for BorderWrap, etc. -#include // for operator == to allow EXPECT_EQ +#include // for test::IsInside, etc. +#include // for NVCV_TYPED_TEST_SUITE, etc. +#include // for BorderWrap, etc. +#include // for operator == to allow EXPECT_EQ +#include // for Tensor, etc. +#include // for TensorDataAccessStridedImagePlanar, etc. #include #include diff --git a/tests/nvcv_types/cudatools_system/TestImageBatchVarShapeWrap.cpp b/tests/nvcv_types/cudatools_system/TestImageBatchVarShapeWrap.cpp index 9b67a6b97..33876ac20 100644 --- a/tests/nvcv_types/cudatools_system/TestImageBatchVarShapeWrap.cpp +++ b/tests/nvcv_types/cudatools_system/TestImageBatchVarShapeWrap.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,10 +17,10 @@ #include "DeviceImageBatchVarShapeWrap.hpp" // to test in the device -#include // for NVCV_MIXTYPED_TEST_SUITE_P, etc. -#include // for ImageBatchVarShape, etc. -#include // for ImageBatchVarShapeWrap, etc. -#include // for operator == to allow EXPECT_EQ +#include // for NVCV_MIXTYPED_TEST_SUITE_P, etc. 
+#include // for ImageBatchVarShapeWrap, etc. +#include // for operator == to allow EXPECT_EQ +#include // for ImageBatchVarShape, etc. #include #include diff --git a/tests/nvcv_types/cudatools_system/TestInterpolationVarShapeWrap.cpp b/tests/nvcv_types/cudatools_system/TestInterpolationVarShapeWrap.cpp index 2c579b491..70e04dbe9 100644 --- a/tests/nvcv_types/cudatools_system/TestInterpolationVarShapeWrap.cpp +++ b/tests/nvcv_types/cudatools_system/TestInterpolationVarShapeWrap.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,15 +17,15 @@ #include "DeviceInterpolationVarShapeWrap.hpp" // to test in the device -#include // for test::GoldInterp, etc. -#include // for stream operator, etc. -#include // for NVCV_TYPED_TEST_SUITE, etc. -#include // for Image, etc. -#include // for ImageBatchVarShape, etc. -#include // for ImageBatchVarShapeWrap, etc. -#include // for ImageBatchVarShapeWrap, etc. -#include // the object of this test -#include // for operator == to allow EXPECT_EQ +#include // for test::GoldInterp, etc. +#include // for stream operator, etc. +#include // for NVCV_TYPED_TEST_SUITE, etc. +#include // for ImageBatchVarShapeWrap, etc. +#include // for ImageBatchVarShapeWrap, etc. +#include // the object of this test +#include // for operator == to allow EXPECT_EQ +#include // for Image, etc. +#include // for ImageBatchVarShape, etc. #include #include diff --git a/tests/nvcv_types/cudatools_system/TestInterpolationWrap.cpp b/tests/nvcv_types/cudatools_system/TestInterpolationWrap.cpp index 3da35b1c5..957643223 100644 --- a/tests/nvcv_types/cudatools_system/TestInterpolationWrap.cpp +++ b/tests/nvcv_types/cudatools_system/TestInterpolationWrap.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,13 +18,13 @@ #include "DeviceInterpolationWrap.hpp" // to test in the device #include "DeviceTensorWrap.hpp" // for PackedImage, etc. -#include // for test::GoldInterp, etc. -#include // for NVCV_TYPED_TEST_SUITE, etc. -#include // for Tensor, etc. -#include // for TensorDataAccessStridedImagePlanar, etc. -#include // for DropCast, etc. -#include // the object of this test -#include // for operator == to allow EXPECT_EQ +#include // for test::GoldInterp, etc. +#include // for NVCV_TYPED_TEST_SUITE, etc. +#include // for DropCast, etc. +#include // the object of this test +#include // for operator == to allow EXPECT_EQ +#include // for Tensor, etc. +#include // for TensorDataAccessStridedImagePlanar, etc. #include #include diff --git a/tests/nvcv_types/cudatools_system/TestLinAlg.cpp b/tests/nvcv_types/cudatools_system/TestLinAlg.cpp index fcef2cb6e..b235c3d0f 100644 --- a/tests/nvcv_types/cudatools_system/TestLinAlg.cpp +++ b/tests/nvcv_types/cudatools_system/TestLinAlg.cpp @@ -15,9 +15,9 @@ * limitations under the License. */ -#include // for NVCV_TYPED_TEST_SUITE, etc. -#include // for StringLiteral -#include // the object of this test +#include // for NVCV_TYPED_TEST_SUITE, etc. 
+#include // for StringLiteral +#include // the object of this test #include // for std::generate, etc. #include // for std::pow, etc. diff --git a/tests/nvcv_types/cudatools_system/TestMathOps.cpp b/tests/nvcv_types/cudatools_system/TestMathOps.cpp index b7450fa17..1932808e7 100644 --- a/tests/nvcv_types/cudatools_system/TestMathOps.cpp +++ b/tests/nvcv_types/cudatools_system/TestMathOps.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,8 +15,8 @@ * limitations under the License. */ -#include // for NVCV_TYPED_TEST_SUITE_F, etc. -#include // the object of this test +#include // for NVCV_TYPED_TEST_SUITE_F, etc. +#include // the object of this test namespace t = ::testing; namespace cuda = nvcv::cuda; diff --git a/tests/nvcv_types/cudatools_system/TestMathWrappers.cpp b/tests/nvcv_types/cudatools_system/TestMathWrappers.cpp index e0b5f9fe2..2bc60034f 100644 --- a/tests/nvcv_types/cudatools_system/TestMathWrappers.cpp +++ b/tests/nvcv_types/cudatools_system/TestMathWrappers.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,9 +17,9 @@ #include "DeviceMathWrappers.hpp" // to test in the device -#include // for NVCV_TYPED_TEST_SUITE, etc. -#include // for operator == to allow EXPECT_EQ -#include // the object of this test +#include // for NVCV_TYPED_TEST_SUITE, etc. +#include // for operator == to allow EXPECT_EQ +#include // the object of this test namespace cuda = nvcv::cuda; namespace ttype = nvcv::test::type; diff --git a/tests/nvcv_types/cudatools_system/TestMetaprogramming.cpp b/tests/nvcv_types/cudatools_system/TestMetaprogramming.cpp index 4ca24a7f6..e2c38112d 100644 --- a/tests/nvcv_types/cudatools_system/TestMetaprogramming.cpp +++ b/tests/nvcv_types/cudatools_system/TestMetaprogramming.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,8 +15,8 @@ * limitations under the License. */ -#include // for NVCV_TYPED_TEST_SUITE_F, etc. -#include // the object of this test +#include // for NVCV_TYPED_TEST_SUITE_F, etc. +#include // the object of this test namespace t = ::testing; namespace test = nvcv::test; diff --git a/tests/nvcv_types/cudatools_system/TestRangeCast.cpp b/tests/nvcv_types/cudatools_system/TestRangeCast.cpp index 3f93a1f82..8b6d03d61 100644 --- a/tests/nvcv_types/cudatools_system/TestRangeCast.cpp +++ b/tests/nvcv_types/cudatools_system/TestRangeCast.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,9 @@ * limitations under the License. */ -#include // for NVCV_TYPED_TEST_SUITE, etc. -#include // for operator == to allow EXPECT_EQ -#include // the object of this test +#include // for NVCV_TYPED_TEST_SUITE, etc. +#include // for operator == to allow EXPECT_EQ +#include // the object of this test #include // for std::round, etc. #include // for std::numeric_limits, etc. diff --git a/tests/nvcv_types/cudatools_system/TestSaturateCast.cpp b/tests/nvcv_types/cudatools_system/TestSaturateCast.cpp index 928c3a2df..366eef24e 100644 --- a/tests/nvcv_types/cudatools_system/TestSaturateCast.cpp +++ b/tests/nvcv_types/cudatools_system/TestSaturateCast.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,9 +17,9 @@ #include "DeviceSaturateCast.hpp" // to test in the device -#include // for NVCV_TYPED_TEST_SUITE, etc. -#include // for operator == to allow EXPECT_EQ -#include // the object of this test +#include // for NVCV_TYPED_TEST_SUITE, etc. +#include // for operator == to allow EXPECT_EQ +#include // the object of this test namespace cuda = nvcv::cuda; namespace ttype = nvcv::test::type; diff --git a/tests/nvcv_types/cudatools_system/TestStaticCast.cpp b/tests/nvcv_types/cudatools_system/TestStaticCast.cpp index 440959bd8..cf9aadb76 100644 --- a/tests/nvcv_types/cudatools_system/TestStaticCast.cpp +++ b/tests/nvcv_types/cudatools_system/TestStaticCast.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,8 +15,8 @@ * limitations under the License. */ -#include // for NVCV_TYPED_TEST_SUITE, etc. -#include // the object of this test +#include // for NVCV_TYPED_TEST_SUITE, etc. +#include // the object of this test namespace cuda = nvcv::cuda; namespace ttype = nvcv::test::type; diff --git a/tests/nvcv_types/cudatools_system/TestTensorBatchWrap.cpp b/tests/nvcv_types/cudatools_system/TestTensorBatchWrap.cpp index 50d6fbf64..d9c0e5e16 100644 --- a/tests/nvcv_types/cudatools_system/TestTensorBatchWrap.cpp +++ b/tests/nvcv_types/cudatools_system/TestTensorBatchWrap.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,14 +17,14 @@ #include "DeviceTensorBatchWrap.hpp" -#include // for NVCV_INSTANTIATE_TEST_SUITE_P, etc. -#include // for NVCV_MIXTYPED_TEST_SUITE_P, etc. -#include // for StringLiteral +#include // for NVCV_INSTANTIATE_TEST_SUITE_P, etc. +#include // for NVCV_MIXTYPED_TEST_SUITE_P, etc. +#include // for StringLiteral +#include // for operator == to allow EXPECT_EQ +#include #include // for Image, etc. #include // for TensorBatch #include // for TensorDataAccessStridedImagePlanar, etc. 
-#include // for operator == to allow EXPECT_EQ -#include #include #include diff --git a/tests/nvcv_types/cudatools_system/TestTensorWrap.cpp b/tests/nvcv_types/cudatools_system/TestTensorWrap.cpp index 511f43a6a..d57fc0db3 100644 --- a/tests/nvcv_types/cudatools_system/TestTensorWrap.cpp +++ b/tests/nvcv_types/cudatools_system/TestTensorWrap.cpp @@ -17,13 +17,13 @@ #include "DeviceTensorWrap.hpp" // to test in device -#include // for NVCV_INSTANTIATE_TEST_SUITE_P, etc. -#include // for NVCV_TYPED_TEST_SUITE, etc. -#include // for StringLiteral -#include // for Image, etc. -#include // for Tensor, etc. -#include // for TensorDataAccessStridedImagePlanar, etc. -#include // for operator == to allow EXPECT_EQ +#include // for NVCV_INSTANTIATE_TEST_SUITE_P, etc. +#include // for NVCV_TYPED_TEST_SUITE, etc. +#include // for StringLiteral +#include // for operator == to allow EXPECT_EQ +#include // for Image, etc. +#include // for Tensor, etc. +#include // for TensorDataAccessStridedImagePlanar, etc. #include diff --git a/tests/nvcv_types/cudatools_system/TestTypeTraits.cpp b/tests/nvcv_types/cudatools_system/TestTypeTraits.cpp index 047955016..81ce8e2c7 100644 --- a/tests/nvcv_types/cudatools_system/TestTypeTraits.cpp +++ b/tests/nvcv_types/cudatools_system/TestTypeTraits.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,10 +15,10 @@ * limitations under the License. */ -#include // for NVCV_TYPED_TEST_SUITE_F, etc. -#include // for StringLiteral -#include // also object of this test -#include // the object of this test +#include // for NVCV_TYPED_TEST_SUITE_F, etc. +#include // for StringLiteral +#include // also object of this test +#include // the object of this test #include // for std::numeric_limits, etc. diff --git a/tests/nvcv_types/python/nvcv_test_types_python.in b/tests/nvcv_types/python/nvcv_test_types_python.in index ee25eda73..6a7928499 100755 --- a/tests/nvcv_types/python/nvcv_test_types_python.in +++ b/tests/nvcv_types/python/nvcv_test_types_python.in @@ -1,6 +1,6 @@ #!/bin/bash -e # -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/nvcv_types/python/test_cache.py b/tests/nvcv_types/python/test_cache.py new file mode 100644 index 000000000..4f8763a4c --- /dev/null +++ b/tests/nvcv_types/python/test_cache.py @@ -0,0 +1,118 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import nvcv
+import cvcuda
+import torch
+import pytest
+
+
+def test_cache_limit_get_set():
+    nvcv.clear_cache()
+
+    # Verify the initial cache limit (half of total GPU memory)
+    total = torch.cuda.mem_get_info()[1]
+    assert nvcv.get_cache_limit_inbytes() == total // 2
+
+    # Verify we can also set the cache limit
+    nvcv.set_cache_limit_inbytes(total)
+    assert nvcv.get_cache_limit_inbytes() == total
+
+
+def test_cache_current_byte_size():
+    nvcv.clear_cache()
+
+    nvcv_cache_size = 0
+    assert nvcv.current_cache_size_inbytes() == nvcv_cache_size
+
+    img_create = nvcv.Image.zeros((1, 1), nvcv.Format.F32)
+    nvcv_cache_size += nvcv.internal.nbytes_in_cache(img_create)
+    assert nvcv.current_cache_size_inbytes() == nvcv_cache_size
+
+    image_batch_create = nvcv.ImageBatchVarShape(5)
+    nvcv_cache_size += nvcv.internal.nbytes_in_cache(image_batch_create)
+    assert nvcv.current_cache_size_inbytes() == nvcv_cache_size
+
+    stream = nvcv.cuda.Stream()
+    nvcv_cache_size += nvcv.internal.nbytes_in_cache(stream)
+    assert nvcv.current_cache_size_inbytes() == nvcv_cache_size
+
+    tensor_create = nvcv.Tensor(2, (37, 7), nvcv.Format.RGB8, rowalign=1)
+    nvcv_cache_size += nvcv.internal.nbytes_in_cache(tensor_create)
+    assert nvcv.current_cache_size_inbytes() == nvcv_cache_size
+
+    tensor_batch_create = nvcv.TensorBatch(10)
+    nvcv_cache_size += nvcv.internal.nbytes_in_cache(tensor_batch_create)
+    assert nvcv.current_cache_size_inbytes() == nvcv_cache_size
+
+
+def test_cache_external_cacheitem():
+    nvcv.clear_cache()
+
+    input_tensor = torch.rand(2, 30, 16, 1).cuda()
+    input_tensor = input_tensor * 255
+    input_tensor = input_tensor.to(dtype=torch.uint8)
+    frames_cvcuda = cvcuda.as_tensor(input_tensor, "NHWC")
+    assert nvcv.current_cache_size_inbytes() == 0
+
+    frames_cvcuda_out = cvcuda.advcvtcolor(
+        frames_cvcuda, cvcuda.ColorConversion.YUV2RGB_NV12, cvcuda.ColorSpec.BT2020
+    )
+    assert (
+        nvcv.current_cache_size_inbytes()
+        == nvcv.internal.nbytes_in_cache(frames_cvcuda_out)
+    ) and (nvcv.internal.nbytes_in_cache(frames_cvcuda_out) > 0)
+
+
+def test_cache_limit_clearing():
+    nvcv.clear_cache()
+
+    img_create = nvcv.Image.zeros((1, 1), nvcv.Format.F32)
+    img_cache_size = nvcv.internal.nbytes_in_cache(img_create)
+
+    # The cache should be emptied if the newly set limit is smaller than the current cache size
+    nvcv.set_cache_limit_inbytes(img_cache_size - 1)
+    assert nvcv.current_cache_size_inbytes() == 0
+    del img_create
+
+    # An element should not be added to the cache if its size exceeds the cache limit
+    nvcv.set_cache_limit_inbytes(img_cache_size - 1)
+    img_create = nvcv.Image.zeros((1, 1), nvcv.Format.F32)
+    assert nvcv.current_cache_size_inbytes() == 0
+    del img_create
+
+    # If the cache grows too large, it should be emptied and the new element should then be added
+    nvcv.set_cache_limit_inbytes(img_cache_size)
+    img_create = nvcv.Image.zeros((1, 1), nvcv.Format.F32)
+    assert nvcv.current_cache_size_inbytes() == img_cache_size
+    img_create2 = nvcv.Image.zeros((1, 1), nvcv.Format.F32)
+    assert nvcv.current_cache_size_inbytes() == img_cache_size
+    del img_create
+    del img_create2
+
+
+def test_cache_zero_cache_limit():
+    nvcv.set_cache_limit_inbytes(0)
+
+    assert nvcv.get_cache_limit_inbytes() == 0
+
+    img_create = nvcv.Image.zeros((1, 1), nvcv.Format.F32)
+    assert nvcv.internal.nbytes_in_cache(img_create) > 0
+    assert nvcv.current_cache_size_inbytes() == 0
+
+
+def test_cache_negative_cache_limit():
+    with pytest.raises(ValueError):
+        nvcv.set_cache_limit_inbytes(-1)
diff --git a/tests/nvcv_types/python/test_image.py
b/tests/nvcv_types/python/test_image.py index b9e5efae6..7c8e28cef 100644 --- a/tests/nvcv_types/python/test_image.py +++ b/tests/nvcv_types/python/test_image.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -484,3 +484,27 @@ def test_image_wrapper_nodeletion(): assert (pt_img.cpu().numpy() == np_img).all() except RuntimeError: assert False, "Invalid memory" + + +def test_image_size_in_bytes(): + """ + Checks if the computation of the image size in bytes is correct + """ + img_create = nvcv.Image.zeros((1, 1), nvcv.Format.F32) + assert nvcv.internal.nbytes_in_cache(img_create) > 0 + + np_img = np.random.rand(1, 1).astype(np.float32) + img_create_host = nvcv.Image(np_img) + assert nvcv.internal.nbytes_in_cache(img_create_host) > 0 + + np_img = np.random.rand(1, 1).astype(np.float32) + img_create_host_vector = nvcv.Image([np_img, np_img]) + assert nvcv.internal.nbytes_in_cache(img_create_host_vector) > 0 + + pt_img = torch.from_numpy(np_img).cuda() + + img_wrap_external_buffer = cvcuda.as_image(pt_img) + assert nvcv.internal.nbytes_in_cache(img_wrap_external_buffer) == 0 + + img_wrap_external_buffer_vector = cvcuda.as_image([pt_img, pt_img]) + assert nvcv.internal.nbytes_in_cache(img_wrap_external_buffer_vector) == 0 diff --git a/tests/nvcv_types/python/test_imgbatchvarshape.py b/tests/nvcv_types/python/test_imgbatchvarshape.py index cb0af3306..436f5bc41 100644 --- a/tests/nvcv_types/python/test_imgbatchvarshape.py +++ b/tests/nvcv_types/python/test_imgbatchvarshape.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -204,3 +204,15 @@ def test_imgbatchvarshape_wrapper_nodeletion(): assert (pt_img.cpu().numpy() == np_img).all() except RuntimeError: assert False, "Invalid memory" + + +def test_imagebatchvarshape_size_in_bytes(): + """ + Checks if the computation of the ImageBatchVarShape size in bytes is correct + """ + batch_create = nvcv.ImageBatchVarShape(5) + assert nvcv.internal.nbytes_in_cache(batch_create) > 0 + + pt_img = torch.as_tensor(np.ndarray((16, 32, 4), dtype=np.float32), device="cuda") + batch_as_images = nvcv.as_images([pt_img]) + assert nvcv.internal.nbytes_in_cache(batch_as_images) > 0 diff --git a/tests/nvcv_types/python/test_stream.py b/tests/nvcv_types/python/test_stream.py index d70343054..20c6fb834 100644 --- a/tests/nvcv_types/python/test_stream.py +++ b/tests/nvcv_types/python/test_stream.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -112,3 +112,11 @@ def test_wrap_stream_external(stream_type): def test_stream_default_is_zero(): assert nvcv.cuda.Stream.default.handle == 0 + + +def test_stream_size_in_bytes(): + """ + Checks if the computation of the Stream size in bytes is correct + """ + stream = nvcv.cuda.Stream() + assert nvcv.internal.nbytes_in_cache(stream) == 0 diff --git a/tests/nvcv_types/python/test_tensor.py b/tests/nvcv_types/python/test_tensor.py index 52ff631fb..316f6c33a 100644 --- a/tests/nvcv_types/python/test_tensor.py +++ b/tests/nvcv_types/python/test_tensor.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -428,3 +428,28 @@ def test_tensor_wrap_cuda_array_interface(shape_arg, dtype_arg, layout_arg): assert wrapped.shape == shape_arg assert wrapped.dtype == dtype_arg assert wrapped.layout == layout_arg + + +def test_tensor_size_in_bytes(): + """ + Checks if the computation of the Tensor size in bytes is correct + """ + tensor_create_for_image_batch = nvcv.Tensor( + 2, (37, 7), nvcv.Format.RGB8, rowalign=1 + ) + assert nvcv.internal.nbytes_in_cache(tensor_create_for_image_batch) > 0 + + tensor_create = nvcv.Tensor((5, 16, 32, 4), np.float32, nvcv.TensorLayout.NHWC) + assert nvcv.internal.nbytes_in_cache(tensor_create) > 0 + + tensor_wrap = nvcv.as_tensor( + torch.as_tensor(np.ndarray((5, 16, 32, 4), dtype=np.float32), device="cuda") + ) + assert nvcv.internal.nbytes_in_cache(tensor_wrap) == 0 + + img = nvcv.Image((32, 16), nvcv.Format.RGBA8) + tensor_wrap_image = nvcv.as_tensor(img) + assert nvcv.internal.nbytes_in_cache(tensor_wrap_image) == 0 + + tensor_reshape = nvcv.reshape(tensor_create, (5, 32, 16, 4), nvcv.TensorLayout.NHWC) + assert nvcv.internal.nbytes_in_cache(tensor_reshape) == 0 diff --git a/tests/nvcv_types/python/test_tensor_batch.py b/tests/nvcv_types/python/test_tensor_batch.py index 260120331..832130b41 100644 --- a/tests/nvcv_types/python/test_tensor_batch.py +++ b/tests/nvcv_types/python/test_tensor_batch.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -225,3 +225,15 @@ def test_tensorbatch_errors(): batch = nvcv.TensorBatch(10) batch.pushback(random_tensors(2, np.int16, 4, "NHWC")) batch.pushback(random_tensors(3, np.int16, 4, "FHWC")) + + +def test_tensorbatch_size_in_bytes(): + """ + Checks if the computation of the TensorBatch size in bytes is correct + """ + batch_create = nvcv.TensorBatch(10) + assert nvcv.internal.nbytes_in_cache(batch_create) > 0 + + pt_img = torch.as_tensor(np.ndarray((16, 32, 4), dtype=np.float32), device="cuda") + batch_as_tensors = nvcv.as_tensors([pt_img]) + assert nvcv.internal.nbytes_in_cache(batch_as_tensors) > 0 diff --git a/tests/nvcv_types/standalone/CMakeLists.txt b/tests/nvcv_types/standalone/CMakeLists.txt new file mode 100644 index 000000000..cee910fc0 --- /dev/null +++ b/tests/nvcv_types/standalone/CMakeLists.txt @@ -0,0 +1,40 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.20.1) + +project(nvcv_test_standalone) + +set(CMAKE_CXX_STANDARD 17) + +enable_testing() + +find_package(GTest REQUIRED) + +if(NOT NVCV_DIR) + message(FATAL_ERROR "NVCV_DIR is empty! Path to NVCV directory must be given.") +endif() + +add_subdirectory(${NVCV_DIR} nvcv_dir) + +add_executable(nvcv_test_standalone + TestNVCVStandalone.cpp +) + +target_link_libraries(nvcv_test_standalone + PUBLIC + nvcv_types + GTest::gtest_main +) diff --git a/tests/nvcv_types/standalone/TestNVCVStandalone.cpp b/tests/nvcv_types/standalone/TestNVCVStandalone.cpp new file mode 100644 index 000000000..ada80a34e --- /dev/null +++ b/tests/nvcv_types/standalone/TestNVCVStandalone.cpp @@ -0,0 +1,33 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +TEST(StandaloneTest, nvcv_can_be_used_standalone) +{ + constexpr nvcv::Size2D size12{1, 2}, size21{2, 1}, size22{2, 2}; + + EXPECT_EQ(nvcv::MaxSize(size12, size21), size22); + + nvcv::Image img(size22, nvcv::FMT_RGBA8); + + EXPECT_EQ(img.size(), size22); + EXPECT_EQ(img.format(), nvcv::FMT_RGBA8); + EXPECT_NE(img.handle(), nullptr); +} diff --git a/tests/nvcv_types/system/CMakeLists.txt b/tests/nvcv_types/system/CMakeLists.txt index c4038188b..e0e3fdbaa 100644 --- a/tests/nvcv_types/system/CMakeLists.txt +++ b/tests/nvcv_types/system/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -65,8 +65,6 @@ add_header_compat_test(TARGET nvcv_test_capi_header_compat # Gather C++ headers file(GLOB_RECURSE CXXAPI_HEADERS RELATIVE "${NVCV_SOURCE_DIR}/include" CONFIGURE_DEPENDS "${NVCV_SOURCE_DIR}/include/*.hpp") -# remove optools files, they are c++17 -list(FILTER CXXAPI_HEADERS EXCLUDE REGEX "nvcv/cuda/") add_header_compat_test(TARGET nvcv_test_cxxapi_header_compat SOURCE TestAPI.cpp diff --git a/tests/nvcv_types/system/TestAllocatorC.cpp b/tests/nvcv_types/system/TestAllocatorC.cpp index 8d7fe0bbf..aa05ef5c7 100644 --- a/tests/nvcv_types/system/TestAllocatorC.cpp +++ b/tests/nvcv_types/system/TestAllocatorC.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/nvcv_types/system/TestArray.cpp b/tests/nvcv_types/system/TestArray.cpp index dbf204ba3..cea79dae7 100644 --- a/tests/nvcv_types/system/TestArray.cpp +++ b/tests/nvcv_types/system/TestArray.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/nvcv_types/system/TestColorSpec.cpp b/tests/nvcv_types/system/TestColorSpec.cpp index bc06a055e..8ea6adb4c 100644 --- a/tests/nvcv_types/system/TestColorSpec.cpp +++ b/tests/nvcv_types/system/TestColorSpec.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,8 +19,8 @@ #include #include -#include -#include +#include +#include namespace t = ::testing; namespace util = nvcv::util; diff --git a/tests/nvcv_types/system/TestConfig.cpp b/tests/nvcv_types/system/TestConfig.cpp index ef028c5ca..7527cc8f8 100644 --- a/tests/nvcv_types/system/TestConfig.cpp +++ b/tests/nvcv_types/system/TestConfig.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/nvcv_types/system/TestDataLayout.cpp b/tests/nvcv_types/system/TestDataLayout.cpp index 437c21517..e70f9535d 100644 --- a/tests/nvcv_types/system/TestDataLayout.cpp +++ b/tests/nvcv_types/system/TestDataLayout.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ #include "Definitions.hpp" #include -#include +#include #include diff --git a/tests/nvcv_types/system/TestImage.cpp b/tests/nvcv_types/system/TestImage.cpp index 371f48319..df9b215fe 100644 --- a/tests/nvcv_types/system/TestImage.cpp +++ b/tests/nvcv_types/system/TestImage.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/nvcv_types/system/TestImageBatch.cpp b/tests/nvcv_types/system/TestImageBatch.cpp index 83030ee34..23d7ab6bc 100644 --- a/tests/nvcv_types/system/TestImageBatch.cpp +++ b/tests/nvcv_types/system/TestImageBatch.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/nvcv_types/system/TestImageFormat.cpp b/tests/nvcv_types/system/TestImageFormat.cpp index 72dab22da..1a6bc9461 100644 --- a/tests/nvcv_types/system/TestImageFormat.cpp +++ b/tests/nvcv_types/system/TestImageFormat.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,9 +20,9 @@ #include #include #include -#include -#include -#include +#include +#include +#include #include diff --git a/tests/nvcv_types/system/TestRequirements.cpp b/tests/nvcv_types/system/TestRequirements.cpp index 3dc5fe5cc..cc58e2542 100644 --- a/tests/nvcv_types/system/TestRequirements.cpp +++ b/tests/nvcv_types/system/TestRequirements.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ #include "Definitions.hpp" #include -#include +#include #include diff --git a/tests/nvcv_types/system/TestTensor.cpp b/tests/nvcv_types/system/TestTensor.cpp index 1e31cb051..997f1ad00 100644 --- a/tests/nvcv_types/system/TestTensor.cpp +++ b/tests/nvcv_types/system/TestTensor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/nvcv_types/system/TestTensorBatch.cpp b/tests/nvcv_types/system/TestTensorBatch.cpp index 8a62d6095..11c425ea5 100644 --- a/tests/nvcv_types/system/TestTensorBatch.cpp +++ b/tests/nvcv_types/system/TestTensorBatch.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/nvcv_types/system/TestTensorDataUtils.cpp b/tests/nvcv_types/system/TestTensorDataUtils.cpp index 04b54440b..9b96f5453 100644 --- a/tests/nvcv_types/system/TestTensorDataUtils.cpp +++ b/tests/nvcv_types/system/TestTensorDataUtils.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,12 +17,12 @@ #include "Definitions.hpp" +#include #include #include #include #include #include -#include #include #include diff --git a/tests/nvcv_types/system/TestTensorLayout.cpp b/tests/nvcv_types/system/TestTensorLayout.cpp index d0d349fda..bdf3185fe 100644 --- a/tests/nvcv_types/system/TestTensorLayout.cpp +++ b/tests/nvcv_types/system/TestTensorLayout.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/nvcv_types/unit/CMakeLists.txt b/tests/nvcv_types/unit/CMakeLists.txt index d42a9ca23..099e0fe7a 100644 --- a/tests/nvcv_types/unit/CMakeLists.txt +++ b/tests/nvcv_types/unit/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -35,9 +35,6 @@ add_executable(nvcv_test_types_unit TestHandleWrapper.cpp TestTypeTraits.cpp TestSharedCoreObj.cpp - TestStreamId.cpp - TestSimpleCache.cpp - TestPerStreamCache.cpp ) if(ENABLE_COMPAT_OLD_GLIBC) diff --git a/tests/nvcv_types/unit/TestAlgorithm.cpp b/tests/nvcv_types/unit/TestAlgorithm.cpp index c595238d6..4a01a2cfb 100644 --- a/tests/nvcv_types/unit/TestAlgorithm.cpp +++ b/tests/nvcv_types/unit/TestAlgorithm.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ #include "Definitions.hpp" -#include +#include namespace util = nvcv::util; namespace test = nvcv::test; diff --git a/tests/nvcv_types/unit/TestCheckError.cpp b/tests/nvcv_types/unit/TestCheckError.cpp index 1c03441ae..8db27c8c2 100644 --- a/tests/nvcv_types/unit/TestCheckError.cpp +++ b/tests/nvcv_types/unit/TestCheckError.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ #include "Definitions.hpp" #include -#include +#include namespace gt = ::testing; namespace test = nvcv::test; diff --git a/tests/nvcv_types/unit/TestCompat.cpp b/tests/nvcv_types/unit/TestCompat.cpp index 0997137db..363b707a4 100644 --- a/tests/nvcv_types/unit/TestCompat.cpp +++ b/tests/nvcv_types/unit/TestCompat.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ # include # define HAS_SYS_RANDOM_H 1 #endif -#include +#include namespace test = nvcv::test; namespace t = ::testing; diff --git a/tests/nvcv_types/unit/TestHandleManager.cpp b/tests/nvcv_types/unit/TestHandleManager.cpp index 82fcb1862..e38010577 100644 --- a/tests/nvcv_types/unit/TestHandleManager.cpp +++ b/tests/nvcv_types/unit/TestHandleManager.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,9 +17,9 @@ #include "Definitions.hpp" -#include -#include -#include +#include +#include +#include #include diff --git a/tests/nvcv_types/unit/TestHandleWrapper.cpp b/tests/nvcv_types/unit/TestHandleWrapper.cpp index 7e37543c7..9d4695683 100644 --- a/tests/nvcv_types/unit/TestHandleWrapper.cpp +++ b/tests/nvcv_types/unit/TestHandleWrapper.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,9 +17,9 @@ #include "Definitions.hpp" #include -#include -#include -#include +#include +#include +#include namespace { diff --git a/tests/nvcv_types/unit/TestLockFreeStack.cpp b/tests/nvcv_types/unit/TestLockFreeStack.cpp index ed3fb0db9..298af4fc2 100644 --- a/tests/nvcv_types/unit/TestLockFreeStack.cpp +++ b/tests/nvcv_types/unit/TestLockFreeStack.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ #include "Definitions.hpp" -#include +#include namespace priv = nvcv::priv; diff --git a/tests/nvcv_types/unit/TestMath.cpp b/tests/nvcv_types/unit/TestMath.cpp index c2bf64b62..4ffc5f69e 100644 --- a/tests/nvcv_types/unit/TestMath.cpp +++ b/tests/nvcv_types/unit/TestMath.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ #include "Definitions.hpp" #include -#include +#include namespace t = ::testing; namespace util = nvcv::util; diff --git a/tests/nvcv_types/unit/TestMetaprogramming.cpp b/tests/nvcv_types/unit/TestMetaprogramming.cpp index a546190ed..934b11991 100644 --- a/tests/nvcv_types/unit/TestMetaprogramming.cpp +++ b/tests/nvcv_types/unit/TestMetaprogramming.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ #include "Definitions.hpp" #include -#include +#include namespace ttest = nvcv::test::type; namespace util = nvcv::util; diff --git a/tests/nvcv_types/unit/TestRange.cpp b/tests/nvcv_types/unit/TestRange.cpp index 521daaaae..f2f57ccdb 100644 --- a/tests/nvcv_types/unit/TestRange.cpp +++ b/tests/nvcv_types/unit/TestRange.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ #include "Definitions.hpp" #include -#include +#include #include #include diff --git a/tests/nvcv_types/unit/TestSharedCoreObj.cpp b/tests/nvcv_types/unit/TestSharedCoreObj.cpp index f137aa25b..1fb2c4f71 100644 --- a/tests/nvcv_types/unit/TestSharedCoreObj.cpp +++ b/tests/nvcv_types/unit/TestSharedCoreObj.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,12 +19,12 @@ #include #include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include inline NVCVImageHandle CreateImage() { diff --git a/tests/nvcv_types/unit/TestStaticVector.cpp b/tests/nvcv_types/unit/TestStaticVector.cpp index 331c567d7..ac5235073 100644 --- a/tests/nvcv_types/unit/TestStaticVector.cpp +++ b/tests/nvcv_types/unit/TestStaticVector.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ #include "Definitions.hpp" -#include +#include namespace util = nvcv::util; diff --git a/tests/nvcv_types/unit/TestString.cpp b/tests/nvcv_types/unit/TestString.cpp index 48192d128..958642678 100644 --- a/tests/nvcv_types/unit/TestString.cpp +++ b/tests/nvcv_types/unit/TestString.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,8 +18,8 @@ #include "Definitions.hpp" #include -#include -#include +#include +#include namespace util = nvcv::util; namespace test = nvcv::test; diff --git a/tests/nvcv_types/unit/TestVersion.cpp b/tests/nvcv_types/unit/TestVersion.cpp index ccf162774..8b356a9e9 100644 --- a/tests/nvcv_types/unit/TestVersion.cpp +++ b/tests/nvcv_types/unit/TestVersion.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ #include "Definitions.hpp" #include -#include +#include namespace t = ::testing; namespace util = nvcv::util; diff --git a/tests/run_tests.sh.in b/tests/run_tests.sh.in index c50b66a08..e54904f58 100755 --- a/tests/run_tests.sh.in +++ b/tests/run_tests.sh.in @@ -1,6 +1,6 @@ #!/bin/bash -e -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/mkop/CppTest.cpp b/tools/mkop/CppTest.cpp index cf3311a2f..f4fb7c963 100644 --- a/tools/mkop/CppTest.cpp +++ b/tools/mkop/CppTest.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,12 +17,12 @@ #include "Definitions.hpp" +#include #include #include #include #include #include -#include #include #include diff --git a/tools/mkop/PythonWrap.cpp b/tools/mkop/PythonWrap.cpp index 25c67a77d..0acec9038 100644 --- a/tools/mkop/PythonWrap.cpp +++ b/tools/mkop/PythonWrap.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); From 660cb14cc0aeeb06b99da1a6be97ef9343fa9d5c Mon Sep 17 00:00:00 2001 From: enmortensen Date: Thu, 1 Aug 2024 16:20:46 +0000 Subject: [PATCH 2/2] fix: correct copyright notices Signed-off-by: enmortensen --- .github/workflows/codeql.yml | 19 ++++++++++++------- samples/scripts/requirements.txt | 19 ++++++++++++------- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 4d1de0382..c9a771070 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -1,12 +1,17 @@ # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# SPDX-License-Identifier: Apache-2.0 # -# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual -# property and proprietary rights in and to this material, related -# documentation and any modifications thereto. Any use, reproduction, -# disclosure or distribution of this material and related documentation -# without an express license agreement from NVIDIA CORPORATION or -# its affiliates is strictly prohibited. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. name: "CodeQL" diff --git a/samples/scripts/requirements.txt b/samples/scripts/requirements.txt index a294be171..0ddcd3c51 100644 --- a/samples/scripts/requirements.txt +++ b/samples/scripts/requirements.txt @@ -1,12 +1,17 @@ # SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# SPDX-License-Identifier: Apache-2.0 # -# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual -# property and proprietary rights in and to this material, related -# documentation and any modifications thereto. Any use, reproduction, -# disclosure or distribution of this material and related documentation -# without an express license agreement from NVIDIA CORPORATION or -# its affiliates is strictly prohibited. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. torch==2.2.0 torchvision==0.17.0