Merge pull request #8219 from google/benvanik-task-errors
Propagating errors through the task system.
This allows errors to properly fail task scopes and ensures that subsequent work is discarded correctly; errors now propagate up to their parent scope and can be retrieved via HAL semaphores or by users for graceful handling. Miscellaneous cleanup and simplification were done as part of this, and new tests were added to verify the behavior.
Progress on #4026. Waits still need work but are being redesigned in future changes.
diff --git a/.github/workflows/advance_upstream_forks.yml b/.github/workflows/advance_upstream_forks.yml
new file mode 100644
index 0000000..3d6db0a
--- /dev/null
+++ b/.github/workflows/advance_upstream_forks.yml
@@ -0,0 +1,37 @@
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+name: Advance Upstream Forks
+
+on:
+ # TODO(gcmn): Enable cron once this is stable
+ # schedule:
+ # - cron: '0 10 * * *'
+
+ workflow_dispatch:
+
+jobs:
+ advance_iree-llvm-fork:
+ name: "Advance iree-llvm-fork"
+ runs-on: ubuntu-20.04
+ steps:
+ - name: Checking out repository
+ uses: actions/checkout@v2
+ with:
+ token: ${{ secrets.WRITE_ACCESS_TOKEN }}
+ repository: google/iree-llvm-fork
+ ref: main
+
+ - name: Pull upstream main
+ run: |
+ git remote add upstream https://github.com/llvm/llvm-project.git
+ git pull --ff-only upstream main
+ - name: Pushing changes
+ uses: ad-m/github-push-action@v0.6.0
+ with:
+ github_token: ${{ secrets.WRITE_ACCESS_TOKEN }}
+ branch: main
+ repository: google/iree-llvm-fork
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a5244fa..3211b9e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -58,6 +58,7 @@
#-------------------------------------------------------------------------------
option(IREE_BUILD_EXPERIMENTAL_REMOTING "Builds experimental remoting support." OFF)
+option(IREE_BUILD_EXPERIMENTAL_WEB_SAMPLES "Builds experimental web samples." OFF)
option(IREE_HAL_DRIVER_EXPERIMENTAL_ROCM "Builds the experimental ROCm Backend." OFF)
#-------------------------------------------------------------------------------
@@ -271,6 +272,11 @@
)
find_package(Python3 COMPONENTS Interpreter Development REQUIRED)
else()
+ # Configuring the Development.Module is flaky in multi-project setups.
+ # "Bootstrapping" by first looking for the optional Development component
+ # seems to be robust generally.
+ # See: https://reviews.llvm.org/D118148
+ find_package(Python3 COMPONENTS Interpreter Development)
find_package(Python3 COMPONENTS Interpreter Development.Module REQUIRED)
endif()
elseif(IREE_BUILD_COMPILER OR IREE_BUILD_TESTS)
@@ -390,7 +396,6 @@
include(external_cc_library)
include(flatbuffer_c_library)
-add_subdirectory(build_tools/third_party/cuda EXCLUDE_FROM_ALL)
add_subdirectory(build_tools/third_party/libyaml EXCLUDE_FROM_ALL)
add_subdirectory(build_tools/third_party/stblib EXCLUDE_FROM_ALL)
add_subdirectory(build_tools/third_party/vulkan_memory_allocator EXCLUDE_FROM_ALL)
@@ -539,6 +544,10 @@
add_subdirectory(experimental/remoting)
endif()
+if(${IREE_BUILD_EXPERIMENTAL_WEB_SAMPLES})
+ add_subdirectory(experimental/sample_web_static)
+endif()
+
set(IREE_PUBLIC_INCLUDE_DIRS "${IREE_COMMON_INCLUDE_DIRS}"
CACHE INTERNAL "IREE: Include Directories" FORCE)
diff --git a/bindings/python/iree/runtime/CMakeLists.txt b/bindings/python/iree/runtime/CMakeLists.txt
index 1e1a11e..fcae8c8 100644
--- a/bindings/python/iree/runtime/CMakeLists.txt
+++ b/bindings/python/iree/runtime/CMakeLists.txt
@@ -49,6 +49,7 @@
runtime
SRCS
"__init__.py"
+ "array_interop.py"
"function.py"
"system_api.py"
"tracing.py"
@@ -92,6 +93,13 @@
iree_py_test(
NAME
+ array_interop_test
+ SRCS
+ "array_interop_test.py"
+)
+
+iree_py_test(
+ NAME
function_test
SRCS
"function_test.py"
diff --git a/bindings/python/iree/runtime/__init__.py b/bindings/python/iree/runtime/__init__.py
index 589cbf0..9d2e1f0 100644
--- a/bindings/python/iree/runtime/__init__.py
+++ b/bindings/python/iree/runtime/__init__.py
@@ -14,9 +14,32 @@
# Pull some of the native symbols into the public API.
# Hal imports
-from .binding import BufferUsage, HalBuffer, HalDevice, HalDriver, HalElementType, MemoryAccess, MemoryType, Shape
+from .binding import (
+ BufferCompatibility,
+ BufferUsage,
+ HalAllocator,
+ HalBuffer,
+ HalBufferView,
+ HalDevice,
+ HalDriver,
+ HalElementType,
+ MemoryAccess,
+ MemoryType,
+ Shape,
+)
+
# Vm imports
-from .binding import create_hal_module, Linkage, VmVariantList, VmFunction, VmInstance, VmContext, VmModule
+from .binding import (
+ create_hal_module,
+ Linkage,
+ VmVariantList,
+ VmFunction,
+ VmInstance,
+ VmContext,
+ VmModule,
+)
+
+from .array_interop import *
from .system_api import *
from .function import *
from .tracing import *
diff --git a/bindings/python/iree/runtime/array_interop.py b/bindings/python/iree/runtime/array_interop.py
new file mode 100644
index 0000000..60c2913
--- /dev/null
+++ b/bindings/python/iree/runtime/array_interop.py
@@ -0,0 +1,234 @@
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+"""BufferView and Python Array Protocol interop."""
+
+from typing import Optional, Tuple
+import logging
+import numpy as np
+import numpy.lib.mixins
+
+from .binding import (
+ BufferUsage,
+ HalBufferView,
+ HalDevice,
+ HalElementType,
+ MappedMemory,
+ MemoryType,
+)
+
+__all__ = [
+ "asdevicearray",
+ "DeviceArray",
+]
+
+_DEVICE_HANDLED_FUNCTIONS = {}
+
+
+def _device_implements(np_function):
+ """Decorator that registers a base class implementation."""
+
+ def decorator(func):
+ _DEVICE_HANDLED_FUNCTIONS[np_function] = func
+ return func
+
+ return decorator
+
+
+class DeviceArray(numpy.lib.mixins.NDArrayOperatorsMixin):
+ """An IREE device array.
+
+ Device arrays can be in one of two states:
+ 1. Host accessible: The array will be backed by host accessible memory
+ and can have the usual things done with it that one expects to be
+ able to do with an ndarray.
+ 2. Device resident: The array is just a handle to a device resident
+ Buffer (and BufferView wrapper). Metadata about the array (shape and
+ dtype) is accessible, but the data itself cannot be touched while in
+ this state.
+
+ How a device array comes into existence controls how it can transition
+ between these states:
+ * A user can create a DeviceArray explicitly with a device allocator.
+ Such an array will not be implicitly convertible to host accessible,
+ although accessors exist to do so.
+ * When created by the platform with a synchronization policy, implicit
+ transfers back to the host will trigger the appropriate waits and be
+ performed automatically (this is the common case for function return
+ values unless otherwise configured).
+ """
+
+ def __init__(self,
+ device: HalDevice,
+ buffer_view: HalBufferView,
+ implicit_host_transfer: bool = False,
+ override_dtype=None):
+ self._device = device
+ self._buffer_view = buffer_view
+ self._implicit_host_transfer = implicit_host_transfer
+ self._override_dtype = override_dtype
+
+ # If the array is host accessible, these will be non-None.
+ self._mapped_memory: Optional[MappedMemory] = None
+ self._host_array: Optional[np.ndarray] = None
+
+ def __array__(self, dtype=None):
+ self._transfer_to_host(True)
+ if dtype is None:
+ return self._host_array
+ else:
+ return self._host_array.__array__(dtype) # pytype: disable=attribute-error
+
+ def __array_function__(self, func, types, args, kwargs):
+ if func in _DEVICE_HANDLED_FUNCTIONS:
+ return _DEVICE_HANDLED_FUNCTIONS[func](*args, **kwargs)
+
+ # Anything else forces a transfer to host and then delegates to the
+ # host array.
+ host_array = self.to_host()
+ return host_array.__array_function__(func, types, args, kwargs) # pytype: disable=attribute-error
+
+ def __repr__(self):
+ return f"<IREE DeviceArray: shape={np.shape(self)}, dtype={self.dtype}>"
+
+ @property
+ def is_host_accessible(self):
+ """Whether this array is currently host accessible."""
+ return self._host_array is not None
+
+ def to_host(self) -> np.ndarray:
+ self._transfer_to_host(False)
+ return self._host_array
+
+ def _transfer_to_host(self, implicit):
+ if self._host_array is not None:
+ return
+ if implicit and not self._implicit_host_transfer:
+ raise ValueError(
+ "DeviceArray cannot be implicitly transferred to the host: "
+ "if necessary, do an explicit transfer via .to_host()")
+ self._mapped_memory, self._host_array = self._map_to_host()
+
+ def _map_to_host(self) -> Tuple[MappedMemory, np.ndarray]:
+ # TODO: When synchronization is enabled, need to block here.
+ raw_dtype = self._get_raw_dtype()
+ mapped_memory = self._buffer_view.map()
+ host_array = mapped_memory.asarray(self._buffer_view.shape, raw_dtype)
+ # Detect if we need to force an explicit conversion. This happens when
+ # we were requested to pretend that the array is in a specific dtype,
+ # even if that is not representable on the device. You guessed it:
+ # this is to support bools.
+ if self._override_dtype is not None and self._override_dtype != raw_dtype:
+ host_array = host_array.astype(self._override_dtype)
+ return mapped_memory, host_array
+
+ def _get_raw_dtype(self):
+ return HalElementType.map_to_dtype(self._buffer_view.element_type)
+
+ @property
+ def dtype(self):
+ if self._override_dtype:
+ return self._override_dtype
+ return self._get_raw_dtype()
+
+ @property
+ def shape(self):
+ return np.shape(self)
+
+ def astype(self, dtype, casting="unsafe", copy=True):
+ if self.dtype == dtype and not copy:
+ return self
+ host_ary = self.to_host()
+ return host_ary.astype(dtype, casting=casting, copy=copy)
+
+ def __reduce__(self):
+ # Since this is used for making deep copies and pickling, we map
+ # separately from any interactive state. We just reduce to the actual
+ # host ndarray, which supports the necessary serialization protocols.
+ _, host_array = self._map_to_host()
+ return _restore_reduced_array, (host_array,)
+
+
+def _restore_reduced_array(ary):
+ return ary
+
+
+# Function implementations with custom behavior.
+@_device_implements(np.shape)
+def _(arr: DeviceArray):
+ return arr._buffer_view.shape
+
+
+def asdevicearray(device: HalDevice,
+ a,
+ dtype=None,
+ *,
+ implicit_host_transfer: bool = False,
+ memory_type=MemoryType.DEVICE_LOCAL |
+ MemoryType.DEVICE_VISIBLE,
+ allowed_usage=BufferUsage.ALL,
+ element_type: Optional[HalElementType] = None) -> DeviceArray:
+ """Helper to create a DeviceArray from an arbitrary array like.
+
+ This is similar in purpose and usage to np.asarray, except that it takes
+ a device as the first argument. This may not be the best mechanism for
+ getting a DeviceArray, depending on your use case, but it is reliable
+ and simple. This function may make a defensive copy or cause implicit
+ transfers to satisfy the request. If this is important to you, then a lower
+ level API is likely more appropriate.
+
+ Note that additional flags `memory_type`, `allowed_usage` and `element_type`
+ are only hints if creating a new DeviceArray. If `a` is already a DeviceArray,
+ they are ignored.
+ """
+ if isinstance(a, DeviceArray):
+ if dtype is None:
+ return a
+ # Need to do a conversion, which we currently do not support on the
+ # device, so transfer back to the host.
+ logging.warn(
+ "Implicit dtype conversion of a DeviceArray forces a host transfer")
+ # First get an ndarray.
+ a = np.asarray(a, dtype=dtype)
+ element_type = map_dtype_to_element_type(a.dtype)
+ if element_type is None:
+ raise ValueError(f"Could not map dtype {a.dtype} to IREE element type")
+ buffer_view = device.allocator.allocate_buffer_copy(
+ memory_type=memory_type,
+ allowed_usage=allowed_usage,
+ buffer=a,
+ element_type=element_type)
+ return DeviceArray(device,
+ buffer_view,
+ implicit_host_transfer=implicit_host_transfer,
+ override_dtype=a.dtype)
+
+
+# NOTE: Numpy dtypes are not hashable and exist in a hierarchy that should
+# be queried via isinstance checks. This should be done as a fallback but
+# this is a linear list for quick access to the most common. There may also
+# be a better way to do this.
+_DTYPE_TO_HAL_ELEMENT_TYPE = (
+ (np.float32, HalElementType.FLOAT_32),
+ (np.float64, HalElementType.FLOAT_64),
+ (np.float16, HalElementType.FLOAT_16),
+ (np.int32, HalElementType.SINT_32),
+ (np.int64, HalElementType.SINT_64),
+ (np.int16, HalElementType.SINT_16),
+ (np.int8, HalElementType.SINT_8),
+ (np.uint32, HalElementType.UINT_32),
+ (np.uint64, HalElementType.UINT_64),
+ (np.uint16, HalElementType.UINT_16),
+ (np.uint8, HalElementType.UINT_8),
+ (np.bool_, HalElementType.BOOL_8),
+)
+
+
+def map_dtype_to_element_type(dtype) -> Optional[HalElementType]:
+ for match_dtype, element_type in _DTYPE_TO_HAL_ELEMENT_TYPE:
+ if match_dtype == dtype:
+ return element_type
+ else:
+ return None
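
For reference, a minimal usage sketch of the DeviceArray API added above, assuming the "vmvx" driver is built (as in the tests that follow):

    import numpy as np
    import iree.runtime

    # Create a device and wrap a host ndarray as a device-resident array.
    driver = iree.runtime.HalDriver.create("vmvx")
    device = driver.create_default_device()
    ary = iree.runtime.asdevicearray(
        device, np.arange(12, dtype=np.int32).reshape(3, 4))

    # Metadata (shape/dtype) is available without touching the data.
    print(ary.shape, ary.dtype)

    # Reading the data requires an explicit transfer, since
    # implicit_host_transfer was left at its default of False above.
    host = ary.to_host()
    print(host.sum())
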
diff --git a/bindings/python/iree/runtime/array_interop_test.py b/bindings/python/iree/runtime/array_interop_test.py
new file mode 100644
index 0000000..9520f7a
--- /dev/null
+++ b/bindings/python/iree/runtime/array_interop_test.py
@@ -0,0 +1,112 @@
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+import copy
+import numpy as np
+import unittest
+
+import iree.runtime
+
+
+class DeviceHalTest(unittest.TestCase):
+
+ def setUp(self):
+ super().setUp()
+ self.driver = iree.runtime.HalDriver.create("vmvx")
+ self.device = self.driver.create_default_device()
+ self.allocator = self.device.allocator
+
+ def testMetadataAttributes(self):
+ init_ary = np.zeros([3, 4], dtype=np.int32) + 2
+ ary = iree.runtime.asdevicearray(self.device, init_ary)
+ self.assertEqual([3, 4], ary.shape)
+ self.assertEqual(np.int32, ary.dtype)
+
+ def testExplicitHostTransfer(self):
+ init_ary = np.zeros([3, 4], dtype=np.int32) + 2
+ ary = iree.runtime.asdevicearray(self.device, init_ary)
+ self.assertEqual(repr(ary), "<IREE DeviceArray: shape=[3, 4], dtype=int32>")
+ self.assertFalse(ary.is_host_accessible)
+
+ # Explicit transfer.
+ cp = ary.to_host()
+ np.testing.assert_array_equal(cp, init_ary)
+ self.assertTrue(ary.is_host_accessible)
+
+ def testOverrideDtype(self):
+ init_ary = np.zeros([3, 4], dtype=np.int32) + 2
+ buffer_view = self.allocator.allocate_buffer_copy(
+ memory_type=iree.runtime.MemoryType.DEVICE_LOCAL,
+ allowed_usage=iree.runtime.BufferUsage.CONSTANT,
+ buffer=init_ary,
+ element_type=iree.runtime.HalElementType.SINT_32)
+
+ ary = iree.runtime.DeviceArray(self.device,
+ buffer_view,
+ override_dtype=np.float32)
+
+ # Explicit transfer.
+ cp = ary.to_host()
+ self.assertEqual(cp.dtype, np.float32)
+ np.testing.assert_array_equal(cp, init_ary.astype(np.float32))
+ self.assertTrue(ary.is_host_accessible)
+
+ def testIllegalImplicitHostTransfer(self):
+ init_ary = np.zeros([3, 4], dtype=np.int32) + 2
+ ary = iree.runtime.asdevicearray(self.device, init_ary)
+ # Implicit transfer.
+ with self.assertRaises(ValueError):
+ _ = np.asarray(ary)
+
+ def testImplicitHostArithmetic(self):
+ init_ary = np.zeros([3, 4], dtype=np.int32) + 2
+ ary = iree.runtime.asdevicearray(self.device,
+ init_ary,
+ implicit_host_transfer=True)
+ sum = ary + init_ary
+ np.testing.assert_array_equal(sum, init_ary + 2)
+ self.assertTrue(ary.is_host_accessible)
+
+ def testArrayFunctions(self):
+ init_ary = np.zeros([3, 4], dtype=np.float32) + 2
+ ary = iree.runtime.asdevicearray(self.device,
+ init_ary,
+ implicit_host_transfer=True)
+ f = np.isfinite(ary)
+ self.assertTrue(f.all())
+
+ def testDeepcopy(self):
+ init_ary = np.zeros([3, 4], dtype=np.float32) + 2
+ orig_ary = iree.runtime.asdevicearray(self.device,
+ init_ary,
+ implicit_host_transfer=True)
+ copy_ary = copy.deepcopy(orig_ary)
+ self.assertIsNot(orig_ary, copy_ary)
+ np.testing.assert_array_equal(orig_ary, copy_ary)
+
+ def testAsType(self):
+ init_ary = np.zeros([3, 4], dtype=np.int32) + 2
+ orig_ary = iree.runtime.asdevicearray(self.device,
+ init_ary,
+ implicit_host_transfer=True)
+ # Same dtype, no copy.
+ i32_nocopy = orig_ary.astype(np.int32, copy=False)
+ self.assertIs(orig_ary, i32_nocopy)
+
+ # Same dtype, copy.
+ i32_nocopy = orig_ary.astype(np.int32)
+ self.assertIsNot(orig_ary, i32_nocopy)
+ np.testing.assert_array_equal(orig_ary, i32_nocopy)
+
+ # Different dtype, copy.
+ f32_copy = orig_ary.astype(np.float32)
+ self.assertIsNot(orig_ary, f32_copy)
+ self.assertEqual(f32_copy.dtype, np.float32)
+ np.testing.assert_array_equal(orig_ary.astype(np.float32), f32_copy)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/bindings/python/iree/runtime/binding.h b/bindings/python/iree/runtime/binding.h
index c015e76..cded74f 100644
--- a/bindings/python/iree/runtime/binding.h
+++ b/bindings/python/iree/runtime/binding.h
@@ -38,17 +38,17 @@
~ApiRefCounted() { Release(); }
- // Creates an instance of the ref counted wrapper based on an instance
- // that has already been retained. Ownership is transferred to the
- // wrapper.
- static Self CreateRetained(T* retained_inst) {
+ // Steals the reference to the object referenced by the given raw pointer and
+ // returns a wrapper (transfers ownership).
+ static Self StealFromRawPtr(T* retained_inst) {
auto self = Self();
self.instance_ = retained_inst;
return self;
}
- // Creates a new instance, retaining the underlying object.
- static Self RetainAndCreate(T* non_retained_inst) {
+ // Retains the object referenced by the given raw pointer and returns
+ // a wrapper.
+ static Self BorrowFromRawPtr(T* non_retained_inst) {
auto self = Self();
self.instance_ = non_retained_inst;
if (non_retained_inst) {
diff --git a/bindings/python/iree/runtime/function.py b/bindings/python/iree/runtime/function.py
index a7e3874..57739b8 100644
--- a/bindings/python/iree/runtime/function.py
+++ b/bindings/python/iree/runtime/function.py
@@ -12,8 +12,22 @@
import numpy as np
-from .binding import HalDevice, HalElementType, VmContext, VmFunction, VmVariantList
+from .binding import (
+ BufferUsage,
+ HalBufferView,
+ HalDevice,
+ HalElementType,
+ MemoryType,
+ VmContext,
+ VmFunction,
+ VmVariantList,
+)
+
from . import tracing
+from .array_interop import (
+ map_dtype_to_element_type,
+ DeviceArray,
+)
__all__ = [
"FunctionInvoker",
@@ -280,7 +294,13 @@
except KeyError:
_raise_argument_error(inv, f"unrecognized dtype '{dtype_str}'")
if dtype != x.dtype:
- x = x.astype(dtype)
+ # TODO: If a DeviceArray was passed in and triggers this implicit
+ # conversion, it will round-trip back to the host, be converted, and
+ # then be sent back to the device. This is... not great.
+ # At least warn about it so we know it might be a problem.
+ logging.warn(
+ "Implicit dtype conversion of DeviceArray forces transfer to host")
+ x = np.asarray(x).astype(dtype)
rank = desc[2]
shape = desc[3:]
ndarray_shape = x.shape
@@ -292,15 +312,37 @@
_raise_argument_error(
inv, f"shape mismatch {ndarray_shape} vs {tuple(shape)}")
actual_dtype = x.dtype
- for match_dtype, element_type in DTYPE_TO_HAL_ELEMENT_TYPE:
- if match_dtype == actual_dtype:
- break
- else:
+ element_type = map_dtype_to_element_type(actual_dtype)
+ if element_type is None:
_raise_argument_error(inv, f"unsupported numpy dtype {x.dtype}")
- t.push_buffer_view(inv.device, x, element_type)
+
+ if isinstance(x, DeviceArray):
+ # Already one of ours and did not get implicitly converted.
+ buffer_view = x._buffer_view
+ else:
+ # Not one of ours. Put it on the device.
+ buffer_view = inv.device.allocator.allocate_buffer_copy(
+ memory_type=IMPLICIT_BUFFER_ARG_MEMORY_TYPE,
+ allowed_usage=IMPLICIT_BUFFER_ARG_USAGE,
+ buffer=np.asarray(x),
+ element_type=element_type)
+
+ t.push_buffer_view(buffer_view)
+def _buffer_view_to_vm(inv: Invocation, t: VmVariantList, x, desc):
+ # BufferView is a low-level object and we do no validation here for it.
+ # The assumption is that it is coming from either an advanced use case
+ # or a systematic integration that knows what it is doing. The runtime
+ # will do necessary validation.
+ t.push_buffer_view(x)
+
+
+# Called in reflection mode when we know we want to coerce from something
+# 'ndarray' like (as defined by the reflection metadata).
def _ndarray_like_to_vm(inv: Invocation, t: VmVariantList, x, desc):
+ if isinstance(x, HalBufferView):
+ return _buffer_view_to_vm(inv, t, x, desc)
return _ndarray_to_vm(inv, t, np.asarray(x), desc)
@@ -322,6 +364,8 @@
dict: _dict_to_vm,
str: _str_to_vm,
np.ndarray: _ndarray_to_vm,
+ HalBufferView: _buffer_view_to_vm,
+ DeviceArray: _ndarray_to_vm,
}
# VM to Python converters. All take:
@@ -337,14 +381,16 @@
# The descriptor for an ndarray is like:
# ["ndarray", "<dtype>", <rank>, <dim>...]
# ex: ['ndarray', 'i32', 1, 25948]
- x = vm_list.get_as_ndarray(vm_index)
+ buffer_view = vm_list.get_as_buffer_view(vm_index)
dtype_str = desc[1]
try:
dtype = ABI_TYPE_TO_DTYPE[dtype_str]
except KeyError:
_raise_return_error(inv, f"unrecognized dtype '{dtype_str}'")
- if dtype != x.dtype:
- x = x.astype(dtype)
+ x = DeviceArray(inv.device,
+ buffer_view,
+ implicit_host_transfer=True,
+ override_dtype=dtype)
return x
@@ -425,24 +471,11 @@
"i1": np.bool_,
}
-# NOTE: Numpy dtypes are not hashable and exist in a hierarchy that should
-# be queried via isinstance checks. This should be done as a fallback but
-# this is a linear list for quick access to the most common. There may also
-# be a better way to do this.
-DTYPE_TO_HAL_ELEMENT_TYPE = (
- (np.float32, HalElementType.FLOAT_32),
- (np.float64, HalElementType.FLOAT_64),
- (np.float16, HalElementType.FLOAT_16),
- (np.int32, HalElementType.SINT_32),
- (np.int64, HalElementType.SINT_64),
- (np.int16, HalElementType.SINT_16),
- (np.int8, HalElementType.SINT_8),
- (np.uint32, HalElementType.UINT_32),
- (np.uint64, HalElementType.UINT_64),
- (np.uint16, HalElementType.UINT_16),
- (np.uint8, HalElementType.UINT_8),
- (np.bool_, HalElementType.BOOL_8),
-)
+# When we get an ndarray as an argument and are implicitly mapping it to a
+# buffer view, flags for doing so.
+IMPLICIT_BUFFER_ARG_MEMORY_TYPE = (MemoryType.DEVICE_LOCAL |
+ MemoryType.DEVICE_VISIBLE)
+IMPLICIT_BUFFER_ARG_USAGE = BufferUsage.ALL
def _is_ndarray_descriptor(desc):
@@ -510,19 +543,14 @@
for py_value, desc in zip(py_list, descs):
inv.current_arg = py_value
inv.current_desc = desc
- py_type = py_value.__class__
# For ndarray, we want to be able to handle array-like, so check for that
# explicitly (duck typed vs static typed).
if _is_ndarray_descriptor(desc):
converter = _ndarray_like_to_vm
else:
- try:
- converter = PYTHON_TO_VM_CONVERTERS[py_type]
- except KeyError:
- _raise_argument_error(
- inv, f"cannot map Python type to VM: {py_type}"
- f" (for desc {desc})")
+ converter = _get_python_to_vm_converter(inv, py_value, desc)
+
try:
converter(inv, vm_list, py_value, desc)
except ArgumentError:
@@ -532,6 +560,22 @@
e)
+def _get_python_to_vm_converter(inv: Invocation, py_value, desc):
+ py_type = py_value.__class__
+ converter = PYTHON_TO_VM_CONVERTERS.get(py_type)
+ if converter is not None:
+ return converter
+ # See if it supports the __array__ protocol and if so, pull it to
+ # the host and use the ndarray converter. This will create round-trips
+ # between frameworks but at least enables interop.
+ if hasattr(py_value, "__array__"):
+ return _ndarray_to_vm
+
+ _raise_argument_error(
+ inv, f"cannot map Python type to VM: {py_type}"
+ f" (for desc {desc})")
+
+
def _extract_vm_sequence_to_python(inv: Invocation, vm_list, descs):
vm_list_arity = len(vm_list)
if descs is None:
@@ -547,6 +591,13 @@
if desc is None:
# Dynamic (non reflection mode).
converted = vm_list.get_variant(vm_index)
+ # Special case: Upgrade HalBufferView to a DeviceArray. We do that here
+ # since this is higher level and it preserves layering. Note that
+ # the reflection case also does this conversion.
+ if isinstance(converted, HalBufferView):
+ converted = DeviceArray(inv.device,
+ converted,
+ implicit_host_transfer=True)
else:
# Known type descriptor.
vm_type = desc if isinstance(desc, str) else desc[0]
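
With these converter changes, invocation results come back as DeviceArray values wrapping a HalBufferView and can be passed straight back into another invocation without a host round trip. A hedged sketch against the SystemContext API, assuming a compiled module exposing "simple_mul" under the module name "arithmetic" (as in system_api_test.py):

    import numpy as np
    import iree.runtime

    # `vm_module` is assumed to be a compiled iree.runtime.VmModule exposing
    # "simple_mul" (see system_api_test.py for how the test constructs one).
    ctx = iree.runtime.SystemContext()
    ctx.add_vm_module(vm_module)
    f = ctx.modules.arithmetic["simple_mul"]

    a = np.array([1., 2., 3., 4.], dtype=np.float32)
    b = np.array([4., 5., 6., 7.], dtype=np.float32)

    # The result is a DeviceArray; feeding it back in reuses its buffer view
    # rather than copying through the host.
    result = f(a, b)
    result2 = f(result, result)
    np.testing.assert_allclose(result2, [16., 100., 324., 784.])
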
diff --git a/bindings/python/iree/runtime/function_test.py b/bindings/python/iree/runtime/function_test.py
index df5e222..b194db5 100644
--- a/bindings/python/iree/runtime/function_test.py
+++ b/bindings/python/iree/runtime/function_test.py
@@ -11,7 +11,11 @@
from absl.testing import absltest
from iree import runtime as rt
-from iree.runtime.function import FunctionInvoker
+from iree.runtime.function import (
+ FunctionInvoker,
+ IMPLICIT_BUFFER_ARG_MEMORY_TYPE,
+ IMPLICIT_BUFFER_ARG_USAGE,
+)
from iree.runtime.binding import VmVariantList
@@ -240,13 +244,122 @@
with self.assertRaisesRegex(ValueError, "specified kwarg 'c' is unknown"):
result = invoker(-1, a=1, b=2, c=3)
+ def testNdarrayArg(self):
+ arg_array = np.asarray([1, 0], dtype=np.int32)
+
+ invoked_arg_list = None
+
+ def invoke(arg_list, ret_list):
+ nonlocal invoked_arg_list
+ invoked_arg_list = arg_list
+
+ vm_context = MockVmContext(invoke)
+ vm_function = MockVmFunction(reflection={
+ "iree.abi": json.dumps({
+ "a": [["ndarray", "i32", 1, 2]],
+ "r": [],
+ })
+ })
+ invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
+ result = invoker(arg_array)
+ self.assertEqual("<VmVariantList(1): [HalBufferView(2:0x20000011)]>",
+ repr(invoked_arg_list))
+
+ def testBufferViewArg(self):
+ arg_buffer_view = self.device.allocator.allocate_buffer_copy(
+ memory_type=IMPLICIT_BUFFER_ARG_MEMORY_TYPE,
+ allowed_usage=IMPLICIT_BUFFER_ARG_USAGE,
+ buffer=np.asarray([1, 0], dtype=np.int32),
+ element_type=rt.HalElementType.SINT_32)
+
+ invoked_arg_list = None
+
+ def invoke(arg_list, ret_list):
+ nonlocal invoked_arg_list
+ invoked_arg_list = arg_list
+
+ vm_context = MockVmContext(invoke)
+ vm_function = MockVmFunction(reflection={
+ "iree.abi": json.dumps({
+ "a": [["ndarray", "i32", 1, 2]],
+ "r": [],
+ })
+ })
+ invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
+ _ = invoker(arg_buffer_view)
+ self.assertEqual("<VmVariantList(1): [HalBufferView(2:0x20000011)]>",
+ repr(invoked_arg_list))
+
+ def testBufferViewArgNoReflection(self):
+ arg_buffer_view = self.device.allocator.allocate_buffer_copy(
+ memory_type=IMPLICIT_BUFFER_ARG_MEMORY_TYPE,
+ allowed_usage=IMPLICIT_BUFFER_ARG_USAGE,
+ buffer=np.asarray([1, 0], dtype=np.int32),
+ element_type=rt.HalElementType.SINT_32)
+
+ invoked_arg_list = None
+
+ def invoke(arg_list, ret_list):
+ nonlocal invoked_arg_list
+ invoked_arg_list = arg_list
+
+ vm_context = MockVmContext(invoke)
+ vm_function = MockVmFunction(reflection={})
+ invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
+ _ = invoker(arg_buffer_view)
+ self.assertEqual("<VmVariantList(1): [HalBufferView(2:0x20000011)]>",
+ repr(invoked_arg_list))
+
+ def testReturnBufferView(self):
+ result_array = np.asarray([1, 0], dtype=np.int32)
+
+ def invoke(arg_list, ret_list):
+ buffer_view = self.device.allocator.allocate_buffer_copy(
+ memory_type=IMPLICIT_BUFFER_ARG_MEMORY_TYPE,
+ allowed_usage=IMPLICIT_BUFFER_ARG_USAGE,
+ buffer=result_array,
+ element_type=rt.HalElementType.SINT_32)
+ ret_list.push_buffer_view(buffer_view)
+
+ vm_context = MockVmContext(invoke)
+ vm_function = MockVmFunction(reflection={
+ "iree.abi": json.dumps({
+ "a": [],
+ "r": [["ndarray", "i32", 1, 2]],
+ })
+ })
+ invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
+ result = invoker()
+ np.testing.assert_array_equal([1, 0], result)
+
+ def testReturnBufferViewNoReflection(self):
+ result_array = np.asarray([1, 0], dtype=np.int32)
+
+ def invoke(arg_list, ret_list):
+ buffer_view = self.device.allocator.allocate_buffer_copy(
+ memory_type=IMPLICIT_BUFFER_ARG_MEMORY_TYPE,
+ allowed_usage=IMPLICIT_BUFFER_ARG_USAGE,
+ buffer=result_array,
+ element_type=rt.HalElementType.SINT_32)
+ ret_list.push_buffer_view(buffer_view)
+
+ vm_context = MockVmContext(invoke)
+ vm_function = MockVmFunction(reflection={})
+ invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
+ result = invoker()
+ np.testing.assert_array_equal([1, 0], result)
+
# TODO: Fill out all return types.
def testReturnTypeNdArrayBool(self):
result_array = np.asarray([1, 0], dtype=np.int8)
def invoke(arg_list, ret_list):
- ret_list.push_buffer_view(self.device, result_array,
- rt.HalElementType.UINT_8)
+ buffer_view = self.device.allocator.allocate_buffer_copy(
+ memory_type=IMPLICIT_BUFFER_ARG_MEMORY_TYPE,
+ allowed_usage=IMPLICIT_BUFFER_ARG_USAGE,
+ buffer=result_array,
+ element_type=rt.HalElementType.UINT_8)
+ ret_list.push_buffer_view(buffer_view)
vm_context = MockVmContext(invoke)
vm_function = MockVmFunction(reflection={
@@ -258,7 +371,7 @@
invoker = FunctionInvoker(vm_context, self.device, vm_function, tracer=None)
result = invoker()
# assertEqual on bool arrays is fraught for... reasons.
- self.assertEqual("array([ True, False])", repr(result))
+ np.testing.assert_array_equal([True, False], result)
def testReturnTypeList(self):
vm_list = VmVariantList(2)
diff --git a/bindings/python/iree/runtime/hal.cc b/bindings/python/iree/runtime/hal.cc
index e3d1dfb..afb1ed7 100644
--- a/bindings/python/iree/runtime/hal.cc
+++ b/bindings/python/iree/runtime/hal.cc
@@ -7,10 +7,194 @@
#include "bindings/python/iree/runtime/hal.h"
#include "iree/hal/api.h"
+#include "pybind11/numpy.h"
namespace iree {
namespace python {
+namespace {
+
+// RAII wrapper for a Py_buffer which calls PyBuffer_Release when it goes
+// out of scope.
+class PyBufferReleaser {
+ public:
+ PyBufferReleaser(Py_buffer& b) : b_(b) {}
+ ~PyBufferReleaser() { PyBuffer_Release(&b_); }
+
+ private:
+ Py_buffer& b_;
+};
+
+static std::string ToHexString(const uint8_t* data, size_t length) {
+ static constexpr char kHexChars[] = {'0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
+ std::string s(length * 2, ' ');
+ for (size_t i = 0; i < length; ++i) {
+ s[2 * i + 0] = kHexChars[(data[i] & 0xF0) >> 4];
+ s[2 * i + 1] = kHexChars[(data[i] & 0x0F) >> 0];
+ }
+ return s;
+}
+static std::string ToHexString(uint32_t value) {
+ return ToHexString((const uint8_t*)&value, sizeof(value));
+}
+
+} // namespace
+
+//------------------------------------------------------------------------------
+// HalAllocator
+//------------------------------------------------------------------------------
+
+py::dict HalAllocator::QueryStatistics() {
+ py::dict items;
+ iree_hal_allocator_statistics_t stats;
+ iree_hal_allocator_query_statistics(raw_ptr(), &stats);
+#if IREE_STATISTICS_ENABLE
+ items["host_bytes_peak"] = stats.host_bytes_peak;
+ items["host_bytes_allocated"] = stats.host_bytes_allocated;
+ items["host_bytes_freed"] = stats.host_bytes_freed;
+ items["device_bytes_peak"] = stats.device_bytes_peak;
+ items["device_bytes_allocated"] = stats.device_bytes_allocated;
+ items["device_bytes_freed"] = stats.device_bytes_freed;
+#endif
+ return items;
+}
+
+py::str HalAllocator::FormattedStatistics() {
+ // Perform all allocating string manipulation without early exit.
+ iree_string_builder_t builder;
+ iree_string_builder_initialize(iree_allocator_system(), &builder);
+ iree_hal_allocator_statistics_t stats;
+ iree_hal_allocator_query_statistics(raw_ptr(), &stats);
+ auto status = iree_hal_allocator_statistics_format(&stats, &builder);
+ iree_string_view_t view = iree_string_builder_view(&builder);
+ py::str result = py::str(view.data, view.size);
+ iree_string_builder_deinitialize(&builder);
+
+ // Check/raise after all memory alloc/dealloc.
+ CheckApiStatus(status, "unable to format statistics");
+ return result;
+}
+
+py::object HalAllocator::AllocateBufferCopy(
+ int memory_type, int allowed_usage, py::object buffer,
+ std::optional<iree_hal_element_types_t> element_type) {
+ // Request a view of the buffer (use the raw python C API to avoid
+ // some allocation and copying at the pybind level).
+ Py_buffer py_view;
+ // Note that only C-Contiguous ND-arrays are presently supported, so
+ // only request that via PyBUF_ND. Long term, we should consult an
+ // "oracle" in the runtime to determine the precise required format
+ // and set flags accordingly (and fallback/copy on failure).
+ int flags = PyBUF_FORMAT | PyBUF_ND;
+
+ // Acquire the backing buffer and setup RAII release.
+ if (PyObject_GetBuffer(buffer.ptr(), &py_view, flags) != 0) {
+ // The GetBuffer call is required to set an appropriate error.
+ throw py::error_already_set();
+ }
+ PyBufferReleaser py_view_releaser(py_view);
+
+ iree_hal_buffer_t* hal_buffer;
+ CheckApiStatus(
+ iree_hal_allocator_allocate_buffer(
+ raw_ptr(), memory_type, allowed_usage, py_view.len,
+ iree_make_const_byte_span(py_view.buf, py_view.len), &hal_buffer),
+ "Failed to allocate device visible buffer");
+
+ if (!element_type) {
+ return py::cast(HalBuffer::StealFromRawPtr(hal_buffer),
+ py::return_value_policy::move);
+ }
+
+ // Create the buffer_view. (note that numpy shape is ssize_t, so we need to
+ // copy).
+ iree_hal_encoding_type_t encoding_type =
+ IREE_HAL_ENCODING_TYPE_DENSE_ROW_MAJOR;
+ std::vector<iree_hal_dim_t> dims(py_view.ndim);
+ std::copy(py_view.shape, py_view.shape + py_view.ndim, dims.begin());
+ iree_hal_buffer_view_t* hal_buffer_view;
+ CheckApiStatus(
+ iree_hal_buffer_view_create(
+ hal_buffer, dims.data(), dims.size(), *element_type, encoding_type,
+ iree_hal_allocator_host_allocator(raw_ptr()), &hal_buffer_view),
+ "Error allocating buffer_view");
+ iree_hal_buffer_release(hal_buffer);
+
+ return py::cast(HalBufferView::StealFromRawPtr(hal_buffer_view),
+ py::return_value_policy::move);
+}
+
+//------------------------------------------------------------------------------
+// HalBuffer
+//------------------------------------------------------------------------------
+
+namespace {
+
+void AppendHalBufferRepr(iree_hal_buffer_t* buffer, std::string& repr) {
+ repr.append(std::to_string(iree_hal_buffer_byte_length(buffer)));
+ repr.append(" bytes (at offset ");
+ repr.append(std::to_string(iree_hal_buffer_byte_offset(buffer)));
+ repr.append(" into ");
+ repr.append(std::to_string(iree_hal_buffer_allocation_size(buffer)));
+ repr.append("), memory_type=");
+
+ // Memory type.
+ iree_bitfield_string_temp_t tmp;
+ iree_string_view_t sv;
+ sv = iree_hal_memory_type_format(iree_hal_buffer_memory_type(buffer), &tmp);
+ repr.append(sv.data, sv.size);
+
+ // Allowed access.
+ repr.append(", allowed_access=");
+ sv = iree_hal_memory_access_format(iree_hal_buffer_allowed_access(buffer),
+ &tmp);
+ repr.append(sv.data, sv.size);
+
+ // Allowed usage.
+ repr.append(", allowed_usage=");
+ sv =
+ iree_hal_buffer_usage_format(iree_hal_buffer_allowed_usage(buffer), &tmp);
+ repr.append(sv.data, sv.size);
+}
+
+} // namespace
+
+py::str HalBuffer::Repr() {
+ std::string repr("<HalBuffer ");
+ AppendHalBufferRepr(raw_ptr(), repr);
+ repr.append(">");
+ return py::str(repr);
+}
+
+//------------------------------------------------------------------------------
+// HalBufferView
+//------------------------------------------------------------------------------
+
+py::str HalBufferView::Repr() {
+ std::string repr("<HalBufferView (");
+
+ // Shape.
+ iree_host_size_t rank = iree_hal_buffer_view_shape_rank(raw_ptr());
+ for (iree_host_size_t i = 0; i < rank; ++i) {
+ if (i > 0) {
+ repr.append(", ");
+ }
+ repr.append(std::to_string(iree_hal_buffer_view_shape_dim(raw_ptr(), i)));
+ }
+ repr.append(")");
+
+ // Element type.
+ repr.append(", element_type=0x");
+ auto element_type = iree_hal_buffer_view_element_type(raw_ptr());
+ repr.append(ToHexString(static_cast<uint32_t>(element_type)));
+
+ repr.append(", ");
+ AppendHalBufferRepr(iree_hal_buffer_view_buffer(raw_ptr()), repr);
+ repr.append(">");
+ return py::str(repr);
+}
+
//------------------------------------------------------------------------------
// HalDriver
//------------------------------------------------------------------------------
@@ -39,7 +223,7 @@
{driver_name.data(), driver_name.size()},
iree_allocator_system(), &driver),
"Error creating driver");
- return HalDriver::CreateRetained(driver);
+ return HalDriver::StealFromRawPtr(driver);
}
HalDevice HalDriver::CreateDefaultDevice() {
@@ -47,9 +231,72 @@
CheckApiStatus(iree_hal_driver_create_default_device(
raw_ptr(), iree_allocator_system(), &device),
"Error creating default device");
- return HalDevice::CreateRetained(device);
+ return HalDevice::StealFromRawPtr(device);
}
+//------------------------------------------------------------------------------
+// Enum helpers
+//------------------------------------------------------------------------------
+
+namespace {
+
+py::object MapElementTypeToDType(iree_hal_element_type_t element_type) {
+ // See: https://docs.python.org/3/c-api/arg.html#numbers
+ // TODO: Handle dtypes that do not map to a code (i.e. fp16).
+ const char* dtype_code;
+ switch (element_type) {
+ case IREE_HAL_ELEMENT_TYPE_INT_8:
+ case IREE_HAL_ELEMENT_TYPE_SINT_8:
+ dtype_code = "b";
+ break;
+ case IREE_HAL_ELEMENT_TYPE_UINT_8:
+ dtype_code = "B";
+ break;
+ case IREE_HAL_ELEMENT_TYPE_INT_16:
+ case IREE_HAL_ELEMENT_TYPE_SINT_16:
+ dtype_code = "h";
+ break;
+ case IREE_HAL_ELEMENT_TYPE_UINT_16:
+ dtype_code = "H";
+ break;
+ case IREE_HAL_ELEMENT_TYPE_INT_32:
+ case IREE_HAL_ELEMENT_TYPE_SINT_32:
+ dtype_code = "i";
+ break;
+ case IREE_HAL_ELEMENT_TYPE_UINT_32:
+ dtype_code = "I";
+ break;
+ case IREE_HAL_ELEMENT_TYPE_INT_64:
+ case IREE_HAL_ELEMENT_TYPE_SINT_64:
+ dtype_code = "l";
+ break;
+ case IREE_HAL_ELEMENT_TYPE_UINT_64:
+ dtype_code = "L";
+ break;
+ case IREE_HAL_ELEMENT_TYPE_FLOAT_32:
+ dtype_code = "f";
+ break;
+ case IREE_HAL_ELEMENT_TYPE_FLOAT_64:
+ dtype_code = "d";
+ break;
+ case IREE_HAL_ELEMENT_TYPE_VALUE(IREE_HAL_NUMERICAL_TYPE_INTEGER, 1):
+ // Due to layering issues it is not uncommon to get i1 buffer views
+ // and we just silently promote them to i8 since that is what they are.
+ // Really i1 should not exist at this boundary.
+ dtype_code = "b";
+ break;
+ default:
+ throw RaiseValueError("Unsupported VM Buffer -> numpy dtype mapping");
+ }
+ return py::dtype(dtype_code);
+}
+
+} // namespace
+
+//------------------------------------------------------------------------------
+// Bindings
+//------------------------------------------------------------------------------
+
void SetupHalBindings(pybind11::module m) {
// Enums.
py::enum_<enum iree_hal_memory_type_bits_t>(m, "MemoryType")
@@ -61,7 +308,32 @@
.value("HOST_LOCAL", IREE_HAL_MEMORY_TYPE_HOST_LOCAL)
.value("DEVICE_VISIBLE", IREE_HAL_MEMORY_TYPE_HOST_VISIBLE)
.value("DEVICE_LOCAL", IREE_HAL_MEMORY_TYPE_DEVICE_LOCAL)
- .export_values();
+ .export_values()
+ .def("__or__",
+ [](enum iree_hal_memory_type_bits_t self,
+ enum iree_hal_memory_type_bits_t other) { return self | other; })
+ .def("__and__",
+ [](enum iree_hal_memory_type_bits_t self,
+ enum iree_hal_memory_type_bits_t other) { return self & other; });
+
+ py::enum_<enum iree_hal_buffer_compatibility_bits_t>(m, "BufferCompatibility")
+ .value("NONE", IREE_HAL_BUFFER_COMPATIBILITY_NONE)
+ .value("ALLOCATABLE", IREE_HAL_BUFFER_COMPATIBILITY_ALLOCATABLE)
+ .value("IMPORTABLE", IREE_HAL_BUFFER_COMPATIBILITY_IMPORTABLE)
+ .value("EXPORTABLE", IREE_HAL_BUFFER_COMPATIBILITY_EXPORTABLE)
+ .value("QUEUE_TRANSFER", IREE_HAL_BUFFER_COMPATIBILITY_QUEUE_TRANSFER)
+ .value("QUEUE_DISPATCH", IREE_HAL_BUFFER_COMPATIBILITY_QUEUE_DISPATCH)
+ .export_values()
+ .def("__or__",
+ [](enum iree_hal_buffer_compatibility_bits_t self,
+ enum iree_hal_buffer_compatibility_bits_t other) {
+ return self | other;
+ })
+ .def("__and__", [](enum iree_hal_buffer_compatibility_bits_t self,
+ enum iree_hal_buffer_compatibility_bits_t other) {
+ return self & other;
+ });
+
py::enum_<enum iree_hal_buffer_usage_bits_t>(m, "BufferUsage")
.value("NONE", IREE_HAL_BUFFER_USAGE_NONE)
.value("CONSTANT", IREE_HAL_BUFFER_USAGE_CONSTANT)
@@ -69,7 +341,15 @@
.value("MAPPING", IREE_HAL_BUFFER_USAGE_MAPPING)
.value("DISPATCH", IREE_HAL_BUFFER_USAGE_DISPATCH)
.value("ALL", IREE_HAL_BUFFER_USAGE_ALL)
- .export_values();
+ .export_values()
+ .def("__or__",
+ [](enum iree_hal_buffer_usage_bits_t self,
+ enum iree_hal_buffer_usage_bits_t other) { return self | other; })
+ .def("__and__", [](enum iree_hal_buffer_usage_bits_t self,
+ enum iree_hal_buffer_usage_bits_t other) {
+ return self & other;
+ });
+
py::enum_<enum iree_hal_memory_access_bits_t>(m, "MemoryAccess")
.value("NONE", IREE_HAL_MEMORY_ACCESS_NONE)
.value("READ", IREE_HAL_MEMORY_ACCESS_READ)
@@ -77,7 +357,16 @@
.value("DISCARD", IREE_HAL_MEMORY_ACCESS_DISCARD)
.value("DISCARD_WRITE", IREE_HAL_MEMORY_ACCESS_DISCARD_WRITE)
.value("ALL", IREE_HAL_MEMORY_ACCESS_ALL)
- .export_values();
+ .export_values()
+ .def(
+ "__or__",
+ [](enum iree_hal_memory_access_bits_t self,
+ enum iree_hal_memory_access_bits_t other) { return self | other; })
+ .def("__and__", [](enum iree_hal_memory_access_bits_t self,
+ enum iree_hal_memory_access_bits_t other) {
+ return self & other;
+ });
+
py::enum_<enum iree_hal_element_types_t>(m, "HalElementType")
.value("NONE", IREE_HAL_ELEMENT_TYPE_NONE)
.value("OPAQUE_8", IREE_HAL_ELEMENT_TYPE_OPAQUE_8)
@@ -106,24 +395,107 @@
.value("BOOL_8",
static_cast<iree_hal_element_types_t>(IREE_HAL_ELEMENT_TYPE_VALUE(
IREE_HAL_NUMERICAL_TYPE_INTEGER_SIGNED, 1)))
- .export_values();
+ .export_values()
+ .def_static("map_to_dtype", &MapElementTypeToDType);
- py::class_<HalDevice>(m, "HalDevice");
+ py::class_<HalDevice>(m, "HalDevice")
+ .def_property_readonly("allocator", [](HalDevice& self) {
+ return HalAllocator::BorrowFromRawPtr(self.allocator());
+ });
+
py::class_<HalDriver>(m, "HalDriver")
.def_static("query", &HalDriver::Query)
.def_static("create", &HalDriver::Create, py::arg("driver_name"))
.def("create_default_device", &HalDriver::CreateDefaultDevice);
- py::class_<HalShape>(m, "Shape").def(py::init(&HalShape::FromIntVector));
- py::class_<HalBufferView>(m, "BufferView")
- .def("map", HalMappedMemory::Create);
- py::class_<HalMappedMemory>(m, "MappedMemory", py::buffer_protocol())
- .def_buffer(&HalMappedMemory::ToBufferInfo);
+ py::class_<HalAllocator>(m, "HalAllocator")
+ .def("trim",
+ [](HalAllocator& self) {
+ CheckApiStatus(iree_hal_allocator_trim(self.raw_ptr()),
+ "Error trim()'ing HAL allocator");
+ })
+ .def_property_readonly(
+ "has_statistics",
+ [](HalAllocator& self) -> bool { return IREE_STATISTICS_ENABLE; })
+ .def_property_readonly("statistics", &HalAllocator::QueryStatistics)
+ .def_property_readonly("formatted_statistics",
+ &HalAllocator::FormattedStatistics)
+ .def(
+ "query_buffer_compatibility",
+ [](HalAllocator& self, int memory_type, int allowed_usage,
+ int intended_usage, iree_device_size_t allocation_size) -> int {
+ return iree_hal_allocator_query_buffer_compatibility(
+ self.raw_ptr(), memory_type, allowed_usage, intended_usage,
+ allocation_size);
+ },
+ py::arg("memory_type"), py::arg("allowed_usage"),
+ py::arg("intended_usage"), py::arg("allocation_size"))
+ .def(
+ "allocate_buffer",
+ [](HalAllocator& self, int memory_type, int allowed_usage,
+ iree_host_size_t allocation_size) {
+ iree_hal_buffer_t* buffer;
+ iree_const_byte_span_t empty_initial_data{nullptr, 0};
+
+ CheckApiStatus(iree_hal_allocator_allocate_buffer(
+ self.raw_ptr(), memory_type, allowed_usage,
+ allocation_size, empty_initial_data, &buffer),
+ "could not allocate buffer");
+ return HalBuffer::StealFromRawPtr(buffer);
+ },
+ py::arg("memory_type"), py::arg("allowed_usage"),
+ py::arg("allocation_size"),
+ "Allocates a new buffer with requested characteristics (does not "
+ "initialize with specific data).")
+ .def("allocate_buffer_copy", &HalAllocator::AllocateBufferCopy,
+ py::arg("memory_type"), py::arg("allowed_usage"), py::arg("buffer"),
+ py::arg("element_type") = py::none(),
+ "Allocates a new buffer and initializes it from a Python buffer "
+ "object. If an element type is specified, wraps in a BufferView "
+ "matching the characteristics of the Python buffer. The format is "
+ "requested as ND/C-Contiguous, which may incur copies if not "
+ "already in that format.");
+
py::class_<HalBuffer>(m, "HalBuffer")
.def("fill_zero", &HalBuffer::FillZero, py::arg("byte_offset"),
py::arg("byte_length"))
.def("create_view", &HalBuffer::CreateView, py::arg("shape"),
- py::arg("element_size"));
+ py::arg("element_size"))
+ .def("__repr__", &HalBuffer::Repr);
+
+ py::class_<HalBufferView>(m, "HalBufferView")
+ .def("map", HalMappedMemory::Create)
+ .def_property_readonly(
+ "shape",
+ [](HalBufferView& self) {
+ iree_host_size_t rank =
+ iree_hal_buffer_view_shape_rank(self.raw_ptr());
+ auto* dims = iree_hal_buffer_view_shape_dims(self.raw_ptr());
+ py::list result;
+ for (iree_host_size_t i = 0; i < rank; ++i) {
+ result.append(dims[i]);
+ }
+ return result;
+ })
+ .def_property_readonly(
+ "element_type",
+ [](HalBufferView& self) {
+ return iree_hal_buffer_view_element_type(self.raw_ptr());
+ })
+ .def("__repr__", &HalBufferView::Repr);
+
+ py::class_<HalMappedMemory>(m, "MappedMemory", py::buffer_protocol())
+ .def_buffer(&HalMappedMemory::ToBufferInfo)
+ .def("asarray",
+ [](HalMappedMemory& self, std::vector<iree_host_size_t> shape,
+ py::object dtype) {
+ py::object py_mapped_memory = py::cast(self);
+ return py::array(std::move(dtype), shape,
+ self.mapped_memory().contents.data,
+ std::move(py_mapped_memory) /* base */);
+ });
+
+ py::class_<HalShape>(m, "Shape").def(py::init(&HalShape::FromIntVector));
}
} // namespace python
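
For orientation, a small sketch of the allocator and buffer view bindings defined above, again assuming the "vmvx" driver used by hal_test.py:

    import numpy as np
    import iree.runtime

    device = iree.runtime.HalDriver.create("vmvx").create_default_device()
    allocator = device.allocator

    # Copy a host ndarray into a device buffer and wrap it in a buffer view.
    src = np.arange(6, dtype=np.float32).reshape(2, 3)
    buffer_view = allocator.allocate_buffer_copy(
        memory_type=iree.runtime.MemoryType.DEVICE_LOCAL |
        iree.runtime.MemoryType.DEVICE_VISIBLE,
        allowed_usage=iree.runtime.BufferUsage.ALL,
        buffer=src,
        element_type=iree.runtime.HalElementType.FLOAT_32)

    # shape/element_type come from the buffer view; map().asarray() yields a
    # numpy view over the mapped memory.
    dtype = iree.runtime.HalElementType.map_to_dtype(buffer_view.element_type)
    mapped = buffer_view.map().asarray(buffer_view.shape, dtype)
    print(buffer_view.shape, mapped.sum())

    # Allocation statistics are exposed when the runtime is built with them.
    if allocator.has_statistics:
        print(allocator.formatted_statistics)
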
diff --git a/bindings/python/iree/runtime/hal.h b/bindings/python/iree/runtime/hal.h
index 72debbb..e0d93b6 100644
--- a/bindings/python/iree/runtime/hal.h
+++ b/bindings/python/iree/runtime/hal.h
@@ -33,6 +33,14 @@
};
template <>
+struct ApiPtrAdapter<iree_hal_allocator_t> {
+ static void Retain(iree_hal_allocator_t* d) { iree_hal_allocator_retain(d); }
+ static void Release(iree_hal_allocator_t* d) {
+ iree_hal_allocator_release(d);
+ }
+};
+
+template <>
struct ApiPtrAdapter<iree_hal_buffer_t> {
static void Retain(iree_hal_buffer_t* b) { iree_hal_buffer_retain(b); }
static void Release(iree_hal_buffer_t* b) { iree_hal_buffer_release(b); }
@@ -67,6 +75,16 @@
HalDevice CreateDefaultDevice();
};
+class HalAllocator : public ApiRefCounted<HalAllocator, iree_hal_allocator_t> {
+ public:
+ py::dict QueryStatistics();
+ py::str FormattedStatistics();
+
+ py::object AllocateBufferCopy(
+ int memory_type, int allowed_usage, py::object buffer,
+ std::optional<iree_hal_element_types_t> element_type);
+};
+
struct HalShape {
public:
static HalShape FromIntVector(std::vector<int32_t> indices) {
@@ -81,6 +99,7 @@
class HalBufferView
: public ApiRefCounted<HalBufferView, iree_hal_buffer_view_t> {
public:
+ py::str Repr();
};
class HalBuffer : public ApiRefCounted<HalBuffer, iree_hal_buffer_t> {
@@ -106,8 +125,10 @@
raw_ptr(), shape.s.data(), shape.s.size(), element_type,
encoding_type, iree_allocator_system(), &bv),
"Error creating buffer view");
- return HalBufferView::CreateRetained(bv);
+ return HalBufferView::StealFromRawPtr(bv);
}
+
+ py::str Repr();
};
// Wrapper around an iree_hal_buffer_mapping_t and iree_hal_buffer_view_t
@@ -168,6 +189,8 @@
dims, strides);
}
+ iree_hal_buffer_mapping_t& mapped_memory() { return mapped_memory_; }
+
private:
iree_hal_buffer_mapping_t mapped_memory_ = {{0}};
iree_hal_buffer_view_t* bv_ = nullptr;
diff --git a/bindings/python/iree/runtime/hal_test.py b/bindings/python/iree/runtime/hal_test.py
index 8ae2dbc..2951461 100644
--- a/bindings/python/iree/runtime/hal_test.py
+++ b/bindings/python/iree/runtime/hal_test.py
@@ -4,18 +4,129 @@
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-from absl import logging
-from absl.testing import absltest
import iree.runtime
import numpy as np
+import unittest
-class HalTest(absltest.TestCase):
+class NonDeviceHalTest(unittest.TestCase):
def testEnums(self):
- logging.info("MemoryType: %s", iree.runtime.MemoryType)
- logging.info("HOST_VISIBLE: %s", int(iree.runtime.MemoryType.HOST_VISIBLE))
+ print("MemoryType:", iree.runtime.MemoryType)
+ print("HOST_VISIBLE:", int(iree.runtime.MemoryType.HOST_VISIBLE))
+
+ # Enum and/or operations on BufferCompatibility.
+ self.assertEqual(
+ iree.runtime.BufferCompatibility.IMPORTABLE |
+ iree.runtime.BufferCompatibility.EXPORTABLE,
+ int(iree.runtime.BufferCompatibility.IMPORTABLE) |
+ int(iree.runtime.BufferCompatibility.EXPORTABLE))
+ self.assertEqual(
+ iree.runtime.BufferCompatibility.EXPORTABLE &
+ iree.runtime.BufferCompatibility.EXPORTABLE,
+ int(iree.runtime.BufferCompatibility.EXPORTABLE))
+
+ # Enum and/or operations on BufferUsage.
+ self.assertEqual(
+ iree.runtime.BufferUsage.CONSTANT | iree.runtime.BufferUsage.TRANSFER,
+ int(iree.runtime.BufferUsage.CONSTANT) |
+ int(iree.runtime.BufferUsage.TRANSFER))
+ self.assertEqual(
+ iree.runtime.BufferUsage.CONSTANT & iree.runtime.BufferUsage.CONSTANT,
+ int(iree.runtime.BufferUsage.CONSTANT))
+
+ # Enum and/or operations on MemoryAccess.
+ self.assertEqual(
+ iree.runtime.MemoryAccess.READ | iree.runtime.MemoryAccess.WRITE,
+ int(iree.runtime.MemoryAccess.READ) |
+ int(iree.runtime.MemoryAccess.WRITE))
+ self.assertEqual(
+ iree.runtime.MemoryAccess.ALL & iree.runtime.MemoryAccess.READ,
+ int(iree.runtime.MemoryAccess.READ))
+
+ # Enum and/or operations on MemoryType.
+ self.assertEqual(
+ iree.runtime.MemoryType.TRANSIENT |
+ iree.runtime.MemoryType.HOST_VISIBLE,
+ int(iree.runtime.MemoryType.TRANSIENT) |
+ int(iree.runtime.MemoryType.HOST_VISIBLE))
+ self.assertEqual(
+ iree.runtime.MemoryType.TRANSIENT & iree.runtime.MemoryType.TRANSIENT,
+ int(iree.runtime.MemoryType.TRANSIENT))
+
+
+class DeviceHalTest(unittest.TestCase):
+
+ def setUp(self):
+ super().setUp()
+ self.driver = iree.runtime.HalDriver.create("vmvx")
+ self.device = self.driver.create_default_device()
+ self.allocator = self.device.allocator
+
+ def testTrim(self):
+ self.allocator.trim()
+ # Just running is sufficient.
+
+ def testStatistics(self):
+ stats_dict = self.allocator.statistics
+ stats_str = self.allocator.formatted_statistics
+ if self.allocator.has_statistics:
+ self.assertIn("host_bytes_peak", stats_dict)
+ self.assertIn("host_bytes_allocated", stats_dict)
+ self.assertIn("host_bytes_freed", stats_dict)
+ self.assertIn("device_bytes_peak", stats_dict)
+ self.assertIn("device_bytes_allocated", stats_dict)
+ self.assertIn("device_bytes_freed", stats_dict)
+ self.assertIn("HOST_LOCAL", stats_str)
+
+ def testQueryCompatibility(self):
+ compat = self.allocator.query_buffer_compatibility(
+ memory_type=iree.runtime.MemoryType.DEVICE_LOCAL,
+ allowed_usage=iree.runtime.BufferUsage.CONSTANT,
+ intended_usage=iree.runtime.BufferUsage.CONSTANT |
+ iree.runtime.BufferUsage.TRANSFER,
+ allocation_size=1024)
+ print("COMPAT:", compat)
+ self.assertTrue(
+ bool(compat & int(iree.runtime.BufferCompatibility.ALLOCATABLE)),
+ "should be allocatable")
+ self.assertTrue(
+ bool(compat & int(iree.runtime.BufferCompatibility.IMPORTABLE)),
+ "should be importable")
+ self.assertTrue(
+ bool(compat & int(iree.runtime.BufferCompatibility.EXPORTABLE)),
+ "should be exportable")
+
+ def testAllocateBuffer(self):
+ buffer = self.allocator.allocate_buffer(
+ memory_type=iree.runtime.MemoryType.DEVICE_LOCAL,
+ allowed_usage=iree.runtime.BufferUsage.CONSTANT,
+ allocation_size=13)
+ print("BUFFER:", buffer)
+
+ def testAllocateBufferCopy(self):
+ ary = np.zeros([3, 4], dtype=np.int32) + 2
+ buffer = self.allocator.allocate_buffer_copy(
+ memory_type=iree.runtime.MemoryType.DEVICE_LOCAL,
+ allowed_usage=iree.runtime.BufferUsage.CONSTANT,
+ buffer=ary)
+ self.assertEqual(
+ repr(buffer),
+ "<HalBuffer 48 bytes (at offset 0 into 48), memory_type=DEVICE_LOCAL|HOST_VISIBLE, allowed_access=ALL, allowed_usage=CONSTANT|TRANSFER|MAPPING>"
+ )
+
+ def testAllocateBufferViewCopy(self):
+ ary = np.zeros([3, 4], dtype=np.int32) + 2
+ buffer = self.allocator.allocate_buffer_copy(
+ memory_type=iree.runtime.MemoryType.DEVICE_LOCAL,
+ allowed_usage=iree.runtime.BufferUsage.CONSTANT,
+ buffer=ary,
+ element_type=iree.runtime.HalElementType.SINT_32)
+ self.assertEqual(
+ repr(buffer),
+ "<HalBufferView (3, 4), element_type=0x20000011, 48 bytes (at offset 0 into 48), memory_type=DEVICE_LOCAL|HOST_VISIBLE, allowed_access=ALL, allowed_usage=CONSTANT|TRANSFER|MAPPING>"
+ )
if __name__ == "__main__":
- absltest.main()
+ unittest.main()
diff --git a/bindings/python/iree/runtime/system_api_test.py b/bindings/python/iree/runtime/system_api_test.py
index eeeab80..0e7d32d 100644
--- a/bindings/python/iree/runtime/system_api_test.py
+++ b/bindings/python/iree/runtime/system_api_test.py
@@ -85,6 +85,20 @@
results = f(arg0, arg1)
np.testing.assert_allclose(results, [4., 10., 18., 28.])
+ def test_chained_invoke(self):
+ # This ensures that everything works if DeviceArrays are returned
+ # and input to functions.
+ ctx = iree.runtime.SystemContext()
+ self.assertTrue(ctx.is_dynamic)
+ ctx.add_vm_module(create_simple_mul_module())
+ self.assertEqual(ctx.modules.arithmetic.name, "arithmetic")
+ f = ctx.modules.arithmetic["simple_mul"]
+ arg0 = np.array([1., 2., 3., 4.], dtype=np.float32)
+ arg1 = np.array([4., 5., 6., 7.], dtype=np.float32)
+ results = f(arg0, arg1)
+ results2 = f(results, results)
+ np.testing.assert_allclose(results2, [16., 100., 324., 784.])
+
def test_tracing_explicit(self):
with tempfile.TemporaryDirectory() as temp_dir:
tracer = iree.runtime.Tracer(temp_dir)
@@ -119,6 +133,7 @@
arg0 = np.array([1., 2., 3., 4.], dtype=np.float32)
arg1 = np.array([4., 5., 6., 7.], dtype=np.float32)
results = arithmetic.simple_mul(arg0, arg1)
+ print("SIMPLE_MUL RESULTS:", results)
np.testing.assert_allclose(results, [4., 10., 18., 28.])
diff --git a/bindings/python/iree/runtime/vm.cc b/bindings/python/iree/runtime/vm.cc
index 15a6365..0e35083 100644
--- a/bindings/python/iree/runtime/vm.cc
+++ b/bindings/python/iree/runtime/vm.cc
@@ -24,7 +24,7 @@
CheckApiStatus(iree_hal_module_create(device->raw_ptr(),
iree_allocator_system(), &module),
"Error creating hal module");
- return VmModule::CreateRetained(module);
+ return VmModule::StealFromRawPtr(module);
}
// RAII wrapper for a Py_buffer which calls PyBuffer_Release when it goes
@@ -66,7 +66,7 @@
iree_vm_instance_t* instance;
auto status = iree_vm_instance_create(iree_allocator_system(), &instance);
CheckApiStatus(status, "Error creating instance");
- return VmInstance::CreateRetained(instance);
+ return VmInstance::StealFromRawPtr(instance);
}
//------------------------------------------------------------------------------
@@ -96,7 +96,7 @@
}
IREE_CHECK(context);
- return VmContext::CreateRetained(context);
+ return VmContext::StealFromRawPtr(context);
}
void VmContext::RegisterModules(std::vector<VmModule*> modules) {
@@ -148,7 +148,7 @@
}
CheckApiStatus(status, "Error creating vm module from flatbuffer");
- auto py_module = VmModule::CreateRetained(module);
+ auto py_module = VmModule::StealFromRawPtr(module);
py_module.stashed_flatbuffer_blob = flatbuffer_blob_object;
return py_module;
}
@@ -190,68 +190,9 @@
iree_vm_list_push_ref_move(raw_ptr(), &retained);
}
-void VmVariantList::PushBufferView(HalDevice& device,
- py::object py_buffer_object,
- iree_hal_element_type_t element_type) {
- // Request a view of the buffer (use the raw python C API to avoid some
- // allocation and copying at the pybind level).
- Py_buffer py_view;
- // Note that only C-Contiguous ND-arrays are presently supported, so
- // only request that via PyBUF_ND. Long term, we should consult an
- // "oracle" in the runtime to determine the precise required format and
- // set flags accordingly (and fallback/copy on failure).
- int flags = PyBUF_FORMAT | PyBUF_ND;
-
- // Acquire the backing buffer and setup RAII release.
- if (PyObject_GetBuffer(py_buffer_object.ptr(), &py_view, flags) != 0) {
- // The GetBuffer call is required to set an appropriate error.
- throw py::error_already_set();
- }
- PyBufferReleaser py_view_releaser(py_view);
-
- // Whether the py object needs to be retained with the argument.
- // Should be set to true if directly mapping, false if copied.
- bool depends_on_pyobject = false;
-
- // Allocate a HalBuffer.
- // This is hard-coded to C-contiguous right now.
- // TODO(laurenzo): Expand to other layouts as needed.
- // TODO(laurenzo): Wrap and retain original buffer (depends_on_pyobject=true).
- iree_hal_buffer_t* raw_buffer;
- CheckApiStatus(
- iree_hal_allocator_allocate_buffer(
- device.allocator(),
- static_cast<iree_hal_memory_type_t>(
- IREE_HAL_MEMORY_TYPE_HOST_LOCAL |
- IREE_HAL_MEMORY_TYPE_DEVICE_VISIBLE),
- IREE_HAL_BUFFER_USAGE_ALL, py_view.len,
- iree_make_const_byte_span(py_view.buf, py_view.len), &raw_buffer),
- "Failed to allocate device visible buffer");
-
- // Only capture the reference to the exporting object (incrementing it)
- // once guaranteed successful.
- if (depends_on_pyobject) {
- // Note for future implementation: there needs to be a place to stash
- // references to be kept alive which back a buffer. This is likely an
- // additional bag of refs returned from this function, which can then
- // be attached to an invocation.
- throw RaisePyError(PyExc_NotImplementedError,
- "Dependent buffer arguments not implemented");
- }
-
- iree_hal_encoding_type_t encoding_type =
- IREE_HAL_ENCODING_TYPE_DENSE_ROW_MAJOR;
-
- // Create the buffer_view. (note that numpy shape is ssize_t)
- std::vector<int> dims(py_view.ndim);
- std::copy(py_view.shape, py_view.shape + py_view.ndim, dims.begin());
- iree_hal_buffer_view_t* buffer_view;
- CheckApiStatus(iree_hal_buffer_view_create(
- raw_buffer, dims.data(), dims.size(), element_type,
- encoding_type, iree_allocator_system(), &buffer_view),
- "Error allocating buffer_view");
- iree_hal_buffer_release(raw_buffer);
- iree_vm_ref_t buffer_view_ref = iree_hal_buffer_view_move_ref(buffer_view);
+void VmVariantList::PushBufferView(HalBufferView& buffer_view) {
+ iree_vm_ref_t buffer_view_ref =
+ iree_hal_buffer_view_retain_ref(buffer_view.raw_ptr());
CheckApiStatus(iree_vm_list_push_ref_move(raw_ptr(), &buffer_view_ref),
"Error moving buffer view");
}
@@ -296,7 +237,7 @@
if (iree_vm_list_isa(v.ref)) {
return GetAsList(index);
} else if (iree_hal_buffer_view_isa(v.ref)) {
- return GetAsNdarray(index);
+ return GetAsBufferView(index);
}
}
@@ -407,7 +348,7 @@
throw RaiseValueError("Unsupported VM to Python Type Conversion");
}
-py::object VmVariantList::GetAsNdarray(int index) {
+py::object VmVariantList::GetAsBufferView(int index) {
iree_vm_variant_t v = iree_vm_variant_empty();
CheckApiStatus(iree_vm_list_get_variant(raw_ptr(), index, &v),
"Could not access list element");
@@ -415,94 +356,8 @@
if (!buffer_view) {
throw RaiseValueError("Could not deref result buffer view (wrong type?)");
}
- iree_hal_buffer_t* raw_buffer = iree_hal_buffer_view_buffer(buffer_view);
- if (!raw_buffer) {
- throw RaiseValueError("Could not deref result buffer (wrong type?)");
- }
- HalBuffer buffer = HalBuffer::RetainAndCreate(raw_buffer);
-
- // Extract dims from the buffer view.
- size_t rank = 0;
- std::vector<int32_t> dims(6);
- iree_status_t status = iree_hal_buffer_view_shape(
- buffer_view, dims.capacity(), dims.data(), &rank);
- if (iree_status_is_out_of_range(status)) {
- dims.resize(rank);
- status = iree_hal_buffer_view_shape(buffer_view, dims.capacity(),
- dims.data(), &rank);
- }
- CheckApiStatus(status, "Error extracting shape");
- dims.resize(rank);
-
- // Convert element type to dtype.
- iree_hal_element_type_t element_type =
- iree_hal_buffer_view_element_type(buffer_view);
- // See: https://docs.python.org/3/c-api/arg.html#numbers
- // TODO: Handle dtypes that do not map to a code (i.e. fp16).
- const char* dtype_code;
- switch (element_type) {
- case IREE_HAL_ELEMENT_TYPE_INT_8:
- case IREE_HAL_ELEMENT_TYPE_SINT_8:
- dtype_code = "b";
- break;
- case IREE_HAL_ELEMENT_TYPE_UINT_8:
- dtype_code = "B";
- break;
- case IREE_HAL_ELEMENT_TYPE_INT_16:
- case IREE_HAL_ELEMENT_TYPE_SINT_16:
- dtype_code = "h";
- break;
- case IREE_HAL_ELEMENT_TYPE_UINT_16:
- dtype_code = "H";
- break;
- case IREE_HAL_ELEMENT_TYPE_INT_32:
- case IREE_HAL_ELEMENT_TYPE_SINT_32:
- dtype_code = "i";
- break;
- case IREE_HAL_ELEMENT_TYPE_UINT_32:
- dtype_code = "I";
- break;
- case IREE_HAL_ELEMENT_TYPE_INT_64:
- case IREE_HAL_ELEMENT_TYPE_SINT_64:
- dtype_code = "l";
- break;
- case IREE_HAL_ELEMENT_TYPE_UINT_64:
- dtype_code = "L";
- break;
- case IREE_HAL_ELEMENT_TYPE_FLOAT_32:
- dtype_code = "f";
- break;
- case IREE_HAL_ELEMENT_TYPE_FLOAT_64:
- dtype_code = "d";
- break;
- case IREE_HAL_ELEMENT_TYPE_VALUE(IREE_HAL_NUMERICAL_TYPE_INTEGER, 1):
- // Due to layering issues it is not uncommon to get i1 buffer views
- // and we just silently promote them to i8 since that is what they are.
- // Really i1 should not exist at this boundary.
- dtype_code = "b";
- break;
- default:
- throw RaiseValueError("Unsupported VM Buffer -> numpy dtype mapping");
- }
- auto dtype = py::dtype(dtype_code);
-
- // Map memory.
- iree_device_size_t byte_length =
- iree_hal_buffer_byte_length(buffer.raw_ptr());
- iree_hal_buffer_mapping_t mapped_memory = {{0}};
- CheckApiStatus(iree_hal_buffer_map_range(
- buffer.raw_ptr(), IREE_HAL_MAPPING_MODE_SCOPED,
- IREE_HAL_MEMORY_ACCESS_READ, 0 /* element_offset */,
- byte_length, &mapped_memory),
- "Could not map memory");
-
- // Turn the mapping into a python object that retains until the array is
- // destroyed.
- HalMappedMemory hal_mapped_memory(mapped_memory, buffer_view);
- py::object py_mapped_memory = py::cast(
- std::move(hal_mapped_memory), py::return_value_policy::take_ownership);
- return py::array(std::move(dtype), dims, mapped_memory.contents.data,
- std::move(py_mapped_memory) /* base */);
+ return py::cast(HalBufferView::BorrowFromRawPtr(buffer_view),
+ py::return_value_policy::move);
}
namespace {
@@ -635,7 +490,7 @@
.def(py::init(&VmVariantList::Create))
.def_property_readonly("size", &VmVariantList::size)
.def("__len__", &VmVariantList::size)
- .def("get_as_ndarray", &VmVariantList::GetAsNdarray)
+ .def("get_as_buffer_view", &VmVariantList::GetAsBufferView)
.def("get_as_list", &VmVariantList::GetAsList)
.def("get_variant", &VmVariantList::GetVariant)
.def("get_serialized_trace_value",
diff --git a/bindings/python/iree/runtime/vm.h b/bindings/python/iree/runtime/vm.h
index caf69db..e9c3db7 100644
--- a/bindings/python/iree/runtime/vm.h
+++ b/bindings/python/iree/runtime/vm.h
@@ -94,10 +94,9 @@
void PushFloat(double fvalue);
void PushInt(int64_t ivalue);
void PushList(VmVariantList& other);
- void PushBufferView(HalDevice& device, py::object py_buffer_object,
- iree_hal_element_type_t element_type);
+ void PushBufferView(HalBufferView& buffer_view);
py::object GetAsList(int index);
- py::object GetAsNdarray(int index);
+ py::object GetAsBufferView(int index);
py::object GetVariant(int index);
py::object GetAsSerializedTraceValue(int index);
diff --git a/bindings/python/iree/runtime/vm_test.py b/bindings/python/iree/runtime/vm_test.py
index 41fac47..6c6b069 100644
--- a/bindings/python/iree/runtime/vm_test.py
+++ b/bindings/python/iree/runtime/vm_test.py
@@ -94,11 +94,20 @@
# TODO: Unimplemented: (np.float16, ET.FLOAT_16)
lst = iree.runtime.VmVariantList(5)
ary1 = np.asarray([1, 2, 3, 4], dtype=dt)
- lst.push_buffer_view(self.device, ary1, et)
- ary2 = lst.get_as_ndarray(0)
+ bv1 = self.device.allocator.allocate_buffer_copy(
+ memory_type=iree.runtime.MemoryType.DEVICE_LOCAL |
+ iree.runtime.MemoryType.DEVICE_VISIBLE,
+ allowed_usage=iree.runtime.BufferUsage.ALL,
+ buffer=ary1,
+ element_type=et)
+ lst.push_buffer_view(bv1)
+ ary2 = iree.runtime.DeviceArray(self.device,
+ lst.get_as_buffer_view(0),
+ override_dtype=dt,
+ implicit_host_transfer=True)
np.testing.assert_array_equal(ary1, ary2)
with self.assertRaises(IndexError):
- lst.get_as_ndarray(1)
+ lst.get_as_buffer_view(1)
def test_variant_list_list(self):
lst1 = iree.runtime.VmVariantList(5)
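
For reference, the buffer-view round trip that replaces the old `get_as_ndarray`/`push_buffer_view(device, ...)` API can be sketched outside the test harness as follows. This is a minimal sketch based only on the calls visible in the test above; `device` is assumed to be an `iree.runtime` HAL device obtained elsewhere (the tests use `self.device`), and `HalElementType.FLOAT_32` is an assumed spelling of the element type enum the test aliases as `ET`.

    import numpy as np
    import iree.runtime

    # Assumption: `device` is an iree.runtime HAL device (self.device in the test).
    lst = iree.runtime.VmVariantList(1)
    ary = np.asarray([1, 2, 3, 4], dtype=np.float32)

    # Host ndarray -> HAL buffer view, copied through the device allocator.
    bv = device.allocator.allocate_buffer_copy(
        memory_type=iree.runtime.MemoryType.DEVICE_LOCAL |
        iree.runtime.MemoryType.DEVICE_VISIBLE,
        allowed_usage=iree.runtime.BufferUsage.ALL,
        buffer=ary,
        element_type=iree.runtime.HalElementType.FLOAT_32)  # assumed enum name
    lst.push_buffer_view(bv)

    # HAL buffer view -> host ndarray, now via DeviceArray instead of get_as_ndarray().
    out = iree.runtime.DeviceArray(
        device,
        lst.get_as_buffer_view(0),
        override_dtype=np.float32,
        implicit_host_transfer=True)
    np.testing.assert_array_equal(ary, out)
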
diff --git a/build_tools/bazel/workspace.bzl b/build_tools/bazel/workspace.bzl
index e37c06f..1b81f05 100644
--- a/build_tools/bazel/workspace.bzl
+++ b/build_tools/bazel/workspace.bzl
@@ -87,10 +87,3 @@
build_file = iree_repo_alias + "//:build_tools/third_party/spirv_cross/BUILD.overlay",
path = paths.join(iree_path, "third_party/spirv_cross"),
)
-
- maybe(
- native.new_local_repository,
- name = "cuda",
- build_file = iree_repo_alias + "//:build_tools/third_party/cuda/BUILD.overlay",
- path = paths.join(iree_path, "third_party/cuda"),
- )
diff --git a/build_tools/bazel_to_cmake/bazel_to_cmake_targets.py b/build_tools/bazel_to_cmake/bazel_to_cmake_targets.py
index 3013d7e..5aa92c1 100644
--- a/build_tools/bazel_to_cmake/bazel_to_cmake_targets.py
+++ b/build_tools/bazel_to_cmake/bazel_to_cmake_targets.py
@@ -129,8 +129,6 @@
# Vulkan
"@vulkan_headers": ["Vulkan::Headers"],
- # Cuda
- "@cuda//:cuda_headers": ["cuda_headers"],
# The Bazel target maps to the IMPORTED target defined by FindVulkan().
"@vulkan_sdk//:sdk": ["Vulkan::Vulkan"],
# Misc single targets
diff --git a/build_tools/cmake/iree_cc_library.cmake b/build_tools/cmake/iree_cc_library.cmake
index d00ceed..517f449 100644
--- a/build_tools/cmake/iree_cc_library.cmake
+++ b/build_tools/cmake/iree_cc_library.cmake
@@ -56,6 +56,7 @@
# DEPS
# iree::package::fantastic_lib
# )
+
function(iree_cc_library)
cmake_parse_arguments(
_RULE
@@ -76,6 +77,7 @@
# Prefix the library with the package name, so we get: iree_package_name.
iree_package_name(_PACKAGE_NAME)
set(_NAME "${_PACKAGE_NAME}_${_RULE_NAME}")
+ set(_OBJECTS_NAME ${_NAME}.objects)
# Check if this is a header-only library.
# Note that as of February 2019, many popular OS's (for example, Ubuntu
@@ -94,18 +96,55 @@
endif()
if(NOT _RULE_IS_INTERFACE)
+ add_library(${_OBJECTS_NAME} OBJECT)
if(_RULE_SHARED)
- add_library(${_NAME} SHARED "")
+ add_library(${_NAME} SHARED "$<TARGET_OBJECTS:${_OBJECTS_NAME}>")
else()
- add_library(${_NAME} STATIC "")
+ add_library(${_NAME} STATIC "$<TARGET_OBJECTS:${_OBJECTS_NAME}>")
endif()
- target_sources(${_NAME}
+ # Sources get added to the object library.
+ target_sources(${_OBJECTS_NAME}
PRIVATE
${_RULE_SRCS}
${_RULE_TEXTUAL_HDRS}
${_RULE_HDRS}
)
+
+ # Keep track of objects transitively in our special property.
+ set_property(TARGET ${_NAME} PROPERTY
+ INTERFACE_IREE_TRANSITIVE_OBJECTS "$<TARGET_OBJECTS:${_OBJECTS_NAME}>")
+ _iree_cc_library_add_object_deps(${_NAME} ${_RULE_DEPS})
+
+ # We define everything else on the regular rule. However, the object
+ # library needs compiler definition related properties, so we forward them.
+ # We also forward link libraries -- not because the OBJECT libraries do
+ # linking but because they get transitive compile definitions from them.
+ # Yes. This is state of the art.
+ # Note that SYSTEM scope matches here, in the property name and in the
+ # include directories below on the main rule. If ever removing this,
+ # remove it from all places.
+ target_include_directories(${_OBJECTS_NAME} SYSTEM
+ PUBLIC
+ $<TARGET_PROPERTY:${_NAME},INTERFACE_SYSTEM_INCLUDE_DIRECTORIES>
+ )
+ target_include_directories(${_OBJECTS_NAME}
+ PUBLIC
+ $<TARGET_PROPERTY:${_NAME},INTERFACE_INCLUDE_DIRECTORIES>
+ )
+ target_compile_options(${_OBJECTS_NAME}
+ PRIVATE
+ $<TARGET_PROPERTY:${_NAME},COMPILE_OPTIONS>
+ )
+ target_compile_definitions(${_OBJECTS_NAME}
+ PUBLIC
+ $<TARGET_PROPERTY:${_NAME},INTERFACE_COMPILE_DEFINITIONS>
+ )
+ target_link_libraries(${_OBJECTS_NAME}
+ PUBLIC
+ $<TARGET_PROPERTY:${_NAME},INTERFACE_LINK_LIBRARIES>
+ )
+
target_include_directories(${_NAME} SYSTEM
PUBLIC
"$<BUILD_INTERFACE:${IREE_SOURCE_DIR}>"
@@ -139,15 +178,19 @@
# Add all IREE targets to a folder in the IDE for organization.
if(_RULE_PUBLIC)
set_property(TARGET ${_NAME} PROPERTY FOLDER ${IREE_IDE_FOLDER})
+ set_property(TARGET ${_OBJECTS_NAME} PROPERTY FOLDER ${IREE_IDE_FOLDER})
elseif(_RULE_TESTONLY)
set_property(TARGET ${_NAME} PROPERTY FOLDER ${IREE_IDE_FOLDER}/test)
+ set_property(TARGET ${_OBJECTS_NAME} PROPERTY FOLDER ${IREE_IDE_FOLDER}/test)
else()
set_property(TARGET ${_NAME} PROPERTY FOLDER ${IREE_IDE_FOLDER}/internal)
+ set_property(TARGET ${_OBJECTS_NAME} PROPERTY FOLDER ${IREE_IDE_FOLDER}/internal)
endif()
- # INTERFACE libraries can't have the CXX_STANDARD property set.
- set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${IREE_CXX_STANDARD})
- set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
+  # INTERFACE libraries can't have the CXX_STANDARD property set, so we only
+  # set it on the object library here.
+ set_property(TARGET ${_OBJECTS_NAME} PROPERTY CXX_STANDARD ${IREE_CXX_STANDARD})
+ set_property(TARGET ${_OBJECTS_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
else()
# Generating header-only library.
add_library(${_NAME} INTERFACE)
@@ -165,6 +208,7 @@
INTERFACE
${_RULE_DEPS}
)
+ _iree_cc_library_add_object_deps(${_NAME} ${_RULE_DEPS})
iree_add_data_dependencies(NAME ${_NAME} DATA ${_RULE_DATA})
target_compile_definitions(${_NAME}
INTERFACE
@@ -185,3 +229,130 @@
add_library(${_PACKAGE_NS} ALIAS ${_NAME})
endif()
endfunction()
+
+# _iree_cc_library_add_object_deps()
+#
+# Helper to add deps to an iree_cc_library. This only operates on the unaliased
+# raw name (e.g. 'iree_vm_vm'), not aliased names (e.g. 'iree::vm').
+#
+# This appends to two properties:
+# INTERFACE_IREE_TRANSITIVE_OBJECTS: Transitive list of all objects from
+# this library and all "iree::" prefixed dependent libraries. This will
+#     allow you to create mondo objects for any transitive libraries that are
+#     part of IREE, but it will not contain outside libraries.
+#   INTERFACE_IREE_TRANSITIVE_OBJECT_LIBS: Transitive list of any dependency
+#     targets that are not under the "iree::" namespace but are encountered
+#     in the dependency DAG.
+function(_iree_cc_library_add_object_deps name)
+ foreach(dep_target ${ARGN})
+ if(dep_target MATCHES "^iree::")
+ set_property(TARGET ${name} APPEND PROPERTY
+ INTERFACE_IREE_TRANSITIVE_OBJECTS
+ "$<GENEX_EVAL:$<TARGET_PROPERTY:${dep_target},INTERFACE_IREE_TRANSITIVE_OBJECTS>>"
+ )
+ set_property(TARGET ${name} APPEND PROPERTY
+ INTERFACE_IREE_TRANSITIVE_OBJECT_LIBS
+ "$<GENEX_EVAL:$<TARGET_PROPERTY:${dep_target},INTERFACE_IREE_TRANSITIVE_OBJECT_LIBS>>"
+ )
+ else()
+ set_property(TARGET ${name} APPEND PROPERTY
+ INTERFACE_IREE_TRANSITIVE_OBJECT_LIBS
+ ${dep_target}
+ )
+ endif()
+ endforeach()
+endfunction()
+
+# iree_cc_unified_library()
+#
+# Creates a unified library out of the iree:: namespaced transitive deps+self
+# of some ROOT library. The resulting library will contain the union of all
+# objects from all transitive library-deps in the iree:: namespace. Such
+# libraries are typically the only libraries that we install for outside use
+# and they must only be used by leaf demos or out of tree libraries/executables.
+# Commingling with any regular libraries will result in duplicate symbols.
+#
+# Note that the resulting library will not contain any libraries outside of the
+# iree:: namespace but will be configured to link to them. For external use
+# it is expected that they will be installed and used separately as needed.
+#
+# Compile and link options are forwarded from the ROOT target non-transitively.
+# Ensure that this target directly references all definitions that need to
+# be exported to end consumers.
+#
+# Parameters:
+# NAME: name of target
+# ROOT: Root target library to extract objects and deps from.
+function(iree_cc_unified_library)
+ cmake_parse_arguments(
+ _RULE
+ "SHARED"
+ "NAME;ROOT"
+ ""
+ ${ARGN}
+ )
+
+  # Replace a ROOT passed as ::name with the fully qualified iree::package::name.
+ iree_package_ns(_PACKAGE_NS)
+ list(TRANSFORM _RULE_ROOT REPLACE "^::" "${_PACKAGE_NS}::")
+
+ # Prefix the library with the package name, so we get: iree_package_name.
+ iree_package_name(_PACKAGE_NAME)
+ set(_NAME "${_PACKAGE_NAME}_${_RULE_NAME}")
+
+ # Evaluate the object and libs.
+ set(_objects "$<REMOVE_DUPLICATES:$<GENEX_EVAL:$<TARGET_PROPERTY:${_RULE_ROOT},INTERFACE_IREE_TRANSITIVE_OBJECTS>>>")
+ set(_libs "$<REMOVE_DUPLICATES:$<GENEX_EVAL:$<TARGET_PROPERTY:${_RULE_ROOT},INTERFACE_IREE_TRANSITIVE_OBJECT_LIBS>>>")
+
+ # For debugging, write out evaluated objects to a file.
+ file(GENERATE OUTPUT "${_RULE_NAME}.contents.txt" CONTENT
+ "OBJECTS:\n${_objects}\n\nLIBS:\n${_libs}\n")
+ if(_RULE_SHARED)
+ add_library(${_NAME} SHARED ${_objects})
+ else()
+ add_library(${_NAME} STATIC ${_objects})
+ endif()
+
+ target_link_libraries(${_NAME}
+ PUBLIC
+ ${_libs}
+ )
+
+ # Forward compile usage requirements from the root library.
+ # Note that SYSTEM scope matches here, in the property name and in the
+ # include directories below on the main rule. If ever removing this,
+ # remove it from all places.
+ target_include_directories(${_NAME} SYSTEM
+ PUBLIC
+ $<TARGET_PROPERTY:${_RULE_ROOT},INTERFACE_SYSTEM_INCLUDE_DIRECTORIES>
+ )
+ target_include_directories(${_NAME}
+ PUBLIC
+ $<TARGET_PROPERTY:${_RULE_ROOT},INTERFACE_INCLUDE_DIRECTORIES>
+ )
+ target_compile_options(${_NAME}
+ PRIVATE
+ $<TARGET_PROPERTY:${_RULE_ROOT},COMPILE_OPTIONS>
+ )
+ target_compile_definitions(${_NAME}
+ PUBLIC
+ $<TARGET_PROPERTY:${_RULE_ROOT},INTERFACE_COMPILE_DEFINITIONS>
+ )
+ target_link_libraries(${_NAME}
+ PUBLIC
+ $<TARGET_PROPERTY:${_RULE_ROOT},INTERFACE_LINK_LIBRARIES>
+ )
+
+ # Alias the iree_package_name library to iree::package::name.
+ # This lets us more clearly map to Bazel and makes it possible to
+ # disambiguate the underscores in paths vs. the separators.
+ add_library(${_PACKAGE_NS}::${_RULE_NAME} ALIAS ${_NAME})
+
+ # If the library name matches the final component of the package then treat
+ # it as a default. For example, foo/bar/ library 'bar' would end up as
+ # 'foo::bar'.
+ iree_package_dir(_PACKAGE_DIR)
+ if(${_RULE_NAME} STREQUAL ${_PACKAGE_DIR})
+ add_library(${_PACKAGE_NS} ALIAS ${_NAME})
+ endif()
+endfunction()
diff --git a/build_tools/cmake/iree_lit_test.cmake b/build_tools/cmake/iree_lit_test.cmake
index 13ef3aa..b983800 100644
--- a/build_tools/cmake/iree_lit_test.cmake
+++ b/build_tools/cmake/iree_lit_test.cmake
@@ -73,7 +73,6 @@
"${CMAKE_SOURCE_DIR}/build_tools/cmake/run_test.${IREE_HOST_SCRIPT_EXT}"
"${Python3_EXECUTABLE}"
"${LLVM_SOURCE_DIR}/utils/lit/lit.py"
- "-v"
${_LIT_PATH_ARGS}
${_TEST_FILE_PATH}
)
@@ -83,6 +82,7 @@
set_property(TEST ${_NAME_PATH} PROPERTY REQUIRED_FILES "${_TEST_FILE_PATH}")
set_property(TEST ${_NAME_PATH} PROPERTY ENVIRONMENT
"TEST_TMPDIR=${IREE_BINARY_DIR}/tmp/${_NAME}_test_tmpdir"
+ "LIT_OPTS=-v"
"FILECHECK_OPTS=--enable-var-scope")
iree_add_test_environment_properties(${_NAME_PATH})
diff --git a/build_tools/third_party/cuda/BUILD.overlay b/build_tools/third_party/cuda/BUILD.overlay
deleted file mode 100644
index d3ed41a..0000000
--- a/build_tools/third_party/cuda/BUILD.overlay
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2021 The IREE Authors
-#
-# Licensed under the Apache License v2.0 with LLVM Exceptions.
-# See https://llvm.org/LICENSE.txt for license information.
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-package(default_visibility = ["//visibility:public"])
-
-cc_library(
- name = "cuda_headers",
- hdrs = ["include/cuda.h"],
- includes = ["include"],
-)
-
-filegroup(
- name = "libdevice_root",
- srcs = ["nvvm/libdevice/libdevice.10.bc"],
-)
-
diff --git a/build_tools/third_party/cuda/CMakeLists.txt b/build_tools/third_party/cuda/CMakeLists.txt
deleted file mode 100644
index ac2d766..0000000
--- a/build_tools/third_party/cuda/CMakeLists.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2021 The IREE Authors
-#
-# Licensed under the Apache License v2.0 with LLVM Exceptions.
-# See https://llvm.org/LICENSE.txt for license information.
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-set(CUDA_HEADERS_API_ROOT "${IREE_ROOT_DIR}/third_party/cuda/include")
-
-external_cc_library(
- PACKAGE
- cuda_headers
- NAME
- cuda_headers
- ROOT
- ${CUDA_HEADERS_API_ROOT}
- HDRS
- "cuda.h"
- INCLUDES
- ${CUDA_HEADERS_API_ROOT}
-)
-
diff --git a/experimental/sample_web_static/CMakeLists.txt b/experimental/sample_web_static/CMakeLists.txt
new file mode 100644
index 0000000..2cf9582
--- /dev/null
+++ b/experimental/sample_web_static/CMakeLists.txt
@@ -0,0 +1,103 @@
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+if(NOT EMSCRIPTEN)
+ return()
+endif()
+
+set(_MNIST_OBJECT_NAME "iree_experimental_sample_web_static_mnist")
+add_library(${_MNIST_OBJECT_NAME} STATIC ${CMAKE_CURRENT_BINARY_DIR}/mnist_static.o)
+set_target_properties(${_MNIST_OBJECT_NAME} PROPERTIES LINKER_LANGUAGE C)
+
+#-------------------------------------------------------------------------------
+# Sync
+#-------------------------------------------------------------------------------
+
+set(_NAME "iree_experimental_sample_web_static_sync")
+add_executable(${_NAME} "")
+target_include_directories(${_NAME} PUBLIC
+ $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}>
+)
+target_sources(${_NAME}
+ PRIVATE
+ main.c
+ device_sync.c
+ ${CMAKE_CURRENT_BINARY_DIR}/mnist_static.h
+ ${CMAKE_CURRENT_BINARY_DIR}/mnist_bytecode.h
+ ${CMAKE_CURRENT_BINARY_DIR}/mnist_bytecode.c
+)
+set_target_properties(${_NAME} PROPERTIES OUTPUT_NAME "sample-web-static-sync")
+
+# Note: we have to be very careful about dependencies here.
+#
+# The general purpose libraries link in multiple executable loaders and HAL
+# drivers/devices, which include code not compatible with Emscripten.
+target_link_libraries(${_NAME}
+ ${_MNIST_OBJECT_NAME}
+ iree_runtime_runtime
+ iree_hal_local_loaders_static_library_loader
+ iree_hal_local_sync_driver
+)
+
+target_link_options(${_NAME} PRIVATE
+ # https://emscripten.org/docs/porting/connecting_cpp_and_javascript/Interacting-with-code.html#interacting-with-code-ccall-cwrap
+ "-sEXPORTED_FUNCTIONS=['_setup_sample', '_cleanup_sample', '_run_sample']"
+ "-sEXPORTED_RUNTIME_METHODS=['ccall','cwrap']"
+ #
+ "-sASSERTIONS=1"
+ #
+ "-gsource-map"
+ "-source-map-base="
+)
+
+#-------------------------------------------------------------------------------
+# Multithreaded
+#-------------------------------------------------------------------------------
+
+set(_NAME "iree_experimental_sample_web_static_multithreaded")
+add_executable(${_NAME} "")
+target_include_directories(${_NAME} PUBLIC
+ $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}>
+)
+target_sources(${_NAME}
+ PRIVATE
+ main.c
+ device_multithreaded.c
+ ${CMAKE_CURRENT_BINARY_DIR}/mnist_static.h
+ ${CMAKE_CURRENT_BINARY_DIR}/mnist_bytecode.h
+ ${CMAKE_CURRENT_BINARY_DIR}/mnist_bytecode.c
+)
+set_target_properties(${_NAME} PROPERTIES OUTPUT_NAME "sample-web-static-multithreaded")
+
+# Note: we have to be very careful about dependencies here.
+#
+# The general purpose libraries link in multiple executable loaders and HAL
+# drivers/devices, which include code not compatible with Emscripten.
+target_link_libraries(${_NAME}
+ ${_MNIST_OBJECT_NAME}
+ iree_runtime_runtime
+ iree_hal_local_loaders_static_library_loader
+ iree_hal_local_task_driver
+ iree_task_api
+)
+
+target_link_options(${_NAME} PRIVATE
+ # https://emscripten.org/docs/porting/connecting_cpp_and_javascript/Interacting-with-code.html#interacting-with-code-ccall-cwrap
+ "-sEXPORTED_FUNCTIONS=['_setup_sample', '_cleanup_sample', '_run_sample']"
+ "-sEXPORTED_RUNTIME_METHODS=['ccall','cwrap']"
+ #
+ "-sASSERTIONS=1"
+ #
+ "-gsource-map"
+ "-source-map-base="
+ #
+ # https://emscripten.org/docs/porting/pthreads.html#compiling-with-pthreads-enabled
+ "-pthread"
+ # "-sINITIAL_MEMORY=67108864" # 64MB
+ "-sPTHREAD_POOL_SIZE=2"
+ # https://emscripten.org/docs/porting/pthreads.html#additional-flags
+ # "-sPROXY_TO_PTHREAD"
+)
diff --git a/experimental/sample_web_static/README.md b/experimental/sample_web_static/README.md
new file mode 100644
index 0000000..c2d55b5
--- /dev/null
+++ b/experimental/sample_web_static/README.md
@@ -0,0 +1,37 @@
+# Static Web Sample
+
+This experimental sample demonstrates one way to target the web platform with
+IREE. The output artifact is a web page containing an interactive MNIST digits
+classifier.
+
+## Quickstart
+
+1. Install IREE's host tools (e.g. by building the `install` target with CMake)
+2. Install the Emscripten SDK by
+ [following these directions](https://emscripten.org/docs/getting_started/downloads.html)
+3. Initialize your Emscripten environment (e.g. run `emsdk_env.bat`)
+4. From this directory, run `bash ./build_static_emscripten_demo.sh`
+ * You may need to set the path to your host tools install
+5. Open the localhost address linked in the script output
+
+To rebuild most parts of the demo (C runtime, sample HTML, CMake config, etc.),
+press `Ctrl + C` to stop the local webserver and rerun the script.
+
+## How it works
+
+This [MNIST model](../../iree/samples/models/mnist.mlir), also used in the
+[Vision sample](../../iree/samples/vision/), is compiled using the "static
+library" output setting of IREE's compiler (see the
+[Static library sample](../../iree/samples/static_library)). The resulting
+`.h` and `.o` files are compiled together with `main.c`, while the `.vmfb` is
+embedded into a C file that is similarly linked in.
+
+[Emscripten](https://emscripten.org/) is used (via the `emcmake` CMake wrapper)
+to compile the output binary into WebAssembly and JavaScript files.
+
+The provided `index.html` file can be served together with the output `.js`
+and `.wasm` files.
+
+## Multithreading
+
+TODO(scotttodd): this is incomplete - more changes are needed to the C runtime
diff --git a/experimental/sample_web_static/build_static_emscripten_demo.sh b/experimental/sample_web_static/build_static_emscripten_demo.sh
new file mode 100644
index 0000000..e91b96c
--- /dev/null
+++ b/experimental/sample_web_static/build_static_emscripten_demo.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set -e
+
+###############################################################################
+# Setup and checking for dependencies #
+###############################################################################
+
+if ! command -v emcmake &> /dev/null
+then
+  echo "'emcmake' not found, set up your environment according to https://emscripten.org/docs/getting_started/downloads.html"
+  exit 1
+fi
+
+CMAKE_BIN=${CMAKE_BIN:-$(which cmake)}
+ROOT_DIR=$(git rev-parse --show-toplevel)
+
+BUILD_DIR=${ROOT_DIR?}/build-emscripten
+mkdir -p ${BUILD_DIR}
+
+BINARY_DIR=${BUILD_DIR}/experimental/sample_web_static/
+mkdir -p ${BINARY_DIR}
+
+###############################################################################
+# Compile from .mlir input to static C source files using host tools #
+###############################################################################
+
+# TODO(scotttodd): portable path ... discover from python install if on $PATH?
+INSTALL_ROOT="D:\dev\projects\iree-build\install\bin"
+TRANSLATE_TOOL="${INSTALL_ROOT?}/iree-translate.exe"
+EMBED_DATA_TOOL="${INSTALL_ROOT?}/generate_embed_data.exe"
+INPUT_NAME="mnist"
+INPUT_PATH="${ROOT_DIR?}/iree/samples/models/mnist.mlir"
+
+echo "=== Translating MLIR to static library output (.vmfb, .h, .o) ==="
+${TRANSLATE_TOOL?} ${INPUT_PATH} \
+ --iree-mlir-to-vm-bytecode-module \
+ --iree-input-type=mhlo \
+ --iree-hal-target-backends=llvm \
+ --iree-llvm-target-triple=wasm32-unknown-unknown \
+ --iree-llvm-link-embedded=false \
+ --iree-llvm-link-static \
+ --iree-llvm-static-library-output-path=${BINARY_DIR}/${INPUT_NAME}_static.o \
+ --o ${BINARY_DIR}/${INPUT_NAME}.vmfb
+
+echo "=== Embedding bytecode module (.vmfb) into C source files (.h, .c) ==="
+${EMBED_DATA_TOOL?} ${BINARY_DIR}/${INPUT_NAME}.vmfb \
+ --output_header=${BINARY_DIR}/${INPUT_NAME}_bytecode.h \
+ --output_impl=${BINARY_DIR}/${INPUT_NAME}_bytecode.c \
+ --identifier=iree_static_${INPUT_NAME} \
+ --flatten
+
+###############################################################################
+# Build the web artifacts using Emscripten #
+###############################################################################
+
+echo "=== Building web artifacts using Emscripten ==="
+
+pushd ${ROOT_DIR?}/build-emscripten
+
+# Configure using Emscripten's CMake wrapper, then build.
+# Note: The sample creates a task device directly, so no drivers are required,
+# but some targets are gated on specific CMake options.
+emcmake "${CMAKE_BIN?}" -G Ninja .. \
+ -DIREE_HOST_BINARY_ROOT=$PWD/../build-host/install \
+ -DIREE_BUILD_EXPERIMENTAL_WEB_SAMPLES=ON \
+ -DIREE_HAL_DRIVER_DEFAULTS=OFF \
+ -DIREE_HAL_DRIVER_DYLIB=ON \
+ -DIREE_BUILD_COMPILER=OFF \
+ -DIREE_BUILD_TESTS=OFF
+
+"${CMAKE_BIN?}" --build . --target \
+ iree_experimental_sample_web_static_sync
+ # iree_experimental_sample_web_static_multithreaded
+popd
+
+###############################################################################
+# Serve the demo using a local webserver #
+###############################################################################
+
+echo "=== Copying static files (index.html) to the build directory ==="
+
+cp ${ROOT_DIR?}/experimental/sample_web_static/index.html ${BINARY_DIR}
+
+echo "=== Running local webserver ==="
+echo " open at http://localhost:8000/build-emscripten/experimental/sample_web_static/"
+
+# **Note**: this serves from the root so source maps can reference code in the
+# source tree. A real deployment would bundle the output artifacts and serve
+# them from a build/release directory.
+
+# local_server.py is needed when using SharedArrayBuffer, i.e. when building
+# with multithreading:
+# python3 local_server.py --directory ${ROOT_DIR?}
+
+# http.server on its own is fine for single-threaded use, and it doesn't
+# break CORS for external resources like easeljs served from a CDN.
+python3 -m http.server --directory ${ROOT_DIR?}
diff --git a/experimental/sample_web_static/device_multithreaded.c b/experimental/sample_web_static/device_multithreaded.c
new file mode 100644
index 0000000..4121f16
--- /dev/null
+++ b/experimental/sample_web_static/device_multithreaded.c
@@ -0,0 +1,62 @@
+// Copyright 2022 The IREE Authors
+//
+// Licensed under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "iree/hal/local/loaders/static_library_loader.h"
+#include "iree/hal/local/task_device.h"
+#include "iree/task/api.h"
+#include "mnist_static.h"
+
+iree_status_t create_device_with_static_loader(iree_allocator_t host_allocator,
+ iree_hal_device_t** out_device) {
+ iree_hal_task_device_params_t params;
+ iree_hal_task_device_params_initialize(¶ms);
+
+ // Load the statically embedded library.
+ const iree_hal_executable_library_header_t** static_library =
+ mnist_linked_llvm_library_query(
+ IREE_HAL_EXECUTABLE_LIBRARY_LATEST_VERSION,
+ /*reserved=*/NULL);
+ const iree_hal_executable_library_header_t** libraries[1] = {static_library};
+
+ iree_hal_executable_loader_t* library_loader = NULL;
+ iree_status_t status = iree_hal_static_library_loader_create(
+ IREE_ARRAYSIZE(libraries), libraries,
+ iree_hal_executable_import_provider_null(), host_allocator,
+ &library_loader);
+
+ // Create a task executor.
+ iree_task_executor_t* executor = NULL;
+ iree_task_scheduling_mode_t scheduling_mode = 0;
+ iree_host_size_t worker_local_memory = 0;
+ iree_task_topology_t topology;
+ iree_task_topology_initialize(&topology);
+ // TODO(scotttodd): Try with more threads
+ iree_task_topology_initialize_from_group_count(/*group_count=*/1, &topology);
+ if (iree_status_is_ok(status)) {
+ status = iree_task_executor_create(scheduling_mode, &topology,
+ worker_local_memory, host_allocator,
+ &executor);
+ }
+ iree_task_topology_deinitialize(&topology);
+
+ iree_string_view_t identifier = iree_make_cstring_view("task");
+ iree_hal_allocator_t* device_allocator = NULL;
+ if (iree_status_is_ok(status)) {
+ status = iree_hal_allocator_create_heap(identifier, host_allocator,
+ host_allocator, &device_allocator);
+ }
+
+ if (iree_status_is_ok(status)) {
+ status = iree_hal_task_device_create(
+ identifier, ¶ms, executor, /*loader_count=*/1, &library_loader,
+ device_allocator, host_allocator, out_device);
+ }
+
+ iree_hal_allocator_release(device_allocator);
+ iree_task_executor_release(executor);
+ iree_hal_executable_loader_release(library_loader);
+ return status;
+}
diff --git a/experimental/sample_web_static/device_sync.c b/experimental/sample_web_static/device_sync.c
new file mode 100644
index 0000000..82e291d
--- /dev/null
+++ b/experimental/sample_web_static/device_sync.c
@@ -0,0 +1,45 @@
+// Copyright 2022 The IREE Authors
+//
+// Licensed under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "iree/hal/local/loaders/static_library_loader.h"
+#include "iree/hal/local/sync_device.h"
+#include "mnist_static.h"
+
+iree_status_t create_device_with_static_loader(iree_allocator_t host_allocator,
+ iree_hal_device_t** out_device) {
+ iree_hal_sync_device_params_t params;
+ iree_hal_sync_device_params_initialize(¶ms);
+
+ // Load the statically embedded library.
+ const iree_hal_executable_library_header_t** static_library =
+ mnist_linked_llvm_library_query(
+ IREE_HAL_EXECUTABLE_LIBRARY_LATEST_VERSION,
+ /*reserved=*/NULL);
+ const iree_hal_executable_library_header_t** libraries[1] = {static_library};
+
+ iree_hal_executable_loader_t* library_loader = NULL;
+ iree_status_t status = iree_hal_static_library_loader_create(
+ IREE_ARRAYSIZE(libraries), libraries,
+ iree_hal_executable_import_provider_null(), host_allocator,
+ &library_loader);
+
+ iree_string_view_t identifier = iree_make_cstring_view("sync");
+ iree_hal_allocator_t* device_allocator = NULL;
+ if (iree_status_is_ok(status)) {
+ status = iree_hal_allocator_create_heap(identifier, host_allocator,
+ host_allocator, &device_allocator);
+ }
+
+ if (iree_status_is_ok(status)) {
+ status = iree_hal_sync_device_create(
+ identifier, ¶ms, /*loader_count=*/1, &library_loader,
+ device_allocator, host_allocator, out_device);
+ }
+
+ iree_hal_allocator_release(device_allocator);
+ iree_hal_executable_loader_release(library_loader);
+ return status;
+}
diff --git a/experimental/sample_web_static/index.html b/experimental/sample_web_static/index.html
new file mode 100644
index 0000000..d3da38f
--- /dev/null
+++ b/experimental/sample_web_static/index.html
@@ -0,0 +1,235 @@
+<!DOCTYPE html>
+<html>
+
+<!--
+Copyright 2022 The IREE Authors
+
+Licensed under the Apache License v2.0 with LLVM Exceptions.
+See https://llvm.org/LICENSE.txt for license information.
+SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+-->
+
+<head>
+ <meta charset="utf-8" />
+ <title>IREE Static Web Sample</title>
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+
+ <!-- TODO(scotttodd): use local copy for CORS webserver / SharedArrayBuffer workarounds? -->
+ <script src="https://code.createjs.com/1.0.0/easeljs.min.js"></script>
+</head>
+
+<body style="background-color: #2b2c30; color: #ABB2BF">
+ <h1>IREE Static Web Sample</h1>
+
+ <canvas id="drawingCanvas" width="256" height="256"
+ style="border:2px solid #000000; background-color: #FFFFFF;"
+ oncontextmenu="return false;">
+ </canvas>
+ <canvas id="rescaledCanvas" width="28" height="28"
+ style="border:2px solid #000000; background-color: #FFFFFF;">
+ </canvas>
+
+ <br>
+ <div style="border:2px solid #000000; background-color: #CCCCCC; padding: 8px; color: #111111" width="400px" height="300px">
+ <button id="predictButton" disabled onclick="predictDigit()">Predict handwritten digit</button>
+ <br>
+ Prediction result: <div id="predictionResult"></div>
+ </div>
+
+ <script>
+ let setupNativeSample;
+ let cleanupNativeSample;
+ let runNativeSample;
+ let nativeState;
+ const predictionResultElement = document.getElementById("predictionResult");
+ const predictButtonElement = document.getElementById("predictButton");
+ let initialized = false;
+
+ const imagePixelCount = 28 * 28;
+ let imageBuffer;
+
+ var Module = {
+ print: function(text) {
+ console.log(text);
+ },
+ printErr: function(text) {
+ console.error(text);
+ },
+ onRuntimeInitialized: function() {
+ console.log("WebAssembly module onRuntimeInitialized()");
+
+ setupNativeSample = Module.cwrap("setup_sample", "number", []);
+ cleanupNativeSample = Module.cwrap("cleanup_sample", null, ["number"]);
+ runNativeSample = Module.cwrap("run_sample", "number", ["number", "number"]);
+
+ setupSample();
+ },
+ // https://emscripten.org/docs/api_reference/module.html#Module.noInitialRun
+ noInitialRun: true,
+ };
+
+ function setupSample() {
+ nativeState = setupNativeSample();
+ predictButtonElement.disabled = false;
+ imageBuffer = Module._malloc(imagePixelCount * Float32Array.BYTES_PER_ELEMENT);
+ initialized = true;
+ }
+
+ // TODO(scotttodd): call this on page suspend?
+ function cleanupSample() {
+ initialized = false;
+      Module._free(imageBuffer);
+ predictButtonElement.disabled = true;
+      cleanupNativeSample(nativeState);
+ nativeState = null;
+ }
+
+ function predictDigit() {
+ const rawImageData = getRescaledCanvasData();
+ preprocessImageData(rawImageData);
+
+      const result = runNativeSample(nativeState, imageBuffer);
+ if (result != -1) {
+ predictionResultElement.innerHTML = result;
+ } else {
+ predictionResultElement.innerHTML = "Error";
+ }
+ }
+
+ // https://becominghuman.ai/passing-and-returning-webassembly-array-parameters-a0f572c65d97
+ // https://developers.google.com/web/updates/2018/03/emscripting-a-c-library#get_an_image_from_javascript_into_wasm
+ function preprocessImageData(rawImageData) {
+ // * getImageData() returns a Uint8ClampedArray with RGBA image data
+ // * this MNIST model takes tensor<1x28x28x1xf32> with grayscale pixels
+ // in [0.0, 1.0]
+
+ // This conversion is terrible, but this is a toy demo with a small image
+ // Hopefully there aren't any logic / iteration order issues...
+ const typedArray = new Float32Array(imagePixelCount);
+ for (let y = 0; y < 28; ++y) {
+ for (let x = 0; x < 28; ++x) {
+ const typedIndex = y * 28 + x;
+        const rawIndex = 4 * (y * 28 + x) + 3; // +3 selects the alpha channel (srgb RGBA layout)
+ typedArray[typedIndex] = rawImageData.data[rawIndex] / 255.0;
+ }
+ }
+
+ // Copy into Wasm heap.
+ // Note: we could have done the conversion in-place, but this is demo code
+ Module.HEAPF32.set(typedArray, imageBuffer >> 2);
+ }
+
+ </script>
+ <script src="sample-web-static-sync.js"></script>
+ <!-- <script src="sample-web-static-multithreaded.js"></script> -->
+
+
+ <script>
+ // Forked from:
+ // https://createjs.com/demos/easeljs/curveto
+ // https://github.com/CreateJS/EaselJS/blob/master/examples/CurveTo.html
+
+ let drawingCanvasElement;
+ let rescaledCanvasElement, rescaledCanvasContext;
+ let stage;
+ let drawingCanvasShape;
+ let oldPt, oldMidPt;
+ let titleText;
+ const primaryColor = "#000000";
+ const eraseColor = "#FFFFFF";
+ const stroke = 32;
+
+ function initDrawing() {
+ drawingCanvasElement = document.getElementById("drawingCanvas");
+
+ rescaledCanvasElement = document.getElementById("rescaledCanvas");
+ rescaledCanvasContext = rescaledCanvasElement.getContext("2d");
+ rescaledCanvasContext.imageSmoothingEnabled = false;
+ rescaledCanvasContext.mozImageSmoothingEnabled = false;
+ rescaledCanvasContext.webkitImageSmoothingEnabled = false;
+ rescaledCanvasContext.msImageSmoothingEnabled = false;
+
+ stage = new createjs.Stage(drawingCanvasElement);
+ stage.autoClear = false;
+ stage.enableDOMEvents(true);
+
+ createjs.Touch.enable(stage);
+ createjs.Ticker.framerate = 24;
+
+ stage.addEventListener("stagemousedown", handleMouseDown);
+ stage.addEventListener("stagemouseup", handleMouseUp);
+
+ drawingCanvasShape = new createjs.Shape();
+ stage.addChild(drawingCanvasShape);
+
+ // Add instruction text.
+ titleText = new createjs.Text("Click and Drag to draw", "18px Arial", "#000000");
+ titleText.x = 30;
+ titleText.y = 100;
+ stage.addChild(titleText);
+
+ stage.update();
+ }
+
+ function handleMouseDown(event) {
+ if (!event.primary && !event.secondary) { return; }
+
+ if (stage.contains(titleText)) {
+ stage.clear();
+ stage.removeChild(titleText);
+ }
+
+ oldPt = new createjs.Point(stage.mouseX, stage.mouseY);
+ oldMidPt = oldPt.clone();
+ stage.addEventListener("stagemousemove", handleMouseMove);
+ }
+
+ function handleMouseMove(event) {
+ if (!event.primary && !event.secondary) { return; }
+
+ const midPt = new createjs.Point(
+ oldPt.x + stage.mouseX >> 1, oldPt.y + stage.mouseY >> 1);
+
+ const color = event.nativeEvent.which == 1 ? primaryColor : eraseColor;
+ drawingCanvasShape.graphics.clear()
+ .setStrokeStyle(stroke, 'round', 'round')
+ .beginStroke(color).moveTo(midPt.x, midPt.y)
+ .curveTo(oldPt.x, oldPt.y, oldMidPt.x, oldMidPt.y);
+
+ oldPt.x = stage.mouseX;
+ oldPt.y = stage.mouseY;
+ oldMidPt.x = midPt.x;
+ oldMidPt.y = midPt.y;
+
+ stage.update();
+ updateRescaledCanvas();
+
+ if (initialized) {
+ // TODO(scotttodd): debounce / rate limit this
+ predictDigit();
+ }
+ }
+
+ function handleMouseUp(event) {
+ if (!event.primary && !event.default) { return; }
+ stage.removeEventListener("stagemousemove", handleMouseMove);
+ }
+
+ function updateRescaledCanvas() {
+ rescaledCanvasContext.drawImage(
+ drawingCanvasElement,
+ /*sx=*/0, /*sy=*/0,
+ /*sWidth=*/256, /*sHeight=*/256,
+ /*dx=*/0, /*dy=*/0,
+ /*dWidth=*/28, /*dHeight=*/28);
+ }
+
+ function getRescaledCanvasData() {
+ return rescaledCanvasContext.getImageData(0, 0, 28, 28);
+ }
+
+ initDrawing();
+ </script>
+</body>
+
+</html>
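
For reference, the conversion that `preprocessImageData` performs above (RGBA canvas pixels to a 1x28x28x1 float32 tensor in [0.0, 1.0]) can be written in numpy as a short sketch; this is only an illustration of the math, and the JavaScript above is what actually runs in the browser.

    import numpy as np

    def preprocess_image_data(rgba_bytes: bytes) -> np.ndarray:
        """Converts 28x28 RGBA canvas pixels to a 1x28x28x1 float32 tensor in [0, 1]."""
        rgba = np.frombuffer(rgba_bytes, dtype=np.uint8).reshape(28, 28, 4)
        # The JS reads index +3 of each pixel, i.e. the alpha channel.
        alpha = rgba[:, :, 3].astype(np.float32) / 255.0
        return alpha.reshape(1, 28, 28, 1)
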
diff --git a/experimental/sample_web_static/local_server.py b/experimental/sample_web_static/local_server.py
new file mode 100644
index 0000000..835a760
--- /dev/null
+++ b/experimental/sample_web_static/local_server.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python3
+# Copyright 2022 The IREE Authors
+#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+"""Local server for development, with support for CORS headers and MIME types.
+
+NOTE: This is NOT suitable for production serving, it is just a slightly
+extended version of https://docs.python.org/3/library/http.server.html.
+
+Usage:
+ python3 local_server.py --directory {build_dir}
+ (then open http://localhost:8000/ in your browser)
+"""
+
+import os
+from functools import partial
+from http import server
+
+
+class CORSHTTPRequestHandler(server.SimpleHTTPRequestHandler):
+
+ def __init__(self, *args, **kwargs):
+ # Include MIME types for files we expect to be serving.
+ # https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types
+ self.extensions_map.update({
+ ".js": "application/javascript",
+ ".wasm": "application/wasm",
+ })
+ super().__init__(*args, **kwargs)
+
+ # Inspiration for this hack: https://stackoverflow.com/a/13354482
+ def end_headers(self):
+ self.send_cors_headers()
+
+ server.SimpleHTTPRequestHandler.end_headers(self)
+
+ def send_cors_headers(self):
+ # Emscripten uses SharedArrayBuffer for its multithreading, which requires
+ # Cross Origin Opener Policy and Cross Origin Embedder Policy headers:
+ # * https://emscripten.org/docs/porting/pthreads.html
+ # * https://developer.chrome.com/blog/enabling-shared-array-buffer/
+ self.send_header("Cross-Origin-Embedder-Policy", "require-corp")
+ self.send_header("Cross-Origin-Opener-Policy", "same-origin")
+
+
+if __name__ == '__main__':
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--directory',
+ '-d',
+ default=os.getcwd(),
+ help='Specify alternative directory '
+                      '[default: current directory]')
+ parser.add_argument('port',
+ action='store',
+ default=8000,
+ type=int,
+ nargs='?',
+ help='Specify alternate port [default: 8000]')
+ args = parser.parse_args()
+
+ server.test(HandlerClass=partial(CORSHTTPRequestHandler,
+ directory=args.directory),
+ port=args.port)
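
To sanity-check that the handler emits the cross-origin isolation headers described above, a quick probe from another terminal (assuming the server is already running on the default port 8000) could look like this:

    import urllib.request

    # Assumption: `python3 local_server.py --directory <build_dir>` is running.
    with urllib.request.urlopen("http://localhost:8000/") as resp:
        print(resp.headers.get("Cross-Origin-Embedder-Policy"))  # expect: require-corp
        print(resp.headers.get("Cross-Origin-Opener-Policy"))    # expect: same-origin
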
diff --git a/experimental/sample_web_static/main.c b/experimental/sample_web_static/main.c
new file mode 100644
index 0000000..23441bf
--- /dev/null
+++ b/experimental/sample_web_static/main.c
@@ -0,0 +1,183 @@
+// Copyright 2022 The IREE Authors
+//
+// Licensed under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include <float.h>
+#include <stdio.h>
+
+#include "iree/runtime/api.h"
+#include "iree/vm/bytecode_module.h"
+#include "mnist_bytecode.h"
+
+//===----------------------------------------------------------------------===//
+// Public API
+//===----------------------------------------------------------------------===//
+
+typedef struct iree_sample_state_t iree_sample_state_t;
+static void iree_sample_state_initialize(iree_sample_state_t* out_state);
+
+// TODO(scotttodd): figure out error handling and state management
+// * out_state and return status would make sense, but emscripten...
+iree_sample_state_t* setup_sample();
+void cleanup_sample(iree_sample_state_t* state);
+
+int run_sample(iree_sample_state_t* state, float* image_data);
+
+//===----------------------------------------------------------------------===//
+// Implementation
+//===----------------------------------------------------------------------===//
+
+extern iree_status_t create_device_with_static_loader(
+ iree_allocator_t host_allocator, iree_hal_device_t** out_device);
+
+typedef struct iree_sample_state_t {
+ iree_runtime_instance_t* instance;
+ iree_hal_device_t* device;
+ iree_runtime_session_t* session;
+ iree_vm_module_t* module;
+ iree_runtime_call_t call;
+} iree_sample_state_t;
+
+iree_status_t create_bytecode_module(iree_vm_module_t** out_module) {
+ const struct iree_file_toc_t* module_file_toc = iree_static_mnist_create();
+ iree_const_byte_span_t module_data =
+ iree_make_const_byte_span(module_file_toc->data, module_file_toc->size);
+ return iree_vm_bytecode_module_create(module_data, iree_allocator_null(),
+ iree_allocator_system(), out_module);
+}
+
+iree_sample_state_t* setup_sample() {
+ iree_sample_state_t* state = NULL;
+ iree_status_t status = iree_allocator_malloc(
+ iree_allocator_system(), sizeof(iree_sample_state_t), (void**)&state);
+
+ iree_runtime_instance_options_t instance_options;
+ iree_runtime_instance_options_initialize(IREE_API_VERSION_LATEST,
+ &instance_options);
+ // Note: no call to iree_runtime_instance_options_use_all_available_drivers().
+
+ if (iree_status_is_ok(status)) {
+ status = iree_runtime_instance_create(
+ &instance_options, iree_allocator_system(), &state->instance);
+ }
+
+ if (iree_status_is_ok(status)) {
+ status = create_device_with_static_loader(iree_allocator_system(),
+ &state->device);
+ }
+
+ iree_runtime_session_options_t session_options;
+ iree_runtime_session_options_initialize(&session_options);
+ iree_runtime_session_t* session = NULL;
+ if (iree_status_is_ok(status)) {
+ status = iree_runtime_session_create_with_device(
+ state->instance, &session_options, state->device,
+ iree_runtime_instance_host_allocator(state->instance), &state->session);
+ }
+
+ if (iree_status_is_ok(status)) {
+ status = create_bytecode_module(&state->module);
+ }
+ if (iree_status_is_ok(status)) {
+ status = iree_runtime_session_append_module(state->session, state->module);
+ }
+
+ const char kMainFunctionName[] = "module.predict";
+ if (iree_status_is_ok(status)) {
+ status = iree_runtime_call_initialize_by_name(
+ state->session, iree_make_cstring_view(kMainFunctionName),
+ &state->call);
+ }
+
+ if (!iree_status_is_ok(status)) {
+ iree_status_fprint(stderr, status);
+ iree_status_free(status);
+ cleanup_sample(state);
+ return NULL;
+ }
+
+ return state;
+}
+
+void cleanup_sample(iree_sample_state_t* state) {
+ iree_runtime_call_deinitialize(&state->call);
+
+ // Cleanup session and instance.
+ iree_hal_device_release(state->device);
+ iree_runtime_session_release(state->session);
+ iree_runtime_instance_release(state->instance);
+ iree_vm_module_release(state->module);
+
+ free(state);
+}
+
+int run_sample(iree_sample_state_t* state, float* image_data) {
+ iree_status_t status = iree_ok_status();
+
+ iree_runtime_call_reset(&state->call);
+
+ iree_hal_buffer_view_t* arg_buffer_view = NULL;
+ iree_hal_dim_t buffer_shape[] = {1, 28, 28, 1};
+ iree_hal_memory_type_t input_memory_type =
+ IREE_HAL_MEMORY_TYPE_HOST_LOCAL | IREE_HAL_MEMORY_TYPE_DEVICE_VISIBLE;
+ if (iree_status_is_ok(status)) {
+ status = iree_hal_buffer_view_allocate_buffer(
+ iree_hal_device_allocator(state->device), buffer_shape,
+ IREE_ARRAYSIZE(buffer_shape), IREE_HAL_ELEMENT_TYPE_FLOAT_32,
+ IREE_HAL_ENCODING_TYPE_DENSE_ROW_MAJOR, input_memory_type,
+ IREE_HAL_BUFFER_USAGE_DISPATCH | IREE_HAL_BUFFER_USAGE_TRANSFER,
+ iree_make_const_byte_span((void*)image_data, sizeof(float) * 28 * 28),
+ &arg_buffer_view);
+ }
+ if (iree_status_is_ok(status)) {
+ status = iree_runtime_call_inputs_push_back_buffer_view(&state->call,
+ arg_buffer_view);
+ }
+ iree_hal_buffer_view_release(arg_buffer_view);
+
+ if (iree_status_is_ok(status)) {
+ status = iree_runtime_call_invoke(&state->call, /*flags=*/0);
+ }
+
+ // Get the result buffers from the invocation.
+ iree_hal_buffer_view_t* ret_buffer_view = NULL;
+ if (iree_status_is_ok(status)) {
+ status = iree_runtime_call_outputs_pop_front_buffer_view(&state->call,
+ &ret_buffer_view);
+ }
+
+  // Read back the results. The output of the mnist model is a 1x10 tensor of
+  // prediction confidence values, one for each digit in [0, 9].
+ float predictions[1 * 10] = {0.0f};
+ if (iree_status_is_ok(status)) {
+ status =
+ iree_hal_buffer_read_data(iree_hal_buffer_view_buffer(ret_buffer_view),
+ 0, predictions, sizeof(predictions));
+ }
+ iree_hal_buffer_view_release(ret_buffer_view);
+
+ if (!iree_status_is_ok(status)) {
+ iree_status_fprint(stderr, status);
+ iree_status_free(status);
+ return -1;
+ }
+
+  // Get the index of the highest-confidence prediction. Initialize with
+  // -FLT_MAX (FLT_MIN is the smallest positive float) so negative scores are
+  // handled correctly as well.
+  float result_val = -FLT_MAX;
+ int result_idx = 0;
+ for (iree_host_size_t i = 0; i < IREE_ARRAYSIZE(predictions); ++i) {
+ if (predictions[i] > result_val) {
+ result_val = predictions[i];
+ result_idx = i;
+ }
+ }
+ fprintf(stdout,
+ "Prediction: %d, confidences: [%.2f, %.2f, %.2f, %.2f, %.2f, %.2f, "
+ "%.2f, %.2f, %.2f, %.2f]\n",
+ result_idx, predictions[0], predictions[1], predictions[2],
+ predictions[3], predictions[4], predictions[5], predictions[6],
+ predictions[7], predictions[8], predictions[9]);
+ return result_idx;
+}
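
The readout at the end of run_sample is just an argmax over the ten confidence values; expressed in numpy for reference (illustrative only, not part of the sample):

    import numpy as np

    # `predictions` stands in for the 1x10 output read back from the buffer view.
    predictions = np.array([0.01, 0.02, 0.9, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01])
    result_idx = int(np.argmax(predictions))  # index of the most confident digit
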
diff --git a/integrations/tensorflow/WORKSPACE b/integrations/tensorflow/WORKSPACE
index c2f5d1e..7543d7b 100644
--- a/integrations/tensorflow/WORKSPACE
+++ b/integrations/tensorflow/WORKSPACE
@@ -7,7 +7,7 @@
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
-TENSORFLOW_COMMIT = "7e6137a2bc46a10664fd58c1686719a520e024c2"
+TENSORFLOW_COMMIT = "91d9e658c12b8e2c7fb8eacba01e89f614d410ac"
git_repository(
name = "org_tensorflow",
diff --git a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/ConvertToLoops.cpp b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/ConvertToLoops.cpp
index 985c004..4f9f8c5 100644
--- a/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/ConvertToLoops.cpp
+++ b/integrations/tensorflow/iree-dialects/lib/Dialect/LinalgExt/Transforms/ConvertToLoops.cpp
@@ -99,7 +99,7 @@
void runOnOperation() override {
MLIRContext *context = &getContext();
- OwningRewritePatternList patterns(context);
+ RewritePatternSet patterns(context);
patterns.insert<TiledOpInterfaceLowerToLoopsPattern>(context);
if (failed(applyPatternsAndFoldGreedily(getOperation(),
std::move(patterns)))) {
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/ConvertToMHLO.cpp b/integrations/tensorflow/iree_tf_compiler/TF/ConvertToMHLO.cpp
index 969483b..5543fdc 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/ConvertToMHLO.cpp
+++ b/integrations/tensorflow/iree_tf_compiler/TF/ConvertToMHLO.cpp
@@ -58,20 +58,20 @@
// Lower TF Patterns must be separate from canonicalization patterns as
// they are sometimes inversions of each other.
- OwningRewritePatternList lowerTfPatterns(&getContext());
+ RewritePatternSet lowerTfPatterns(&getContext());
mlir::TF::PopulateTFLoweringBeforeHLOPatterns(context, &lowerTfPatterns);
- OwningRewritePatternList canonicalizePatterns(&getContext());
+ RewritePatternSet canonicalizePatterns(&getContext());
for (auto op : context->getRegisteredOperations()) {
op.getCanonicalizationPatterns(canonicalizePatterns, context);
}
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
// Note that the `OperationConverter` orders patterns lexicographically by:
// 1) Ascending legalization depth (i.e., minimum number of patterns
// necessary to arrive at conversion target).
// 2) Descending pattern benefit.
- // 3) Order of patterns in `OwningRewritePatternList`.
+ // 3) Order of patterns in `RewritePatternSet`.
// Add TF->HLO legalization patterns.
mhlo::PopulateLegalizeTfPatterns(context, &patterns);
diff --git a/integrations/tensorflow/iree_tf_compiler/TF/LowerGlobalTensors.cpp b/integrations/tensorflow/iree_tf_compiler/TF/LowerGlobalTensors.cpp
index 20ca6e7..70dbbc8 100644
--- a/integrations/tensorflow/iree_tf_compiler/TF/LowerGlobalTensors.cpp
+++ b/integrations/tensorflow/iree_tf_compiler/TF/LowerGlobalTensors.cpp
@@ -7,6 +7,7 @@
#include "iree-dialects/Dialect/Input/InputDialect.h"
#include "iree-dialects/Dialect/Input/InputOps.h"
#include "iree_tf_compiler/TF/Passes.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "mlir/Dialect/Affine/Utils.h"
@@ -85,7 +86,7 @@
if (!tf_saved_model::IsExported(func)) {
continue;
}
- SmallVector<unsigned, 4> argsToErase;
+ llvm::BitVector argsToErase(func.getNumArguments());
OpBuilder builder(func.getBody());
SmallVector<Value, 8> typeConversionWorklist;
for (int i = 0, e = func.getNumArguments(); i < e; i++) {
@@ -104,7 +105,7 @@
symNameToFlowSymName[globalTensor.sym_name()]));
typeConversionWorklist.push_back(variableAddressOp.getResult());
func.getArgument(i).replaceAllUsesWith(variableAddressOp.getResult());
- argsToErase.push_back(i);
+ argsToErase.set(i);
}
func.eraseArguments(argsToErase);
diff --git a/integrations/tensorflow/test/iree_tf_tests/math/llvmaot__reduce_all.run b/integrations/tensorflow/test/iree_tf_tests/math/llvmaot__reduce_all.run
index b8ccb5f..e09eb37 100644
--- a/integrations/tensorflow/test/iree_tf_tests/math/llvmaot__reduce_all.run
+++ b/integrations/tensorflow/test/iree_tf_tests/math/llvmaot__reduce_all.run
@@ -1,3 +1,2 @@
-# XFAIL: *
# REQUIRES: llvmaot
# RUN: %PYTHON -m iree_tf_tests.math.math_test --target_backends=iree_llvmaot --dynamic_dims=false --functions=reduce_all -artifacts_dir=%t
diff --git a/integrations/tensorflow/test/iree_tf_tests/uncategorized/llvmaot__broadcast_to.run b/integrations/tensorflow/test/iree_tf_tests/uncategorized/llvmaot__broadcast_to.run
index 4fa6fd1..f4d8bfe 100644
--- a/integrations/tensorflow/test/iree_tf_tests/uncategorized/llvmaot__broadcast_to.run
+++ b/integrations/tensorflow/test/iree_tf_tests/uncategorized/llvmaot__broadcast_to.run
@@ -1,3 +1,2 @@
# REQUIRES: llvmaot
# RUN: %PYTHON -m iree_tf_tests.uncategorized.broadcast_to_test --target_backends=iree_llvmaot -artifacts_dir=%t
-# XFAIL: *
diff --git a/integrations/tensorflow/test/iree_tf_tests/uncategorized/llvmaot__range.run b/integrations/tensorflow/test/iree_tf_tests/uncategorized/llvmaot__range.run
index 95792cc..c6db59b 100644
--- a/integrations/tensorflow/test/iree_tf_tests/uncategorized/llvmaot__range.run
+++ b/integrations/tensorflow/test/iree_tf_tests/uncategorized/llvmaot__range.run
@@ -1,3 +1,2 @@
# REQUIRES: llvmaot
# RUN: %PYTHON -m iree_tf_tests.uncategorized.range_test --target_backends=iree_llvmaot -artifacts_dir=%t
-# XFAIL: *
diff --git a/integrations/tensorflow/test/iree_tf_tests/uncategorized/vulkan__broadcast_to.run b/integrations/tensorflow/test/iree_tf_tests/uncategorized/vulkan__broadcast_to.run
index 6a5212f..093208c 100644
--- a/integrations/tensorflow/test/iree_tf_tests/uncategorized/vulkan__broadcast_to.run
+++ b/integrations/tensorflow/test/iree_tf_tests/uncategorized/vulkan__broadcast_to.run
@@ -1,3 +1,2 @@
# REQUIRES: vulkan
# RUN: %PYTHON -m iree_tf_tests.uncategorized.broadcast_to_test --target_backends=iree_vulkan -artifacts_dir=%t
-# XFAIL: *
diff --git a/integrations/tensorflow/test/iree_tf_tests/uncategorized/vulkan__range.run b/integrations/tensorflow/test/iree_tf_tests/uncategorized/vulkan__range.run
index 256ccc1..f02a669 100644
--- a/integrations/tensorflow/test/iree_tf_tests/uncategorized/vulkan__range.run
+++ b/integrations/tensorflow/test/iree_tf_tests/uncategorized/vulkan__range.run
@@ -1,3 +1,2 @@
# REQUIRES: vulkan
# RUN: %PYTHON -m iree_tf_tests.uncategorized.range_test --target_backends=iree_vulkan -artifacts_dir=%t
-# XFAIL: *
diff --git a/iree/build_defs.oss.bzl b/iree/build_defs.oss.bzl
index 0be258b..f9693c6 100644
--- a/iree/build_defs.oss.bzl
+++ b/iree/build_defs.oss.bzl
@@ -9,7 +9,10 @@
# Target to the FileCheck binary.
INTREE_FILECHECK_TARGET = "@llvm-project//llvm:FileCheck"
-IREE_CUDA_DEPS = ["//iree/hal/cuda/registration"]
+# Temporarily disabled pending build system changes.
+IREE_CUDA_DEPS = [
+ # "//iree/hal/cuda/registration"
+]
def platform_trampoline_deps(basename, path = "base"):
"""Produce a list of deps for the given `basename` platform target.
diff --git a/iree/compiler/Codegen/Common/BUILD b/iree/compiler/Codegen/Common/BUILD
index 0490c72..6717bad 100644
--- a/iree/compiler/Codegen/Common/BUILD
+++ b/iree/compiler/Codegen/Common/BUILD
@@ -45,8 +45,10 @@
"IREEComprehensiveBufferizePass.cpp",
"LinalgBufferizePass.cpp",
"OptimizeVectorTransferPass.cpp",
+ "PolynomialApproximationPass.cpp",
"RemoveTrivialLoops.cpp",
"SetNumWorkgroupsPass.cpp",
+ "TypePropagationPass.cpp",
"VectorizeConv.cpp",
"VectorizeMMT4d.cpp",
],
@@ -70,8 +72,8 @@
"@llvm-project//mlir:AffineBufferizableOpInterfaceImpl",
"@llvm-project//mlir:AffineUtils",
"@llvm-project//mlir:Analysis",
- "@llvm-project//mlir:ArithBufferizableOpInterfaceImpl",
"@llvm-project//mlir:ArithmeticDialect",
+ "@llvm-project//mlir:ArithmeticTransforms",
"@llvm-project//mlir:BufferizationDialect",
"@llvm-project//mlir:BufferizationTransforms",
"@llvm-project//mlir:CFGTransforms",
@@ -84,12 +86,14 @@
"@llvm-project//mlir:LinalgInterfaces",
"@llvm-project//mlir:LinalgOps",
"@llvm-project//mlir:LinalgTransforms",
+ "@llvm-project//mlir:MathTransforms",
"@llvm-project//mlir:MemRefDialect",
"@llvm-project//mlir:MemRefTransforms",
"@llvm-project//mlir:ModuleBufferization",
"@llvm-project//mlir:Pass",
"@llvm-project//mlir:SCFBufferizableOpInterfaceImpl",
"@llvm-project//mlir:SCFDialect",
+ "@llvm-project//mlir:SCFTransforms",
"@llvm-project//mlir:SideEffectInterfaces",
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:Support",
diff --git a/iree/compiler/Codegen/Common/BufferizationAnalysis.cpp b/iree/compiler/Codegen/Common/BufferizationAnalysis.cpp
index 9192a78..8b58f38 100644
--- a/iree/compiler/Codegen/Common/BufferizationAnalysis.cpp
+++ b/iree/compiler/Codegen/Common/BufferizationAnalysis.cpp
@@ -362,56 +362,102 @@
return success();
}
-/// Look for destructive update loop pattern.
-///
-/// ```mlir
-/// %result = scf.for %arg0 = ... iter_args(%arg1 = %init) {
-/// %st = subtensor %arg1[...]
-///
-/// %yieldVal = tensor.insert_slice %val, %arg1[...]
-/// scf.yield %yieldVal
-/// }
-///
-/// `%result`, `%arg1` and `%yieldVal` are all already in the same equivalence
-/// class. `%st` and `%arg` can be added to the same equivalence class even
-/// though `%arg1` has multiple uses. Same is true for `%yieldVal` and
-/// `%arg1`. Here we also verify there are no other "value" uses of
-/// `%arg1`. This might be overly constraining, but we can relax gradually.
-static LogicalResult hasDestructiveUpdateLoopPattern(scf::ForOp forOp,
- BufferizationPlan &plan) {
- for (BlockArgument arg : forOp.getRegionIterArgs()) {
- auto isDestructiveUpdateUses = [&](OpOperand &use) -> bool {
- Operation *user = use.getOwner();
- return TypeSwitch<Operation *, bool>(user)
- .Case<tensor::ExtractSliceOp>([&](tensor::ExtractSliceOp sliceOp) {
- return sliceOp.source() == arg;
- })
- .Case<tensor::InsertSliceOp>(
- [&](tensor::InsertSliceOp subTensorInsertOp) {
- return subTensorInsertOp.dest() == arg;
- })
- .Case<memref::DimOp, scf::YieldOp, tensor::DimOp>(
- [&](auto op) { return true; })
- .Default([&](Operation *op) { return false; });
- };
- if (llvm::all_of(arg.getUses(), isDestructiveUpdateUses)) {
- for (Operation *user : arg.getUsers()) {
- TypeSwitch<Operation *>(user)
- .Case<tensor::ExtractSliceOp>([&](tensor::ExtractSliceOp sliceOp) {
- plan.unionSets(sliceOp.source(), sliceOp.result());
- })
- .Case<tensor::InsertSliceOp>(
- [&](tensor::InsertSliceOp subTensorInsertOp) {
- if (!isFromReadOnlyTensor(subTensorInsertOp.source(), plan)) {
- plan.unionSets(subTensorInsertOp.source(),
- subTensorInsertOp.dest());
- }
- })
- .Default([&](Operation *) {});
+/// Looks for a destructive update pattern involving `source` using these
+/// constraints:
+/// - a single update op (tensor.insert_slice or vector.transfer_write) where
+///   `source` is the `dest` operand.
+/// - all read ops (`tensor.extract_slice`/`vector.transfer_read`) dominate the
+///   update op.
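+///
+/// A minimal sketch of the shape this looks for (illustrative only, not taken
+/// verbatim from a test):
+///
+/// ```mlir
+/// %st = tensor.extract_slice %source[...]                // read, dominates the update
+/// ...
+/// %updated = tensor.insert_slice %val into %source[...]  // single update use
+/// ```
+///
+/// When the constraints hold, each extract_slice source/result pair and the
+/// insert_slice source/dest pair are placed in the same equivalence class.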
+static void hasDestructiveUpdatePattern(Value source, BufferizationPlan &plan) {
+ auto isUpdateOp = [](Operation *op) {
+ return isa<tensor::InsertSliceOp, vector::TransferWriteOp>(op);
+ };
+ auto isReadOp = [](Operation *op) {
+ return isa<tensor::ExtractSliceOp, vector::TransferReadOp>(op);
+ };
+ auto getDest = [](Operation *op) -> Value {
+ if (auto insertSliceOp = dyn_cast<tensor::InsertSliceOp>(op)) {
+ return insertSliceOp.dest();
+ }
+ if (auto transferWriteOp = dyn_cast<vector::TransferWriteOp>(op)) {
+ return transferWriteOp.source();
+ }
+ return nullptr;
+ };
+ auto getSource = [](Operation *op) -> Value {
+ if (auto extractSliceOp = dyn_cast<tensor::ExtractSliceOp>(op)) {
+ return extractSliceOp.source();
+ }
+ if (auto transferReadOp = dyn_cast<vector::TransferReadOp>(op)) {
+ return transferReadOp.source();
+ }
+ return nullptr;
+ };
+ // `source` should have exactly one update op use (tensor.insert_slice or
+ // vector.transfer_write) where it is the `dest` operand.
+ Operation *updateOp = nullptr;
+ for (OpOperand &use : source.getUses()) {
+ auto user = use.getOwner();
+ // Process only update op uses here.
+ if (!isUpdateOp(user)) continue;
+ // If an update op use has already been found, abort.
+ if (updateOp) {
+ return;
+ }
+ // If the use is not the `dest` operand, abort.
+ Value dest = getDest(user);
+ assert(dest && "unable to get dest of update op");
+ if (use.get() != dest) {
+ return;
+ }
+ if (isFromReadOnlyTensor(dest, plan)) {
+ return;
+ }
+ updateOp = user;
+ }
+ // The destructive update pattern requires exactly one update op use.
+ if (!updateOp) {
+ return;
+ }
+
+ Block *updateOpBlock = updateOp->getBlock();
+ for (OpOperand &use : source.getUses()) {
+ Operation *user = use.getOwner();
+ if (user == updateOp) continue;
+ if (isReadOp(user)) {
+ Value source = getSource(user);
+ assert(source && "unable to find source from read op");
+ if (source != use.get()) {
+ return;
+ }
+ // The read must dominate the insert op. For now just check that it is in the
+ // same block and before it.
+ if (user->getBlock() != updateOpBlock ||
+ !user->isBeforeInBlock(updateOp)) {
+ return;
+ }
+ continue;
+ } else if (isa<scf::YieldOp, tensor::DimOp>(user)) {
+ continue;
+ }
+ // Unaccounted-for use; return without doing anything.
+ return;
+ }
+
+ // Found a destructive update pattern. Tie together each
+ // - extract_slice source and result
+ // - insert_slice source and dest
+ for (Operation *user : source.getUsers()) {
+ if (auto extractSliceOp = dyn_cast<tensor::ExtractSliceOp>(user)) {
+ plan.unionSets(extractSliceOp.source(), extractSliceOp.result());
+ continue;
+ }
+ if (auto insertSliceOp = dyn_cast<tensor::InsertSliceOp>(user)) {
+ if (!isFromReadOnlyTensor(insertSliceOp.source(), plan)) {
+ plan.unionSets(insertSliceOp.source(), insertSliceOp.dest());
}
}
}
- return success();
}
/// Ties together operands for operand fusion as exists today by reusing buffer
@@ -521,11 +567,16 @@
})
.Case<vector::TransferReadOp>(
[&](vector::TransferReadOp transferReadOp) {
- plan.insert(transferReadOp.source());
+ if (transferReadOp.source().getType().isa<RankedTensorType>()) {
+ plan.insert(transferReadOp.source());
+ }
return success();
})
.Case<vector::TransferWriteOp>(
[&](vector::TransferWriteOp transferWriteOp) {
+ if (!transferWriteOp.result().getType().isa<RankedTensorType>()) {
+ return success();
+ }
return analyseDestructiveUpdateOp(transferWriteOp, nullptr,
transferWriteOp.source(),
transferWriteOp.result(), plan);
@@ -544,13 +595,17 @@
plan.dump();
});
- if (funcOp
- .walk([&](scf::ForOp forOp) -> WalkResult {
- return hasDestructiveUpdateLoopPattern(forOp, plan);
- })
- .wasInterrupted()) {
- return failure();
- }
+ funcOp.walk([&](Operation *updateOp) {
+ if (auto insertSliceOp = dyn_cast<tensor::InsertSliceOp>(updateOp)) {
+ hasDestructiveUpdatePattern(insertSliceOp.dest(), plan);
+ return;
+ }
+ if (auto vectorWriteOp = dyn_cast<vector::TransferWriteOp>(updateOp)) {
+ if (vectorWriteOp.source().getType().isa<RankedTensorType>()) {
+ hasDestructiveUpdatePattern(vectorWriteOp.source(), plan);
+ }
+ }
+ });
DEBUG_WITH_TYPE(DEBUG_TYPE, {
llvm::dbgs() << "After Destructive update walk ";
plan.dump();
diff --git a/iree/compiler/Codegen/Common/CMakeLists.txt b/iree/compiler/Codegen/Common/CMakeLists.txt
index a69bc1d..c5bcdc0 100644
--- a/iree/compiler/Codegen/Common/CMakeLists.txt
+++ b/iree/compiler/Codegen/Common/CMakeLists.txt
@@ -36,8 +36,10 @@
"IREEComprehensiveBufferizePass.cpp"
"LinalgBufferizePass.cpp"
"OptimizeVectorTransferPass.cpp"
+ "PolynomialApproximationPass.cpp"
"RemoveTrivialLoops.cpp"
"SetNumWorkgroupsPass.cpp"
+ "TypePropagationPass.cpp"
"VectorizeConv.cpp"
"VectorizeMMT4d.cpp"
DEPS
@@ -48,8 +50,8 @@
MLIRAffineBufferizableOpInterfaceImpl
MLIRAffineUtils
MLIRAnalysis
- MLIRArithBufferizableOpInterfaceImpl
MLIRArithmetic
+ MLIRArithmeticTransforms
MLIRBufferization
MLIRBufferizationTransforms
MLIRGPUOps
@@ -59,6 +61,7 @@
MLIRLinalg
MLIRLinalgBufferizableOpInterfaceImpl
MLIRLinalgTransforms
+ MLIRMathTransforms
MLIRMemRef
MLIRMemRefTransforms
MLIRModuleBufferization
@@ -66,6 +69,7 @@
MLIRSCF
MLIRSCFBufferizableOpInterfaceImpl
MLIRSCFToStandard
+ MLIRSCFTransforms
MLIRSideEffectInterfaces
MLIRStandard
MLIRSupport
diff --git a/iree/compiler/Codegen/Common/CleanupBufferAllocViewPass.cpp b/iree/compiler/Codegen/Common/CleanupBufferAllocViewPass.cpp
index 6f1dbda..df88166 100644
--- a/iree/compiler/Codegen/Common/CleanupBufferAllocViewPass.cpp
+++ b/iree/compiler/Codegen/Common/CleanupBufferAllocViewPass.cpp
@@ -116,7 +116,7 @@
struct CleanupBufferAllocViewPass
: public CleanupBufferAllocViewBase<CleanupBufferAllocViewPass> {
void runOnOperation() override {
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
patterns.insert<FoldReshapeIntoInterfaceTensorLoad<tensor::CollapseShapeOp>,
FoldReshapeIntoInterfaceTensorLoad<tensor::ExpandShapeOp>,
RemoveDeadMemAllocs>(&getContext());
diff --git a/iree/compiler/Codegen/Common/DemoteF32ToF16.cpp b/iree/compiler/Codegen/Common/DemoteF32ToF16.cpp
index 5bf49bd..7e09857 100644
--- a/iree/compiler/Codegen/Common/DemoteF32ToF16.cpp
+++ b/iree/compiler/Codegen/Common/DemoteF32ToF16.cpp
@@ -131,7 +131,7 @@
ModuleOp moduleOp = getOperation();
FloatTypeConverter converter;
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
patterns.insert<GenericTypeConvert>(context, converter);
populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns,
converter);
diff --git a/iree/compiler/Codegen/Common/FlattenMemRefSubspanPass.cpp b/iree/compiler/Codegen/Common/FlattenMemRefSubspanPass.cpp
index 99c15e8..ea2ad56 100644
--- a/iree/compiler/Codegen/Common/FlattenMemRefSubspanPass.cpp
+++ b/iree/compiler/Codegen/Common/FlattenMemRefSubspanPass.cpp
@@ -467,7 +467,7 @@
/// Note that this should be kept consistent with how the byte offset was
/// calculated in the subspan ops!
Optional<int64_t> getNumBytes(Type type) {
- if (type.isIntOrFloat()) return (type.getIntOrFloatBitWidth() + 7) / 8;
+ if (type.isIntOrFloat()) return IREE::Util::getRoundedElementByteWidth(type);
if (auto vectorType = type.dyn_cast<VectorType>()) {
auto elementBytes = getNumBytes(vectorType.getElementType());
if (!elementBytes) return llvm::None;
@@ -579,7 +579,7 @@
MLIRContext &context = getContext();
// This pass currently doesn't support alignment hints so remove them first.
- OwningRewritePatternList patterns(&context);
+ RewritePatternSet patterns(&context);
patterns.add<RemoveAssumeAlignOp>(&context);
(void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
diff --git a/iree/compiler/Codegen/Common/FoldTensorExtractOpPass.cpp b/iree/compiler/Codegen/Common/FoldTensorExtractOpPass.cpp
index 959b32b..5d680c1 100644
--- a/iree/compiler/Codegen/Common/FoldTensorExtractOpPass.cpp
+++ b/iree/compiler/Codegen/Common/FoldTensorExtractOpPass.cpp
@@ -56,7 +56,7 @@
} // namespace
void FoldTensorExtractOpPass::runOnOperation() {
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
populateWithGenerated(patterns);
if (failed(applyPatternsAndFoldGreedily(getOperation(), std::move(patterns))))
signalPassFailure();
diff --git a/iree/compiler/Codegen/Common/ForOpCanonicalizationPass.cpp b/iree/compiler/Codegen/Common/ForOpCanonicalizationPass.cpp
index 7b54aca..298012f 100644
--- a/iree/compiler/Codegen/Common/ForOpCanonicalizationPass.cpp
+++ b/iree/compiler/Codegen/Common/ForOpCanonicalizationPass.cpp
@@ -227,7 +227,7 @@
void runOnOperation() override {
FuncOp fn = getOperation();
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
patterns.insert<CanonicalizeForOpInductionVarShape,
PackForOpInductionVarVector>(fn.getContext());
if (failed(applyPatternsAndFoldGreedily(fn, std::move(patterns)))) {
diff --git a/iree/compiler/Codegen/Common/LinalgBufferizePass.cpp b/iree/compiler/Codegen/Common/LinalgBufferizePass.cpp
index 36adc84..19aa7d7 100644
--- a/iree/compiler/Codegen/Common/LinalgBufferizePass.cpp
+++ b/iree/compiler/Codegen/Common/LinalgBufferizePass.cpp
@@ -79,10 +79,16 @@
static SmallVector<Value, 4> getDynamicDims(OpBuilder &b, Location loc,
Value v) {
SmallVector<Value, 4> dynamicDims;
- for (auto shape : enumerate(v.getType().cast<ShapedType>().getShape())) {
+ Type t = v.getType();
+ for (auto shape : enumerate(t.cast<ShapedType>().getShape())) {
if (shape.value() == ShapedType::kDynamicSize) {
- dynamicDims.push_back(
- b.createOrFold<memref::DimOp>(loc, v, shape.index()));
+ if (t.isa<MemRefType>()) {
+ dynamicDims.push_back(
+ b.createOrFold<memref::DimOp>(loc, v, shape.index()));
+ } else {
+ dynamicDims.push_back(
+ b.createOrFold<tensor::DimOp>(loc, v, shape.index()));
+ }
}
}
return dynamicDims;
diff --git a/iree/compiler/Codegen/Common/OptimizeVectorTransferPass.cpp b/iree/compiler/Codegen/Common/OptimizeVectorTransferPass.cpp
index 02a43d4..c958752 100644
--- a/iree/compiler/Codegen/Common/OptimizeVectorTransferPass.cpp
+++ b/iree/compiler/Codegen/Common/OptimizeVectorTransferPass.cpp
@@ -93,7 +93,7 @@
// Generate vector.shape_cast for dropping leading one dimensions in vector
// ops. This increases the chance that we can forward more transfer writes
// to transfer reads.
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
mlir::vector::populateVectorTransferDropUnitDimsPatterns(patterns);
mlir::vector::populateFlattenVectorTransferPatterns(patterns);
mlir::vector::populateCastAwayVectorLeadingOneDimPatterns(patterns);
diff --git a/iree/compiler/Codegen/Common/PolynomialApproximationPass.cpp b/iree/compiler/Codegen/Common/PolynomialApproximationPass.cpp
new file mode 100644
index 0000000..088afa2
--- /dev/null
+++ b/iree/compiler/Codegen/Common/PolynomialApproximationPass.cpp
@@ -0,0 +1,50 @@
+// Copyright 2022 The IREE Authors
+//
+// Licensed under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#include "iree/compiler/Codegen/PassDetail.h"
+#include "iree/compiler/Codegen/Passes.h"
+#include "mlir/Dialect/Math/Transforms/Approximation.h"
+#include "mlir/Dialect/Math/Transforms/Passes.h"
+#include "mlir/Pass/Pass.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+
+namespace mlir {
+namespace iree_compiler {
+
+/// Command line option to use native hardware operations instead of
+/// polynomial approximations.
+static llvm::cl::opt<bool> clNativeMathPrecision(
+ "iree-codegen-gpu-native-math-precision",
+ llvm::cl::desc(
+ "Skip polynomial lowering for math op natively available on GPU"),
+ llvm::cl::init(false));
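+
+// Example (assumed invocation; the option is registered via llvm::cl, so it is
+// exposed by whichever tools link this pass, e.g. iree-opt):
+//   --iree-codegen-gpu-native-math-precision=true
+// When set, only math.erf is lowered to a polynomial approximation below and
+// the remaining math ops are kept in their native hardware form.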
+
+namespace {
+
+/// math dialect elementary functions -> polynomial form.
+class PolynomialApproximationPass
+ : public PolynomialApproximationPassBase<PolynomialApproximationPass> {
+ void runOnOperation() override {
+ RewritePatternSet mathPatterns(&getContext());
+ if (clNativeMathPrecision) {
+ mathPatterns.add<math::ErfPolynomialApproximation>(&getContext());
+ } else {
+ populateMathPolynomialApproximationPatterns(mathPatterns);
+ }
+ if (failed(applyPatternsAndFoldGreedily(getOperation(),
+ std::move(mathPatterns)))) {
+ return signalPassFailure();
+ }
+ }
+};
+
+} // namespace
+
+std::unique_ptr<OperationPass<>> createPolynomialApproximationPass() {
+ return std::make_unique<PolynomialApproximationPass>();
+}
+
+} // namespace iree_compiler
+} // namespace mlir
diff --git a/iree/compiler/Codegen/Common/RemoveTrivialLoops.cpp b/iree/compiler/Codegen/Common/RemoveTrivialLoops.cpp
index d078fe4..9fe746b 100644
--- a/iree/compiler/Codegen/Common/RemoveTrivialLoops.cpp
+++ b/iree/compiler/Codegen/Common/RemoveTrivialLoops.cpp
@@ -139,7 +139,7 @@
return getWorkgroupRange(processorValue, dims, symbols, numWorkgroups,
workgroupSize);
};
- OwningRewritePatternList patterns(funcOp.getContext());
+ RewritePatternSet patterns(funcOp.getContext());
populateRemoveSingleIterationLoopPattern(patterns, getWorkgroupRangeFn);
return applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}
diff --git a/iree/compiler/Codegen/Common/SetNumWorkgroupsPass.cpp b/iree/compiler/Codegen/Common/SetNumWorkgroupsPass.cpp
index a1a5507..269e639 100644
--- a/iree/compiler/Codegen/Common/SetNumWorkgroupsPass.cpp
+++ b/iree/compiler/Codegen/Common/SetNumWorkgroupsPass.cpp
@@ -99,7 +99,7 @@
if (!currWorkloadPerWorkgroup.empty()) {
// Fold hal.workgroup.size ops.
- OwningRewritePatternList patterns(funcOp.getContext());
+ RewritePatternSet patterns(funcOp.getContext());
patterns.insert<SetWorkgroupSizePattern>(funcOp.getContext(),
currWorkloadPerWorkgroup);
if (failed(applyPatternsAndFoldGreedily(funcOp, std::move(patterns)))) {
@@ -145,7 +145,7 @@
}
// Apply post distribution canonicalization passes.
- OwningRewritePatternList canonicalization(context);
+ RewritePatternSet canonicalization(context);
AffineMinOp::getCanonicalizationPatterns(canonicalization, context);
populateAffineMinSCFCanonicalizationPattern(canonicalization);
IREE::Flow::populateFlowDispatchCanonicalizationPatterns(canonicalization,
diff --git a/iree/compiler/Codegen/Common/TypePropagationPass.cpp b/iree/compiler/Codegen/Common/TypePropagationPass.cpp
new file mode 100644
index 0000000..851e64c
--- /dev/null
+++ b/iree/compiler/Codegen/Common/TypePropagationPass.cpp
@@ -0,0 +1,387 @@
+// Copyright 2021 The IREE Authors
+//
+// Licensed under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//===- TypePropagationPass.cpp -------------------------------------------===//
+//
+// The dispatch regions passed to the backends use legalized bitwidths for the
+// element types of the input/output buffers. To avoid illegal loads/stores
+// within the dispatch, these legalized types need to be propagated through the
+// body so that no tensors of illegal bitwidths remain.
+//
+// This pass uses the dialect conversion framework to propagate the types:
+// - All ops are marked dynamically illegal if their operands/results use
+//   unsupported element types.
+// - A generic pattern that triggers on every operation is added to legalize
+//   all such ops.
+//   - For operations with illegal result types, it creates a new operation
+//     with legalized return types.
+//   - This pattern uses the generic operation creation methods to stay
+//     op-agnostic.
+// - For ops that need specific handling, patterns are added with higher
+//   benefit so that they trigger first during legalization.
+//
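+// As a small sketch (mirroring the type_propagation.mlir lit tests added in
+// this change), a dispatch that computes on tensor<?xi1> loaded from an i8
+// buffer, e.g.
+//
+//   %t = arith.trunci %in : tensor<?xi8> to tensor<?xi1>
+//   %r = linalg.generic ... ins(%t : tensor<?xi1>) outs(%o : tensor<?xi8>)
+//
+// is rewritten so that the linalg.generic consumes tensor<?xi8> directly, with
+// an arith.trunci/arith.extui pair inside its body recovering the original i1
+// semantics.
+//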
+//===---------------------------------------------------------------------===//
+
+#include "iree/compiler/Codegen/PassDetail.h"
+#include "iree/compiler/Codegen/Passes.h"
+#include "iree/compiler/Dialect/Util/IR/UtilTypes.h"
+#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
+#include "mlir/Dialect/Linalg/IR/Linalg.h"
+#include "mlir/Dialect/Tensor/IR/Tensor.h"
+#include "mlir/IR/BuiltinOps.h"
+#include "mlir/IR/BuiltinTypes.h"
+#include "mlir/Transforms/DialectConversion.h"
+
+namespace mlir {
+namespace iree_compiler {
+
+/// Returns the legal element type to use instead of the passed-in element
+/// type. If the type is already legal, it is returned unchanged.
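+/// For example (consistent with the lit tests added in this change): i1 and
+/// i7 legalize to i8, i33 legalizes to i64, and byte-aligned types such as i8
+/// or i32 are returned unchanged.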
+static Optional<Type> getLegalizedElementType(Type elementType) {
+ if (auto intType = elementType.dyn_cast<IntegerType>()) {
+ unsigned bitWidth = intType.getWidth();
+ unsigned byteAlignedBitWidth =
+ IREE::Util::getRoundedElementByteWidth(intType) * 8;
+ if (byteAlignedBitWidth == bitWidth) return elementType;
+ return IntegerType::get(elementType.getContext(), byteAlignedBitWidth);
+ }
+ return elementType;
+}
+
+/// Insert instructions to convert from one element type to another.
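+/// For integer-to-integer conversions this emits arith.trunci when narrowing
+/// and arith.extui when widening; any other conversion is unhandled and
+/// returns a null Value.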
+static Value convertElementType(OpBuilder &b, Location loc, Type targetType,
+ Value source) {
+ Type sourceType = source.getType();
+ if (sourceType == targetType) return source;
+ if (sourceType.isa<IntegerType>() && targetType.isa<IntegerType>()) {
+ unsigned sourceBitWidth = sourceType.getIntOrFloatBitWidth();
+ unsigned destBitWidth = targetType.getIntOrFloatBitWidth();
+ if (sourceBitWidth > destBitWidth) {
+ return b.create<arith::TruncIOp>(loc, targetType, source);
+ } else {
+ return b.create<arith::ExtUIOp>(loc, targetType, source);
+ }
+ }
+ return nullptr;
+}
+
+/// Legalizes the given type. If the type is already legal, returns llvm::None.
+static Optional<Type> getLegalizedType(Type t) {
+ if (auto shapedType = t.dyn_cast<RankedTensorType>()) {
+ Type elementType = shapedType.getElementType();
+ Optional<Type> legalizedElementType = getLegalizedElementType(elementType);
+ if (!legalizedElementType) return llvm::None;
+ return RankedTensorType::get(shapedType.getShape(),
+ legalizedElementType.getValue());
+ }
+ return llvm::None;
+}
+
+namespace {
+
+/// Type converter to use for type propagation.
+struct TypePropagationTypeConverter : public TypeConverter {
+ TypePropagationTypeConverter() {
+ addConversion([](Type t) {
+ auto convertedType = getLegalizedType(t);
+ if (!convertedType) return t;
+ return convertedType.getValue();
+ });
+ }
+};
+
+/// Base class for patterns that handle individual operations.
+template <typename T>
+struct TypePropagationPattern : public OpConversionPattern<T> {
+ TypePropagationPattern(TypePropagationTypeConverter &typeConverter,
+ MLIRContext *context)
+ : OpConversionPattern<T>(typeConverter, context, 100) {}
+};
+
+/// Propagates the type for `linalg.generic` operation.
+/// - Convert operands whose type has changed.
+/// - Convert corresponding basic block argument type and introduce element
+/// conversion ops to get back the original type.
+/// - Convert the result type if the `outs` operand has changed.
+struct GenericOpTypePropagation
+ : public TypePropagationPattern<linalg::GenericOp> {
+ using TypePropagationPattern<linalg::GenericOp>::TypePropagationPattern;
+
+ LogicalResult matchAndRewrite(
+ linalg::GenericOp genericOp, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const final {
+ llvm::SmallSetVector<unsigned, 8> modifiedOperandIndex;
+ SmallVector<Type> resultTypes;
+
+ // 1. Check if any of the operands needs to be legalized.
+ for (auto operand : llvm::enumerate(genericOp->getOpOperands())) {
+ Type operandType = operand.value().get().getType();
+ Type legalizedType = this->getTypeConverter()->convertType(operandType);
+ if (operandType != legalizedType) {
+ modifiedOperandIndex.insert(operand.index());
+ }
+ // If the operand is an `outs` tensor, its type needs to be changed.
+ if (genericOp.isOutputTensor(&operand.value())) {
+ resultTypes.push_back(legalizedType);
+ }
+ }
+
+ // 2. If there are no operands modified, just return failure.
+ if (modifiedOperandIndex.empty()) {
+ return rewriter.notifyMatchFailure(genericOp, "all types legal");
+ }
+
+ // 3. Create a clone of the operation without cloning its regions.
+ auto linalgOp = cast<linalg::LinalgOp>(genericOp.getOperation());
+ auto modifiedOp = cast<linalg::LinalgOp>(linalgOp.cloneWithoutRegions(
+ rewriter, genericOp.getLoc(), resultTypes, adaptor.getOperands()));
+
+ if (genericOp->getNumRegions() != 1) {
+ return genericOp.emitOpError("unhandled linalg op with numRegions != 1");
+ }
+
+ // 4. Inline the region from the original operation into the new
+ // operation.
+ rewriter.inlineRegionBefore(genericOp->getRegions().front(),
+ modifiedOp->getRegions().front(),
+ modifiedOp->getRegions().front().begin());
+ Region &modifiedOpRegion = modifiedOp->getRegions().front();
+
+ // 5. Convert the signature of the region to use the corresponding element
+ // type.
+ TypeConverter::SignatureConversion signatureConverter(
+ modifiedOpRegion.getNumArguments());
+ for (auto arg : llvm::enumerate(modifiedOpRegion.getArguments())) {
+ Type argType = arg.value().getType();
+ if (!modifiedOperandIndex.count(arg.index())) {
+ signatureConverter.addInputs(arg.index(), argType);
+ continue;
+ }
+ Optional<Type> legalizedArgType = getLegalizedElementType(argType);
+ if (!legalizedArgType) {
+ return genericOp.emitOpError("failed to get legalized type for arg ")
+ << arg.index();
+ }
+ signatureConverter.addInputs(arg.index(), legalizedArgType.getValue());
+ }
+ rewriter.applySignatureConversion(&modifiedOpRegion, signatureConverter);
+
+ // 6. Introduce scalar conversion operations to convert back to the
+ // original scalar type.
+ {
+ OpBuilder::InsertionGuard g(rewriter);
+ Block *entryBlock = modifiedOp.getBlock();
+ for (auto modifiedOperandIndex : modifiedOperandIndex) {
+ OpOperand *modifiedOpOperand =
+ &modifiedOp->getOpOperand(modifiedOperandIndex);
+ BlockArgument source =
+ modifiedOp.getTiedBlockArgument(modifiedOpOperand);
+ Type destType = getElementTypeOrSelf(
+ genericOp.getOperand(modifiedOperandIndex).getType());
+
+ // 6a. The block argument is now in the legalized type. Convert it back
+ // to a value of the original element type and use that to replace all
+ // uses of the argument in the block.
+ rewriter.setInsertionPointToStart(entryBlock);
+ Value replacement =
+ convertElementType(rewriter, source.getLoc(), destType, source);
+ rewriter.replaceUsesOfBlockArgument(source, replacement);
+ }
+
+ // 6b. If any of the operands modified were outputs, the yield values
+ // need to be modified as well.
+ Operation *yieldOp = entryBlock->getTerminator();
+ rewriter.setInsertionPoint(yieldOp);
+ bool modifyYield = false;
+ SmallVector<Value> yieldOperands(yieldOp->operand_begin(),
+ yieldOp->operand_end());
+ for (auto modifiedOperandIndex : modifiedOperandIndex) {
+ OpOperand *modifiedOpOperand =
+ &modifiedOp->getOpOperand(modifiedOperandIndex);
+ if (modifiedOp.isOutputTensor(modifiedOpOperand)) {
+ modifyYield = true;
+ OpOperand *yieldOperand =
+ modifiedOp.getTiedYieldValue(modifiedOpOperand);
+ Optional<Type> legalizedType =
+ getLegalizedElementType(yieldOperand->get().getType());
+ if (!legalizedType) {
+ return genericOp.emitOpError(
+ "failed to get legalized type for yield value");
+ }
+ yieldOperands[yieldOperand->getOperandNumber()] =
+ convertElementType(rewriter, yieldOp->getLoc(),
+ legalizedType.getValue(), yieldOperand->get());
+ }
+ }
+ if (modifyYield) {
+ rewriter.replaceOpWithNewOp<linalg::YieldOp>(yieldOp, yieldOperands);
+ }
+ }
+
+ rewriter.replaceOp(genericOp, modifiedOp->getResults());
+ return success();
+ }
+};
+
+/// Legalizes `linalg.fill` operation.
+struct LinalgFillTypePropagation
+ : public TypePropagationPattern<linalg::FillOp> {
+ using TypePropagationPattern<linalg::FillOp>::TypePropagationPattern;
+
+ LogicalResult matchAndRewrite(
+ linalg::FillOp fillOp, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const final {
+ auto outputType = fillOp.output().getType();
+ auto legalizedOutputType = this->typeConverter->convertType(outputType);
+ if (outputType == legalizedOutputType) {
+ return rewriter.notifyMatchFailure(fillOp, "op already legal");
+ }
+ Value value = adaptor.value();
+ Optional<Type> legalizedElementType =
+ getLegalizedElementType(value.getType());
+ if (!legalizedElementType) {
+ return fillOp.emitOpError("failed to get legalized type for value");
+ }
+ Value legalizedValue = convertElementType(
+ rewriter, fillOp->getLoc(), legalizedElementType.getValue(), value);
+ rewriter.replaceOpWithNewOp<linalg::FillOp>(fillOp, legalizedValue,
+ adaptor.output());
+ return success();
+ }
+};
+
+/// Simple rewrite pattern that just forwards the source as the result if the
+/// result type is not legal (but the source type is).
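+/// For example (as exercised by the type_propagation.mlir tests in this
+/// change), an `arith.extui %v : tensor<?xi1> to tensor<?xi8>` whose operand
+/// has already been legalized to tensor<?xi8> is simply replaced by that
+/// operand.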
+template <typename OpTy>
+struct ForwardSourceType : public TypePropagationPattern<OpTy> {
+ using TypePropagationPattern<OpTy>::TypePropagationPattern;
+
+ LogicalResult matchAndRewrite(
+ OpTy op, typename OpTy::Adaptor adaptor,
+ ConversionPatternRewriter &rewriter) const final {
+ if (op->getNumResults() != 1 || adaptor.getOperands().size() != 1) {
+ return rewriter.notifyMatchFailure(
+ op, "unhandled op with multiple operands/results");
+ }
+ Type outputType = op->getResult(0).getType();
+ Type legalizedOutputType = this->typeConverter->convertType(outputType);
+ Value input = adaptor.getOperands()[0];
+ Value originalInput = op->getOperand(0);
+ if (outputType == legalizedOutputType &&
+ input.getType() == originalInput.getType()) {
+ return rewriter.notifyMatchFailure(op, "op is legal");
+ }
+ rewriter.replaceOp(op, input);
+ return success();
+ }
+};
+
+/// Rewrite pattern to replace the element type (if it is not legal) with the
+/// legal element type.
+struct LegalizeResultElementType : public ConversionPattern {
+ LegalizeResultElementType(TypePropagationTypeConverter &typeConverter,
+ MLIRContext *context)
+ : ConversionPattern(typeConverter, MatchAnyOpTypeTag(), /*benefit=*/1,
+ context) {}
+
+ LogicalResult matchAndRewrite(
+ Operation *op, ArrayRef<Value> convertedOperands,
+ ConversionPatternRewriter &rewriter) const final {
+ if (op->getNumSuccessors()) {
+ return rewriter.notifyMatchFailure(op, "unhandled ops with successors");
+ }
+ Location loc = op->getLoc();
+ bool illegalOp = llvm::any_of(
+ llvm::zip(op->getOperands(), convertedOperands),
+ [](std::tuple<Value, Value> tuple) {
+ return std::get<0>(tuple).getType() != std::get<1>(tuple).getType();
+ });
+ SmallVector<Type> resultTypes;
+ for (Type resultType : op->getResultTypes()) {
+ Type legalizedType = this->typeConverter->convertType(resultType);
+ resultTypes.push_back(legalizedType);
+ illegalOp |= legalizedType != resultType;
+ }
+ if (!illegalOp) {
+ return rewriter.notifyMatchFailure(op, "op is already legal");
+ }
+ OperationState state(loc, op->getName(), convertedOperands, resultTypes,
+ op->getAttrs());
+ for (unsigned i = 0, e = op->getNumRegions(); i != e; ++i) {
+ state.addRegion();
+ }
+ Operation *newOp = rewriter.createOperation(state);
+
+ // Move all the regions from the old op to the new op and legalize its
+ // signature.
+ for (auto &region : llvm::enumerate(op->getRegions())) {
+ Region &newOpRegion = newOp->getRegion(region.index());
+ rewriter.inlineRegionBefore(region.value(), newOpRegion,
+ newOpRegion.begin());
+ TypeConverter::SignatureConversion signatureConverter(
+ newOpRegion.getNumArguments());
+ bool doSignatureConversion = false;
+ for (auto arg : llvm::enumerate(newOpRegion.getArguments())) {
+ Type argType = arg.value().getType();
+ Type legalizedType = this->typeConverter->convertType(argType);
+ signatureConverter.addInputs(arg.index(), legalizedType);
+ doSignatureConversion |= argType != legalizedType;
+ }
+ if (doSignatureConversion) {
+ rewriter.applySignatureConversion(&newOpRegion, signatureConverter);
+ }
+ }
+ rewriter.replaceOp(op, newOp->getResults());
+ return success();
+ }
+};
+
+struct TypePropagationPass : public TypePropagationBase<TypePropagationPass> {
+ TypePropagationPass() = default;
+ void getDependentDialects(DialectRegistry &registry) const override {
+ registry.insert<arith::ArithmeticDialect>();
+ }
+ void runOnOperation() override {
+ MLIRContext *context = &getContext();
+ RewritePatternSet patterns(context);
+
+ TypePropagationTypeConverter typeConverter;
+ patterns
+ .insert<ForwardSourceType<arith::ExtUIOp>,
+ ForwardSourceType<arith::TruncIOp>, GenericOpTypePropagation,
+ LinalgFillTypePropagation, LegalizeResultElementType>(
+ typeConverter, context);
+
+ ConversionTarget target(*context);
+ target.markUnknownOpDynamicallyLegal([&](Operation *op) {
+ for (auto operand : op->getOperands()) {
+ Type operandType = operand.getType();
+ if (operandType != typeConverter.convertType(operandType)) {
+ return false;
+ }
+ }
+ for (auto result : op->getResults()) {
+ Type resultType = result.getType();
+ if (resultType != typeConverter.convertType(resultType)) {
+ return false;
+ }
+ }
+ return true;
+ });
+
+ if (failed(applyPartialConversion(getOperation(), target,
+ std::move(patterns)))) {
+ signalPassFailure();
+ }
+ }
+};
+} // namespace
+
+std::unique_ptr<OperationPass<FuncOp>> createTypePropagationPass() {
+ return std::make_unique<TypePropagationPass>();
+}
+
+} // namespace iree_compiler
+} // namespace mlir
diff --git a/iree/compiler/Codegen/Common/VectorizeConv.cpp b/iree/compiler/Codegen/Common/VectorizeConv.cpp
index 139cd1d..cb2c10e 100644
--- a/iree/compiler/Codegen/Common/VectorizeConv.cpp
+++ b/iree/compiler/Codegen/Common/VectorizeConv.cpp
@@ -365,7 +365,7 @@
void runOnOperation() override {
MLIRContext *context = &getContext();
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
patterns.insert<VectorizeLinalgConv, VectorizeLinalgDepthwiseConv>(context);
if (failed(applyPatternsAndFoldGreedily(getOperation(),
std::move(patterns)))) {
@@ -376,8 +376,8 @@
} // namespace
-void populateLinalgToVectorVectorizeConvPatterns(
- MLIRContext *context, OwningRewritePatternList &patterns) {
+void populateLinalgToVectorVectorizeConvPatterns(MLIRContext *context,
+ RewritePatternSet &patterns) {
patterns.insert<VectorizeLinalgConv, VectorizeLinalgDepthwiseConv>(context);
}
diff --git a/iree/compiler/Codegen/Common/VectorizeMMT4d.cpp b/iree/compiler/Codegen/Common/VectorizeMMT4d.cpp
index f0c549d..715310a 100644
--- a/iree/compiler/Codegen/Common/VectorizeMMT4d.cpp
+++ b/iree/compiler/Codegen/Common/VectorizeMMT4d.cpp
@@ -147,7 +147,7 @@
void runOnOperation() override {
MLIRContext *context = &getContext();
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
patterns.insert<VectorizeMMT4DOp>(context);
if (failed(applyPatternsAndFoldGreedily(getOperation(),
std::move(patterns)))) {
@@ -158,8 +158,8 @@
} // namespace
-void populateLinalgToVectorVectorizeMMT4dPatterns(
- MLIRContext *context, OwningRewritePatternList &patterns) {
+void populateLinalgToVectorVectorizeMMT4dPatterns(MLIRContext *context,
+ RewritePatternSet &patterns) {
patterns.insert<VectorizeMMT4DOp>(context);
}
diff --git a/iree/compiler/Codegen/Common/test/BUILD b/iree/compiler/Codegen/Common/test/BUILD
index 3c47ed9..5e6e422 100644
--- a/iree/compiler/Codegen/Common/test/BUILD
+++ b/iree/compiler/Codegen/Common/test/BUILD
@@ -33,6 +33,7 @@
"remove_dead_allocs.mlir",
"remove_trivial_loops.mlir",
"transpose_canonicalization.mlir",
+ "type_propagation.mlir",
"vectorize_linalg_conv.mlir",
"vectorize_linalg_mmt4d.mlir",
],
diff --git a/iree/compiler/Codegen/Common/test/CMakeLists.txt b/iree/compiler/Codegen/Common/test/CMakeLists.txt
index dd6ad5b..e8b8100 100644
--- a/iree/compiler/Codegen/Common/test/CMakeLists.txt
+++ b/iree/compiler/Codegen/Common/test/CMakeLists.txt
@@ -28,6 +28,7 @@
"remove_dead_allocs.mlir"
"remove_trivial_loops.mlir"
"transpose_canonicalization.mlir"
+ "type_propagation.mlir"
"vectorize_linalg_conv.mlir"
"vectorize_linalg_mmt4d.mlir"
TOOLS
diff --git a/iree/compiler/Codegen/Common/test/linalg_bufferize.mlir b/iree/compiler/Codegen/Common/test/linalg_bufferize.mlir
index d74b33e..773c778 100644
--- a/iree/compiler/Codegen/Common/test/linalg_bufferize.mlir
+++ b/iree/compiler/Codegen/Common/test/linalg_bufferize.mlir
@@ -297,6 +297,7 @@
#map = affine_map<(d0, d1) -> (d0, d1)>
func @tile_from_pointwise_outs_inplace() {
+ %f1 = arith.constant 1.0 : f32
%c0 = arith.constant 0 : index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
@@ -319,7 +320,8 @@
%9 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]}
ins(%8 : tensor<1x1xf32>) outs(%shape : tensor<1x1xf32>) {
^bb0(%arg2: f32, %s: f32): // no predecessors
- linalg.yield %arg2 : f32
+ %add = arith.addf %arg2, %f1 : f32
+ linalg.yield %add : f32
} -> tensor<1x1xf32>
%10 = linalg.matmul ins(%6, %7 : tensor<1x3xf32>, tensor<3x1xf32>) outs(%9 : tensor<1x1xf32>) -> tensor<1x1xf32>
flow.dispatch.tensor.store %10, %2, offsets = [%arg0, %arg1], sizes = [%c1, %c1], strides = [%c1, %c1] : tensor<1x1xf32> -> !flow.dispatch.tensor<readwrite:?x?xf32>{%M, %N}
@@ -2587,3 +2589,53 @@
return
}
// CHECK: func @forward_dispatch_3()
+
+// -----
+
+func @dot_general_nontrivial_batching_mutliple_parallel_dimension() {
+ %cst = arith.constant dense<0.000000e+00> : vector<1x4x2xf32>
+ %c1 = arith.constant 1 : index
+ %c6 = arith.constant 6 : index
+ %c2 = arith.constant 2 : index
+ %cst_0 = arith.constant 0.000000e+00 : f32
+ %c0 = arith.constant 0 : index
+ %c64 = arith.constant 64 : index
+ %0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c0) alignment(32) : !flow.dispatch.tensor<readonly:2x6x1xf32>
+ %1 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c64) alignment(32) : !flow.dispatch.tensor<readonly:2x1x2xf32>
+ %2 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) offset(%c0) alignment(32) : !flow.dispatch.tensor<writeonly:2x6x2xf32>
+ %workgroup_id_x = hal.interface.workgroup.id[0] : index
+ %workgroup_count_x = hal.interface.workgroup.count[0] : index
+ %workgroup_id_y = hal.interface.workgroup.id[1] : index
+ %workgroup_count_y = hal.interface.workgroup.count[1] : index
+ %workgroup_id_z = hal.interface.workgroup.id[2] : index
+ %workgroup_count_z = hal.interface.workgroup.count[2] : index
+ %3 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_id_y]
+ %4 = affine.apply affine_map<()[s0] -> (s0 * 4)>()[%workgroup_count_y]
+ %5 = affine.apply affine_map<()[s0] -> (s0 * 2)>()[%workgroup_id_x]
+ %6 = affine.apply affine_map<()[s0] -> (s0 * 2)>()[%workgroup_count_x]
+ scf.for %arg0 = %workgroup_id_z to %c2 step %workgroup_count_z {
+ scf.for %arg1 = %3 to %c6 step %4 {
+ %7 = affine.min affine_map<(d0) -> (4, -d0 + 6)>(%arg1)
+ %8 = flow.dispatch.tensor.load %0, offsets = [%arg0, %arg1, 0], sizes = [1, %7, 1], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:2x6x1xf32> -> tensor<1x?x1xf32>
+ %9 = tensor.extract_slice %8[0, 0, 0] [1, %7, 1] [1, 1, 1] : tensor<1x?x1xf32> to tensor<1x?x1xf32>
+ %10 = vector.transfer_read %9[%c0, %c0, %c0], %cst_0 {in_bounds = [true, false, true]} : tensor<1x?x1xf32>, vector<1x4x1xf32>
+ scf.for %arg2 = %5 to %c2 step %6 {
+ %11 = flow.dispatch.tensor.load %2, offsets = [%arg0, %arg1, %arg2], sizes = [1, %7, 2], strides = [1, 1, 1] : !flow.dispatch.tensor<writeonly:2x6x2xf32> -> tensor<1x?x2xf32>
+ %12 = flow.dispatch.tensor.load %1, offsets = [%arg0, 0, %arg2], sizes = [1, 1, 2], strides = [1, 1, 1] : !flow.dispatch.tensor<readonly:2x1x2xf32> -> tensor<1x1x2xf32>
+ %13 = tensor.extract_slice %11[0, 0, 0] [1, %7, 2] [1, 1, 1] : tensor<1x?x2xf32> to tensor<1x?x2xf32>
+ %14 = vector.transfer_write %cst, %13[%c0, %c0, %c0] {in_bounds = [true, false, true]} : vector<1x4x2xf32>, tensor<1x?x2xf32>
+ %15 = tensor.extract_slice %14[0, 0, 0] [1, %7, 2] [1, 1, 1] : tensor<1x?x2xf32> to tensor<1x?x2xf32>
+ %16 = vector.transfer_read %12[%c0, %c0, %c0], %cst_0 {in_bounds = [true, true, true]} : tensor<1x1x2xf32>, vector<1x1x2xf32>
+ %17 = vector.transfer_read %15[%c0, %c0, %c0], %cst_0 {in_bounds = [true, false, true]} : tensor<1x?x2xf32>, vector<1x4x2xf32>
+ %18 = vector.contract {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel", "reduction"], kind = #vector.kind<add>} %10, %16, %17 : vector<1x4x1xf32>, vector<1x1x2xf32> into vector<1x4x2xf32>
+ %19 = vector.transfer_write %18, %15[%c0, %c0, %c0] {in_bounds = [true, false, true]} : vector<1x4x2xf32>, tensor<1x?x2xf32>
+ %20 = tensor.insert_slice %19 into %14[0, 0, 0] [1, %7, 2] [1, 1, 1] : tensor<1x?x2xf32> into tensor<1x?x2xf32>
+ %21 = tensor.insert_slice %20 into %11[0, 0, 0] [1, %7, 2] [1, 1, 1] : tensor<1x?x2xf32> into tensor<1x?x2xf32>
+ flow.dispatch.tensor.store %21, %2, offsets = [%arg0, %arg1, %arg2], sizes = [%c1, %7, %c2], strides = [1, 1, 1] : tensor<1x?x2xf32> -> !flow.dispatch.tensor<writeonly:2x6x2xf32>
+ }
+ }
+ }
+ return
+}
+// CHECK-LABEL: func @dot_general_nontrivial_batching_mutliple_parallel_dimension()
+// CHECK-NOT: memref.alloc
diff --git a/iree/compiler/Codegen/Common/test/type_propagation.mlir b/iree/compiler/Codegen/Common/test/type_propagation.mlir
new file mode 100644
index 0000000..75ea3d6
--- /dev/null
+++ b/iree/compiler/Codegen/Common/test/type_propagation.mlir
@@ -0,0 +1,239 @@
+// RUN: iree-opt -iree-codegen-type-propagation -split-input-file %s | FileCheck %s
+
+func @generic_op_illegal_operand() {
+ %d = hal.interface.constant.load[0] : index
+ %0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : !flow.dispatch.tensor<readonly:?xi8>{%d}
+ %1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : !flow.dispatch.tensor<writeonly:?xi8>{%d}
+ %2 = flow.dispatch.tensor.load %0, offsets = [0], sizes=[%d], strides=[1] : !flow.dispatch.tensor<readonly:?xi8>{%d} -> tensor<?xi8>
+ %3 = arith.trunci %2 : tensor<?xi8> to tensor<?xi1>
+ %4 = linalg.init_tensor [%d] : tensor<?xi8>
+ %5 = linalg.generic {
+ indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>],
+ iterator_types = ["parallel"]}
+ ins(%3 : tensor<?xi1>) outs(%4 : tensor<?xi8>) {
+ ^bb0(%arg0 : i1, %arg1 : i8):
+ %6 = arith.extui %arg0 : i1 to i8
+ linalg.yield %6 : i8
+ } -> tensor<?xi8>
+ flow.dispatch.tensor.store %5, %1, offsets = [0], sizes=[%d], strides=[1] : tensor<?xi8> -> !flow.dispatch.tensor<writeonly:?xi8>{%d}
+ return
+}
+// CHECK-LABEL: func @generic_op_illegal_operand()
+// CHECK-DAG: %[[IN:.+]] = hal.interface.binding.subspan set(0) binding(0)
+// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(1)
+// CHECK-DAG: %[[INTENSOR:.+]] = flow.dispatch.tensor.load %[[IN]]
+// CHECK-DAG: %[[INIT:.+]] = linalg.init_tensor [%{{.+}}] : tensor<?xi8>
+// CHECK: %[[GENERIC:.+]] = linalg.generic
+// CHECK-SAME: ins(%[[INTENSOR]] : tensor<?xi8>)
+// CHECK-SAME: outs(%[[INIT]] : tensor<?xi8>)
+// CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: i8, %[[ARG1:[a-zA-Z0-9]+]]: i8)
+// CHECK-DAG: %[[TRUNC:.+]] = arith.trunci %[[ARG0]] : i8 to i1
+// CHECK-DAG: %[[EXTUI:.+]] = arith.extui %[[TRUNC]] : i1 to i8
+// CHECK: linalg.yield %[[EXTUI]]
+// CHECK: flow.dispatch.tensor.store %[[GENERIC]], %[[OUT]]
+
+// -----
+
+func @generic_op_illegal_operand_i7() {
+ %d = hal.interface.constant.load[0] : index
+ %0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : !flow.dispatch.tensor<readonly:?xi8>{%d}
+ %1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : !flow.dispatch.tensor<writeonly:?xi8>{%d}
+ %2 = flow.dispatch.tensor.load %0, offsets = [0], sizes=[%d], strides=[1] : !flow.dispatch.tensor<readonly:?xi8>{%d} -> tensor<?xi8>
+ %3 = arith.trunci %2 : tensor<?xi8> to tensor<?xi7>
+ %4 = linalg.init_tensor [%d] : tensor<?xi8>
+ %5 = linalg.generic {
+ indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>],
+ iterator_types = ["parallel"]}
+ ins(%3 : tensor<?xi7>) outs(%4 : tensor<?xi8>) {
+ ^bb0(%arg0 : i7, %arg1 : i8):
+ %6 = arith.extui %arg0 : i7 to i8
+ linalg.yield %6 : i8
+ } -> tensor<?xi8>
+ flow.dispatch.tensor.store %5, %1, offsets = [0], sizes=[%d], strides=[1] : tensor<?xi8> -> !flow.dispatch.tensor<writeonly:?xi8>{%d}
+ return
+}
+// CHECK-LABEL: func @generic_op_illegal_operand_i7()
+// CHECK-DAG: %[[IN:.+]] = hal.interface.binding.subspan set(0) binding(0)
+// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(1)
+// CHECK-DAG: %[[INTENSOR:.+]] = flow.dispatch.tensor.load %[[IN]]
+// CHECK-DAG: %[[INIT:.+]] = linalg.init_tensor [%{{.+}}] : tensor<?xi8>
+// CHECK: %[[GENERIC:.+]] = linalg.generic
+// CHECK-SAME: ins(%[[INTENSOR]] : tensor<?xi8>)
+// CHECK-SAME: outs(%[[INIT]] : tensor<?xi8>)
+// CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: i8, %[[ARG1:[a-zA-Z0-9]+]]: i8)
+// CHECK-DAG: %[[TRUNC:.+]] = arith.trunci %[[ARG0]] : i8 to i7
+// CHECK-DAG: %[[EXTUI:.+]] = arith.extui %[[TRUNC]] : i7 to i8
+// CHECK: linalg.yield %[[EXTUI]]
+// CHECK: flow.dispatch.tensor.store %[[GENERIC]], %[[OUT]]
+
+// -----
+
+func @generic_op_illegal_operand_i33() {
+ %d = hal.interface.constant.load[0] : index
+ %0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : !flow.dispatch.tensor<readonly:?xi64>{%d}
+ %1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : !flow.dispatch.tensor<writeonly:?xi64>{%d}
+ %2 = flow.dispatch.tensor.load %0, offsets = [0], sizes=[%d], strides=[1] : !flow.dispatch.tensor<readonly:?xi64>{%d} -> tensor<?xi64>
+ %3 = arith.trunci %2 : tensor<?xi64> to tensor<?xi33>
+ %4 = linalg.init_tensor [%d] : tensor<?xi64>
+ %5 = linalg.generic {
+ indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>],
+ iterator_types = ["parallel"]}
+ ins(%3 : tensor<?xi33>) outs(%4 : tensor<?xi64>) {
+ ^bb0(%arg0 : i33, %arg1 : i64):
+ %6 = arith.extui %arg0 : i33 to i64
+ linalg.yield %6 : i64
+ } -> tensor<?xi64>
+ flow.dispatch.tensor.store %5, %1, offsets = [0], sizes=[%d], strides=[1] : tensor<?xi64> -> !flow.dispatch.tensor<writeonly:?xi64>{%d}
+ return
+}
+// CHECK-LABEL: func @generic_op_illegal_operand_i33()
+// CHECK-DAG: %[[IN:.+]] = hal.interface.binding.subspan set(0) binding(0)
+// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(1)
+// CHECK-DAG: %[[INTENSOR:.+]] = flow.dispatch.tensor.load %[[IN]]
+// CHECK-DAG: %[[INIT:.+]] = linalg.init_tensor [%{{.+}}] : tensor<?xi64>
+// CHECK: %[[GENERIC:.+]] = linalg.generic
+// CHECK-SAME: ins(%[[INTENSOR]] : tensor<?xi64>)
+// CHECK-SAME: outs(%[[INIT]] : tensor<?xi64>)
+// CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: i64, %[[ARG1:[a-zA-Z0-9]+]]: i64)
+// CHECK-DAG: %[[TRUNC:.+]] = arith.trunci %[[ARG0]] : i64 to i33
+// CHECK-DAG: %[[EXTUI:.+]] = arith.extui %[[TRUNC]] : i33 to i64
+// CHECK: linalg.yield %[[EXTUI]]
+// CHECK: flow.dispatch.tensor.store %[[GENERIC]], %[[OUT]]
+
+
+// -----
+
+func @generic_op_illegal_result() {
+ %d = hal.interface.constant.load[0] : index
+ %0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : !flow.dispatch.tensor<readonly:?xi8>{%d}
+ %1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : !flow.dispatch.tensor<writeonly:?xi8>{%d}
+ %2 = flow.dispatch.tensor.load %0, offsets = [0], sizes=[%d], strides=[1] : !flow.dispatch.tensor<readonly:?xi8>{%d} -> tensor<?xi8>
+ %3 = linalg.init_tensor [%d] : tensor<?xi1>
+ %4 = linalg.generic {
+ indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>],
+ iterator_types = ["parallel"]}
+ ins(%2 : tensor<?xi8>) outs(%3 : tensor<?xi1>) {
+ ^bb0(%arg0 : i8, %arg1 : i1):
+ %5 = arith.trunci %arg0 : i8 to i1
+ linalg.yield %5 : i1
+ } -> tensor<?xi1>
+ %5 = arith.extui %4 : tensor<?xi1> to tensor<?xi8>
+ flow.dispatch.tensor.store %5, %1, offsets = [0], sizes=[%d], strides=[1] : tensor<?xi8> -> !flow.dispatch.tensor<writeonly:?xi8>{%d}
+ return
+}
+// CHECK-LABEL: func @generic_op_illegal_result()
+// CHECK-DAG: %[[IN:.+]] = hal.interface.binding.subspan set(0) binding(0)
+// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(1)
+// CHECK-DAG: %[[INTENSOR:.+]] = flow.dispatch.tensor.load %[[IN]]
+// CHECK-DAG: %[[INIT:.+]] = linalg.init_tensor [%{{.+}}] : tensor<?xi8>
+// CHECK: %[[GENERIC:.+]] = linalg.generic
+// CHECK-SAME: ins(%[[INTENSOR]] : tensor<?xi8>)
+// CHECK-SAME: outs(%[[INIT]] : tensor<?xi8>)
+// CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: i8, %[[ARG1:[a-zA-Z0-9]+]]: i8)
+// CHECK-DAG: %[[TRUNC:.+]] = arith.trunci %[[ARG0]] : i8 to i1
+// CHECK-DAG: %[[EXTUI:.+]] = arith.extui %[[TRUNC]] : i1 to i8
+// CHECK: linalg.yield %[[EXTUI]]
+// CHECK: flow.dispatch.tensor.store %[[GENERIC]], %[[OUT]]
+
+// -----
+
+func @tensor_extract() {
+ %d = hal.interface.constant.load[0] : index
+ %offset = hal.interface.constant.load[1] : index
+ %size = hal.interface.constant.load[2] : index
+ %0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : !flow.dispatch.tensor<readonly:?xi8>{%d}
+ %1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : !flow.dispatch.tensor<writeonly:?xi8>{%d}
+ %2 = flow.dispatch.tensor.load %0, offsets = [0], sizes=[%d], strides=[1] : !flow.dispatch.tensor<readonly:?xi8>{%d} -> tensor<?xi8>
+ %3 = tensor.extract_slice %2[%offset] [%size] [1] : tensor<?xi8> to tensor<?xi8>
+ %4 = arith.trunci %3 : tensor<?xi8> to tensor<?xi1>
+ %5 = arith.extui %4 : tensor<?xi1> to tensor<?xi8>
+ flow.dispatch.tensor.store %5, %1, offsets = [%offset], sizes=[%size], strides=[1] : tensor<?xi8> -> !flow.dispatch.tensor<writeonly:?xi8>{%d}
+ return
+}
+// CHECK-LABEL: func @tensor_extract()
+// CHECK-DAG: %[[IN:.+]] = hal.interface.binding.subspan set(0) binding(0)
+// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(1)
+// CHECK-DAG: %[[INTENSOR:.+]] = flow.dispatch.tensor.load %[[IN]]
+// CHECK: %[[EXTRACT:.+]] = tensor.extract_slice %[[INTENSOR]]
+// CHECK: flow.dispatch.tensor.store %[[EXTRACT]], %[[OUT]]
+
+// -----
+
+func @tensor_insert() {
+ %d = hal.interface.constant.load[0] : index
+ %offset = hal.interface.constant.load[1] : index
+ %size = hal.interface.constant.load[2] : index
+ %0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : !flow.dispatch.tensor<readonly:?xi8>{%d}
+ %1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : !flow.dispatch.tensor<readonly:?xi8>{%d}
+ %2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) : !flow.dispatch.tensor<writeonly:?xi8>{%d}
+ %3 = flow.dispatch.tensor.load %0, offsets = [%offset], sizes=[%size], strides=[1] : !flow.dispatch.tensor<readonly:?xi8>{%d} -> tensor<?xi8>
+ %4 = flow.dispatch.tensor.load %1, offsets = [0], sizes=[%d], strides=[1] : !flow.dispatch.tensor<readonly:?xi8>{%d} -> tensor<?xi8>
+ %5 = arith.trunci %3 : tensor<?xi8> to tensor<?xi1>
+ %6 = arith.trunci %4 : tensor<?xi8> to tensor<?xi1>
+ %7 = tensor.insert_slice %5 into %6[%offset] [%size] [1] : tensor<?xi1> into tensor<?xi1>
+ %8 = arith.extui %7 : tensor<?xi1> to tensor<?xi8>
+ flow.dispatch.tensor.store %8, %2, offsets = [0], sizes=[%d], strides=[1] : tensor<?xi8> -> !flow.dispatch.tensor<writeonly:?xi8>{%d}
+ return
+}
+// CHECK-LABEL: func @tensor_insert()
+// CHECK-DAG: %[[IN1:.+]] = hal.interface.binding.subspan set(0) binding(0)
+// CHECK-DAG: %[[IN2:.+]] = hal.interface.binding.subspan set(0) binding(1)
+// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(2)
+// CHECK-DAG: %[[IN1TENSOR:.+]] = flow.dispatch.tensor.load %[[IN1]]
+// CHECK-DAG: %[[IN2TENSOR:.+]] = flow.dispatch.tensor.load %[[IN2]]
+// CHECK: %[[INSERT:.+]] = tensor.insert_slice %[[IN1TENSOR]] into %[[IN2TENSOR]]
+// CHECK: flow.dispatch.tensor.store %[[INSERT]], %[[OUT]]
+
+// -----
+
+func @for_loop() {
+ %d = hal.interface.constant.load[0] : index
+ %lb = hal.interface.constant.load[1] : index
+ %step = hal.interface.constant.load[2] : index
+ %0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : !flow.dispatch.tensor<readonly:?xi8>{%d}
+ %1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : !flow.dispatch.tensor<writeonly:?xi8>{%d}
+ %2 = flow.dispatch.tensor.load %0, offsets=[0], sizes=[%d], strides=[1] : !flow.dispatch.tensor<readonly:?xi8>{%d} -> tensor<?xi8>
+ %3 = flow.dispatch.tensor.load %1, offsets=[0], sizes=[%d], strides=[1] : !flow.dispatch.tensor<writeonly:?xi8>{%d} -> tensor<?xi8>
+ %4 = arith.trunci %2 : tensor<?xi8> to tensor<?xi1>
+ %5 = arith.trunci %3 : tensor<?xi8> to tensor<?xi1>
+ %c0 = arith.constant 0 : index
+ %6 = scf.for %arg0 = %c0 to %d step %step iter_args(%arg1 = %5) -> tensor<?xi1> {
+ %7 = tensor.extract_slice %4[%arg0][%step][1] : tensor<?xi1> to tensor<?xi1>
+ %8 = tensor.insert_slice %7 into %arg1[%arg0][%step][1] : tensor<?xi1> into tensor<?xi1>
+ scf.yield %8 : tensor<?xi1>
+ }
+ %8 = arith.extui %6 : tensor<?xi1> to tensor<?xi8>
+ flow.dispatch.tensor.store %8, %1, offsets=[0], sizes=[%d], strides=[1]: tensor<?xi8> -> !flow.dispatch.tensor<writeonly:?xi8>{%d}
+ return
+}
+// CHECK-LABEL: func @for_loop()
+// CHECK-DAG: %[[IN:.+]] = hal.interface.binding.subspan set(0) binding(0)
+// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(1)
+// CHECK-DAG: %[[INTENSOR:.+]] = flow.dispatch.tensor.load %[[IN]]
+// CHECK-DAG: %[[OUTTENSOR:.+]] = flow.dispatch.tensor.load %[[OUT]]
+// CHECK: %[[FOR:.+]] = scf.for
+// CHECK-SAME: iter_args(%[[ARG1:.+]] = %[[OUTTENSOR]])
+// CHECK: %[[SLICE:.+]] = tensor.extract_slice %[[INTENSOR]]
+// CHECK: %[[INSERT:.+]] = tensor.insert_slice %[[SLICE]] into %[[ARG1]]
+// CHECK: scf.yield %[[INSERT]]
+// CHECK: flow.dispatch.tensor.store %[[FOR]], %[[OUT]]
+
+// -----
+
+func @fill_op() {
+ %d = hal.interface.constant.load[0] : index
+ %0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : !flow.dispatch.tensor<writeonly:?xi8>{%d}
+ %1 = linalg.init_tensor [%d] : tensor<?xi1>
+ %false = arith.constant false
+ %2 = linalg.fill(%false, %1) : i1, tensor<?xi1> -> tensor<?xi1>
+ %3 = arith.extui %2 : tensor<?xi1> to tensor<?xi8>
+ flow.dispatch.tensor.store %3, %0, offsets=[0], sizes=[%d], strides=[1] : tensor<?xi8> -> !flow.dispatch.tensor<writeonly:?xi8>{%d}
+ return
+}
+// CHECK-LABEL: func @fill_op()
+// CHECK-DAG: %[[OUT:.+]] = hal.interface.binding.subspan set(0) binding(0)
+// CHECK-DAG: %[[INIT:.+]] = linalg.init_tensor
+// CHECK-DAG: %[[FALSE:.+]] = arith.constant false
+// CHECK-DAG: %[[EXT_SCALAR:.+]] = arith.extui %[[FALSE]]
+// CHECK: %[[FILL:.+]] = linalg.fill(%[[EXT_SCALAR]], %[[INIT]])
+// CHECK: flow.dispatch.tensor.store %[[FILL]], %[[OUT]]
diff --git a/iree/compiler/Codegen/Dialect/LoweringConfig.td b/iree/compiler/Codegen/Dialect/LoweringConfig.td
index 9c646c8..4609041 100644
--- a/iree/compiler/Codegen/Dialect/LoweringConfig.td
+++ b/iree/compiler/Codegen/Dialect/LoweringConfig.td
@@ -16,8 +16,6 @@
: StrEnumAttrCase<"CPUSingleTilingExpert">;
def CPU_DoubleTilingExpert
: StrEnumAttrCase<"CPUDoubleTilingExpert">;
-def CPU_TensorToVectors
- : StrEnumAttrCase<"CPUTensorToVectors">;
def CPU_TileFuseAndVectorize
: StrEnumAttrCase<"CPUTileFuseAndVectorize">;
@@ -48,10 +46,10 @@
"DispatchLoweringPassPipeline",
"identifier for pass pipeline use to lower dispatch region",
[CPU_Default, CPU_SingleTilingExpert, CPU_DoubleTilingExpert,
- CPU_TensorToVectors, CPU_TileFuseAndVectorize, LLVMGPU_SimpleDistribute,
- LLVMGPU_Vectorize, LLVMGPU_MatmulSimt, LLVMGPU_MatmulTensorCore,
- SPIRV_Distribute, SPIRV_DistributeCopy, SPIRV_Vectorize,
- SPIRV_VectorizeToCooperativeOps, None]> {
+ CPU_TileFuseAndVectorize, LLVMGPU_SimpleDistribute, LLVMGPU_Vectorize,
+ LLVMGPU_MatmulSimt, LLVMGPU_MatmulTensorCore, SPIRV_Distribute,
+ SPIRV_DistributeCopy, SPIRV_Vectorize, SPIRV_VectorizeToCooperativeOps,
+ None]> {
let cppNamespace = "::mlir::iree_compiler::IREE::Codegen";
}
diff --git a/iree/compiler/Codegen/Interfaces/BUILD b/iree/compiler/Codegen/Interfaces/BUILD
index 44ff531..1a1fa95 100644
--- a/iree/compiler/Codegen/Interfaces/BUILD
+++ b/iree/compiler/Codegen/Interfaces/BUILD
@@ -56,7 +56,7 @@
"//iree/compiler/Dialect/Flow/IR",
"//iree/compiler/Dialect/HAL/IR",
"@llvm-project//mlir:AffineBufferizableOpInterfaceImpl",
- "@llvm-project//mlir:ArithBufferizableOpInterfaceImpl",
+ "@llvm-project//mlir:ArithmeticTransforms",
"@llvm-project//mlir:BufferizationDialect",
"@llvm-project//mlir:BufferizationTransforms",
"@llvm-project//mlir:LinalgBufferizableOpInterfaceImpl",
diff --git a/iree/compiler/Codegen/Interfaces/BufferizationInterfaces.cpp b/iree/compiler/Codegen/Interfaces/BufferizationInterfaces.cpp
index 750c09b..8f71296 100644
--- a/iree/compiler/Codegen/Interfaces/BufferizationInterfaces.cpp
+++ b/iree/compiler/Codegen/Interfaces/BufferizationInterfaces.cpp
@@ -10,11 +10,11 @@
#include "iree/compiler/Dialect/Flow/IR/FlowOps.h"
#include "iree/compiler/Dialect/Flow/IR/FlowTypes.h"
#include "iree/compiler/Dialect/HAL/IR/HALOps.h"
+#include "mlir/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Linalg/ComprehensiveBufferize/AffineInterfaceImpl.h"
-#include "mlir/Dialect/Linalg/ComprehensiveBufferize/ArithInterfaceImpl.h"
#include "mlir/Dialect/Linalg/ComprehensiveBufferize/LinalgInterfaceImpl.h"
#include "mlir/Dialect/Linalg/ComprehensiveBufferize/ModuleBufferization.h"
#include "mlir/Dialect/Linalg/ComprehensiveBufferize/SCFInterfaceImpl.h"
@@ -281,8 +281,7 @@
 void registerBufferizationInterfaces(DialectRegistry &registry) {
linalg::comprehensive_bufferize::affine_ext::
registerBufferizableOpInterfaceExternalModels(registry);
- linalg::comprehensive_bufferize::arith_ext::
- registerBufferizableOpInterfaceExternalModels(registry);
+ arith::registerBufferizableOpInterfaceExternalModels(registry);
linalg::comprehensive_bufferize::linalg_ext::
registerBufferizableOpInterfaceExternalModels(registry);
linalg::comprehensive_bufferize::scf_ext::
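
The net effect here is that the arith bufferization models now come from MLIRArithmeticTransforms instead of the comprehensive-bufferize helper library. A minimal sketch of how a tool could pull these registrations into its context, assuming the standard `DialectRegistry` flow and that `registerBufferizationInterfaces` is declared in the matching header:

```cpp
#include "iree/compiler/Codegen/Interfaces/BufferizationInterfaces.h"  // assumed header path
#include "mlir/IR/Dialect.h"
#include "mlir/IR/MLIRContext.h"

// Attach the affine/arith/linalg/scf/... bufferization external models
// (including the arith ones now provided by MLIRArithmeticTransforms).
void attachBufferizationModels(mlir::MLIRContext &context) {
  mlir::DialectRegistry registry;
  mlir::iree_compiler::registerBufferizationInterfaces(registry);
  context.appendDialectRegistry(registry);
}
```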
diff --git a/iree/compiler/Codegen/Interfaces/CMakeLists.txt b/iree/compiler/Codegen/Interfaces/CMakeLists.txt
index 288584c..87d7d88 100644
--- a/iree/compiler/Codegen/Interfaces/CMakeLists.txt
+++ b/iree/compiler/Codegen/Interfaces/CMakeLists.txt
@@ -32,7 +32,7 @@
"BufferizationInterfaces.cpp"
DEPS
MLIRAffineBufferizableOpInterfaceImpl
- MLIRArithBufferizableOpInterfaceImpl
+ MLIRArithmeticTransforms
MLIRBufferization
MLIRBufferizationTransforms
MLIRLinalgBufferizableOpInterfaceImpl
diff --git a/iree/compiler/Codegen/LLVMCPU/BUILD b/iree/compiler/Codegen/LLVMCPU/BUILD
index ea350c1..4d61f31 100644
--- a/iree/compiler/Codegen/LLVMCPU/BUILD
+++ b/iree/compiler/Codegen/LLVMCPU/BUILD
@@ -18,7 +18,6 @@
"LLVMCPUCheckIRBeforeLLVMConversion.cpp",
"LLVMCPULowerExecutableTarget.cpp",
"LLVMCPUSynchronizeSymbolVisibility.cpp",
- "LLVMCPUTileAndVectorizeLinalgTensorOps.cpp",
"LLVMCPUTileFuseAndVectorizeLinalgTensorOps.cpp",
"LLVMCPUUnfuseFMAOps.cpp",
"Passes.cpp",
@@ -45,6 +44,8 @@
"@llvm-project//mlir:Analysis",
"@llvm-project//mlir:ArithmeticToLLVM",
"@llvm-project//mlir:ArithmeticTransforms",
+ "@llvm-project//mlir:ArmNeon",
+ "@llvm-project//mlir:ArmNeon2dToIntr",
"@llvm-project//mlir:CFGTransforms",
"@llvm-project//mlir:DialectUtils",
"@llvm-project//mlir:IR",
@@ -64,6 +65,7 @@
"@llvm-project//mlir:Pass",
"@llvm-project//mlir:ReconcileUnrealizedCasts",
"@llvm-project//mlir:SCFDialect",
+ "@llvm-project//mlir:SCFTransforms",
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:StandardOpsTransforms",
"@llvm-project//mlir:TensorDialect",
diff --git a/iree/compiler/Codegen/LLVMCPU/CMakeLists.txt b/iree/compiler/Codegen/LLVMCPU/CMakeLists.txt
index 63674ee..bc801a5 100644
--- a/iree/compiler/Codegen/LLVMCPU/CMakeLists.txt
+++ b/iree/compiler/Codegen/LLVMCPU/CMakeLists.txt
@@ -21,7 +21,6 @@
"LLVMCPUCheckIRBeforeLLVMConversion.cpp"
"LLVMCPULowerExecutableTarget.cpp"
"LLVMCPUSynchronizeSymbolVisibility.cpp"
- "LLVMCPUTileAndVectorizeLinalgTensorOps.cpp"
"LLVMCPUTileFuseAndVectorizeLinalgTensorOps.cpp"
"LLVMCPUUnfuseFMAOps.cpp"
"Passes.cpp"
@@ -34,6 +33,8 @@
MLIRAnalysis
MLIRArithmeticToLLVM
MLIRArithmeticTransforms
+ MLIRArmNeon
+ MLIRArmNeon2dToIntr
MLIRIR
MLIRLLVMCommonConversion
MLIRLLVMIR
@@ -50,6 +51,7 @@
MLIRReconcileUnrealizedCasts
MLIRSCF
MLIRSCFToStandard
+ MLIRSCFTransforms
MLIRStandard
MLIRStandardOpsTransforms
MLIRStandardToLLVM
diff --git a/iree/compiler/Codegen/LLVMCPU/ConvertToLLVM.cpp b/iree/compiler/Codegen/LLVMCPU/ConvertToLLVM.cpp
index 935fce4..1e752d2 100644
--- a/iree/compiler/Codegen/LLVMCPU/ConvertToLLVM.cpp
+++ b/iree/compiler/Codegen/LLVMCPU/ConvertToLLVM.cpp
@@ -16,6 +16,7 @@
#include "mlir/Analysis/DataLayoutAnalysis.h"
#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h"
+#include "mlir/Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.h"
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/LoweringOptions.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
@@ -30,6 +31,7 @@
#include "mlir/Conversion/TosaToStandard/TosaToStandard.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
+#include "mlir/Dialect/ArmNeon/ArmNeonDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
#include "mlir/Dialect/Math/IR/Math.h"
@@ -615,7 +617,7 @@
ConvertToLLVMPass() = default;
ConvertToLLVMPass(const ConvertToLLVMPass &pass) {}
 void getDependentDialects(DialectRegistry &registry) const override {
- registry.insert<LLVM::LLVMDialect>();
+ registry.insert<LLVM::LLVMDialect, arm_neon::ArmNeonDialect>();
}
void runOnOperation() override;
@@ -666,20 +668,21 @@
// Run Vector -> Vector transformations ahead of conversion to LLVM.
{
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
vector::populateVectorToVectorCanonicalizationPatterns(patterns);
vector::populateVectorBroadcastLoweringPatterns(patterns);
vector::populateVectorContractLoweringPatterns(patterns);
vector::populateVectorMaskOpLoweringPatterns(patterns);
vector::populateVectorShapeCastLoweringPatterns(patterns);
vector::populateVectorTransposeLoweringPatterns(patterns);
+ populateConvertArmNeon2dToIntrPatterns(patterns);
if (failed(applyPatternsAndFoldGreedily(getOperation(),
std::move(patterns)))) {
return signalPassFailure();
}
}
{
- OwningRewritePatternList vectorToLoopsPatterns(&getContext());
+ RewritePatternSet vectorToLoopsPatterns(&getContext());
populateVectorToSCFConversionPatterns(
vectorToLoopsPatterns, VectorTransferToSCFOptions().enableFullUnroll());
if (failed(applyPatternsAndFoldGreedily(
@@ -688,16 +691,6 @@
}
}
- // math dialect elementry functions -> polynomial form.
- {
- OwningRewritePatternList mathPatterns(&getContext());
- populateMathPolynomialApproximationPatterns(mathPatterns);
- if (failed(applyPatternsAndFoldGreedily(getOperation(),
- std::move(mathPatterns)))) {
- return signalPassFailure();
- }
- }
-
const auto &dataLayoutAnalysis = getAnalysis<DataLayoutAnalysis>();
LowerToLLVMOptions options(&getContext(),
dataLayoutAnalysis.getAtOrAbove(module));
@@ -705,7 +698,7 @@
options.overrideIndexBitwidth(options.dataLayout.getPointerSizeInBits());
LLVMTypeConverter converter(&getContext(), options, &dataLayoutAnalysis);
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
// Use the default 64-bit lowering for TOSA's ApplyScale operator:
// This lowering widens integer types to 64-bit an performs the non-fused
@@ -771,7 +764,7 @@
// Post conversion patterns.
{
- OwningRewritePatternList postPatterns(&getContext());
+ RewritePatternSet postPatterns(&getContext());
// TODO(ravishankarm): Move this to a separate pass.
llvm::Triple triple(targetTripleStr);
if (triple.isWasm()) {
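
Two mechanical themes in this file recur throughout the PR: `OwningRewritePatternList` spellings are updated to `RewritePatternSet` (the former was just an alias at this point, so behavior is unchanged), and the ArmNeon 2d-to-intrinsic patterns are applied alongside the vector-to-vector lowerings. A minimal sketch of the resulting idiom (the populate call is the one added above; everything else is the standard greedy driver):

```cpp
#include "mlir/Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

mlir::LogicalResult lowerVectorAndNeon2d(mlir::Operation *root) {
  mlir::RewritePatternSet patterns(root->getContext());
  // Vector->vector canonicalizations would be added here as in the pass above.
  mlir::populateConvertArmNeon2dToIntrPatterns(patterns);
  return mlir::applyPatternsAndFoldGreedily(root, std::move(patterns));
}
```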
diff --git a/iree/compiler/Codegen/LLVMCPU/KernelDispatch.cpp b/iree/compiler/Codegen/LLVMCPU/KernelDispatch.cpp
index cd3b6bf..e4fe3bc 100644
--- a/iree/compiler/Codegen/LLVMCPU/KernelDispatch.cpp
+++ b/iree/compiler/Codegen/LLVMCPU/KernelDispatch.cpp
@@ -63,13 +63,6 @@
"linalg.generic and linalg.indexed_generic workgroup tile size"),
llvm::cl::init(64));
-// TODO(hanchung): Enable the flag by default after addressing perf
-// regresssions.
-static llvm::cl::opt<bool> useDoubleTilingExpert(
- "iree-codegen-use-double-tiling-expert",
- llvm::cl::desc("DEVELOPMENT ONLY, DO NOT USE THE FLAG."),
- llvm::cl::init(false));
-
using IREE::Codegen::DispatchLoweringPassPipeline;
static bool isVMVX(FuncOp entryPointFn) {
@@ -132,8 +125,7 @@
static int64_t getVectorSize(FuncOp entryPointFn, ShapedType shapedType) {
Type elementType = shapedType.getElementType();
if (!elementType.isIntOrFloat()) return 1;
- unsigned byteWidth =
- std::max<unsigned>(1, elementType.getIntOrFloatBitWidth() / 8);
+ unsigned byteWidth = IREE::Util::getRoundedElementByteWidth(elementType);
return getVectorSize(entryPointFn, byteWidth);
}
@@ -157,7 +149,7 @@
.Default([&](Type t) -> Type { return nullptr; });
if (!elementType || !elementType.isIntOrFloat()) return;
unsigned typeWidthInBytes =
- std::max<unsigned>(elementType.getIntOrFloatBitWidth() / 8, 1);
+ IREE::Util::getRoundedElementByteWidth(elementType);
referenceTypeLengthInBytes =
std::min<unsigned>(referenceTypeLengthInBytes, typeWidthInBytes);
});
@@ -278,12 +270,38 @@
return maxSize;
}
-static LogicalResult setX86RootConfig(FuncOp entryPointFn,
- linalg::ContractionOpInterface op,
- SmallVector<int64_t> workloadPerWorkgroup,
- int vectorSize) {
+static LogicalResult setX86SandboxRootConfig(
+ FuncOp entryPointFn, linalg::ContractionOpInterface op,
+ SmallVector<int64_t> workloadPerWorkgroup, int vectorSize) {
setTranslationInfo(entryPointFn,
- getDispatchLoweringPassPipeline(entryPointFn, op),
+ DispatchLoweringPassPipeline::CPUDoubleTilingExpert,
+ workloadPerWorkgroup,
+ /*workgroupSize=*/ArrayRef<int64_t>{});
+
+ // Hardcoded tile sizes. The configuration is derived from iree-llvm-sandbox.
+ // L1 tile sizes are {1, ..., 8, 32, 16}
+ SmallVector<int64_t> l1TileSizes;
+ int64_t nLoops = cast<linalg::LinalgOp>(op.getOperation()).getNumLoops();
+ l1TileSizes.append(nLoops - 3, 1);
+ l1TileSizes.push_back(8);
+ l1TileSizes.push_back(32);
+ l1TileSizes.push_back(16);
+
+ TileSizesListType tileSizes;
+ tileSizes.push_back({});
+ tileSizes.push_back(l1TileSizes);
+ auto config = IREE::Codegen::LoweringConfigAttr::get(
+ entryPointFn.getContext(), tileSizes, {});
+ setLoweringConfig(op, config);
+
+ return success();
+}
+
+static LogicalResult setX86TileFuseAndVectorizeRootConfig(
+ FuncOp entryPointFn, linalg::ContractionOpInterface op,
+ SmallVector<int64_t> workloadPerWorkgroup, int vectorSize) {
+ setTranslationInfo(entryPointFn,
+ DispatchLoweringPassPipeline::CPUTileFuseAndVectorize,
workloadPerWorkgroup,
/*workgroupSize=*/ArrayRef<int64_t>{});
@@ -317,39 +335,6 @@
return success();
}
-static LogicalResult setX86SandboxRootConfig(
- FuncOp entryPointFn, linalg::ContractionOpInterface op,
- SmallVector<int64_t> workloadPerWorkgroup, int vectorSize) {
- setTranslationInfo(entryPointFn,
- DispatchLoweringPassPipeline::CPUDoubleTilingExpert,
- workloadPerWorkgroup,
- /*workgroupSize=*/ArrayRef<int64_t>{});
-
- // Hardcoded tile sizes. The configuration is derived from iree-llvm-sandbox.
- // L1 tile sizes are {1, 1, ..., 288, 128, 512}.
- // Vector tile sizes are {1, ..., 9, 32, 16}
- SmallVector<int64_t> l1TileSizes, vectorTileSizes;
- int64_t nLoops = cast<linalg::LinalgOp>(op.getOperation()).getNumLoops();
- l1TileSizes.append(nLoops - 3, 1);
- l1TileSizes.push_back(288);
- l1TileSizes.push_back(128);
- l1TileSizes.push_back(512);
- vectorTileSizes.append(nLoops - 3, 1);
- vectorTileSizes.push_back(9);
- vectorTileSizes.push_back(32);
- vectorTileSizes.push_back(16);
-
- TileSizesListType tileSizes;
- tileSizes.push_back({});
- tileSizes.push_back(l1TileSizes);
- tileSizes.push_back(vectorTileSizes);
- auto config = IREE::Codegen::LoweringConfigAttr::get(
- entryPointFn.getContext(), tileSizes, vectorTileSizes);
- setLoweringConfig(op, config);
-
- return success();
-}
-
static LogicalResult setARMRootConfig(FuncOp entryPointFn,
linalg::ContractionOpInterface op,
SmallVector<int64_t> workloadPerWorkgroup,
@@ -426,17 +411,21 @@
Optional<llvm::Triple> triple = getTargetTriple(entryPointFn);
if (triple && triple.getValue().isX86()) {
- // For DoubleTilingExpert, we will use LinalgSingleTilingExpertPassOptions
- // to control transforms. There is a tileInterchange option that needs to be
- // configured. However, we don't know the number of loops when adding the
- // pass to pass manager. Thus, we don't use double tiling expert for batch
- // gemms for now.
- if (!numBatchDims && useDoubleTilingExpert) {
+ // There is a tileInterchange option. If it needs to be configured, we can
+ // only apply this pipeline to linalg.matmul, because we don't know the
+ // number of loops when adding the pass to the pass manager.
+ // TODO(hanchung): Embed options into attributes so we can control them
+ // more heuristically.
+ Type lhsElemType = getElementTypeOrSelf(contractionOp.lhs().getType());
+ Type rhsElemType = getElementTypeOrSelf(contractionOp.rhs().getType());
+ Type resElemType =
+ getElementTypeOrSelf(contractionOp->getResult(0).getType());
+ if (lhsElemType == rhsElemType && rhsElemType == resElemType) {
return setX86SandboxRootConfig(entryPointFn, contractionOp,
workloadPerWorkgroup, vectorSize);
} else {
- return setX86RootConfig(entryPointFn, contractionOp, workloadPerWorkgroup,
- vectorSize);
+ return setX86TileFuseAndVectorizeRootConfig(
+ entryPointFn, contractionOp, workloadPerWorkgroup, vectorSize);
}
}
// Fall back to ARM configurations.
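
On the `getRoundedElementByteWidth` switch above: the old expression truncated `bitwidth / 8` and clamped at one byte, while the util helper presumably rounds up to whole bytes. A small illustration of where the two differ (the rounding formula is an assumption about the helper, not taken from this diff):

```cpp
#include <algorithm>
#include <cstdio>

unsigned oldByteWidth(unsigned bits) { return std::max(1u, bits / 8); }
unsigned roundedByteWidth(unsigned bits) { return (bits + 7) / 8; }  // assumed behavior

int main() {
  for (unsigned bits : {1u, 8u, 16u, 20u, 32u})
    std::printf("i%u: old=%u rounded=%u\n", bits, oldByteWidth(bits),
                roundedByteWidth(bits));  // e.g. i20: old=2, rounded=3
  return 0;
}
```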
diff --git a/iree/compiler/Codegen/LLVMCPU/LLVMCPULowerExecutableTarget.cpp b/iree/compiler/Codegen/LLVMCPU/LLVMCPULowerExecutableTarget.cpp
index d396212..7cb17f8 100644
--- a/iree/compiler/Codegen/LLVMCPU/LLVMCPULowerExecutableTarget.cpp
+++ b/iree/compiler/Codegen/LLVMCPU/LLVMCPULowerExecutableTarget.cpp
@@ -153,10 +153,10 @@
if (translationInfo.hasValue()) {
LogicalResult verificationStatus = success();
switch (translationInfo.getValue().getDispatchLoweringPassPipeline()) {
- case IREE::Codegen::DispatchLoweringPassPipeline::CPUTensorToVectors:
+ case IREE::Codegen::DispatchLoweringPassPipeline::CPUDoubleTilingExpert:
verificationStatus = verifyLoweringConfiguration(
moduleOp, translationInfo.getValue(),
- verifyTensorToVectorsPassPipelineConfig);
+ verifyDoubleTilingExpertPassPipelineConfig);
break;
default:;
}
@@ -177,19 +177,12 @@
break;
case IREE::Codegen::DispatchLoweringPassPipeline::
CPUSingleTilingExpert:
- nestedModulePM.addNestedPass<FuncOp>(
- createConvertToDestinationPassingStylePass());
addSingleTilingExpertPassPipeline(nestedModulePM);
break;
case IREE::Codegen::DispatchLoweringPassPipeline::
CPUDoubleTilingExpert:
- nestedModulePM.addNestedPass<FuncOp>(
- createConvertToDestinationPassingStylePass());
addDoubleTilingExpertPassPipeline(nestedModulePM);
break;
- case IREE::Codegen::DispatchLoweringPassPipeline::CPUTensorToVectors:
- addTensorToVectorsPassPipeline(nestedModulePM, lowerToVectors);
- break;
case IREE::Codegen::DispatchLoweringPassPipeline::
CPUTileFuseAndVectorize:
addTileFuseAndVectorizePassPipeline(nestedModulePM, lowerToVectors);
diff --git a/iree/compiler/Codegen/LLVMCPU/LLVMCPUTileAndVectorizeLinalgTensorOps.cpp b/iree/compiler/Codegen/LLVMCPU/LLVMCPUTileAndVectorizeLinalgTensorOps.cpp
deleted file mode 100644
index 5e5aa11..0000000
--- a/iree/compiler/Codegen/LLVMCPU/LLVMCPUTileAndVectorizeLinalgTensorOps.cpp
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2021 The IREE Authors
-//
-// Licensed under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-#include "iree/compiler/Codegen/Dialect/LoweringConfig.h"
-#include "iree/compiler/Codegen/LLVMCPU/KernelDispatch.h"
-#include "iree/compiler/Codegen/PassDetail.h"
-#include "iree/compiler/Codegen/Passes.h"
-#include "iree/compiler/Codegen/Transforms/Transforms.h"
-#include "iree/compiler/Codegen/Utils/MarkerUtils.h"
-#include "llvm/Support/Debug.h"
-#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
-#include "mlir/Dialect/Linalg/IR/Linalg.h"
-#include "mlir/Dialect/Linalg/Transforms/Hoisting.h"
-#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
-#include "mlir/Dialect/MemRef/IR/MemRef.h"
-#include "mlir/Dialect/MemRef/Transforms/Passes.h"
-#include "mlir/Dialect/SCF/Transforms.h"
-#include "mlir/Dialect/Vector/VectorTransforms.h"
-#include "mlir/Pass/Pass.h"
-#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
-
-#define DEBUG_TYPE "iree-llvmcpu-tile-and-vectorize"
-
-namespace mlir {
-namespace iree_compiler {
-
-namespace {
-// Could just be linalg::TilingPattern with a ContractionOpInterface filter, but
-// that is always templated on an op.
-struct TileWorkgroups : public linalg::LinalgTilingPattern {
- using Base = linalg::LinalgTilingPattern;
- TileWorkgroups(MLIRContext *context, linalg::LinalgTilingOptions options,
- linalg::LinalgTransformationFilter marker)
- : LinalgTilingPattern(context, options, marker) {}
- LogicalResult matchAndRewrite(linalg::LinalgOp linalgOp,
- PatternRewriter &rewriter) const override {
- if (!isa<linalg::ContractionOpInterface>(linalgOp.getOperation()))
- return failure();
- return Base::returningMatchAndRewrite(linalgOp, rewriter);
- }
-};
-
-} // namespace
-
-namespace {
-struct LLVMCPUTileAndVectorizePass
- : public LLVMCPUTileAndVectorizeBase<LLVMCPUTileAndVectorizePass> {
- LLVMCPUTileAndVectorizePass(bool vectorize = true)
- : lowerToVectors(vectorize) {}
- LLVMCPUTileAndVectorizePass(const LLVMCPUTileAndVectorizePass &pass) {
- lowerToVectors = pass.lowerToVectors;
- }
- void getDependentDialects(DialectRegistry &registry) const override {
- registry.insert<linalg::LinalgDialect, memref::MemRefDialect,
- vector::VectorDialect>();
- }
- void runOnOperation() override;
-
- private:
- bool lowerToVectors;
-};
-} // namespace
-
-void LLVMCPUTileAndVectorizePass::runOnOperation() {
- MLIRContext *context = &getContext();
- auto funcOp = getOperation();
-
- DEBUG_WITH_TYPE(DEBUG_TYPE, {
- llvm::dbgs() << "\n--- Before LLVMCPUTileAndVectorizePass ---\n";
- funcOp.print(llvm::dbgs(), OpPrintingFlags().useLocalScope());
- llvm::dbgs() << "\n\n";
- });
-
- // First level of tiling patterns
- {
- OwningRewritePatternList l1patterns(&getContext());
- l1patterns.insert<TileWorkgroups>(
- context,
- linalg::LinalgTilingOptions().setTileSizeComputationFunction(
- [](OpBuilder &builder, Operation *op) -> SmallVector<Value, 4> {
- return getTileSizes(builder, op,
- static_cast<unsigned>(TilingLevel::L1Tiles));
- }),
- linalg::LinalgTransformationFilter(
- ArrayRef<StringAttr>{},
- StringAttr::get(context, getWorkgroupL1TileMarker())));
-
- if (failed(applyPatternsAndFoldGreedily(funcOp, std::move(l1patterns)))) {
- return signalPassFailure();
- }
-
- DEBUG_WITH_TYPE(DEBUG_TYPE, {
- llvm::dbgs() << "\n--- After first level of tiling patterns ---\n";
- funcOp.print(llvm::dbgs(), OpPrintingFlags().useLocalScope());
- llvm::dbgs() << "\n\n";
- });
- }
-
- // Apply canoncalization
- {
- OwningRewritePatternList canonicalizationPatterns(&getContext());
- linalg::populateLinalgTilingCanonicalizationPatterns(
- canonicalizationPatterns);
- memref::DimOp::getCanonicalizationPatterns(canonicalizationPatterns,
- context);
- scf::populateSCFForLoopCanonicalizationPatterns(canonicalizationPatterns);
- if (failed(applyPatternsAndFoldGreedily(
- funcOp, std::move(canonicalizationPatterns)))) {
- return signalPassFailure();
- }
-
- DEBUG_WITH_TYPE(DEBUG_TYPE, {
- llvm::dbgs() << "\n--- After canonicalization ---\n";
- funcOp.print(llvm::dbgs(), OpPrintingFlags().useLocalScope());
- llvm::dbgs() << "\n\n";
- });
- }
-
- // Second level of tiling patterns{
- {
- OwningRewritePatternList l2patterns(&getContext());
- l2patterns.insert<TileWorkgroups>(
- context,
- linalg::LinalgTilingOptions().setTileSizeComputationFunction(
- [](OpBuilder &builder, Operation *op) -> SmallVector<Value, 4> {
- return getTileSizes(
- builder, op, static_cast<unsigned>(TilingLevel::VectorTiles));
- }),
- linalg::LinalgTransformationFilter(
- StringAttr::get(context, getWorkgroupL1TileMarker()),
- StringAttr::get(context, getVectorizeMarker())));
-
- if (failed(applyPatternsAndFoldGreedily(funcOp, std::move(l2patterns)))) {
- return signalPassFailure();
- }
-
- DEBUG_WITH_TYPE(DEBUG_TYPE, {
- llvm::dbgs() << "\n--- After second level of tiling patterns ---\n";
- funcOp.print(llvm::dbgs(), OpPrintingFlags().useLocalScope());
- llvm::dbgs() << "\n\n";
- });
- }
-
- // Apply canoncalization
- {
- OwningRewritePatternList canonicalizationPatterns(&getContext());
- linalg::populateLinalgTilingCanonicalizationPatterns(
- canonicalizationPatterns);
- memref::DimOp::getCanonicalizationPatterns(canonicalizationPatterns,
- context);
- scf::populateSCFForLoopCanonicalizationPatterns(canonicalizationPatterns);
- if (failed(applyPatternsAndFoldGreedily(
- funcOp, std::move(canonicalizationPatterns)))) {
- return signalPassFailure();
- }
-
- DEBUG_WITH_TYPE(DEBUG_TYPE, {
- llvm::dbgs() << "\n--- After canonicalization ---\n";
- funcOp.print(llvm::dbgs(), OpPrintingFlags().useLocalScope());
- llvm::dbgs() << "\n\n";
- });
- }
-
- if (!lowerToVectors) {
- return;
- }
-
- // Op specific conversion.
- {
- RewritePatternSet vectorizeOpsPattenrs(context);
- populateLinalgToVectorVectorizeMMT4dPatterns(context, vectorizeOpsPattenrs);
- if (failed(applyPatternsAndFoldGreedily(funcOp,
- std::move(vectorizeOpsPattenrs)))) {
- return signalPassFailure();
- }
- }
-
- // Apply vectorization patterns.
- {
- OwningRewritePatternList vectorizationPatterns(&getContext());
- linalg::LinalgVectorizationOptions opt;
- linalg::LinalgTransformationFilter f(
- StringAttr::get(context, getVectorizeMarker()));
- linalg::VectorizationPatterns<linalg::CopyOp, linalg::FillOp>::insert(
- vectorizationPatterns, opt, f);
- vectorizationPatterns.add<linalg::LinalgVectorizationPattern>(
- context, f.addOpFilter<linalg::ContractionOpInterface>(), opt);
- vector::populateVectorTransferPermutationMapLoweringPatterns(
- vectorizationPatterns);
- vector::populateVectorReductionToContractPatterns(vectorizationPatterns);
- if (failed(applyPatternsAndFoldGreedily(
- funcOp, std::move(vectorizationPatterns)))) {
- return signalPassFailure();
- }
-
- DEBUG_WITH_TYPE(DEBUG_TYPE, {
- llvm::dbgs() << "\n--- After vectorization ---\n";
- funcOp.print(llvm::dbgs(), OpPrintingFlags().useLocalScope());
- llvm::dbgs() << "\n\n";
- });
- }
-
- {
- // Fold consumer add ops into the contraction op itself.
- RewritePatternSet canonicalizationPatterns(context);
- vector::ContractionOp::getCanonicalizationPatterns(canonicalizationPatterns,
- context);
- if (failed(applyPatternsAndFoldGreedily(
- funcOp, std::move(canonicalizationPatterns)))) {
- return signalPassFailure();
- }
-
- DEBUG_WITH_TYPE(DEBUG_TYPE, {
- llvm::dbgs()
- << "\n--- After folding consumer add ops into contraction op "
- "iteself ---\n";
- funcOp.print(llvm::dbgs(), OpPrintingFlags().useLocalScope());
- llvm::dbgs() << "\n\n";
- });
- }
-
- // Apply vector specific operation lowering.
- {
- vector::VectorTransformsOptions vectorTransformsOptions =
- vector::VectorTransformsOptions().setVectorTransformsOptions(
- vector::VectorContractLowering::OuterProduct);
- OwningRewritePatternList vectorContractLoweringPatterns(&getContext());
- vectorContractLoweringPatterns.insert<
- vector::ContractionOpToOuterProductOpLowering,
- vector::ContractionOpToMatmulOpLowering, vector::ContractionOpLowering>(
- vectorTransformsOptions, context);
- vector::populateVectorTransferPermutationMapLoweringPatterns(
- vectorContractLoweringPatterns);
- if (failed(applyPatternsAndFoldGreedily(
- funcOp, std::move(vectorContractLoweringPatterns)))) {
- return signalPassFailure();
- }
-
- DEBUG_WITH_TYPE(DEBUG_TYPE, {
- llvm::dbgs() << "\n--- After vector specific operatrion lowering ---\n";
- funcOp.print(llvm::dbgs(), OpPrintingFlags().useLocalScope());
- llvm::dbgs() << "\n\n";
- });
- }
-}
-
-std::unique_ptr<OperationPass<FuncOp>> createLLVMCPUTileAndVectorizePass(
- bool lowerToVectors) {
- return std::make_unique<LLVMCPUTileAndVectorizePass>(lowerToVectors);
-}
-
-} // namespace iree_compiler
-} // namespace mlir
diff --git a/iree/compiler/Codegen/LLVMCPU/LLVMCPUTileFuseAndVectorizeLinalgTensorOps.cpp b/iree/compiler/Codegen/LLVMCPU/LLVMCPUTileFuseAndVectorizeLinalgTensorOps.cpp
index 093030b..1a5e806 100644
--- a/iree/compiler/Codegen/LLVMCPU/LLVMCPUTileFuseAndVectorizeLinalgTensorOps.cpp
+++ b/iree/compiler/Codegen/LLVMCPU/LLVMCPUTileFuseAndVectorizeLinalgTensorOps.cpp
@@ -28,6 +28,15 @@
namespace mlir {
namespace iree_compiler {
+// A flag to switch between inline asm and intrinsics while we develop these two
+// parallel paths.
+static llvm::cl::opt<bool> clUseMmt4dUseIntrinsics(
+ "iree-codegen-mmt4d-use-intrinsics",
+ llvm::cl::desc("Whether to use instrinsics when lowering vector contracts "
+ "generated from mmt4d matmuls (as opposed to inline asm). "
+ "Not for production use."),
+ llvm::cl::init(false));
+
namespace {
// Could just be linalg::TilingPattern with a ContractionOpInterface filter, but
// that is always templated on an op.
@@ -66,7 +75,7 @@
LogicalResult applyTileAndFuseCanonicalizationPatterns(FuncOp funcOp) {
auto context = funcOp.getContext();
- OwningRewritePatternList patterns(context);
+ RewritePatternSet patterns(context);
linalg::populateLinalgTilingCanonicalizationPatterns(patterns);
tensor::DimOp::getCanonicalizationPatterns(patterns, context);
memref::DimOp::getCanonicalizationPatterns(patterns, context);
@@ -143,7 +152,7 @@
}
{
- OwningRewritePatternList tileReductionPatterns(&getContext());
+ RewritePatternSet tileReductionPatterns(&getContext());
// TODO(hanchung): Add a pattern to fold the tensor.extract_slice op.
// One-trip loop can be removed. But weird patterns could be generated and
@@ -221,7 +230,7 @@
funcOp.walk([&](linalg::ContractionOpInterface op) {
setMarker(op, getWorkgroupL1TileMarker());
});
- OwningRewritePatternList l2patterns(&getContext());
+ RewritePatternSet l2patterns(&getContext());
l2patterns.insert<TileWorkgroups>(
context,
linalg::LinalgTilingOptions().setTileSizeComputationFunction(
@@ -265,7 +274,7 @@
// Apply vectorization patterns.
{
- OwningRewritePatternList vectorizationPatterns(&getContext());
+ RewritePatternSet vectorizationPatterns(&getContext());
linalg::LinalgVectorizationOptions opt;
linalg::LinalgTransformationFilter f(
StringAttr::get(context, getVectorizeMarker()));
@@ -342,6 +351,7 @@
// just before the generic vector ops lowerings.
CustomKernelsTargetInfo info;
if (succeeded(InferCustomKernelsTargetInfoFromParent(funcOp, info))) {
+ info.intrinsics = clUseMmt4dUseIntrinsics;
RewritePatternSet patterns(context);
populateVectorContractCustomKernelsPatterns(info, patterns);
if (failed(applyPatternsAndFoldGreedily(funcOp, std::move(patterns)))) {
@@ -355,7 +365,7 @@
vector::VectorTransformsOptions vectorTransformsOptions =
vector::VectorTransformsOptions().setVectorTransformsOptions(
vector::VectorContractLowering::OuterProduct);
- OwningRewritePatternList vectorContractLoweringPatterns(&getContext());
+ RewritePatternSet vectorContractLoweringPatterns(&getContext());
vectorContractLoweringPatterns.insert<
vector::ContractionOpToOuterProductOpLowering,
vector::ContractionOpToMatmulOpLowering, vector::ContractionOpLowering>(
diff --git a/iree/compiler/Codegen/LLVMCPU/LLVMCPUUnfuseFMAOps.cpp b/iree/compiler/Codegen/LLVMCPU/LLVMCPUUnfuseFMAOps.cpp
index eab7430..353255a 100644
--- a/iree/compiler/Codegen/LLVMCPU/LLVMCPUUnfuseFMAOps.cpp
+++ b/iree/compiler/Codegen/LLVMCPU/LLVMCPUUnfuseFMAOps.cpp
@@ -46,14 +46,14 @@
} // namespace
void populateUnfusedFMAOpsPassPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
patterns.insert<UnfusedFMAOpsPassConversion>(context);
}
void LLVMCPUUnfuseFMAOpsPass::runOnOperation() {
auto funcOp = getOperation();
auto context = funcOp.getContext();
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
populateUnfusedFMAOpsPassPatterns(context, patterns);
if (failed(applyPatternsAndFoldGreedily(funcOp, std::move(patterns)))) {
return signalPassFailure();
diff --git a/iree/compiler/Codegen/LLVMCPU/Passes.cpp b/iree/compiler/Codegen/LLVMCPU/Passes.cpp
index e45f9e7..0131ae1 100644
--- a/iree/compiler/Codegen/LLVMCPU/Passes.cpp
+++ b/iree/compiler/Codegen/LLVMCPU/Passes.cpp
@@ -13,9 +13,11 @@
#include "iree/compiler/Codegen/Sandbox/Passes.h"
#include "iree/compiler/Codegen/Utils/Utils.h"
#include "llvm/Support/CommandLine.h"
+#include "mlir/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.h"
#include "mlir/Conversion/SCFToStandard/SCFToStandard.h"
#include "mlir/Dialect/Arithmetic/Transforms/Passes.h"
#include "mlir/Dialect/Linalg/Passes.h"
+#include "mlir/Dialect/MemRef/Transforms/Passes.h"
#include "mlir/Dialect/StandardOps/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
@@ -67,7 +69,7 @@
// Codegen configuration verifications.
//===---------------------------------------------------------------------===//
-LogicalResult verifyTensorToVectorsPassPipelineConfig(
+LogicalResult verifyDoubleTilingExpertPassPipelineConfig(
Operation *op, IREE::Codegen::LoweringConfigAttr loweringConfig,
IREE::Codegen::TranslationInfoAttr translationInfo,
ArrayRef<int64_t> workgroupSize) {
@@ -78,7 +80,7 @@
// Verify that the translation info is using the right pipeline.
auto pipeline =
- IREE::Codegen::DispatchLoweringPassPipeline::CPUTensorToVectors;
+ IREE::Codegen::DispatchLoweringPassPipeline::CPUDoubleTilingExpert;
StringRef pipelineName = stringifyEnum(pipeline);
if (translationInfo.getDispatchLoweringPassPipeline() != pipeline) {
return op->emitOpError("expected pipeline in translation.info to be ")
@@ -106,11 +108,12 @@
return op->emitOpError("invalid to use 0 in workload_per_wg");
}
- if (loweringConfig.getTileSizes().size() != 3) {
- return op->emitOpError("expected three levels of tile sizes for ")
+ if (loweringConfig.getTileSizes().size() != 2) {
+ return op->emitOpError("expected two levels of tile sizes for ")
<< pipelineName << ", got " << loweringConfig.getTileSizes().size();
}
- SmallVector<int64_t> firstLevelTileSizes = loweringConfig.getTileSizeVals(0);
+ SmallVector<int64_t> firstLevelTileSizes = loweringConfig.getTileSizeVals(
+ static_cast<unsigned>(TilingLevel::WorkGroupTiles));
if (!firstLevelTileSizes.empty()) {
// Verify that if the first-level tile sizes are set, they are the same as
// workload_per_wg for the partitioned loops.
@@ -147,9 +150,8 @@
SmallVector<int64_t> nativeVectorSize =
loweringConfig.getNativeVectorSizeVals();
if (!nativeVectorSize.empty()) {
- if (nativeVectorSize !=
- loweringConfig.getTileSizeVals(
- static_cast<unsigned>(TilingLevel::VectorTiles))) {
+ if (nativeVectorSize != loweringConfig.getTileSizeVals(
+ static_cast<unsigned>(TilingLevel::L1Tiles))) {
return op->emitOpError(
"native_vector_size must be same as the last level of tiling");
}
@@ -157,27 +159,13 @@
return success();
}
-void addTensorToVectorsPassPipeline(OpPassManager &passManager,
- bool lowerToVectors) {
- passManager.addPass(createCanonicalizerPass());
-
- // Tile and vectorize linalg ops on tensors.
- passManager.addNestedPass<FuncOp>(
- createLLVMCPUTileAndVectorizePass(lowerToVectors));
- passManager.addNestedPass<FuncOp>(createCSEPass());
- passManager.addNestedPass<FuncOp>(createCanonicalizerPass());
-
- // Use stack allocation on CPU side.
- addLinalgBufferizePasses(passManager, cpuAllocationFunction);
- passManager.addNestedPass<FuncOp>(createCSEPass());
- passManager.addNestedPass<FuncOp>(createCanonicalizerPass());
-
- passManager.addNestedPass<FuncOp>(createForOpCanonicalizationPass());
-
- passManager.addNestedPass<FuncOp>(createOptimizeVectorTransferPass());
-}
+//===---------------------------------------------------------------------===//
+// Codegen pipelines.
+//===---------------------------------------------------------------------===//
void addSingleTilingExpertPassPipeline(OpPassManager &passManager) {
+ passManager.addNestedPass<FuncOp>(
+ createConvertToDestinationPassingStylePass());
passManager.addPass(createCanonicalizerPass());
// Add the sandbox single tiling expert to tile and vectorize.
{
@@ -207,19 +195,28 @@
}
void addDoubleTilingExpertPassPipeline(OpPassManager &passManager) {
+ passManager.addNestedPass<FuncOp>(
+ createConvertToDestinationPassingStylePass());
+
passManager.addPass(createCanonicalizerPass());
+
+ // Run LinalgFusePass first in case we have fill + matmul + generic ops. At
+ // this stage we do not apply vectorization. The reduction dim won't get
+ // tiled in the matmul + generic case, so we have to tile along the
+ // reduction dim again, which requires the ops to still be in Linalg form.
{
- passManager.addNestedPass<FuncOp>(createRemoveSingleIterationLoopPass());
- LinalgSingleTilingExpertPassOptions options;
+ LinalgFusePassOptions options;
options.tilingLevel = static_cast<int64_t>(TilingLevel::L1Tiles);
- options.tileInterchange = {0, 2, 1};
- passManager.addNestedPass<FuncOp>(
- createLinalgSingleTilingExpertPass(options));
+ passManager.addNestedPass<FuncOp>(createLinalgFusePass(options));
passManager.addNestedPass<FuncOp>(createCanonicalizerPass());
passManager.addNestedPass<FuncOp>(createCSEPass());
}
// Add the sandbox single tiling expert to tile and vectorize.
+ // This might create three additional one-trip loops if the dim sizes are
+ // not divisible by the tile sizes. That can hurt performance in some cases,
+ // e.g., matmul(1x384, 384x384), etc.
+ // TODO(hanchung): Add canonicalization patterns to remove one-trip loops.
{
// The options are derived from sandbox codegen driver. hoistPadding options
// does not work in IREE cases. It's fine to not have it, since it's already
@@ -230,10 +227,11 @@
options.pad = true;
options.packPaddings = {1, 1, 0};
// options.hoistPaddings = {5, 6, 0};
- options.tilingLevel = static_cast<int64_t>(TilingLevel::VectorTiles);
- options.tileInterchange = {0, 1, 2};
+ options.tilingLevel = static_cast<int64_t>(TilingLevel::L1Tiles);
passManager.addNestedPass<FuncOp>(
createLinalgSingleTilingExpertPass(options));
+ passManager.addNestedPass<FuncOp>(createCanonicalizerPass());
+ passManager.addNestedPass<FuncOp>(createCSEPass());
}
// TODO(ravishankarm): This is commented cause this is WIP, to be enabled
@@ -246,6 +244,9 @@
// addIREEComprehensiveBufferizePasses(passManager, std::move(callbacks));
addLinalgBufferizePasses(passManager, cpuAllocationFunction);
+ // Run IREE specific passes before vector lowering expert.
+ passManager.addNestedPass<FuncOp>(createRemoveSingleIterationLoopPass());
+
// Add the vector lowering expert.
{
OpPassManager &nestedFuncPassManager = passManager.nest<FuncOp>();
@@ -313,10 +314,14 @@
passManager.addPass(createTensorConstantBufferizePass());
passManager.addPass(createFoldTensorExtractOpPass());
+ // math dialect elementary functions -> polynomial form.
+ passManager.addNestedPass<FuncOp>(createPolynomialApproximationPass());
+
// (HAL, IREE, Linalg, STD) -> LLVM
passManager.addNestedPass<FuncOp>(arith::createArithmeticExpandOpsPass());
- passManager.addNestedPass<FuncOp>(createStdExpandOpsPass());
+ passManager.addNestedPass<FuncOp>(memref::createExpandOpsPass());
passManager.addPass(createConvertToLLVMPass());
+ passManager.addPass(createReconcileUnrealizedCastsPass());
// We rely on MLIR symbol visibility being correct after this point and need
// to mirror the LLVM linkage that was assigned during conversion.
@@ -327,6 +332,8 @@
}
void buildLLVMCPUCodegenPassPipeline(OpPassManager &passManager) {
+ passManager.nest<ModuleOp>().nest<FuncOp>().addPass(
+ createTypePropagationPass());
passManager.addPass(createLLVMCPULowerExecutableTargetPass());
OpPassManager &nestedModulePM = passManager.nest<ModuleOp>();
addLowerToLLVMPasses(nestedModulePM);
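
With `verifyDoubleTilingExpertPassPipelineConfig` now requiring exactly two tile-size levels (workgroup and L1), a config that passes the check can be built the same way `setX86SandboxRootConfig` does in KernelDispatch.cpp. A sketch, assuming the includes and helpers already used in that file:

```cpp
#include "iree/compiler/Codegen/Dialect/LoweringConfig.h"

static void setTwoLevelLoweringConfig(mlir::FuncOp entryPointFn,
                                      mlir::Operation *rootOp) {
  using namespace mlir::iree_compiler;
  TileSizesListType tileSizes;
  tileSizes.push_back({});           // level 0: workgroup tiles (already distributed)
  tileSizes.push_back({8, 32, 16});  // level 1: L1 tiles, as in setX86SandboxRootConfig
  auto config = IREE::Codegen::LoweringConfigAttr::get(
      entryPointFn.getContext(), tileSizes, /*nativeVectorSize=*/{});
  setLoweringConfig(rootOp, config);
}
```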
diff --git a/iree/compiler/Codegen/LLVMCPU/VectorContractCustomKernels.cpp b/iree/compiler/Codegen/LLVMCPU/VectorContractCustomKernels.cpp
index 11e3bc8..0881503 100644
--- a/iree/compiler/Codegen/LLVMCPU/VectorContractCustomKernels.cpp
+++ b/iree/compiler/Codegen/LLVMCPU/VectorContractCustomKernels.cpp
@@ -11,6 +11,7 @@
#include "llvm/ADT/Triple.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
+#include "mlir/Dialect/ArmNeon/ArmNeonDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
@@ -150,6 +151,9 @@
// Checks that the Value `extResult` is defined by an arith::ExtSIOp promoting
// from `extSrcType` to `extDstType`, and returns the input of the ExtSIOp.
+// Note that this only looks at the immediately defining operation, so we likely
+// want to have earlier passes that sink widening operations as far down as
+// possible, which is probably just good regardless.
static Value getExtSIInput(Type extSrcType, Type extDstType, Value extResult) {
auto extSIOp = extResult.getDefiningOp<arith::ExtSIOp>();
if (!extSIOp) {
@@ -248,9 +252,6 @@
extract1DSlice(rewriter, loc, int32x4Type, flatAcc, position));
}
- // Start of the code that's specific to inline assembly. An intrinsics
- // code path would diverge here.
-
// Create the inline asm op's operands list.
SmallVector<Value> asmOperands;
// First the inputs operands.
@@ -299,9 +300,6 @@
loc, int32x4Type, asmOp.getRes(), rewriter.getI64ArrayAttr({i})));
}
- // End of the code that's specific to inline assembly. An intrinsics code
- // path would merge here.
-
// Insert the result vectors of size 4 into the overall result vector of
// size 64, still 1D.
VectorType int32x64xType = VectorType::get({64}, I32Type);
@@ -321,11 +319,129 @@
}
};
+/// Converts matrix-times-matrix-transposed vector.contracts with
+/// lhs and rhs inputs defined by arith.extsi promoting from i8 to i32,
+///
+/// %lhs_i32 = arith.extsi %lhs_i8 : i8 to i32
+/// %rhs_i32 = arith.extsi %rhs_i8 : i8 to i32
+/// %result = vector.contract [...]
+/// %lhs_i32 : vector<8x4xi32>,
+/// %rhs_i32 : vector<8x4xi32>,
+/// %acc_i32 : vector<8x8xi32>,
+/// [...]
+///
+/// To vector ops reading directly from the %lhs_i8 and %rhs_i8 values
+/// (bypassing the existing arith.extsi) and implementing the matrix
+/// multiplication arithmetic with ArmNeon dialect sdot intrinsic ops
+/// (Aarch64 dot-product instructions).
+/// It matches the same IR as MMT_8x4x8_i8i8i32_Aarch64Dotprod_InlineAsm.
+struct MMT_8x4x8_i8i8i32_Aarch64Dotprod_Intrinsics
+ : public OpRewritePattern<vector::ContractionOp> {
+ public:
+ using OpRewritePattern<vector::ContractionOp>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(vector::ContractionOp contractionOp,
+ PatternRewriter &rewriter) const override {
+ if (!isMatrixTimesMatrixTransposedOfGivenShape(contractionOp, 8, 4, 8)) {
+ return failure();
+ }
+
+ Type I8Type = rewriter.getIntegerType(8);
+ Type I32Type = rewriter.getIntegerType(32);
+
+ auto acc = contractionOp.acc();
+ auto lhs = contractionOp.lhs();
+ auto rhs = contractionOp.rhs();
+ if (acc.getType().cast<VectorType>().getElementType() != I32Type) {
+ return failure();
+ }
+
+ Value inLhs = getExtSIInput(I8Type, I32Type, lhs);
+ Value inRhs = getExtSIInput(I8Type, I32Type, rhs);
+
+ if (!inLhs || !inRhs) return failure();
+
+ auto loc = contractionOp.getLoc();
+
+ auto int32x4VType = VectorType::get({4}, I32Type);
+
+ std::array<Value, 16> accChunks;
+ {
+ int idx = 0;
+ for (int row = 0; row < 8; ++row) {
+ auto accRow = rewriter.create<vector::ExtractOp>(
+ loc, acc, ArrayRef<int64_t>{row});
+ for (int col = 0; col < 8; col += 4) {
+ auto accChunk = rewriter.create<vector::ExtractStridedSliceOp>(
+ loc, accRow, ArrayRef<int64_t>{col}, ArrayRef<int64_t>{4},
+ ArrayRef<int64_t>{1});
+ assert(accChunk.getType() == int32x4VType);
+ accChunks[idx++] = accChunk;
+ }
+ }
+ }
+
+ auto int8x4x4VType = VectorType::get({4, 4}, rewriter.getIntegerType(8));
+ auto extract4x4 = [&](Value in, int rowOffset, int colOffset) {
+ auto chunk = rewriter.create<vector::ExtractStridedSliceOp>(
+ loc, in, ArrayRef<int64_t>{rowOffset, colOffset},
+ ArrayRef<int64_t>{4, 4}, ArrayRef<int64_t>{1, 1});
+ assert(chunk.getType() == int8x4x4VType);
+ return chunk;
+ };
+
+ std::array<Value, 2> lhsHalves = {extract4x4(inLhs, 0, 0),
+ extract4x4(inLhs, 4, 0)};
+ std::array<Value, 2> rhsHalves = {extract4x4(inRhs, 0, 0),
+ extract4x4(inRhs, 4, 0)};
+
+ auto int8Zero4x4 = rewriter.create<arith::ConstantOp>(
+ loc, rewriter.getZeroAttr(int8x4x4VType));
+ auto sdot = [&](Value acc, Value a, Value b, int64_t lane) -> Value {
+ auto bReplicatedLane = rewriter.create<vector::ShuffleOp>(
+ loc, b, int8Zero4x4, ArrayRef<int64_t>{lane, lane, lane, lane});
+
+ return rewriter.create<arm_neon::Sdot2dOp>(loc, int32x4VType, acc, a,
+ bReplicatedLane);
+ };
+
+ std::array<Value, 16> dstChunks;
+ {
+ int idx = 0;
+ for (Value lhs : lhsHalves) {
+ for (int lane = 0; lane < 4; ++lane) {
+ for (Value rhs : rhsHalves) {
+ dstChunks[idx] = sdot(accChunks[idx], rhs, lhs, lane);
+ ++idx;
+ }
+ }
+ }
+ }
+
+ // Put the results back in the accumulator
+ {
+ int idx = 0;
+ for (int row = 0; row < 8; ++row) {
+ for (int col = 0; col < 8; col += 4) {
+ acc = rewriter.create<vector::InsertStridedSliceOp>(
+ loc, dstChunks[idx++], acc, ArrayRef<int64_t>{row, col},
+ ArrayRef<int64_t>{1});
+ }
+ }
+ }
+ rewriter.replaceOp(contractionOp, {acc});
+ return success();
+ }
+};
+
class VectorContractCustomKernelsPass
: public VectorContractCustomKernelsBase<VectorContractCustomKernelsPass> {
public:
 void getDependentDialects(DialectRegistry &registry) const override {
registry.insert<vector::VectorDialect, LLVM::LLVMDialect>();
+ if (target_info.intrinsics) {
+ registry.insert<arm_neon::ArmNeonDialect>();
+ }
}
LogicalResult initializeOptions(StringRef options) override {
if (failed(Pass::initializeOptions(options))) {
@@ -333,11 +449,12 @@
}
target_info.aarch64 = aarch64;
target_info.dotprod = dotprod;
+ target_info.intrinsics = intrinsics;
return success();
}
void runOnOperation() override {
MLIRContext *context = &getContext();
- OwningRewritePatternList patterns(context);
+ RewritePatternSet patterns(context);
populateVectorContractCustomKernelsPatterns(target_info, patterns);
if (failed(applyPatternsAndFoldGreedily(getOperation(),
std::move(patterns)))) {
@@ -352,11 +469,14 @@
} // namespace
void populateVectorContractCustomKernelsPatterns(
- const CustomKernelsTargetInfo &target_info,
- OwningRewritePatternList &patterns) {
+ const CustomKernelsTargetInfo &target_info, RewritePatternSet &patterns) {
MLIRContext *context = patterns.getContext();
if (target_info.aarch64 && target_info.dotprod) {
- patterns.insert<MMT_8x4x8_i8i8i32_Aarch64Dotprod_InlineAsm>(context);
+ if (target_info.intrinsics) {
+ patterns.insert<MMT_8x4x8_i8i8i32_Aarch64Dotprod_Intrinsics>(context);
+ } else {
+ patterns.insert<MMT_8x4x8_i8i8i32_Aarch64Dotprod_InlineAsm>(context);
+ }
}
}
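
For readers unfamiliar with the dot-product instruction the intrinsics path targets: each sdot issued by the new pattern accumulates four 4-element i8 dot products into a 4xi32 accumulator chunk, pairing a 4x4 chunk of one operand against a single row of the other that the vector.shuffle has replicated across all four lanes. A scalar reference model of that step (plain C++, only meant to pin down the arithmetic):

```cpp
#include <array>
#include <cstdint>

using Int8x4 = std::array<int8_t, 4>;
using Int8x4x4 = std::array<Int8x4, 4>;
using Int32x4 = std::array<int32_t, 4>;

// acc[i] += dot(chunk[i], broadcastRow) for i in [0, 4): what one sdot
// computes once the single row has been replicated across all four lanes.
Int32x4 sdotStep(Int32x4 acc, const Int8x4x4 &chunk, const Int8x4 &broadcastRow) {
  for (int i = 0; i < 4; ++i)
    for (int k = 0; k < 4; ++k)
      acc[i] += int32_t(chunk[i][k]) * int32_t(broadcastRow[k]);
  return acc;
}
```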
diff --git a/iree/compiler/Codegen/LLVMCPU/test/BUILD b/iree/compiler/Codegen/LLVMCPU/test/BUILD
index d390ba2..57e06d0 100644
--- a/iree/compiler/Codegen/LLVMCPU/test/BUILD
+++ b/iree/compiler/Codegen/LLVMCPU/test/BUILD
@@ -18,20 +18,20 @@
iree_lit_test_suite(
name = "lit",
srcs = enforce_glob(
+ # keep sorted
[
"check_ir_before_llvm_conversion.mlir",
"hal_interface_bindings.mlir",
"hal_interface_constants.mlir",
"hal_interface_workgroup_info.mlir",
"illegal_configuration.mlir",
- "materialize_double_tiling_expert_configuration.mlir",
"materialize_launch_configuration.mlir",
"synchronize_symbol_visibility.mlir",
"test_config_mmt4d.mlir",
- "tile_and_vectorize.mlir",
"tile_fuse_and_vectorize.mlir",
"unfused_fma.mlir",
- "vector_contract_custom_kernels.mlir",
+ "vector_contract_to_arm_asm.mlir",
+ "vector_contract_to_arm_intrinsics.mlir",
],
include = ["*.mlir"],
),
diff --git a/iree/compiler/Codegen/LLVMCPU/test/CMakeLists.txt b/iree/compiler/Codegen/LLVMCPU/test/CMakeLists.txt
index cfd1235..1cb2a67 100644
--- a/iree/compiler/Codegen/LLVMCPU/test/CMakeLists.txt
+++ b/iree/compiler/Codegen/LLVMCPU/test/CMakeLists.txt
@@ -19,14 +19,13 @@
"hal_interface_constants.mlir"
"hal_interface_workgroup_info.mlir"
"illegal_configuration.mlir"
- "materialize_double_tiling_expert_configuration.mlir"
"materialize_launch_configuration.mlir"
"synchronize_symbol_visibility.mlir"
"test_config_mmt4d.mlir"
- "tile_and_vectorize.mlir"
"tile_fuse_and_vectorize.mlir"
"unfused_fma.mlir"
- "vector_contract_custom_kernels.mlir"
+ "vector_contract_to_arm_asm.mlir"
+ "vector_contract_to_arm_intrinsics.mlir"
TOOLS
FileCheck
iree::tools::iree-opt
diff --git a/iree/compiler/Codegen/LLVMCPU/test/illegal_configuration.mlir b/iree/compiler/Codegen/LLVMCPU/test/illegal_configuration.mlir
index 06e2606..2275104 100644
--- a/iree/compiler/Codegen/LLVMCPU/test/illegal_configuration.mlir
+++ b/iree/compiler/Codegen/LLVMCPU/test/illegal_configuration.mlir
@@ -1,7 +1,7 @@
// RUN: iree-opt -pass-pipeline='hal.executable(hal.executable.variant(iree-llvmcpu-lower-executable-target{test-lowering-configuration=true}))' -verify-diagnostics -split-input-file %s
#config = #iree_codegen.lowering.config<tile_sizes = [], native_vector_size = []>
-#translation = #iree_codegen.translation.info<"CPUTensorToVectors", workload_per_wg = []>
+#translation = #iree_codegen.translation.info<"CPUDoubleTilingExpert", workload_per_wg = []>
#executable_layout = #hal.executable.layout<push_constants = 0, sets = [
#hal.descriptor_set.layout<0, bindings = [
#hal.descriptor_set.binding<0, storage_buffer>,
@@ -32,7 +32,7 @@
// -----
#config = #iree_codegen.lowering.config<tile_sizes = [], native_vector_size = []>
-#translation = #iree_codegen.translation.info<"CPUTensorToVectors", workload_per_wg = [1, 0]>
+#translation = #iree_codegen.translation.info<"CPUDoubleTilingExpert", workload_per_wg = [1, 0]>
#executable_layout = #hal.executable.layout<push_constants = 0, sets = [
#hal.descriptor_set.layout<0, bindings = [
#hal.descriptor_set.binding<0, storage_buffer>,
@@ -63,7 +63,7 @@
// -----
#config = #iree_codegen.lowering.config<tile_sizes = [], native_vector_size = []>
-#translation = #iree_codegen.translation.info<"CPUTensorToVectors", workload_per_wg = [1, 1, 1, 1]>
+#translation = #iree_codegen.translation.info<"CPUDoubleTilingExpert", workload_per_wg = [1, 1, 1, 1]>
#executable_layout = #hal.executable.layout<push_constants = 0, sets = [
#hal.descriptor_set.layout<0, bindings = [
#hal.descriptor_set.binding<0, storage_buffer>,
@@ -94,7 +94,7 @@
// -----
#config = #iree_codegen.lowering.config<tile_sizes = [], native_vector_size = []>
-#translation = #iree_codegen.translation.info<"CPUTensorToVectors", workload_per_wg = [1, 1]>
+#translation = #iree_codegen.translation.info<"CPUDoubleTilingExpert", workload_per_wg = [1, 1]>
#executable_layout = #hal.executable.layout<push_constants = 0, sets = [
#hal.descriptor_set.layout<0, bindings = [
#hal.descriptor_set.binding<0, storage_buffer>,
@@ -113,7 +113,7 @@
%lhs = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<4x8xf32>
%rhs = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<8x16xf32>
%result = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) : memref<4x16xf32>
- // expected-error @+1 {{expected three levels of tile sizes for CPUTensorToVectors, got 0}}
+ // expected-error @+1 {{expected two levels of tile sizes for CPUDoubleTilingExpert, got 0}}
linalg.matmul {lowering.config = #config} ins(%lhs, %rhs : memref<4x8xf32>, memref<8x16xf32>)
outs(%result: memref<4x16xf32>)
return
@@ -124,8 +124,8 @@
// -----
-#config = #iree_codegen.lowering.config<tile_sizes = [[4, 8], [], []], native_vector_size = []>
-#translation = #iree_codegen.translation.info<"CPUTensorToVectors", workload_per_wg = [8, 6]>
+#config = #iree_codegen.lowering.config<tile_sizes = [[4, 8], []], native_vector_size = []>
+#translation = #iree_codegen.translation.info<"CPUDoubleTilingExpert", workload_per_wg = [8, 6]>
#executable_layout = #hal.executable.layout<push_constants = 0, sets = [
#hal.descriptor_set.layout<0, bindings = [
#hal.descriptor_set.binding<0, storage_buffer>,
@@ -155,8 +155,8 @@
// -----
-#config = #iree_codegen.lowering.config<tile_sizes = [[], [], [8, 8, 8]], native_vector_size = [4, 4, 4]>
-#translation = #iree_codegen.translation.info<"CPUTensorToVectors", workload_per_wg = [8, 4]>
+#config = #iree_codegen.lowering.config<tile_sizes = [[], [8, 8, 8]], native_vector_size = [4, 4, 4]>
+#translation = #iree_codegen.translation.info<"CPUDoubleTilingExpert", workload_per_wg = [8, 4]>
#executable_layout = #hal.executable.layout<push_constants = 0, sets = [
#hal.descriptor_set.layout<0, bindings = [
#hal.descriptor_set.binding<0, storage_buffer>,
@@ -183,3 +183,96 @@
}
}
}
+
+// -----
+
+#config = #iree_codegen.lowering.config<tile_sizes = [[], [8, 32, 16]], native_vector_size = []>
+#translation = #iree_codegen.translation.info<"CPUDoubleTilingExpert", workload_per_wg = []>
+#executable_layout = #hal.executable.layout<push_constants = 0, sets = [
+ #hal.descriptor_set.layout<0, bindings = [
+ #hal.descriptor_set.binding<0, storage_buffer>,
+ #hal.descriptor_set.binding<1, storage_buffer>,
+ #hal.descriptor_set.binding<2, storage_buffer>
+ ]>
+]>
+hal.executable private @matmul_tensors {
+ hal.executable.variant @llvm, target = #hal.executable.target<"llvm", "embedded-elf-x86_64", {}> {
+ hal.executable.entry_point @illegal layout(#executable_layout) attributes {
+ translation.info = #translation
+ }
+ builtin.module {
+ func @illegal() {
+ %c0 = arith.constant 0 : index
+ %lhs = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<4x8xf32>
+ %rhs = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<8x16xf32>
+ %result = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) : memref<4x16xf32>
+ // expected-error @+1 {{expected 2 entries for workload_per_wg, but got 0}}
+ linalg.matmul {lowering.config = #config} ins(%lhs, %rhs : memref<4x8xf32>, memref<8x16xf32>)
+ outs(%result: memref<4x16xf32>)
+ return
+ }
+ }
+ }
+}
+
+// -----
+
+#config = #iree_codegen.lowering.config<tile_sizes = [[], [32, 32, 32], [8, 32, 16]], native_vector_size = []>
+#translation = #iree_codegen.translation.info<"CPUDoubleTilingExpert", workload_per_wg = [64, 64]>
+#executable_layout = #hal.executable.layout<push_constants = 0, sets = [
+ #hal.descriptor_set.layout<0, bindings = [
+ #hal.descriptor_set.binding<0, storage_buffer>,
+ #hal.descriptor_set.binding<1, storage_buffer>,
+ #hal.descriptor_set.binding<2, storage_buffer>
+ ]>
+]>
+hal.executable private @matmul_tensors {
+ hal.executable.variant @llvm, target = #hal.executable.target<"llvm", "embedded-elf-x86_64", {}> {
+ hal.executable.entry_point @illegal layout(#executable_layout) attributes {
+ translation.info = #translation
+ }
+ builtin.module {
+ func @illegal() {
+ %c0 = arith.constant 0 : index
+ %lhs = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<4x8xf32>
+ %rhs = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<8x16xf32>
+ %result = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) : memref<4x16xf32>
+ // expected-error @+1 {{expected two levels of tile sizes for CPUDoubleTilingExpert, got 3}}
+ linalg.matmul {lowering.config = #config} ins(%lhs, %rhs : memref<4x8xf32>, memref<8x16xf32>)
+ outs(%result: memref<4x16xf32>)
+ return
+ }
+ }
+ }
+}
+
+// -----
+
+#config = #iree_codegen.lowering.config<tile_sizes = [[64, 32], [8, 32, 16]], native_vector_size = []>
+#translation = #iree_codegen.translation.info<"CPUDoubleTilingExpert", workload_per_wg = [64, 64]>
+#executable_layout = #hal.executable.layout<push_constants = 0, sets = [
+ #hal.descriptor_set.layout<0, bindings = [
+ #hal.descriptor_set.binding<0, storage_buffer>,
+ #hal.descriptor_set.binding<1, storage_buffer>,
+ #hal.descriptor_set.binding<2, storage_buffer>
+ ]>
+]>
+hal.executable private @matmul_tensors {
+ hal.executable.variant @llvm, target = #hal.executable.target<"llvm", "embedded-elf-x86_64", {}> {
+ hal.executable.entry_point @illegal layout(#executable_layout) attributes {
+ translation.info = #translation
+ }
+ builtin.module {
+ func @illegal() {
+ %c0 = arith.constant 0 : index
+ %lhs = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : memref<4x8xf32>
+ %rhs = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : memref<8x16xf32>
+ %result = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) : memref<4x16xf32>
+ // expected-error @+1 {{mismatch in distributed tile size value 32 at position 1 and workload_per_wg value 64}}
+ linalg.matmul {lowering.config = #config} ins(%lhs, %rhs : memref<4x8xf32>, memref<8x16xf32>)
+ outs(%result: memref<4x16xf32>)
+ return
+ }
+ }
+ }
+}
diff --git a/iree/compiler/Codegen/LLVMCPU/test/materialize_double_tiling_expert_configuration.mlir b/iree/compiler/Codegen/LLVMCPU/test/materialize_double_tiling_expert_configuration.mlir
deleted file mode 100644
index 49c6ef8..0000000
--- a/iree/compiler/Codegen/LLVMCPU/test/materialize_double_tiling_expert_configuration.mlir
+++ /dev/null
@@ -1,60 +0,0 @@
-// RUN: iree-opt --iree-codegen-use-double-tiling-expert -pass-pipeline='hal.executable(hal.executable.variant(iree-llvmcpu-lower-executable-target{test-lowering-configuration=true}))' -cse -canonicalize -split-input-file %s | FileCheck %s
-
-#executable_layout = #hal.executable.layout<push_constants = 0, sets = [
- #hal.descriptor_set.layout<0, bindings = [
- #hal.descriptor_set.binding<0, storage_buffer>,
- #hal.descriptor_set.binding<1, storage_buffer>,
- #hal.descriptor_set.binding<2, storage_buffer>
- ]>
-]>
-hal.executable private @matmul_x86 {
- hal.executable.variant public @embedded_elf_x86_64, target = #hal.executable.target<
- "llvm",
- "embedded-elf-x86_64", {
- data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128",
- native_vector_size = 16 : index,
- target_triple = "x86_64-unknown-unknown-eabi-elf"
- }> {
- hal.executable.entry_point public @matmul_x86 layout(#executable_layout)
- builtin.module {
- func @matmul_x86() {
- %c128 = arith.constant 128 : index
- %c384 = arith.constant 384 : index
- %cst = arith.constant 0.000000e+00 : f32
- %c0 = arith.constant 0 : index
- %0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : !flow.dispatch.tensor<readonly:384x512xf32>
- %1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : !flow.dispatch.tensor<readonly:512x128xf32>
- %2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) : !flow.dispatch.tensor<writeonly:384x128xf32>
- %workgroup_size_x = hal.interface.workgroup.size[0] : index
- %workgroup_size_y = hal.interface.workgroup.size[1] : index
- %workgroup_id_x = hal.interface.workgroup.id[0] : index
- %workgroup_count_x = hal.interface.workgroup.count[0] : index
- %workgroup_id_y = hal.interface.workgroup.id[1] : index
- %workgroup_count_y = hal.interface.workgroup.count[1] : index
- %3 = affine.apply affine_map<()[s0, s1] -> (s0 * s1)>()[%workgroup_id_y, %workgroup_size_y]
- %4 = affine.apply affine_map<()[s0, s1] -> (s0 * s1)>()[%workgroup_count_y, %workgroup_size_y]
- scf.for %arg0 = %3 to %c384 step %4 {
- %5 = affine.apply affine_map<()[s0, s1] -> (s0 * s1)>()[%workgroup_id_x, %workgroup_size_x]
- %6 = affine.apply affine_map<()[s0, s1] -> (s0 * s1)>()[%workgroup_count_x, %workgroup_size_x]
- scf.for %arg1 = %5 to %c128 step %6 {
- %7 = affine.min affine_map<(d0)[s0] -> (s0, -d0 + 384)>(%arg0)[%workgroup_size_y]
- %8 = flow.dispatch.tensor.load %0, offsets = [%arg0, 0], sizes = [%7, 512], strides = [1, 1] : !flow.dispatch.tensor<readonly:384x512xf32> -> tensor<?x512xf32>
- %9 = affine.min affine_map<(d0)[s0] -> (s0, -d0 + 128)>(%arg1)[%workgroup_size_x]
- %10 = flow.dispatch.tensor.load %1, offsets = [0, %arg1], sizes = [512, %9], strides = [1, 1] : !flow.dispatch.tensor<readonly:512x128xf32> -> tensor<512x?xf32>
- %11 = affine.min affine_map<(d0)[s0] -> (-d0 + 384, s0)>(%arg0)[%workgroup_size_y]
- %12 = affine.min affine_map<(d0)[s0] -> (-d0 + 128, s0)>(%arg1)[%workgroup_size_x]
- %13 = linalg.init_tensor [%11, %12] : tensor<?x?xf32>
- %14 = linalg.fill(%cst, %13) : f32, tensor<?x?xf32> -> tensor<?x?xf32>
- %15 = linalg.matmul ins(%8, %10 : tensor<?x512xf32>, tensor<512x?xf32>) outs(%14 : tensor<?x?xf32>) -> tensor<?x?xf32>
- flow.dispatch.tensor.store %15, %2, offsets = [%arg0, %arg1], sizes = [%7, %9], strides = [1, 1] : tensor<?x?xf32> -> !flow.dispatch.tensor<writeonly:384x128xf32>
- }
- }
- return
- }
- }
- }
-}
-
-// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation.info<"CPUDoubleTilingExpert", workload_per_wg = [64, 64]>
-// CHECK-DAG: #[[CONFIG:.+]] = #iree_codegen.lowering.config<tile_sizes = [{{\[}}], [288, 128, 512], [9, 32, 16]], native_vector_size = [9, 32, 16]>
-// CHECK: linalg.matmul {lowering.config = #[[CONFIG]]}
diff --git a/iree/compiler/Codegen/LLVMCPU/test/materialize_launch_configuration.mlir b/iree/compiler/Codegen/LLVMCPU/test/materialize_launch_configuration.mlir
index 7deed89..6438c9e 100644
--- a/iree/compiler/Codegen/LLVMCPU/test/materialize_launch_configuration.mlir
+++ b/iree/compiler/Codegen/LLVMCPU/test/materialize_launch_configuration.mlir
@@ -372,8 +372,8 @@
// -----
#compilation = #iree_codegen.compilation.info<
- #iree_codegen.lowering.config<tile_sizes = [[], [32, 32, 32], [4, 4, 4]], native_vector_size = [4, 4, 4]>,
- #iree_codegen.translation.info<"CPUTensorToVectors", workload_per_wg = [32, 32]>,
+ #iree_codegen.lowering.config<tile_sizes = [[], [32, 32, 32]], native_vector_size = [32, 32, 32]>,
+ #iree_codegen.translation.info<"CPUDoubleTilingExpert", workload_per_wg = [32, 32]>,
workgroup_size = []>
#executable_layout = #hal.executable.layout<push_constants = 0, sets = [
#hal.descriptor_set.layout<0, bindings = [
@@ -428,10 +428,10 @@
}
}
-// CHECK-DAG: #[[CONFIG:.+]] = #iree_codegen.lowering.config<tile_sizes = {{\[}}[], [32, 32, 32], [4, 4, 4]{{\]}}, native_vector_size = [4, 4, 4]>
+// CHECK-DAG: #[[CONFIG:.+]] = #iree_codegen.lowering.config<tile_sizes = {{\[}}[], [32, 32, 32]{{\]}}, native_vector_size = [32, 32, 32]>
// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0] -> (s0 ceildiv 32)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0] -> (s0 * 32)>
-// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation.info<"CPUTensorToVectors", workload_per_wg = [32, 32]>
+// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation.info<"CPUDoubleTilingExpert", workload_per_wg = [32, 32]>
// CHECK: hal.executable.entry_point
// CHECK-SAME: translation.info = #[[TRANSLATION]]
// CHECK-NEXT: ^bb0(%[[ARG0:[a-zA-Z0-9]+]]: index, %[[ARG1:[a-zA-Z0-9]+]]: index
@@ -1378,6 +1378,73 @@
}
}
+// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation.info<"CPUDoubleTilingExpert", workload_per_wg = [64, 64]>
+// CHECK-DAG: #[[CONFIG:.+]] = #iree_codegen.lowering.config<tile_sizes = [{{\[}}], [8, 32, 16]], native_vector_size = []>
+// CHECK: linalg.matmul {lowering.config = #[[CONFIG]]}
+
+// -----
+
+#executable_layout = #hal.executable.layout<push_constants = 0, sets = [
+ #hal.descriptor_set.layout<0, bindings = [
+ #hal.descriptor_set.binding<0, storage_buffer>,
+ #hal.descriptor_set.binding<1, storage_buffer>,
+ #hal.descriptor_set.binding<2, storage_buffer>
+ ]>
+]>
+hal.executable private @matmul_i8_i8_i32 {
+ hal.executable.variant public @embedded_elf_x86_64, target = #hal.executable.target<
+ "llvm",
+ "embedded-elf-x86_64", {
+ data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128",
+ native_vector_size = 4 : index,
+ target_triple = "x86_64-unknown-unknown-eabi-elf"
+ }> {
+ hal.executable.entry_point public @matmul_i8_i8_i32 layout(#executable_layout)
+ builtin.module {
+ func @matmul_i8_i8_i32() {
+ %c0 = arith.constant 0 : index
+ %0 = hal.interface.constant.load[0] : i32
+ %1 = hal.interface.constant.load[1] : i32
+ %2 = hal.interface.constant.load[2] : i32
+ %3 = hal.interface.constant.load[3] : i32
+ %4 = hal.interface.constant.load[4] : i32
+ %5 = hal.interface.constant.load[5] : i32
+ %6 = arith.index_cast %0 : i32 to index
+ %7 = arith.index_cast %1 : i32 to index
+ %8 = arith.index_cast %2 : i32 to index
+ %9 = arith.index_cast %3 : i32 to index
+ %10 = arith.index_cast %4 : i32 to index
+ %11 = arith.index_cast %5 : i32 to index
+ %12 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) offset(%c0) alignment(32) : !flow.dispatch.tensor<readonly:?x?xi8>{%6, %7}
+ %13 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) offset(%c0) alignment(32) : !flow.dispatch.tensor<readonly:?x?xi8>{%8, %9}
+ %14 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) offset(%c0) alignment(32) : !flow.dispatch.tensor<readwrite:?x?xi32>{%10, %11}
+ %workgroup_size_x = hal.interface.workgroup.size[0] : index
+ %workgroup_size_y = hal.interface.workgroup.size[1] : index
+ %workgroup_id_x = hal.interface.workgroup.id[0] : index
+ %workgroup_count_x = hal.interface.workgroup.count[0] : index
+ %workgroup_id_y = hal.interface.workgroup.id[1] : index
+ %workgroup_count_y = hal.interface.workgroup.count[1] : index
+ %15 = affine.apply affine_map<()[s0, s1] -> (s0 * s1)>()[%workgroup_id_y, %workgroup_size_y]
+ %16 = affine.apply affine_map<()[s0, s1] -> (s0 * s1)>()[%workgroup_count_y, %workgroup_size_y]
+ scf.for %arg0 = %15 to %6 step %16 {
+ %17 = affine.apply affine_map<()[s0, s1] -> (s0 * s1)>()[%workgroup_id_x, %workgroup_size_x]
+ %18 = affine.apply affine_map<()[s0, s1] -> (s0 * s1)>()[%workgroup_count_x, %workgroup_size_x]
+ scf.for %arg1 = %17 to %9 step %18 {
+ %19 = affine.min affine_map<(d0)[s0, s1] -> (s1, -d0 + s0)>(%arg0)[%6, %workgroup_size_y]
+ %20 = flow.dispatch.tensor.load %12, offsets = [%arg0, 0], sizes = [%19, %7], strides = [1, 1] : !flow.dispatch.tensor<readonly:?x?xi8>{%6, %7} -> tensor<?x?xi8>
+ %21 = affine.min affine_map<(d0)[s0, s1] -> (s1, -d0 + s0)>(%arg1)[%9, %workgroup_size_x]
+ %22 = flow.dispatch.tensor.load %13, offsets = [0, %arg1], sizes = [%8, %21], strides = [1, 1] : !flow.dispatch.tensor<readonly:?x?xi8>{%8, %9} -> tensor<?x?xi8>
+ %23 = flow.dispatch.tensor.load %14, offsets = [%arg0, %arg1], sizes = [%19, %21], strides = [1, 1] : !flow.dispatch.tensor<readwrite:?x?xi32>{%10, %11} -> tensor<?x?xi32>
+ %24 = linalg.matmul ins(%20, %22 : tensor<?x?xi8>, tensor<?x?xi8>) outs(%23 : tensor<?x?xi32>) -> tensor<?x?xi32>
+ flow.dispatch.tensor.store %24, %14, offsets = [%arg0, %arg1], sizes = [%19, %21], strides = [1, 1] : tensor<?x?xi32> -> !flow.dispatch.tensor<readwrite:?x?xi32>{%10, %11}
+ }
+ }
+ return
+ }
+ }
+ }
+}
+
+// CHECK-DAG: #[[CONFIG:.+]] = #iree_codegen.lowering.config<tile_sizes = {{\[}}[], [8, 8, 8], [1, 4, 4]{{\]}}, native_vector_size = [1, 4, 4]>
// CHECK-DAG: #[[TRANSLATION:.+]] = #iree_codegen.translation.info<"CPUTileFuseAndVectorize", workload_per_wg = [64, 64]>
-// CHECK-DAG: #[[CONFIG:.+]] = #iree_codegen.lowering.config<tile_sizes = [{{\[}}], [8, 8, 8], [1, 4, 4]], native_vector_size = [1, 4, 4]>
// CHECK: linalg.matmul {lowering.config = #[[CONFIG]]}
diff --git a/iree/compiler/Codegen/LLVMCPU/test/tile_and_vectorize.mlir b/iree/compiler/Codegen/LLVMCPU/test/tile_and_vectorize.mlir
deleted file mode 100644
index 69d2f6f..0000000
--- a/iree/compiler/Codegen/LLVMCPU/test/tile_and_vectorize.mlir
+++ /dev/null
@@ -1,82 +0,0 @@
-// RUN: iree-opt %s -cse -iree-llvmcpu-tile-and-vectorize -cse -canonicalize -split-input-file | FileCheck %s
-
-#config0 = #iree_codegen.lowering.config<tile_sizes = [[64, 64]], native_vector_size = []>
-#config1 = #iree_codegen.lowering.config<tile_sizes = [[64, 64], [32, 32, 32], [4, 4, 4]], native_vector_size = [4, 4, 4]>
-#map0 = affine_map<()[s0] -> (s0 * 64)>
-#map1 = affine_map<(d0) -> (64, -d0 + 383)>
-#map2 = affine_map<(d0) -> (64, -d0 + 513)>
-#map3 = affine_map<(d0) -> (-d0 + 383, 64)>
-#map4 = affine_map<(d0) -> (-d0 + 513, 64)>
-module {
- func @dot_383x383x513_dispatch_0() {
- %c0 = arith.constant 0 : index
- %c513 = arith.constant 513 : index
- %c383 = arith.constant 383 : index
- %cst = arith.constant 0.000000e+00 : f32
- %0 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : !flow.dispatch.tensor<readonly:383x383xf32>
- %1 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : !flow.dispatch.tensor<readonly:383x513xf32>
- %2 = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) : !flow.dispatch.tensor<writeonly:383x513xf32>
- %workgroup_id_x = hal.interface.workgroup.id[0] : index
- %workgroup_count_x = hal.interface.workgroup.count[0] : index
- %workgroup_id_y = hal.interface.workgroup.id[1] : index
- %workgroup_count_y = hal.interface.workgroup.count[1] : index
- %3 = affine.apply #map0()[%workgroup_id_y]
- %4 = affine.apply #map0()[%workgroup_count_y]
- scf.for %arg0 = %3 to %c383 step %4 {
- %5 = affine.apply #map0()[%workgroup_id_x]
- %6 = affine.apply #map0()[%workgroup_count_x]
- scf.for %arg1 = %5 to %c513 step %6 {
- %7 = affine.min #map1(%arg0)
- %8 = flow.dispatch.tensor.load %0, offsets = [%arg0, 0], sizes = [%7, 383], strides = [1, 1] : !flow.dispatch.tensor<readonly:383x383xf32> -> tensor<?x383xf32>
- %9 = affine.min #map2(%arg1)
- %10 = flow.dispatch.tensor.load %1, offsets = [0, %arg1], sizes = [383, %9], strides = [1, 1] : !flow.dispatch.tensor<readonly:383x513xf32> -> tensor<383x?xf32>
- %11 = affine.min #map3(%arg0)
- %12 = affine.min #map4(%arg1)
- %13 = linalg.init_tensor [%11, %12] : tensor<?x?xf32>
- %14 = linalg.fill(%cst, %13) {lowering.config = #config0} : f32, tensor<?x?xf32> -> tensor<?x?xf32>
- %15 = linalg.matmul {lowering.config = #config1} ins(%8, %10 : tensor<?x383xf32>, tensor<383x?xf32>) outs(%14 : tensor<?x?xf32>) -> tensor<?x?xf32>
- flow.dispatch.tensor.store %15, %2, offsets = [%arg0, %arg1], sizes = [%7, %9], strides = [1, 1] : tensor<?x?xf32> -> !flow.dispatch.tensor<writeonly:383x513xf32>
- }
- }
- return
- }
-}
-
-// CHECK: #[[MAP1:.+]] = affine_map<(d0) -> (64, -d0 + 383)>
-// CHECK: #[[MAP2:.+]] = affine_map<(d0) -> (64, -d0 + 513)>
-// CHECK: #[[MAP5:.+]] = affine_map<(d0, d1) -> (32, -d0 + d1)>
-// CHECK: #[[MAP6:.+]] = affine_map<(d0) -> (32, -d0 + 383)>
-// CHECK: @dot_383x383x513_dispatch_0
-// CHECK-DAG: %[[CST:.+]] = arith.constant 0.000000e+00 : f32
-// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
-// CHECK-DAG: %[[C4:.+]] = arith.constant 4 : index
-// CHECK-DAG: %[[C383:.+]] = arith.constant 383 : index
-// CHECK-DAG: %[[C513:.+]] = arith.constant 513 : index
-// CHECK-DAG: %[[C32:.+]] = arith.constant 32 : index
-// CHECK: %[[LHS:.+]] = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) : !flow.dispatch.tensor<readonly:383x383xf32>
-// CHECK: %[[RHS:.+]] = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) : !flow.dispatch.tensor<readonly:383x513xf32>
-// CHECK: %[[DST:.+]] = hal.interface.binding.subspan set(0) binding(2) type(storage_buffer) : !flow.dispatch.tensor<writeonly:383x513xf32>
-// CHECK: scf.for %[[I_WG_IDX:.+]] = {{.*}} to %c383
-// CHECK: scf.for %[[J_WG_IDX:.+]] = {{.*}} to %c513
-// CHECK: %[[LHS_WG_TILE_DIM0:.+]] = affine.min #[[MAP1]](%[[I_WG_IDX]])
-// CHECK: %[[LHS_WG_TILE:.+]] = flow.dispatch.tensor.load %[[LHS]]
-// CHECK: %[[RHS_WG_TILE_DIM1:.+]] = affine.min #[[MAP2]](%[[J_WG_IDX]])
-// CHECK: %[[RHS_WG_TILE:.+]] = flow.dispatch.tensor.load %[[RHS]]
-// CHECK: %[[DST_WG_TILE_INIT:.+]] = linalg.init_tensor
-// CHECK: %[[DST_WG_TILE_INIT_C0:.+]] = linalg.fill(%[[CST]], %[[DST_WG_TILE_INIT]])
-// CHECK: {{.*}} = scf.for %[[L1_I:.+]] = %[[C0]] to %[[LHS_WG_TILE_DIM0]] step %[[C32]] iter_args(%[[DST_WG_TILE_0:.+]] = %[[DST_WG_TILE_INIT_C0]])
-// CHECK: {{.*}} = scf.for %[[L1_J:.+]] = %[[C0]] to %[[RHS_WG_TILE_DIM1]] step %[[C32]] iter_args(%[[DST_WG_TILE_1:.+]] = %[[DST_WG_TILE_0]])
-// CHECK: {{.*}} = scf.for %[[L1_K:.+]] = %[[C0]] to %[[C383]] step %[[C32]] iter_args(%[[DST_WG_TILE_2:.+]] = %[[DST_WG_TILE_1]])
-// CHECK: %[[L2_I_BOUND:.+]] = affine.min #[[MAP5]](%[[L1_I]], %[[LHS_WG_TILE_DIM0]])
-// CHECK: %[[L2_K_BOUND:.+]] = affine.min #[[MAP6]](%[[L1_K]])
-// CHECK: %[[L2_J_BOUND:.+]] = affine.min #[[MAP5]](%[[L1_J]], %[[RHS_WG_TILE_DIM1]])
-// CHECK: %[[DST_L1_TILE:.+]] = tensor.extract_slice %[[DST_WG_TILE_2]]
-// CHECK: {{.*}} = scf.for {{.*}} = %[[C0]] to %[[L2_I_BOUND]] step %[[C4]] iter_args(%[[DST_VEC_TILE_0:.+]] = %[[DST_L1_TILE]])
-// CHECK: {{.*}} = scf.for {{.*}} = %[[C0]] to %[[L2_J_BOUND]] step %[[C4]] iter_args(%[[DST_VEC_TILE_1:.+]] = %[[DST_VEC_TILE_0]])
-// CHECK: {{.*}} = scf.for {{.*}} = %[[C0]] to %[[L2_K_BOUND]] step %[[C4]] iter_args(%[[DST_VEC_TILE_2:.+]] = %[[DST_VEC_TILE_1]])
-// CHECK: %[[LHS_VEC_TILE:.+]] = tensor.extract_slice %[[LHS_WG_TILE]]
-// CHECK: %[[RHS_VEC_TILE:.+]] = tensor.extract_slice %[[RHS_WG_TILE]]
-// CHECK: %[[DST_VEC_TILE:.+]] = tensor.extract_slice %[[DST_VEC_TILE_2]]
-// CHECK: linalg.matmul {__internal_linalg_transform__ = "vectorize"
-// CHECK-SAME: ins(%[[LHS_VEC_TILE]], %[[RHS_VEC_TILE]]
-// CHECK-SAME: outs(%[[DST_VEC_TILE]]
diff --git a/iree/compiler/Codegen/LLVMCPU/test/vector_contract_custom_kernels.mlir b/iree/compiler/Codegen/LLVMCPU/test/vector_contract_to_arm_asm.mlir
similarity index 100%
rename from iree/compiler/Codegen/LLVMCPU/test/vector_contract_custom_kernels.mlir
rename to iree/compiler/Codegen/LLVMCPU/test/vector_contract_to_arm_asm.mlir
diff --git a/iree/compiler/Codegen/LLVMCPU/test/vector_contract_to_arm_intrinsics.mlir b/iree/compiler/Codegen/LLVMCPU/test/vector_contract_to_arm_intrinsics.mlir
new file mode 100644
index 0000000..9193ca9
--- /dev/null
+++ b/iree/compiler/Codegen/LLVMCPU/test/vector_contract_to_arm_intrinsics.mlir
@@ -0,0 +1,136 @@
+// RUN: iree-opt -iree-llvmcpu-vector-contract-custom-kernels='aarch64 dotprod intrinsics' %s | FileCheck %s
+
+// CHECK-LABEL: @vector_i8i8i32matmul(
+// CHECK-SAME: %[[LHS:[^:[:space:]]+]]
+// CHECK-SAME: %[[RHS:[^:[:space:]]+]]
+// CHECK-SAME: %[[ACC:[^:[:space:]]+]]
+// CHECK-DAG: %[[ZERO:.*]] = arith.constant dense<0> : vector<4x4xi8>
+// CHECK-DAG: %[[ACC_ROW_0:.*]] = vector.extract %[[ACC]][0] : vector<8x8xi32>
+// CHECK-DAG: %[[ACC_ROW_1:.*]] = vector.extract %[[ACC]][1] : vector<8x8xi32>
+// CHECK-DAG: %[[ACC_ROW_2:.*]] = vector.extract %[[ACC]][2] : vector<8x8xi32>
+// CHECK-DAG: %[[ACC_ROW_3:.*]] = vector.extract %[[ACC]][3] : vector<8x8xi32>
+// CHECK-DAG: %[[ACC_ROW_4:.*]] = vector.extract %[[ACC]][4] : vector<8x8xi32>
+// CHECK-DAG: %[[ACC_ROW_5:.*]] = vector.extract %[[ACC]][5] : vector<8x8xi32>
+// CHECK-DAG: %[[ACC_ROW_6:.*]] = vector.extract %[[ACC]][6] : vector<8x8xi32>
+// CHECK-DAG: %[[ACC_ROW_7:.*]] = vector.extract %[[ACC]][7] : vector<8x8xi32>
+// CHECK-DAG: %[[ACC_CHUNK_00:.*]] = vector.extract_strided_slice %[[ACC_ROW_0]] {offsets = [0]
+// CHECK-DAG: %[[ACC_CHUNK_01:.*]] = vector.extract_strided_slice %[[ACC_ROW_0]] {offsets = [4]
+// CHECK-DAG: %[[ACC_CHUNK_02:.*]] = vector.extract_strided_slice %[[ACC_ROW_1]] {offsets = [0]
+// CHECK-DAG: %[[ACC_CHUNK_03:.*]] = vector.extract_strided_slice %[[ACC_ROW_1]] {offsets = [4]
+// CHECK-DAG: %[[ACC_CHUNK_04:.*]] = vector.extract_strided_slice %[[ACC_ROW_2]] {offsets = [0]
+// CHECK-DAG: %[[ACC_CHUNK_05:.*]] = vector.extract_strided_slice %[[ACC_ROW_2]] {offsets = [4]
+// CHECK-DAG: %[[ACC_CHUNK_06:.*]] = vector.extract_strided_slice %[[ACC_ROW_3]] {offsets = [0]
+// CHECK-DAG: %[[ACC_CHUNK_07:.*]] = vector.extract_strided_slice %[[ACC_ROW_3]] {offsets = [4]
+// CHECK-DAG: %[[ACC_CHUNK_08:.*]] = vector.extract_strided_slice %[[ACC_ROW_4]] {offsets = [0]
+// CHECK-DAG: %[[ACC_CHUNK_09:.*]] = vector.extract_strided_slice %[[ACC_ROW_4]] {offsets = [4]
+// CHECK-DAG: %[[ACC_CHUNK_10:.*]] = vector.extract_strided_slice %[[ACC_ROW_5]] {offsets = [0]
+// CHECK-DAG: %[[ACC_CHUNK_11:.*]] = vector.extract_strided_slice %[[ACC_ROW_5]] {offsets = [4]
+// CHECK-DAG: %[[ACC_CHUNK_12:.*]] = vector.extract_strided_slice %[[ACC_ROW_6]] {offsets = [0]
+// CHECK-DAG: %[[ACC_CHUNK_13:.*]] = vector.extract_strided_slice %[[ACC_ROW_6]] {offsets = [4]
+// CHECK-DAG: %[[ACC_CHUNK_14:.*]] = vector.extract_strided_slice %[[ACC_ROW_7]] {offsets = [0]
+// CHECK-DAG: %[[ACC_CHUNK_15:.*]] = vector.extract_strided_slice %[[ACC_ROW_7]] {offsets = [4]
+// CHECK-DAG: %[[LHS_HALF_0:.*]] = vector.extract_strided_slice %[[LHS]] {offsets = [0, 0]
+// CHECK-DAG: %[[LHS_HALF_1:.*]] = vector.extract_strided_slice %[[LHS]] {offsets = [4, 0]
+// CHECK-DAG: %[[RHS_HALF_0:.*]] = vector.extract_strided_slice %[[RHS]] {offsets = [0, 0]
+// CHECK-DAG: %[[RHS_HALF_1:.*]] = vector.extract_strided_slice %[[RHS]] {offsets = [4, 0]
+// CHECK-DAG: %[[LHS_CHUNK_00:.*]] = vector.shuffle %[[LHS_HALF_0]], %[[ZERO]] [0, 0, 0, 0]
+// CHECK-DAG: %[[LHS_CHUNK_01:.*]] = vector.shuffle %[[LHS_HALF_0]], %[[ZERO]] [0, 0, 0, 0]
+// CHECK-DAG: %[[LHS_CHUNK_02:.*]] = vector.shuffle %[[LHS_HALF_0]], %[[ZERO]] [1, 1, 1, 1]
+// CHECK-DAG: %[[LHS_CHUNK_03:.*]] = vector.shuffle %[[LHS_HALF_0]], %[[ZERO]] [1, 1, 1, 1]
+// CHECK-DAG: %[[LHS_CHUNK_04:.*]] = vector.shuffle %[[LHS_HALF_0]], %[[ZERO]] [2, 2, 2, 2]
+// CHECK-DAG: %[[LHS_CHUNK_05:.*]] = vector.shuffle %[[LHS_HALF_0]], %[[ZERO]] [2, 2, 2, 2]
+// CHECK-DAG: %[[LHS_CHUNK_06:.*]] = vector.shuffle %[[LHS_HALF_0]], %[[ZERO]] [3, 3, 3, 3]
+// CHECK-DAG: %[[LHS_CHUNK_07:.*]] = vector.shuffle %[[LHS_HALF_0]], %[[ZERO]] [3, 3, 3, 3]
+// CHECK-DAG: %[[LHS_CHUNK_08:.*]] = vector.shuffle %[[LHS_HALF_1]], %[[ZERO]] [0, 0, 0, 0]
+// CHECK-DAG: %[[LHS_CHUNK_09:.*]] = vector.shuffle %[[LHS_HALF_1]], %[[ZERO]] [0, 0, 0, 0]
+// CHECK-DAG: %[[LHS_CHUNK_10:.*]] = vector.shuffle %[[LHS_HALF_1]], %[[ZERO]] [1, 1, 1, 1]
+// CHECK-DAG: %[[LHS_CHUNK_11:.*]] = vector.shuffle %[[LHS_HALF_1]], %[[ZERO]] [1, 1, 1, 1]
+// CHECK-DAG: %[[LHS_CHUNK_12:.*]] = vector.shuffle %[[LHS_HALF_1]], %[[ZERO]] [2, 2, 2, 2]
+// CHECK-DAG: %[[LHS_CHUNK_13:.*]] = vector.shuffle %[[LHS_HALF_1]], %[[ZERO]] [2, 2, 2, 2]
+// CHECK-DAG: %[[LHS_CHUNK_14:.*]] = vector.shuffle %[[LHS_HALF_1]], %[[ZERO]] [3, 3, 3, 3]
+// CHECK-DAG: %[[LHS_CHUNK_15:.*]] = vector.shuffle %[[LHS_HALF_1]], %[[ZERO]] [3, 3, 3, 3]
+// CHECK-DAG: %[[SDOT_CHUNK_00:.*]] = arm_neon.2d.sdot %[[ACC_CHUNK_00]], %[[RHS_HALF_0]], %[[LHS_CHUNK_00]]
+// CHECK-DAG: %[[SDOT_CHUNK_01:.*]] = arm_neon.2d.sdot %[[ACC_CHUNK_01]], %[[RHS_HALF_1]], %[[LHS_CHUNK_01]]
+// CHECK-DAG: %[[SDOT_CHUNK_02:.*]] = arm_neon.2d.sdot %[[ACC_CHUNK_02]], %[[RHS_HALF_0]], %[[LHS_CHUNK_02]]
+// CHECK-DAG: %[[SDOT_CHUNK_03:.*]] = arm_neon.2d.sdot %[[ACC_CHUNK_03]], %[[RHS_HALF_1]], %[[LHS_CHUNK_03]]
+// CHECK-DAG: %[[SDOT_CHUNK_04:.*]] = arm_neon.2d.sdot %[[ACC_CHUNK_04]], %[[RHS_HALF_0]], %[[LHS_CHUNK_04]]
+// CHECK-DAG: %[[SDOT_CHUNK_05:.*]] = arm_neon.2d.sdot %[[ACC_CHUNK_05]], %[[RHS_HALF_1]], %[[LHS_CHUNK_05]]
+// CHECK-DAG: %[[SDOT_CHUNK_06:.*]] = arm_neon.2d.sdot %[[ACC_CHUNK_06]], %[[RHS_HALF_0]], %[[LHS_CHUNK_06]]
+// CHECK-DAG: %[[SDOT_CHUNK_07:.*]] = arm_neon.2d.sdot %[[ACC_CHUNK_07]], %[[RHS_HALF_1]], %[[LHS_CHUNK_07]]
+// CHECK-DAG: %[[SDOT_CHUNK_08:.*]] = arm_neon.2d.sdot %[[ACC_CHUNK_08]], %[[RHS_HALF_0]], %[[LHS_CHUNK_08]]
+// CHECK-DAG: %[[SDOT_CHUNK_09:.*]] = arm_neon.2d.sdot %[[ACC_CHUNK_09]], %[[RHS_HALF_1]], %[[LHS_CHUNK_09]]
+// CHECK-DAG: %[[SDOT_CHUNK_10:.*]] = arm_neon.2d.sdot %[[ACC_CHUNK_10]], %[[RHS_HALF_0]], %[[LHS_CHUNK_10]]
+// CHECK-DAG: %[[SDOT_CHUNK_11:.*]] = arm_neon.2d.sdot %[[ACC_CHUNK_11]], %[[RHS_HALF_1]], %[[LHS_CHUNK_11]]
+// CHECK-DAG: %[[SDOT_CHUNK_12:.*]] = arm_neon.2d.sdot %[[ACC_CHUNK_12]], %[[RHS_HALF_0]], %[[LHS_CHUNK_12]]
+// CHECK-DAG: %[[SDOT_CHUNK_13:.*]] = arm_neon.2d.sdot %[[ACC_CHUNK_13]], %[[RHS_HALF_1]], %[[LHS_CHUNK_13]]
+// CHECK-DAG: %[[SDOT_CHUNK_14:.*]] = arm_neon.2d.sdot %[[ACC_CHUNK_14]], %[[RHS_HALF_0]], %[[LHS_CHUNK_14]]
+// CHECK-DAG: %[[SDOT_CHUNK_15:.*]] = arm_neon.2d.sdot %[[ACC_CHUNK_15]], %[[RHS_HALF_1]], %[[LHS_CHUNK_15]]
+// CHECK-DAG: %[[RES_00:.*]] = vector.insert_strided_slice %[[SDOT_CHUNK_00]], %[[ACC]] {offsets = [0, 0]
+// CHECK-DAG: %[[RES_01:.*]] = vector.insert_strided_slice %[[SDOT_CHUNK_01]], %[[RES_00]] {offsets = [0, 4]
+// CHECK-DAG: %[[RES_02:.*]] = vector.insert_strided_slice %[[SDOT_CHUNK_02]], %[[RES_01]] {offsets = [1, 0]
+// CHECK-DAG: %[[RES_03:.*]] = vector.insert_strided_slice %[[SDOT_CHUNK_03]], %[[RES_02]] {offsets = [1, 4]
+// CHECK-DAG: %[[RES_04:.*]] = vector.insert_strided_slice %[[SDOT_CHUNK_04]], %[[RES_03]] {offsets = [2, 0]
+// CHECK-DAG: %[[RES_05:.*]] = vector.insert_strided_slice %[[SDOT_CHUNK_05]], %[[RES_04]] {offsets = [2, 4]
+// CHECK-DAG: %[[RES_06:.*]] = vector.insert_strided_slice %[[SDOT_CHUNK_06]], %[[RES_05]] {offsets = [3, 0]
+// CHECK-DAG: %[[RES_07:.*]] = vector.insert_strided_slice %[[SDOT_CHUNK_07]], %[[RES_06]] {offsets = [3, 4]
+// CHECK-DAG: %[[RES_08:.*]] = vector.insert_strided_slice %[[SDOT_CHUNK_08]], %[[RES_07]] {offsets = [4, 0]
+// CHECK-DAG: %[[RES_09:.*]] = vector.insert_strided_slice %[[SDOT_CHUNK_09]], %[[RES_08]] {offsets = [4, 4]
+// CHECK-DAG: %[[RES_10:.*]] = vector.insert_strided_slice %[[SDOT_CHUNK_10]], %[[RES_09]] {offsets = [5, 0]
+// CHECK-DAG: %[[RES_11:.*]] = vector.insert_strided_slice %[[SDOT_CHUNK_11]], %[[RES_10]] {offsets = [5, 4]
+// CHECK-DAG: %[[RES_12:.*]] = vector.insert_strided_slice %[[SDOT_CHUNK_12]], %[[RES_11]] {offsets = [6, 0]
+// CHECK-DAG: %[[RES_13:.*]] = vector.insert_strided_slice %[[SDOT_CHUNK_13]], %[[RES_12]] {offsets = [6, 4]
+// CHECK-DAG: %[[RES_14:.*]] = vector.insert_strided_slice %[[SDOT_CHUNK_14]], %[[RES_13]] {offsets = [7, 0]
+// CHECK-DAG: %[[RES_15:.*]] = vector.insert_strided_slice %[[SDOT_CHUNK_15]], %[[RES_14]] {offsets = [7, 4]
+// CHECK: return %[[RES_15]]
+func @vector_i8i8i32matmul(
+ %lhs: vector<8x4xi8>,
+ %rhs: vector<8x4xi8>,
+ %acc: vector<8x8xi32>) -> vector<8x8xi32> {
+ %lhs_wide = arith.extsi %lhs : vector<8x4xi8> to vector<8x4xi32>
+ %rhs_wide = arith.extsi %rhs : vector<8x4xi8> to vector<8x4xi32>
+ %res = vector.contract {
+ indexing_maps = [
+ affine_map<(d0, d1, d2) -> (d0, d2)>,
+ affine_map<(d0, d1, d2) -> (d1, d2)>,
+ affine_map<(d0, d1, d2) -> (d0, d1)>
+ ], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>
+ } %lhs_wide, %rhs_wide, %acc : vector<8x4xi32>, vector<8x4xi32> into vector<8x8xi32>
+ return %res : vector<8x8xi32>
+}
+
+// -----
+
+// CHECK-LABEL: @vector_f32f32f32matmul(
+func @vector_f32f32f32matmul(
+ %lhs: vector<8x4xf32>,
+ %rhs: vector<8x4xf32>,
+ %acc: vector<8x8xf32>) -> vector<8x8xf32> {
+ // CHECK: vector.contract
+ %res = vector.contract {
+ indexing_maps = [
+ affine_map<(d0, d1, d2) -> (d0, d2)>,
+ affine_map<(d0, d1, d2) -> (d1, d2)>,
+ affine_map<(d0, d1, d2) -> (d0, d1)>
+ ], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>
+ } %lhs, %rhs, %acc : vector<8x4xf32>, vector<8x4xf32> into vector<8x8xf32>
+ return %res : vector<8x8xf32>
+}
+
+
+// -----
+
+// CHECK-LABEL: @vector_i32i32i32matmul(
+func @vector_i32i32i32matmul(
+ %lhs: vector<8x4xi32>,
+ %rhs: vector<8x4xi32>,
+ %acc: vector<8x8xi32>) -> vector<8x8xi32> {
+ // CHECK: vector.contract
+ %res = vector.contract {
+ indexing_maps = [
+ affine_map<(d0, d1, d2) -> (d0, d2)>,
+ affine_map<(d0, d1, d2) -> (d1, d2)>,
+ affine_map<(d0, d1, d2) -> (d0, d1)>
+ ], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>
+ } %lhs, %rhs, %acc : vector<8x4xi32>, vector<8x4xi32> into vector<8x8xi32>
+ return %res : vector<8x8xi32>
+}
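
A minimal usage sketch for the pass exercised above (assuming only the option names declared in Passes.td later in this diff; the input file name is illustrative): the RUN line selects the new intrinsics-based kernels, while omitting `intrinsics` keeps its default of false and selects the existing inline-asm kernels:

  iree-opt -iree-llvmcpu-vector-contract-custom-kernels='aarch64 dotprod' input.mlir
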
diff --git a/iree/compiler/Codegen/LLVMGPU/BUILD b/iree/compiler/Codegen/LLVMGPU/BUILD
index ed5ad74..2d5293c 100644
--- a/iree/compiler/Codegen/LLVMGPU/BUILD
+++ b/iree/compiler/Codegen/LLVMGPU/BUILD
@@ -66,6 +66,7 @@
"@llvm-project//mlir:ROCDLDialect",
"@llvm-project//mlir:SCFDialect",
"@llvm-project//mlir:SCFToStandard",
+ "@llvm-project//mlir:SCFTransforms",
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:StandardOpsTransforms",
"@llvm-project//mlir:Support",
diff --git a/iree/compiler/Codegen/LLVMGPU/CMakeLists.txt b/iree/compiler/Codegen/LLVMGPU/CMakeLists.txt
index b1c09d2..dfdc0cd 100644
--- a/iree/compiler/Codegen/LLVMGPU/CMakeLists.txt
+++ b/iree/compiler/Codegen/LLVMGPU/CMakeLists.txt
@@ -56,6 +56,7 @@
MLIRROCDLIR
MLIRSCF
MLIRSCFToStandard
+ MLIRSCFTransforms
MLIRStandard
MLIRStandardOpsTransforms
MLIRStandardToLLVM
diff --git a/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.cpp b/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.cpp
index 1f60620..a8e77bd 100644
--- a/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.cpp
+++ b/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.cpp
@@ -104,7 +104,7 @@
registry.insert<vector::VectorDialect>();
}
void runOnOperation() override {
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
populateScalarizeMathOps(patterns);
populateConvertSharedMemoryAllocOps(patterns);
if (failed(applyPatternsAndFoldGreedily(getOperation(),
@@ -349,7 +349,7 @@
} // anonymous namespace
void populateLLVMConversionPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
LLVMTypeConverter &converter) {
patterns
.insert<ConvertFunc, ConvertIREEBindingSubspanOp, ConvertIREEConstantOp>(
diff --git a/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.h b/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.h
index ba99be7..de231b5 100644
--- a/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.h
+++ b/iree/compiler/Codegen/LLVMGPU/ConvertToLLVM.h
@@ -13,7 +13,7 @@
namespace iree_compiler {
void populateLLVMConversionPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
LLVMTypeConverter &converter);
void populateScalarizeMathOps(RewritePatternSet &patterns);
diff --git a/iree/compiler/Codegen/LLVMGPU/ConvertToNVVM.cpp b/iree/compiler/Codegen/LLVMGPU/ConvertToNVVM.cpp
index 74536fa..fb38835 100644
--- a/iree/compiler/Codegen/LLVMGPU/ConvertToNVVM.cpp
+++ b/iree/compiler/Codegen/LLVMGPU/ConvertToNVVM.cpp
@@ -55,7 +55,7 @@
// conversion pass.
// Run Vector -> Vector transformations ahead of conversion to LLVM.
{
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
populateScalarizeMathOps(patterns);
populateConvertSharedMemoryAllocOps(patterns);
populateLowerHALInterfaceOp(patterns);
@@ -74,14 +74,14 @@
}
}
{
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
populateGpuRewritePatterns(patterns);
if (failed(applyPatternsAndFoldGreedily(m, std::move(patterns)))) {
return signalPassFailure();
}
}
{
- OwningRewritePatternList llvmPatterns(&getContext());
+ RewritePatternSet llvmPatterns(&getContext());
populateLLVMConversionPatterns(&getContext(), llvmPatterns, converter);
populateMathToLLVMConversionPatterns(converter, llvmPatterns);
populateMemRefToLLVMConversionPatterns(converter, llvmPatterns);
diff --git a/iree/compiler/Codegen/LLVMGPU/ConvertToROCDL.cpp b/iree/compiler/Codegen/LLVMGPU/ConvertToROCDL.cpp
index 9d1b8c1..8555f81 100644
--- a/iree/compiler/Codegen/LLVMGPU/ConvertToROCDL.cpp
+++ b/iree/compiler/Codegen/LLVMGPU/ConvertToROCDL.cpp
@@ -51,7 +51,7 @@
// conversion pass.
// Run Vector -> Vector transformations ahead of conversion to LLVM.
{
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
populateScalarizeMathOps(patterns);
populateConvertSharedMemoryAllocOps(patterns);
populateLowerHALInterfaceOp(patterns);
@@ -70,14 +70,14 @@
}
}
{
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
populateGpuRewritePatterns(patterns);
if (failed(applyPatternsAndFoldGreedily(m, std::move(patterns)))) {
return signalPassFailure();
}
}
{
- OwningRewritePatternList llvmPatterns(&getContext());
+ RewritePatternSet llvmPatterns(&getContext());
populateLLVMConversionPatterns(&getContext(), llvmPatterns, converter);
populateMathToLLVMConversionPatterns(converter, llvmPatterns);
populateMemRefToLLVMConversionPatterns(converter, llvmPatterns);
diff --git a/iree/compiler/Codegen/LLVMGPU/LLVMGPUDistributeSharedMemoryCopy.cpp b/iree/compiler/Codegen/LLVMGPU/LLVMGPUDistributeSharedMemoryCopy.cpp
index 350fa36..04f6db5 100644
--- a/iree/compiler/Codegen/LLVMGPU/LLVMGPUDistributeSharedMemoryCopy.cpp
+++ b/iree/compiler/Codegen/LLVMGPU/LLVMGPUDistributeSharedMemoryCopy.cpp
@@ -33,7 +33,7 @@
/// part of the launch config but needs to be distributed on the workgroup
/// picked by the root op.
static void populateTilingCopyToWorkgroupMemPatterns(
- OwningRewritePatternList &patterns, ArrayRef<int64_t> workgroupSize) {
+ RewritePatternSet &patterns, ArrayRef<int64_t> workgroupSize) {
// Tile and distribute copy to workgroup memory.
linalg::TileSizeComputationFunction wgCopyTileSizeFn =
[](OpBuilder &builder, Operation *operation) {
@@ -263,7 +263,7 @@
// well aligned on the number of threads.
// TODO(thomasraoux): Handle this case with padding instead so that we get
// good performance for more complex shapes.
- OwningRewritePatternList threadLevelTilingPatterns(context);
+ RewritePatternSet threadLevelTilingPatterns(context);
populateTilingCopyToWorkgroupMemPatterns(threadLevelTilingPatterns,
workgroupSize);
if (failed(applyPatternsAndFoldGreedily(
diff --git a/iree/compiler/Codegen/LLVMGPU/LLVMGPUTileAndDistribute.cpp b/iree/compiler/Codegen/LLVMGPU/LLVMGPUTileAndDistribute.cpp
index 051447c..d021f7b 100644
--- a/iree/compiler/Codegen/LLVMGPU/LLVMGPUTileAndDistribute.cpp
+++ b/iree/compiler/Codegen/LLVMGPU/LLVMGPUTileAndDistribute.cpp
@@ -32,8 +32,7 @@
/// Patterns for workgroup level tiling. Workgroup tiling is done at the flow
/// level but we may have extra tiling for the reduction dimension. Therefore we
/// tile again without distributing.
-static void populateTilingReductionPatterns(
- OwningRewritePatternList &patterns) {
+static void populateTilingReductionPatterns(RewritePatternSet &patterns) {
auto tileSizesFn = [&](OpBuilder &builder,
Operation *op) -> SmallVector<Value, 4> {
SmallVector<unsigned> partitionedLoops = getPartitionedLoops(op);
@@ -62,7 +61,7 @@
/// Patterns for warp level tiling.
static void populateTilingToWarpPatterns(
- OwningRewritePatternList &patterns, SmallVectorImpl<int64_t> &workgroupSize,
+ RewritePatternSet &patterns, SmallVectorImpl<int64_t> &workgroupSize,
SmallVectorImpl<int64_t> &workloadPerWorkgroup) {
std::array<int64_t, 3> warpPerWorkgroup = {
workgroupSize[0] / kWarpSize, workgroupSize[1], workgroupSize[2]};
@@ -121,7 +120,7 @@
/// Patterns for thread level tiling.
static void populateTilingToInvocationPatterns(
- OwningRewritePatternList &patterns, SmallVectorImpl<int64_t> &workgroupSize,
+ RewritePatternSet &patterns, SmallVectorImpl<int64_t> &workgroupSize,
SmallVectorImpl<int64_t> &workloadPerWorkgroup) {
linalg::TileSizeComputationFunction getInnerTileSizeFn =
[&](OpBuilder &builder, Operation *operation) {
@@ -238,7 +237,7 @@
}
static void populatePromotionPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
patterns.insert<linalg::LinalgPromotionPattern<linalg::MatmulOp>,
linalg::LinalgPromotionPattern<linalg::BatchMatmulOp>>(
context,
@@ -274,7 +273,7 @@
// Tile again at the workgroup level since reduction dimensions were
// ignored. Dimensions already tiled will be ignored since we tile to the
// same size.
- OwningRewritePatternList wgTilingPatterns(context);
+ RewritePatternSet wgTilingPatterns(context);
populateTilingReductionPatterns(wgTilingPatterns);
if (failed(applyPatternsAndFoldGreedily(funcOp,
std::move(wgTilingPatterns)))) {
@@ -308,7 +307,7 @@
workgroupSize[0] * workgroupSize[1] * workgroupSize[2];
// Only promote to workgroup size if there are multiple warps.
if (flatWorkgroupSize > kWarpSize) {
- OwningRewritePatternList promotionPatterns(&getContext());
+ RewritePatternSet promotionPatterns(&getContext());
populatePromotionPatterns(context, promotionPatterns);
if (failed(applyPatternsAndFoldGreedily(funcOp,
std::move(promotionPatterns)))) {
@@ -349,7 +348,7 @@
if (distributeToWarp) {
// Apply last level of tiling and distribute to warps.
- OwningRewritePatternList warpLevelTilingPatterns(context);
+ RewritePatternSet warpLevelTilingPatterns(context);
populateTilingToWarpPatterns(warpLevelTilingPatterns, workgroupSize,
workloadPerWorkgroup);
if (failed(applyPatternsAndFoldGreedily(
@@ -359,7 +358,7 @@
} else {
// Apply last level of tiling and distribute to threads.
- OwningRewritePatternList threadLevelTilingPatterns(context);
+ RewritePatternSet threadLevelTilingPatterns(context);
populateTilingToInvocationPatterns(threadLevelTilingPatterns,
workgroupSize, workloadPerWorkgroup);
if (failed(applyPatternsAndFoldGreedily(
diff --git a/iree/compiler/Codegen/LLVMGPU/Passes.cpp b/iree/compiler/Codegen/LLVMGPU/Passes.cpp
index cda7243..c54e673 100644
--- a/iree/compiler/Codegen/LLVMGPU/Passes.cpp
+++ b/iree/compiler/Codegen/LLVMGPU/Passes.cpp
@@ -149,8 +149,11 @@
pm.addNestedPass<FuncOp>(createCanonicalizerPass());
pm.addNestedPass<FuncOp>(createCSEPass());
+  // math dialect elementary functions -> polynomial form.
+ pm.addNestedPass<FuncOp>(createPolynomialApproximationPass());
+
pm.addNestedPass<FuncOp>(arith::createArithmeticExpandOpsPass());
- pm.addNestedPass<FuncOp>(createStdExpandOpsPass());
+ pm.addNestedPass<FuncOp>(memref::createExpandOpsPass());
pm.addPass(createLowerAffinePass());
// Strip out the debug info for the kernel as the CUDA driver doesn't digest PTX
@@ -166,6 +169,8 @@
}
void buildLLVMGPUTransformPassPipeline(OpPassManager &pm, bool useROCM) {
+ pm.nest<ModuleOp>().nest<FuncOp>().addPass(createTypePropagationPass());
+
OpPassManager &bufferizePassPM = pm.nest<ModuleOp>();
addLinalgBufferizePasses(bufferizePassPM, gpuAllocationFunction);
pm.addPass(createLLVMGPULowerExecutableTargetPass());
diff --git a/iree/compiler/Codegen/Passes.cpp b/iree/compiler/Codegen/Passes.cpp
index 7563f89..648dda1 100644
--- a/iree/compiler/Codegen/Passes.cpp
+++ b/iree/compiler/Codegen/Passes.cpp
@@ -59,9 +59,9 @@
IREE::Codegen::TranslationInfoAttr translationInfo,
ArrayRef<int64_t> workgroupSize) {
switch (translationInfo.getDispatchLoweringPassPipeline()) {
- case IREE::Codegen::DispatchLoweringPassPipeline::CPUTensorToVectors:
- return verifyTensorToVectorsPassPipelineConfig(op, loweringConfig,
- translationInfo);
+ case IREE::Codegen::DispatchLoweringPassPipeline::CPUTileFuseAndVectorize:
+ return verifyDoubleTilingExpertPassPipelineConfig(op, loweringConfig,
+ translationInfo);
default:
break;
}
diff --git a/iree/compiler/Codegen/Passes.h b/iree/compiler/Codegen/Passes.h
index 4f28828..9da0bba 100644
--- a/iree/compiler/Codegen/Passes.h
+++ b/iree/compiler/Codegen/Passes.h
@@ -114,11 +114,19 @@
/// Pass to optimize vector transfer_read and transfer_write.
std::unique_ptr<OperationPass<FuncOp>> createOptimizeVectorTransferPass();
+/// Pass to propagate types to avoid generating load/stores of illegal types.
+std::unique_ptr<OperationPass<FuncOp>> createTypePropagationPass();
+
/// Sets the number of workgroups to use for each entry point in the dispatch
/// region.
std::unique_ptr<OperationPass<IREE::HAL::ExecutableVariantOp>>
createSetNumWorkgroupsPass(ArrayRef<int64_t> workgroupSize = {});
+/// Pass to optimize vector transfer_read and transfer_write.
+std::unique_ptr<OperationPass<FuncOp>> createOptimizeVectorTransferPass();
+
+/// Pass to convert math operations to their polynomial approximation.
+std::unique_ptr<OperationPass<>> createPolynomialApproximationPass();
//----------------------------------------------------------------------------//
// Common codegen patterns.
//----------------------------------------------------------------------------//
@@ -132,12 +140,12 @@
/// linalg.conv op for a single thread. The linalg.conv should compute on
/// static-sized subviews. To match, output shape must be 1x1xWoxCo, where
/// Co is a multiple of 4, and filter shape must be 1x1x4xCo.
-void populateLinalgToVectorVectorizeConvPatterns(
- MLIRContext *context, OwningRewritePatternList &patterns);
+void populateLinalgToVectorVectorizeConvPatterns(MLIRContext *context,
+ RewritePatternSet &patterns);
/// Populates `patterns` to convert linalg.mmt4d to vector.contract.
-void populateLinalgToVectorVectorizeMMT4dPatterns(
- MLIRContext *context, OwningRewritePatternList &patterns);
+void populateLinalgToVectorVectorizeMMT4dPatterns(MLIRContext *context,
+ RewritePatternSet &patterns);
//------------------------------------------------------------------------------
// LLVMCPU
@@ -160,18 +168,10 @@
std::unique_ptr<OperationPass<ModuleOp>>
createLLVMCPUSynchronizeSymbolVisibilityPass();
-/// Multi-level tiling and vectorization of linalg ops on tensors.
-std::unique_ptr<OperationPass<FuncOp>> createLLVMCPUTileAndVectorizePass(
- bool lowerToVectors = true);
-
/// Multi-level tiling, fusing and vectorization of linalg ops on tensors.
std::unique_ptr<OperationPass<FuncOp>> createLLVMCPUTileFuseAndVectorizePass(
bool lowerToVectors = true);
-/// Vectorizes linalg ops executed in the same hal.interface.workgroup.
-std::unique_ptr<OperationPass<FuncOp>> createLLVMCPUVectorizationPass(
- bool lowerToVectors = true);
-
/// Replaces llvm.intr.fma with its unfused mul and add ops.
std::unique_ptr<OperationPass<FuncOp>> createLLVMCPUUnfuseFMAOpsPass();
@@ -200,6 +200,8 @@
bool aarch64 = false;
// Under aarch64: indicates dot-product extension (SDOT, UDOT)
bool dotprod = false;
+ // Indicates that intrinsics should be used rather than inline asm
+ bool intrinsics = false;
};
// Populate target_info fields from the parent HAL::ExecutableVariantOp.
@@ -209,11 +211,10 @@
/// Populates `patterns` to convert certain vector.contract ops to special
/// "kernels" written either in SIMD intrinsics or inline assembly.
void populateVectorContractCustomKernelsPatterns(
- const CustomKernelsTargetInfo &target_info,
- OwningRewritePatternList &patterns);
+ const CustomKernelsTargetInfo &target_info, RewritePatternSet &patterns);
void populateUnfusedFMAOpsPassPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
//----------------------------------------------------------------------------//
// LLVMCPU backend Pass Pipelines.
@@ -239,6 +240,10 @@
/// Populates the passes needed to do two-level tile + vectorize of linalg ops
/// using the Codegen drivers from sandbox.
+LogicalResult verifyDoubleTilingExpertPassPipelineConfig(
+ Operation *op, IREE::Codegen::LoweringConfigAttr loweringConfig,
+ IREE::Codegen::TranslationInfoAttr translationInfo,
+ ArrayRef<int64_t> workgroupSize = {});
void addDoubleTilingExpertPassPipeline(OpPassManager &passManager);
/// Populates the passes needed to multi level tile, fuse and vectorize lowering
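
A minimal sketch of how a pass might drive these patterns with the new `intrinsics` field, assuming only the declarations shown in this header; the function op and greedy-rewrite boilerplate are illustrative:

  // Request the aarch64 dotprod kernels, implemented with SIMD intrinsics
  // rather than inline asm.
  CustomKernelsTargetInfo targetInfo;
  targetInfo.aarch64 = true;
  targetInfo.dotprod = true;
  targetInfo.intrinsics = true;
  RewritePatternSet patterns(funcOp.getContext());
  populateVectorContractCustomKernelsPatterns(targetInfo, patterns);
  if (failed(applyPatternsAndFoldGreedily(funcOp, std::move(patterns))))
    return signalPassFailure();
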
diff --git a/iree/compiler/Codegen/Passes.td b/iree/compiler/Codegen/Passes.td
index 9d67ecd..7b90474 100644
--- a/iree/compiler/Codegen/Passes.td
+++ b/iree/compiler/Codegen/Passes.td
@@ -78,6 +78,12 @@
let constructor = "mlir::iree_compiler::createOptimizeVectorTransferPass()";
}
+def TypePropagation :
+ Pass<"iree-codegen-type-propagation", "FuncOp"> {
+ let summary = "Propogate the type of tensor to avoid load/stores of illegal bit widths";
+ let constructor = "mlir::iree_compiler::createTypePropagationPass()";
+}
+
def RemoveSingleIterationLoop :
Pass<"iree-codegen-remove-single-iteration-loop", "FuncOp"> {
let summary = "Remove distributed loop with single iteration.";
@@ -108,6 +114,13 @@
let constructor =
"mlir::iree_compiler::createLinalgToVectorVectorizeMMT4dPass()";
}
+
+def PolynomialApproximationPass :
+ Pass<"iree-codegen-polynomial-approximation", ""> {
+ let summary = "Convert math operations to their polynomial approximation";
+ let constructor =
+ "mlir::iree_compiler::createPolynomialApproximationPass()";
+}
//------------------------------------------------------------------------------
// LLVMCPU
//------------------------------------------------------------------------------
@@ -140,12 +153,6 @@
let constructor = "mlir::iree_compiler::createLLVMCPUSynchronizeSymbolVisibilityPass()";
}
-def LLVMCPUTileAndVectorize
- : Pass<"iree-llvmcpu-tile-and-vectorize", "FuncOp"> {
- let summary = "Tile and vectorize workgroups tiles";
- let constructor = "mlir::iree_compiler::createLLVMCPUTileAndVectorizePass()";
-}
-
def LLVMCPUTileFuseAndVectorize
: Pass<"iree-llvmcpu-tile-fuse-and-vectorize", "FuncOp"> {
let summary = "Tile, fuse and vectorize Linalg ops";
@@ -170,6 +177,9 @@
Option<"dotprod", "dotprod", "bool",
/*default=*/"false",
"Under aarch64, enable kernels that use dotprod instructions">,
+ Option<"intrinsics", "intrinsics", "bool",
+ /*default=*/"false",
+ "Under aarch64, enable kernels that use dotprod instructions">,
];
}
diff --git a/iree/compiler/Codegen/SPIRV/BUILD b/iree/compiler/Codegen/SPIRV/BUILD
index e0a2bd5..dc081d7 100644
--- a/iree/compiler/Codegen/SPIRV/BUILD
+++ b/iree/compiler/Codegen/SPIRV/BUILD
@@ -72,6 +72,7 @@
"@llvm-project//mlir:SCFDialect",
"@llvm-project//mlir:SCFToGPUPass",
"@llvm-project//mlir:SCFToSPIRV",
+ "@llvm-project//mlir:SCFTransforms",
"@llvm-project//mlir:SPIRVConversion",
"@llvm-project//mlir:SPIRVDialect",
"@llvm-project//mlir:SPIRVTransforms",
diff --git a/iree/compiler/Codegen/SPIRV/CMakeLists.txt b/iree/compiler/Codegen/SPIRV/CMakeLists.txt
index 3928c3f..c8420cc 100644
--- a/iree/compiler/Codegen/SPIRV/CMakeLists.txt
+++ b/iree/compiler/Codegen/SPIRV/CMakeLists.txt
@@ -61,6 +61,7 @@
MLIRSCF
MLIRSCFToGPU
MLIRSCFToSPIRV
+ MLIRSCFTransforms
MLIRSPIRV
MLIRSPIRVConversion
MLIRSPIRVTransforms
diff --git a/iree/compiler/Codegen/SPIRV/ConvertToSPIRVPass.cpp b/iree/compiler/Codegen/SPIRV/ConvertToSPIRVPass.cpp
index 3f49b74..cfb57c9 100644
--- a/iree/compiler/Codegen/SPIRV/ConvertToSPIRVPass.cpp
+++ b/iree/compiler/Codegen/SPIRV/ConvertToSPIRVPass.cpp
@@ -325,7 +325,7 @@
spirv::TargetEnvAttr targetAttr = getSPIRVTargetEnvAttr(moduleOp);
moduleOp->setAttr(spirv::getTargetEnvAttrName(), targetAttr);
SPIRVTypeConverter typeConverter(targetAttr);
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
ScfToSPIRVContext scfToSPIRVContext;
// Pull in GPU patterns to convert processor ID ops and loop ops.
diff --git a/iree/compiler/Codegen/SPIRV/Passes.cpp b/iree/compiler/Codegen/SPIRV/Passes.cpp
index 3a3b07b..b7aa1bd 100644
--- a/iree/compiler/Codegen/SPIRV/Passes.cpp
+++ b/iree/compiler/Codegen/SPIRV/Passes.cpp
@@ -84,12 +84,15 @@
pm.addPass(createCanonicalizerPass());
pm.addPass(createCSEPass());
+  // math dialect elementary functions -> polynomial form.
+ pm.addNestedPass<FuncOp>(createPolynomialApproximationPass());
+
// Fold load/store from/to subview ops into the original memref when possible.
// In SPIR-V we don't use memref descriptor so it's not possible to handle
// subview ops.
pm.addPass(memref::createFoldSubViewOpsPass());
pm.addNestedPass<FuncOp>(arith::createArithmeticExpandOpsPass());
- pm.addNestedPass<FuncOp>(createStdExpandOpsPass());
+ pm.addNestedPass<FuncOp>(memref::createExpandOpsPass());
pm.addPass(createCanonicalizerPass());
pm.addPass(createCSEPass());
@@ -224,6 +227,7 @@
//===----------------------------------------------------------------------===//
void buildSPIRVCodegenPassPipeline(OpPassManager &pm) {
+ pm.nest<ModuleOp>().nest<FuncOp>().addPass(createTypePropagationPass());
pm.addPass(createSPIRVLowerExecutableTargetPass());
addMemRefLoweringPasses(pm.nest<ModuleOp>());
addSPIRVLoweringPasses(pm.nest<ModuleOp>());
diff --git a/iree/compiler/Codegen/SPIRV/SPIRVCopyToWorkgroupMemory.cpp b/iree/compiler/Codegen/SPIRV/SPIRVCopyToWorkgroupMemory.cpp
index 427b148..56b801f 100644
--- a/iree/compiler/Codegen/SPIRV/SPIRVCopyToWorkgroupMemory.cpp
+++ b/iree/compiler/Codegen/SPIRV/SPIRVCopyToWorkgroupMemory.cpp
@@ -276,7 +276,7 @@
});
target->markUnknownOpDynamicallyLegal([](Operation *) { return true; });
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
// TODO(antiagainst): Re-enable vectorizing workgroup memory copy once the
// whole pipeline is in a better state.
// patterns.add<TileAndDistributeCopyOp>(context);
@@ -296,7 +296,7 @@
}
// 3. Vectorize the tiled linalg to be able to map it to load/store vector.
- OwningRewritePatternList vectorizationPatterns(&getContext());
+ RewritePatternSet vectorizationPatterns(&getContext());
linalg::VectorizationPatterns<linalg::CopyOp>::insert(
vectorizationPatterns, linalg::LinalgVectorizationOptions(),
linalg::LinalgTransformationFilter(
diff --git a/iree/compiler/Codegen/Sandbox/LinalgTensorCodegenDriver.cpp b/iree/compiler/Codegen/Sandbox/LinalgTensorCodegenDriver.cpp
index a3abe64..78c111e 100644
--- a/iree/compiler/Codegen/Sandbox/LinalgTensorCodegenDriver.cpp
+++ b/iree/compiler/Codegen/Sandbox/LinalgTensorCodegenDriver.cpp
@@ -85,6 +85,18 @@
this->vectorize.setValue(vectorize);
}
LinalgFusePass(const LinalgFusePass &pass) {}
+ LinalgFusePass(const LinalgFusePassOptions &options) {
+ this->anchorFuncOpName = options.anchorFuncOpName;
+ this->anchorOpName = options.anchorOpName;
+ this->tileSizes = options.tileSizes;
+ this->tileInterchange = options.tileInterchange;
+ this->pad = options.pad;
+ this->packPaddings = options.packPaddings;
+ this->hoistPaddings = options.hoistPaddings;
+ this->vectorize = options.vectorize;
+ this->vectorizePadding = options.vectorizePadding;
+ this->tilingLevel = options.tilingLevel;
+ }
void runOnOperation() override;
};
@@ -343,6 +355,10 @@
std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgFusePass() {
return std::make_unique<LinalgFusePass>();
}
+std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgFusePass(
+ const mlir::LinalgFusePassOptions &options) {
+ return std::make_unique<LinalgFusePass>(options);
+}
std::unique_ptr<OperationPass<FuncOp>>
mlir::createLinalgSingleTilingExpertPass() {
diff --git a/iree/compiler/Codegen/Sandbox/Passes.h b/iree/compiler/Codegen/Sandbox/Passes.h
index e0a04ee..2bcf1ff 100644
--- a/iree/compiler/Codegen/Sandbox/Passes.h
+++ b/iree/compiler/Codegen/Sandbox/Passes.h
@@ -11,8 +11,24 @@
namespace mlir {
+/// Struct to control pass options for `LinalgFuse` pass.
+struct LinalgFusePassOptions {
+ std::string anchorFuncOpName = "";
+ std::string anchorOpName = "";
+ SmallVector<int64_t> tileSizes = {};
+ SmallVector<int64_t> tileInterchange = {};
+ bool pad = false;
+ SmallVector<int64_t> packPaddings = {};
+ SmallVector<int64_t> hoistPaddings = {};
+ bool vectorize = false;
+ bool vectorizePadding = false;
+ int64_t tilingLevel = -1;
+};
+
/// Creates a pass to drive tile + fuse transformations of `LinalgOp`s.
std::unique_ptr<OperationPass<FuncOp>> createLinalgFusePass();
+std::unique_ptr<OperationPass<FuncOp>> createLinalgFusePass(
+ const LinalgFusePassOptions &options);
/// Struct to control pass options for `LinalgSingleTilingExpert` pass.
struct LinalgSingleTilingExpertPassOptions {
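
A minimal sketch of the new options-based overload added above, assuming the LinalgFusePassOptions fields as declared; the anchor op name and tile sizes are illustrative values only:

  mlir::LinalgFusePassOptions options;
  options.anchorOpName = "linalg.matmul";
  options.tileSizes.assign({8, 32, 16});
  options.vectorize = true;
  std::unique_ptr<mlir::OperationPass<mlir::FuncOp>> pass =
      mlir::createLinalgFusePass(options);
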
diff --git a/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/ConvertTensorToFlow.cpp b/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/ConvertTensorToFlow.cpp
index 436c02f..06c424a 100644
--- a/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/ConvertTensorToFlow.cpp
+++ b/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/ConvertTensorToFlow.cpp
@@ -322,7 +322,7 @@
} // namespace
void populateTensorToFlowPatternsBeforeDispatchFormation(
- MLIRContext *context, OwningRewritePatternList &patterns) {
+ MLIRContext *context, RewritePatternSet &patterns) {
patterns
.insert<ConvertTensorInsertSlicePattern, ConvertTensorExtractSlicePattern,
ConvertTensorCastPattern, ConvertTensorFromElementsPattern>(
@@ -330,7 +330,7 @@
}
void populateTensorToFlowPatternsAfterDispatchFormation(
- MLIRContext *context, OwningRewritePatternList &patterns) {
+ MLIRContext *context, RewritePatternSet &patterns) {
patterns.insert<ConvertTensorExtractPattern>(context);
}
diff --git a/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/ConvertTensorToFlow.h b/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/ConvertTensorToFlow.h
index 8a56667..c3dafe7 100644
--- a/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/ConvertTensorToFlow.h
+++ b/iree/compiler/Dialect/Flow/Conversion/TensorToFlow/ConvertTensorToFlow.h
@@ -17,11 +17,11 @@
// Adds patterns for Tensor->Flow, for running before dispatch region formation.
void populateTensorToFlowPatternsBeforeDispatchFormation(
- MLIRContext *context, OwningRewritePatternList &patterns);
+ MLIRContext *context, RewritePatternSet &patterns);
// Adds patterns for Tensor->Flow, for running after dispatch region formation.
void populateTensorToFlowPatternsAfterDispatchFormation(
- MLIRContext *context, OwningRewritePatternList &patterns);
+ MLIRContext *context, RewritePatternSet &patterns);
} // namespace Flow
} // namespace IREE
diff --git a/iree/compiler/Dialect/Flow/IR/FlowOpFolders.cpp b/iree/compiler/Dialect/Flow/IR/FlowOpFolders.cpp
index 2916c62..cdb1a2d 100644
--- a/iree/compiler/Dialect/Flow/IR/FlowOpFolders.cpp
+++ b/iree/compiler/Dialect/Flow/IR/FlowOpFolders.cpp
@@ -86,7 +86,7 @@
//===----------------------------------------------------------------------===//
void DispatchWorkgroupsOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<IREE::Util::ClosureOptimizationPattern<DispatchWorkgroupsOp>>(
context);
}
@@ -218,7 +218,7 @@
} // namespace
void DispatchTensorLoadOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<ReuseDispatchTensorLoadShapeDims>(context);
results.insert<ConvertDispatchInputLoadOfTensorToSubTensor>(context);
results.insert<
@@ -274,7 +274,7 @@
} // namespace
void DispatchTensorStoreOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<ReuseDispatchTensorStoreShapeDims>(context);
results.insert<FoldCastOpIntoDispatchStoreOp>(context);
}
@@ -378,8 +378,8 @@
} // namespace
-void TensorConstantOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TensorConstantOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<ExpandDynamicShapeConstant>(context);
}
@@ -501,15 +501,15 @@
} // namespace
-void TensorReshapeOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TensorReshapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<FlattenTensorReshapeChain>(context);
results.insert<ResolveShapedRank>(context);
results.insert<ResolveShapedDim>(context);
}
-void TensorLoadOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TensorLoadOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<FoldSplatLoadIntoPrimitive>(context);
}
@@ -550,8 +550,8 @@
return {};
}
-void TensorSplatOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TensorSplatOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): canonicalize splat+slice to smaller splat.
results.insert<FoldSplatReshapeIntoSplat>(context);
}
@@ -724,8 +724,8 @@
} // namespace
-void TensorUpdateOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TensorUpdateOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<FoldTensorUpdateOpWithCasts>(context);
}
diff --git a/iree/compiler/Dialect/Flow/IR/FlowOps.cpp b/iree/compiler/Dialect/Flow/IR/FlowOps.cpp
index 2d56706..391109d 100644
--- a/iree/compiler/Dialect/Flow/IR/FlowOps.cpp
+++ b/iree/compiler/Dialect/Flow/IR/FlowOps.cpp
@@ -841,8 +841,8 @@
// Public methods
//===----------------------------------------------------------------------===//
-void populateFlowDispatchCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void populateFlowDispatchCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
DispatchTensorLoadOp::getCanonicalizationPatterns(results, context);
}
diff --git a/iree/compiler/Dialect/Flow/IR/FlowOps.h b/iree/compiler/Dialect/Flow/IR/FlowOps.h
index 889e92d..47381de 100644
--- a/iree/compiler/Dialect/Flow/IR/FlowOps.h
+++ b/iree/compiler/Dialect/Flow/IR/FlowOps.h
@@ -35,7 +35,7 @@
// Populates flow.dispatch.* canonicalization patterns.
void populateFlowDispatchCanonicalizationPatterns(
- ::mlir::OwningRewritePatternList &results, ::mlir::MLIRContext *context);
+ ::mlir::RewritePatternSet &results, ::mlir::MLIRContext *context);
} // namespace Flow
} // namespace IREE
diff --git a/iree/compiler/Dialect/Flow/Transforms/BUILD b/iree/compiler/Dialect/Flow/Transforms/BUILD
index 5ffc6c6..61a1be6 100644
--- a/iree/compiler/Dialect/Flow/Transforms/BUILD
+++ b/iree/compiler/Dialect/Flow/Transforms/BUILD
@@ -51,7 +51,6 @@
"PadTensorToSubTensorInsert.cpp",
"PassDetail.h",
"Passes.cpp",
- "PromoteI1ToI8Pass.cpp",
"StripAndSplatConstantVariables.cpp",
"StripSignednessPass.cpp",
"TestPartitionableLoopsInterface.cpp",
diff --git a/iree/compiler/Dialect/Flow/Transforms/CMakeLists.txt b/iree/compiler/Dialect/Flow/Transforms/CMakeLists.txt
index 27d08e1..374e4f2 100644
--- a/iree/compiler/Dialect/Flow/Transforms/CMakeLists.txt
+++ b/iree/compiler/Dialect/Flow/Transforms/CMakeLists.txt
@@ -48,7 +48,6 @@
"PadTensorToSubTensorInsert.cpp"
"PassDetail.h"
"Passes.cpp"
- "PromoteI1ToI8Pass.cpp"
"StripAndSplatConstantVariables.cpp"
"StripSignednessPass.cpp"
"TestPartitionableLoopsInterface.cpp"
diff --git a/iree/compiler/Dialect/Flow/Transforms/ConvertConv2D1x1ToMatmulPass.cpp b/iree/compiler/Dialect/Flow/Transforms/ConvertConv2D1x1ToMatmulPass.cpp
index 557e9f6..f7fabed 100644
--- a/iree/compiler/Dialect/Flow/Transforms/ConvertConv2D1x1ToMatmulPass.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/ConvertConv2D1x1ToMatmulPass.cpp
@@ -100,7 +100,7 @@
void runOnOperation() override {
MLIRContext *context = &getContext();
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
patterns.insert<Convert1x1ConvolutionMatmulOp>(context);
if (failed(applyPatternsAndFoldGreedily(getOperation(),
std::move(patterns)))) {
diff --git a/iree/compiler/Dialect/Flow/Transforms/ConvertConv2DToImg2ColPass.cpp b/iree/compiler/Dialect/Flow/Transforms/ConvertConv2DToImg2ColPass.cpp
index 1458ed1..1ef96b1 100644
--- a/iree/compiler/Dialect/Flow/Transforms/ConvertConv2DToImg2ColPass.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/ConvertConv2DToImg2ColPass.cpp
@@ -344,7 +344,7 @@
}
void runOnOperation() override {
MLIRContext *context = &getContext();
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
patterns.insert<Conv2DImg2ColMatmulConversion,
DepthwiseConv2DNHWCHWCImg2ColMatmulConversion>(context);
if (failed(applyPatternsAndFoldGreedily(getOperation(),
diff --git a/iree/compiler/Dialect/Flow/Transforms/ConvertLinalgMatmulToMmt4D.cpp b/iree/compiler/Dialect/Flow/Transforms/ConvertLinalgMatmulToMmt4D.cpp
index 70d865c..a10e778 100644
--- a/iree/compiler/Dialect/Flow/Transforms/ConvertLinalgMatmulToMmt4D.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/ConvertLinalgMatmulToMmt4D.cpp
@@ -359,7 +359,7 @@
MLIRContext *context = &getContext();
// Main pattern.
{
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
patterns.insert<LinalgMatmulOpToLinalgMmt4DOpPattern>(context, M0, K0,
N0);
if (failed(applyPatternsAndFoldGreedily(getOperation(),
@@ -369,7 +369,7 @@
}
// Canonicalization.
{
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
tensor::ExpandShapeOp::getCanonicalizationPatterns(patterns, context);
linalg::InitTensorOp::getCanonicalizationPatterns(patterns, context);
linalg::FillOp::getCanonicalizationPatterns(patterns, context);
diff --git a/iree/compiler/Dialect/Flow/Transforms/DestructiveUpdateUtils.cpp b/iree/compiler/Dialect/Flow/Transforms/DestructiveUpdateUtils.cpp
index 16b3027..974557c 100644
--- a/iree/compiler/Dialect/Flow/Transforms/DestructiveUpdateUtils.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/DestructiveUpdateUtils.cpp
@@ -367,7 +367,7 @@
// Non-default canonicalization patterns.
// TODO(nicolasvasilache): add Linalg tiling canonicalization patterns,
// affineminscf and others as needed.
- OwningRewritePatternList canonicalizationPatterns(context);
+ RewritePatternSet canonicalizationPatterns(context);
scf::ForOp::getCanonicalizationPatterns(canonicalizationPatterns, context);
return applyPatternsAndFoldGreedily(dispatchOp,
std::move(canonicalizationPatterns));
diff --git a/iree/compiler/Dialect/Flow/Transforms/DispatchLinalgOnTensors.cpp b/iree/compiler/Dialect/Flow/Transforms/DispatchLinalgOnTensors.cpp
index 66baaa5..3c857b7 100644
--- a/iree/compiler/Dialect/Flow/Transforms/DispatchLinalgOnTensors.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/DispatchLinalgOnTensors.cpp
@@ -1075,7 +1075,7 @@
// Create the dispatch region, first without the isolated-from-above
// property.
{
- OwningRewritePatternList patterns(context);
+ RewritePatternSet patterns(context);
auto linalgTilingOptions =
linalg::LinalgTilingOptions()
.setDistributionOptions(workgroupDistributionOptions)
@@ -1091,7 +1091,7 @@
// Run canonicalization patterns and patterns to resolve tensor.dim of result
// values into tensor.dim of its operands.
- OwningRewritePatternList canonicalizationPatterns(context);
+ RewritePatternSet canonicalizationPatterns(context);
linalg::populateLinalgTilingCanonicalizationPatterns(
canonicalizationPatterns);
if (failed(applyPatternsAndFoldGreedily(
@@ -1108,7 +1108,7 @@
// Run necessary canonicalization patterns before rewriting destructive updates.
{
- OwningRewritePatternList patterns(context);
+ RewritePatternSet patterns(context);
// Resolve `tensor.dim` of result of operations into operations on its
// operands using the `ReifyRankedShapedTypeOpInterface`.
memref::populateResolveRankedShapeTypeResultDimsPatterns(patterns);
diff --git a/iree/compiler/Dialect/Flow/Transforms/FusionOfTensorOps.cpp b/iree/compiler/Dialect/Flow/Transforms/FusionOfTensorOps.cpp
index cfa8eb2..79fc28e 100644
--- a/iree/compiler/Dialect/Flow/Transforms/FusionOfTensorOps.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/FusionOfTensorOps.cpp
@@ -42,8 +42,8 @@
}
void runOnOperation() override {
- OwningRewritePatternList fusionPatterns(&getContext());
- OwningRewritePatternList interfacePatterns(&getContext());
+ RewritePatternSet fusionPatterns(&getContext());
+ RewritePatternSet interfacePatterns(&getContext());
Operation *op = getOperation();
MLIRContext *context = op->getContext();
@@ -99,7 +99,16 @@
// simplistic heuristic to avoid duplicating ops that may be
// expensive.
// TODO: Add a cost model to allow ops to be duplicated.
+ bool hasI1ReturnType =
+ llvm::any_of(producer->getResultTypes(), [](Type t) {
+ if (t.isInteger(1)) return true;
+ if (auto shapedType = t.dyn_cast<ShapedType>()) {
+ if (shapedType.getElementType().isInteger(1)) return true;
+ }
+ return false;
+ });
if (!isBroadcast && !isa<arith::ConstantOp>(producer) &&
+ !hasI1ReturnType &&
!llvm::hasSingleElement(producerResult.getUsers())) {
return false;
}
@@ -134,7 +143,7 @@
return signalPassFailure();
}
- OwningRewritePatternList reshapeCanonicalizations(&getContext());
+ RewritePatternSet reshapeCanonicalizations(&getContext());
linalg::populateFoldUnitDimsReshapeOpsByLinearizationPatterns(
reshapeCanonicalizations);
tensor::CollapseShapeOp::getCanonicalizationPatterns(
@@ -151,7 +160,7 @@
}
// Push the remaining reshapes down the graphs.
- OwningRewritePatternList pushReshapePatterns(&getContext());
+ RewritePatternSet pushReshapePatterns(&getContext());
linalg::populatePushReshapeOpsPatterns(pushReshapePatterns);
tensor::CollapseShapeOp::getCanonicalizationPatterns(pushReshapePatterns,
context);
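The new hasI1ReturnType guard above blocks duplication of producers whose results are i1, either as scalars or as the element type of a shaped type. Restated as a standalone predicate under the same Type APIs (a sketch, not code from this change):

static bool producesI1(Operation *op) {
  return llvm::any_of(op->getResultTypes(), [](Type t) {
    if (t.isInteger(1)) return true;
    if (auto shapedType = t.dyn_cast<ShapedType>())
      return shapedType.getElementType().isInteger(1);
    return false;
  });
}

For example, producesI1 is true for an op returning tensor<4xi1> and false for one returning tensor<4xi8>.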
diff --git a/iree/compiler/Dialect/Flow/Transforms/InterchangeGenericOps.cpp b/iree/compiler/Dialect/Flow/Transforms/InterchangeGenericOps.cpp
index 55b10cb..3f0966d 100644
--- a/iree/compiler/Dialect/Flow/Transforms/InterchangeGenericOps.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/InterchangeGenericOps.cpp
@@ -58,7 +58,7 @@
}
void runOnOperation() override {
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
patterns.add<GenericOpInterchangePattern>(&getContext());
if (failed(applyPatternsAndFoldGreedily(getOperation(),
std::move(patterns)))) {
diff --git a/iree/compiler/Dialect/Flow/Transforms/PadLinalgOps.cpp b/iree/compiler/Dialect/Flow/Transforms/PadLinalgOps.cpp
index df48652..7acc7e3 100644
--- a/iree/compiler/Dialect/Flow/Transforms/PadLinalgOps.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/PadLinalgOps.cpp
@@ -139,7 +139,7 @@
}
void runOnOperation() override {
MLIRContext *context = &getContext();
- OwningRewritePatternList patterns(context);
+ RewritePatternSet patterns(context);
patterns.insert<PadMatmulOp>(context, paddingSize);
if (failed(applyPatternsAndFoldGreedily(getOperation(),
std::move(patterns)))) {
diff --git a/iree/compiler/Dialect/Flow/Transforms/Passes.h b/iree/compiler/Dialect/Flow/Transforms/Passes.h
index 773f547..4fcb2dd 100644
--- a/iree/compiler/Dialect/Flow/Transforms/Passes.h
+++ b/iree/compiler/Dialect/Flow/Transforms/Passes.h
@@ -102,9 +102,6 @@
// iree-flow-infer-numeric-narrowing.
std::unique_ptr<Pass> createOptimizeNumericsPass();
-// Promote I1 tensor constants to I8 tensors to match later operations.
-std::unique_ptr<OperationPass<mlir::FuncOp>> createPromoteI1ToI8Pass();
-
// Strips the signed/unsigned portion off of tensors.
std::unique_ptr<OperationPass<mlir::FuncOp>> createStripSignednessPass();
diff --git a/iree/compiler/Dialect/Flow/Transforms/Passes.td b/iree/compiler/Dialect/Flow/Transforms/Passes.td
index afd9a16..2e8dc08 100644
--- a/iree/compiler/Dialect/Flow/Transforms/Passes.td
+++ b/iree/compiler/Dialect/Flow/Transforms/Passes.td
@@ -124,12 +124,6 @@
let constructor = "mlir::iree_compiler::IREE::Flow::createPadTensorToSubTensorInsertPass()";
}
-def PromoteI1ToI8 :
- Pass<"iree-flow-promote-i1-to-i8", "mlir::FuncOp"> {
- let summary = "Legalizes i1 tensor constants to i8s";
- let constructor = "mlir::iree_compiler::IREE::Flow::createPromoteI1ToI8Pass()";
-}
-
def StripSignedness :
Pass<"iree-flow-strip-signedness", "mlir::FuncOp"> {
let summary = "Legalizes ui tensors constants to uis";
diff --git a/iree/compiler/Dialect/Flow/Transforms/PromoteI1ToI8Pass.cpp b/iree/compiler/Dialect/Flow/Transforms/PromoteI1ToI8Pass.cpp
deleted file mode 100644
index b7c2952..0000000
--- a/iree/compiler/Dialect/Flow/Transforms/PromoteI1ToI8Pass.cpp
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2021 The IREE Authors
-//
-// Licensed under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-#include "iree/compiler/Dialect/Flow/Transforms/PassDetail.h"
-#include "iree/compiler/Dialect/Flow/Transforms/Passes.h"
-#include "mlir/Dialect/Linalg/IR/Linalg.h"
-#include "mlir/Dialect/StandardOps/IR/Ops.h"
-#include "mlir/IR/Matchers.h"
-#include "mlir/IR/PatternMatch.h"
-#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
-
-namespace mlir {
-namespace iree_compiler {
-namespace IREE {
-namespace Flow {
-
-namespace {
-
-// Legalizes boolean (i1) constants to i8 with a linalg.generic operation
-// downcasting to i1. This occurs as IREE does not currently support tightly
-// packing and unpacking i1 buffers.
-class ConvertBoolConstantPattern
- : public OpRewritePattern<mlir::arith::ConstantOp> {
- public:
- using OpRewritePattern<mlir::arith::ConstantOp>::OpRewritePattern;
-
- LogicalResult matchAndRewrite(mlir::arith::ConstantOp op,
- PatternRewriter &rewriter) const override {
- Location loc = op.getLoc();
- auto resultTy = op.getType().dyn_cast<ShapedType>();
-
- if (!resultTy) return failure();
-
- auto eTy = resultTy.getElementType();
- if (!eTy.isInteger(1)) return failure();
-
- // Constant is never used, ignore.
- if (op.getResult().use_empty()) return failure();
-
- DenseIntElementsAttr attr = op.getValue().dyn_cast<DenseIntElementsAttr>();
- if (!attr) return failure();
-
- // Create a new ConstantOp that contains the same values as an int8.
- auto newConst = rewriter.createOrFold<arith::ConstantOp>(
- loc, attr.mapValues(rewriter.getIntegerType(8),
- [&](APInt src) { return src.zext(8); }));
-
- // We need to move the insertion to just before its first use case. This is
- // needed as it is possible we are reusing an existing ConstantOp
- // containing the same values that occurs in a future line. Moving to the
- // first use case avoids declaring out of order operations.
- Operation *firstUser = *op.getResult().getUsers().begin();
- for (auto checkOp : op.getResult().getUsers()) {
- if (checkOp->isBeforeInBlock(firstUser)) {
- firstUser = checkOp;
- }
- }
- rewriter.setInsertionPoint(firstUser);
-
- auto initTensor = rewriter.create<linalg::InitTensorOp>(
- loc, ArrayRef<Value>({}), resultTy.getShape(),
- resultTy.getElementType());
-
- SmallVector<AffineMap, 2> indexingMaps = {
- rewriter.getMultiDimIdentityMap(resultTy.getRank()),
- rewriter.getMultiDimIdentityMap(resultTy.getRank())};
-
- // Insert a generic op that Truncates the new i8 values to i1 for use as
- // the original value.
- Value genericOp =
- rewriter
- .create<linalg::GenericOp>(
- loc, TypeRange({resultTy}), ValueRange({newConst}),
- ValueRange({initTensor}), indexingMaps,
- SmallVector<StringRef>(resultTy.getRank(),
- getParallelIteratorTypeName()),
- [&](OpBuilder &nestedBuilder, Location nestedLoc,
- ValueRange blockArgs) {
- auto cast = rewriter.create<arith::TruncIOp>(
- nestedLoc, rewriter.getIntegerType(1), blockArgs[0]);
- rewriter.create<linalg::YieldOp>(nestedLoc,
- cast->getResult(0));
- })
- ->getResult(0);
-
- rewriter.replaceOp(op, genericOp);
- return success();
- }
-};
-
-} // namespace
-
-class PromoteI1ToI8Pass : public PromoteI1ToI8Base<PromoteI1ToI8Pass> {
- public:
- void getDependentDialects(DialectRegistry ®istry) const override {
- registry.insert<linalg::LinalgDialect, mlir::StandardOpsDialect,
- mlir::math::MathDialect, mlir::arith::ArithmeticDialect>();
- }
-
- void runOnOperation() override {
- OwningRewritePatternList patterns(&getContext());
- patterns.insert<ConvertBoolConstantPattern>(&getContext());
- if (failed(applyPatternsAndFoldGreedily(getOperation(),
- std::move(patterns)))) {
- return signalPassFailure();
- }
- }
-};
-
-std::unique_ptr<OperationPass<mlir::FuncOp>> createPromoteI1ToI8Pass() {
- return std::make_unique<PromoteI1ToI8Pass>();
-}
-
-} // namespace Flow
-} // namespace IREE
-} // namespace iree_compiler
-} // namespace mlir
diff --git a/iree/compiler/Dialect/Flow/Transforms/PromoteTensorLoads.cpp b/iree/compiler/Dialect/Flow/Transforms/PromoteTensorLoads.cpp
index 1114875..6cafd04 100644
--- a/iree/compiler/Dialect/Flow/Transforms/PromoteTensorLoads.cpp
+++ b/iree/compiler/Dialect/Flow/Transforms/PromoteTensorLoads.cpp
@@ -56,8 +56,8 @@
conversionTarget.addIllegalOp<tensor::ExtractOp>();
}
-void populateStandardToFlowTensorLoadPatterns(
- MLIRContext *context, OwningRewritePatternList &patterns) {
+void populateStandardToFlowTensorLoadPatterns(MLIRContext *context,
+ RewritePatternSet &patterns) {
patterns.insert<ExtractElementOpLowering>(context);
}
@@ -75,7 +75,7 @@
void runOnOperation() override {
auto *context = &getContext();
ConversionTarget conversionTarget(*context);
- OwningRewritePatternList conversionPatterns(&getContext());
+ RewritePatternSet conversionPatterns(&getContext());
conversionTarget.addLegalDialect<IREE::Flow::FlowDialect>();
conversionTarget
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/BUILD b/iree/compiler/Dialect/Flow/Transforms/test/BUILD
index 7405dbb..78534bd 100644
--- a/iree/compiler/Dialect/Flow/Transforms/test/BUILD
+++ b/iree/compiler/Dialect/Flow/Transforms/test/BUILD
@@ -36,7 +36,6 @@
"outline_dispatch_regions.mlir",
"pad_linalg_ops.mlir",
"pad_tensor_to_tensor.mlir",
- "promote_i1_to_i8.mlir",
"strip_and_splat_constant_variables.mlir",
"strip_signedness.mlir",
"test_partitionable_loops_interface.mlir",
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/CMakeLists.txt b/iree/compiler/Dialect/Flow/Transforms/test/CMakeLists.txt
index ef0811b..4b55cf5 100644
--- a/iree/compiler/Dialect/Flow/Transforms/test/CMakeLists.txt
+++ b/iree/compiler/Dialect/Flow/Transforms/test/CMakeLists.txt
@@ -33,7 +33,6 @@
"outline_dispatch_regions.mlir"
"pad_linalg_ops.mlir"
"pad_tensor_to_tensor.mlir"
- "promote_i1_to_i8.mlir"
"strip_and_splat_constant_variables.mlir"
"strip_signedness.mlir"
"test_partitionable_loops_interface.mlir"
diff --git a/iree/compiler/Dialect/Flow/Transforms/test/promote_i1_to_i8.mlir b/iree/compiler/Dialect/Flow/Transforms/test/promote_i1_to_i8.mlir
deleted file mode 100644
index bac28c0..0000000
--- a/iree/compiler/Dialect/Flow/Transforms/test/promote_i1_to_i8.mlir
+++ /dev/null
@@ -1,35 +0,0 @@
-
-// RUN: iree-opt -split-input-file -pass-pipeline='builtin.func(iree-flow-promote-i1-to-i8)' %s | FileCheck %s
-
-// CHECK: #[[$MAP:.+]] = affine_map<(d0) -> (d0)>
-
-// CHECK-LABEL: boolean_const
-func @boolean_const() -> (tensor<4xi1>) {
- // CHECK: [[CONST:%.+]] = arith.constant dense<[1, 1, 0, 1]> : tensor<4xi8>
- // CHECK: [[INIT:%.+]] = linalg.init_tensor [4] : tensor<4xi1>
- // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP]], #[[$MAP]]], iterator_types = ["parallel"]} ins([[CONST]] : tensor<4xi8>) outs([[INIT]] : tensor<4xi1>)
- // CHECK: ^bb0(%arg0: i8, %arg1: i1):
- // CHECK: [[TRUNC:%.+]] = arith.trunci %arg0 : i8 to i1
- // CHECK: linalg.yield [[TRUNC]]
- // CHECK: return [[GENERIC]]
- %0 = arith.constant dense<[true, true, false, true]> : tensor<4xi1>
- return %0 : tensor<4xi1>
-}
-
-// -----
-
-// CHECK: #[[$MAP:.+]] = affine_map<(d0) -> (d0)>
-
-// CHECK-LABEL: boolean_const
-func @boolean_const() -> (tensor<4xi1>, tensor<4xi8>) {
- // CHECK: [[CONST:%.+]] = arith.constant dense<[1, 1, 0, 1]> : tensor<4xi8>
- // CHECK: [[INIT:%.+]] = linalg.init_tensor [4] : tensor<4xi1>
- // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP]], #[[$MAP]]], iterator_types = ["parallel"]} ins([[CONST]] : tensor<4xi8>) outs([[INIT]] : tensor<4xi1>)
- // CHECK: ^bb0(%arg0: i8, %arg1: i1):
- // CHECK: [[TRUNC:%.+]] = arith.trunci %arg0 : i8 to i1
- // CHECK: linalg.yield [[TRUNC]]
- // CHECK: return [[GENERIC]], [[CONST]]
- %0 = arith.constant dense<[true, true, false, true]> : tensor<4xi1>
- %1 = arith.constant dense<[1, 1, 0, 1]> : tensor<4xi8>
- return %0, %1 : tensor<4xi1>, tensor<4xi8>
-}
diff --git a/iree/compiler/Dialect/HAL/Conversion/ConversionDialectInterface.h b/iree/compiler/Dialect/HAL/Conversion/ConversionDialectInterface.h
index 8334b2f..ffb7dff 100644
--- a/iree/compiler/Dialect/HAL/Conversion/ConversionDialectInterface.h
+++ b/iree/compiler/Dialect/HAL/Conversion/ConversionDialectInterface.h
@@ -32,7 +32,7 @@
// |target| must have newly legal and illegal ops/dialects specified to ensure
// the conversion takes place.
virtual void setupConversionTarget(ConversionTarget &target,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter) const = 0;
// Converts a type.
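For reference, a dialect hooks into this interface roughly as the Check dialect does later in this change. The sketch below uses placeholder names (MyConversionInterface, MyTensorOp, and populateMyToHALPatterns are illustrative, not part of this change):

struct MyConversionInterface : public HALConversionDialectInterface {
  using HALConversionDialectInterface::HALConversionDialectInterface;

  void setupConversionTarget(ConversionTarget &target,
                             RewritePatternSet &patterns,
                             TypeConverter &typeConverter) const override {
    // Mark the ops that must be rewritten as illegal, then register the
    // patterns that lower them.
    target.addIllegalOp<MyTensorOp>();
    populateMyToHALPatterns(getDialect()->getContext(), patterns,
                            typeConverter);
  }
};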
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertAllocatorOps.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertAllocatorOps.cpp
index 69a48d9..b1a023e 100644
--- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertAllocatorOps.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertAllocatorOps.cpp
@@ -89,7 +89,7 @@
void populateHALAllocatorToVMPatterns(MLIRContext *context,
SymbolTable &importSymbols,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
patterns.insert<VMImportOpConversion<IREE::HAL::AllocatorAllocateOp>>(
context, importSymbols, typeConverter, "hal.allocator.allocate");
patterns.insert<AllocatorMapOpConversion>(typeConverter, context,
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertBufferOps.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertBufferOps.cpp
index 494d041..1b7a323 100644
--- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertBufferOps.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertBufferOps.cpp
@@ -172,7 +172,7 @@
void populateHALBufferToVMPatterns(MLIRContext *context,
SymbolTable &importSymbols,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
patterns.insert<VMImportOpConversion<IREE::HAL::BufferAssertOp>>(
context, importSymbols, typeConverter, "hal.buffer.assert");
patterns.insert<VMImportOpConversion<IREE::HAL::BufferSubspanOp>>(
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertBufferViewOps.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertBufferViewOps.cpp
index cb645f7..17bebcf 100644
--- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertBufferViewOps.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertBufferViewOps.cpp
@@ -14,7 +14,7 @@
void populateHALBufferViewToVMPatterns(MLIRContext *context,
SymbolTable &importSymbols,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
patterns.insert<VMImportOpConversion<IREE::HAL::BufferViewCreateOp>>(
context, importSymbols, typeConverter, "hal.buffer_view.create");
patterns.insert<VMImportOpConversion<IREE::HAL::BufferViewAssertOp>>(
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertCommandBufferOps.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertCommandBufferOps.cpp
index e5a2263..ee5f318 100644
--- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertCommandBufferOps.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertCommandBufferOps.cpp
@@ -121,7 +121,7 @@
void populateHALCommandBufferToVMPatterns(MLIRContext *context,
SymbolTable &importSymbols,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
patterns.insert<VMImportOpConversion<IREE::HAL::CommandBufferCreateOp>>(
context, importSymbols, typeConverter, "hal.command_buffer.create");
patterns.insert<VMImportOpConversion<IREE::HAL::CommandBufferBeginOp>>(
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertDeviceOps.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertDeviceOps.cpp
index 6a8ca83..7e0ed6d 100644
--- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertDeviceOps.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertDeviceOps.cpp
@@ -111,7 +111,7 @@
void populateHALDeviceToVMPatterns(MLIRContext *context,
SymbolTable &importSymbols,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
patterns.insert<VMImportOpConversion<IREE::HAL::DeviceAllocatorOp>>(
context, importSymbols, typeConverter, "hal.device.allocator");
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertExecutableOps.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertExecutableOps.cpp
index 2b84d56..66a1169 100644
--- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertExecutableOps.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertExecutableOps.cpp
@@ -113,7 +113,7 @@
void populateHALExecutableToVMPatterns(MLIRContext *context,
SymbolTable &importSymbols,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
// hal.executables are not needed after conversion as we extract their
// contents during conversion of the ops that use them.
patterns.insert<RemoveExecutableOpConversion>(context);
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertExperimentalOps.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertExperimentalOps.cpp
index fc55f43..08609db 100644
--- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertExperimentalOps.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertExperimentalOps.cpp
@@ -14,7 +14,7 @@
void populateHALExperimentalToVMPatterns(MLIRContext *context,
SymbolTable &importSymbols,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
patterns.insert<VMImportOpConversion<IREE::HAL::ExSharedDeviceOp>>(
context, importSymbols, typeConverter, "hal.ex.shared_device");
patterns.insert<VMImportOpConversion<IREE::HAL::ExSubmitAndWaitOp>>(
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.cpp
index 3f406a7..5e6cc1d 100644
--- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.cpp
@@ -28,35 +28,41 @@
namespace mlir {
namespace iree_compiler {
-extern void populateHALAllocatorToVMPatterns(
- MLIRContext *context, SymbolTable &importSymbols,
- TypeConverter &typeConverter, OwningRewritePatternList &patterns);
+extern void populateHALAllocatorToVMPatterns(MLIRContext *context,
+ SymbolTable &importSymbols,
+ TypeConverter &typeConverter,
+ RewritePatternSet &patterns);
extern void populateHALBufferToVMPatterns(MLIRContext *context,
SymbolTable &importSymbols,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
-extern void populateHALBufferViewToVMPatterns(
- MLIRContext *context, SymbolTable &importSymbols,
- TypeConverter &typeConverter, OwningRewritePatternList &patterns);
-extern void populateHALCommandBufferToVMPatterns(
- MLIRContext *context, SymbolTable &importSymbols,
- TypeConverter &typeConverter, OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
+extern void populateHALBufferViewToVMPatterns(MLIRContext *context,
+ SymbolTable &importSymbols,
+ TypeConverter &typeConverter,
+ RewritePatternSet &patterns);
+extern void populateHALCommandBufferToVMPatterns(MLIRContext *context,
+ SymbolTable &importSymbols,
+ TypeConverter &typeConverter,
+ RewritePatternSet &patterns);
extern void populateHALDeviceToVMPatterns(MLIRContext *context,
SymbolTable &importSymbols,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
-extern void populateHALExecutableToVMPatterns(
- MLIRContext *context, SymbolTable &importSymbols,
- TypeConverter &typeConverter, OwningRewritePatternList &patterns);
-extern void populateHALExperimentalToVMPatterns(
- MLIRContext *context, SymbolTable &importSymbols,
- TypeConverter &typeConverter, OwningRewritePatternList &patterns);
-extern void populateHALSemaphoreToVMPatterns(
- MLIRContext *context, SymbolTable &importSymbols,
- TypeConverter &typeConverter, OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
+extern void populateHALExecutableToVMPatterns(MLIRContext *context,
+ SymbolTable &importSymbols,
+ TypeConverter &typeConverter,
+ RewritePatternSet &patterns);
+extern void populateHALExperimentalToVMPatterns(MLIRContext *context,
+ SymbolTable &importSymbols,
+ TypeConverter &typeConverter,
+ RewritePatternSet &patterns);
+extern void populateHALSemaphoreToVMPatterns(MLIRContext *context,
+ SymbolTable &importSymbols,
+ TypeConverter &typeConverter,
+ RewritePatternSet &patterns);
void populateHALToVMPatterns(MLIRContext *context, SymbolTable &importSymbols,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter) {
populateHALAllocatorToVMPatterns(context, importSymbols, typeConverter,
patterns);
@@ -110,7 +116,7 @@
iree_hal_imports_create()->size),
innerModuleOp);
- OwningRewritePatternList conversionPatterns(&getContext());
+ RewritePatternSet conversionPatterns(&getContext());
populateStandardToVMPatterns(context, typeConverter, conversionPatterns);
populateUtilToVMPatterns(context, conversionTarget, typeConverter,
conversionPatterns);
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.h b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.h
index 4e90530..070d59a 100644
--- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.h
+++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertHALToVM.h
@@ -16,7 +16,7 @@
// Populates conversion patterns from the HAL dialect to the VM dialect.
void populateHALToVMPatterns(MLIRContext *context, SymbolTable &importSymbols,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter);
} // namespace iree_compiler
diff --git a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertSemaphoreOps.cpp b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertSemaphoreOps.cpp
index b7bf3a4..1972b68 100644
--- a/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertSemaphoreOps.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/HALToVM/ConvertSemaphoreOps.cpp
@@ -15,7 +15,7 @@
void populateHALSemaphoreToVMPatterns(MLIRContext *context,
SymbolTable &importSymbols,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
patterns.insert<VMImportOpConversion<IREE::HAL::SemaphoreCreateOp>>(
context, importSymbols, typeConverter, "hal.semaphore.create");
patterns.insert<VMImportOpConversion<IREE::HAL::SemaphoreQueryOp>>(
diff --git a/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertShapeOps.cpp b/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertShapeOps.cpp
index 7da9594..5a97093 100644
--- a/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertShapeOps.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertShapeOps.cpp
@@ -52,7 +52,7 @@
void populateStandardShapeToHALPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter) {
// Ensure all shape-related ops are fully converted, as no types they can
// legally operate on should remain after this conversion.
diff --git a/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertStandardToHAL.cpp b/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertStandardToHAL.cpp
index 200b45a..f36f431 100644
--- a/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertStandardToHAL.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertStandardToHAL.cpp
@@ -19,18 +19,18 @@
void populateStandardShapeToHALPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &converter);
void populateStandardStructuralToHALPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &converter);
void populateStandardToHALPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
populateStandardShapeToHALPatterns(context, conversionTarget, patterns,
typeConverter);
populateStandardStructuralToHALPatterns(context, conversionTarget, patterns,
diff --git a/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertStandardToHAL.h b/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertStandardToHAL.h
index 3a2ac4d..8313feb 100644
--- a/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertStandardToHAL.h
+++ b/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertStandardToHAL.h
@@ -17,7 +17,7 @@
void populateStandardToHALPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertStructuralOps.cpp b/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertStructuralOps.cpp
index 3923e96..24798f6 100644
--- a/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertStructuralOps.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/StandardToHAL/ConvertStructuralOps.cpp
@@ -175,7 +175,7 @@
void populateStandardStructuralToHALPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter) {
conversionTarget.addLegalOp<mlir::ModuleOp>();
diff --git a/iree/compiler/Dialect/HAL/Conversion/StreamToHAL/ConvertStreamToHAL.cpp b/iree/compiler/Dialect/HAL/Conversion/StreamToHAL/ConvertStreamToHAL.cpp
index 5467018..927931d 100644
--- a/iree/compiler/Dialect/HAL/Conversion/StreamToHAL/ConvertStreamToHAL.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/StreamToHAL/ConvertStreamToHAL.cpp
@@ -1122,7 +1122,7 @@
void populateStreamToHALPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
conversionTarget.addIllegalDialect<IREE::Stream::StreamDialect>();
typeConverter.addConversion(
diff --git a/iree/compiler/Dialect/HAL/Conversion/StreamToHAL/ConvertStreamToHAL.h b/iree/compiler/Dialect/HAL/Conversion/StreamToHAL/ConvertStreamToHAL.h
index 23e4a65..e9e9f06 100644
--- a/iree/compiler/Dialect/HAL/Conversion/StreamToHAL/ConvertStreamToHAL.h
+++ b/iree/compiler/Dialect/HAL/Conversion/StreamToHAL/ConvertStreamToHAL.h
@@ -17,7 +17,7 @@
void populateStreamToHALPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Dialect/HAL/Conversion/UtilToHAL/ConvertUtilToHAL.cpp b/iree/compiler/Dialect/HAL/Conversion/UtilToHAL/ConvertUtilToHAL.cpp
index 899e5ec..1ba4337 100644
--- a/iree/compiler/Dialect/HAL/Conversion/UtilToHAL/ConvertUtilToHAL.cpp
+++ b/iree/compiler/Dialect/HAL/Conversion/UtilToHAL/ConvertUtilToHAL.cpp
@@ -41,7 +41,7 @@
void populateUtilToHALPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
conversionTarget.addDynamicallyLegalOp<IREE::Util::GlobalOp>(
[&](IREE::Util::GlobalOp op) {
return typeConverter.isLegal(op.type()) &&
diff --git a/iree/compiler/Dialect/HAL/Conversion/UtilToHAL/ConvertUtilToHAL.h b/iree/compiler/Dialect/HAL/Conversion/UtilToHAL/ConvertUtilToHAL.h
index c26328e..59ed45c 100644
--- a/iree/compiler/Dialect/HAL/Conversion/UtilToHAL/ConvertUtilToHAL.h
+++ b/iree/compiler/Dialect/HAL/Conversion/UtilToHAL/ConvertUtilToHAL.h
@@ -19,7 +19,7 @@
void populateUtilToHALPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Dialect/HAL/IR/HALDialect.cpp b/iree/compiler/Dialect/HAL/IR/HALDialect.cpp
index aecd566..8768c4b 100644
--- a/iree/compiler/Dialect/HAL/IR/HALDialect.cpp
+++ b/iree/compiler/Dialect/HAL/IR/HALDialect.cpp
@@ -84,7 +84,7 @@
}
void populateVMConversionPatterns(
- SymbolTable &importSymbols, OwningRewritePatternList &patterns,
+ SymbolTable &importSymbols, RewritePatternSet &patterns,
TypeConverter &typeConverter) const override {
populateHALToVMPatterns(getDialect()->getContext(), importSymbols, patterns,
typeConverter);
diff --git a/iree/compiler/Dialect/HAL/IR/HALOpFolders.cpp b/iree/compiler/Dialect/HAL/IR/HALOpFolders.cpp
index 3ff6ec6..27f95a1 100644
--- a/iree/compiler/Dialect/HAL/IR/HALOpFolders.cpp
+++ b/iree/compiler/Dialect/HAL/IR/HALOpFolders.cpp
@@ -72,8 +72,8 @@
} // namespace
-void BufferViewBufferOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void BufferViewBufferOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<SkipBufferViewBufferOp>(context);
}
@@ -98,8 +98,8 @@
} // namespace
-void BufferViewDimsOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void BufferViewDimsOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<ExpandBufferViewDimsOp>(context);
}
@@ -129,7 +129,7 @@
} // namespace
void CommandBufferDeviceOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<SkipCommandBufferDeviceOp>(context);
}
@@ -167,7 +167,7 @@
} // namespace
void CommandBufferFillBufferOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<FoldCommandBufferFillBufferSubspans>(context);
}
@@ -216,7 +216,7 @@
} // namespace
void CommandBufferCopyBufferOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<FoldCommandBufferCopyBufferSubspans>(context);
}
@@ -262,7 +262,7 @@
} // namespace
void CommandBufferPushDescriptorSetOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<FoldCommandBufferPushDescriptorSetBufferSubspan>(context);
}
diff --git a/iree/compiler/Dialect/HAL/IR/test/allocator_ops.mlir b/iree/compiler/Dialect/HAL/IR/test/allocator_ops.mlir
index 3119296..ebd03e2 100644
--- a/iree/compiler/Dialect/HAL/IR/test/allocator_ops.mlir
+++ b/iree/compiler/Dialect/HAL/IR/test/allocator_ops.mlir
@@ -6,7 +6,7 @@
// CHECK-DAG: %[[SIZE:.+]] = arith.constant 123
%size = arith.constant 123 : index
// CHECK: %[[REF:.+]] = hal.allocator.allocate<%[[ALLOCATOR]] : !hal.allocator>
- // CHECK-SAME: type("HostVisible|HostCoherent")
+ // CHECK-SAME: type("HostVisible|HostCoherent|HostLocal")
// CHECK-SAME: usage(Transfer)
// CHECK-SAME: : !hal.buffer{%[[SIZE]]}
%ref = hal.allocator.allocate<%allocator : !hal.allocator>
diff --git a/iree/compiler/Dialect/HAL/Target/CUDA/BUILD b/iree/compiler/Dialect/HAL/Target/CUDA/BUILD
index bf84382..6ec35df 100644
--- a/iree/compiler/Dialect/HAL/Target/CUDA/BUILD
+++ b/iree/compiler/Dialect/HAL/Target/CUDA/BUILD
@@ -4,65 +4,46 @@
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-load("//iree:build_defs.oss.bzl", "iree_cmake_extra_content")
-load("//build_tools/embed_data:build_defs.bzl", "c_embed_data")
-
package(
default_visibility = ["//visibility:public"],
features = ["layering_check"],
licenses = ["notice"], # Apache 2.0
)
-iree_cmake_extra_content(
- content = """
-if(NOT "${IREE_TARGET_BACKEND_CUDA}")
- return()
-endif()
-""",
-)
-
-c_embed_data(
- name = "cuda_libdevice",
- srcs = ["@cuda//:libdevice_root"],
- c_file_output = "libdevice.c",
- flatten = True,
- h_file_output = "libdevice.h",
-)
-
-cc_library(
- name = "CUDA",
- srcs = [
- "CUDATarget.cpp",
- "NoLoopUnrollPass.cpp",
- ],
- hdrs = [
- "CUDATarget.h",
- "LLVMPasses.h",
- ],
- deps = [
- ":cuda_libdevice",
- "//iree/compiler/Codegen:PassHeaders",
- "//iree/compiler/Codegen/Dialect:IREECodegenDialect",
- "//iree/compiler/Codegen/LLVMGPU",
- "//iree/compiler/Dialect/HAL/Target",
- "//iree/compiler/Utils",
- "//iree/schemas:cuda_executable_def_c_fbs",
- "@llvm-project//llvm:Analysis",
- "@llvm-project//llvm:BitReader",
- "@llvm-project//llvm:Core",
- "@llvm-project//llvm:IPO",
- "@llvm-project//llvm:Linker",
- "@llvm-project//llvm:MC",
- "@llvm-project//llvm:NVPTXCodeGen",
- "@llvm-project//llvm:Support",
- "@llvm-project//llvm:Target",
- "@llvm-project//mlir:GPUDialect",
- "@llvm-project//mlir:LLVMDialect",
- "@llvm-project//mlir:LLVMToLLVMIRTranslation",
- "@llvm-project//mlir:NVVMDialect",
- "@llvm-project//mlir:NVVMToLLVMIRTranslation",
- "@llvm-project//mlir:Pass",
- "@llvm-project//mlir:Support",
- "@llvm-project//mlir:ToLLVMIRTranslation",
- ],
-)
+# Temporarily disabled pending build system changes.
+# cc_library(
+# name = "CUDA",
+# srcs = [
+# "CUDATarget.cpp",
+# "NoLoopUnrollPass.cpp",
+# ],
+# hdrs = [
+# "CUDATarget.h",
+# "LLVMPasses.h",
+# ],
+# deps = [
+# "//iree/compiler/Codegen:PassHeaders",
+# "//iree/compiler/Codegen/Dialect:IREECodegenDialect",
+# "//iree/compiler/Codegen/LLVMGPU",
+# "//iree/compiler/Dialect/HAL/Target",
+# "//iree/compiler/Utils",
+# "//iree/schemas:cuda_executable_def_c_fbs",
+# "@llvm-project//llvm:Analysis",
+# "@llvm-project//llvm:BitReader",
+# "@llvm-project//llvm:Core",
+# "@llvm-project//llvm:IPO",
+# "@llvm-project//llvm:Linker",
+# "@llvm-project//llvm:MC",
+# "@llvm-project//llvm:NVPTXCodeGen",
+# "@llvm-project//llvm:Support",
+# "@llvm-project//llvm:Target",
+# "@llvm-project//mlir:GPUDialect",
+# "@llvm-project//mlir:LLVMDialect",
+# "@llvm-project//mlir:LLVMToLLVMIRTranslation",
+# "@llvm-project//mlir:NVVMDialect",
+# "@llvm-project//mlir:NVVMToLLVMIRTranslation",
+# "@llvm-project//mlir:Pass",
+# "@llvm-project//mlir:Support",
+# "@llvm-project//mlir:ToLLVMIRTranslation",
+# ],
+# )
diff --git a/iree/compiler/Dialect/HAL/Target/CUDA/CMakeLists.txt b/iree/compiler/Dialect/HAL/Target/CUDA/CMakeLists.txt
index 727a183..6131c1a 100644
--- a/iree/compiler/Dialect/HAL/Target/CUDA/CMakeLists.txt
+++ b/iree/compiler/Dialect/HAL/Target/CUDA/CMakeLists.txt
@@ -12,55 +12,36 @@
iree_add_all_subdirs()
-# TODO(#5893): iree_c_embed_data requires a relative path and we need it to be
-# here to keep the path consistent with bazel build file. Fix it by making cmake
-# target more flexible.
-iree_c_embed_data(
- NAME
- cuda_libdevice
- SRCS
- "../../../../../../third_party/cuda/nvvm/libdevice/libdevice.10.bc"
- C_FILE_OUTPUT
- "libdevice.c"
- H_FILE_OUTPUT
- "libdevice.h"
- FLATTEN
- PUBLIC
-)
-
-iree_cc_library(
- NAME
- CUDA
- HDRS
- "CUDATarget.h"
- "LLVMPasses.h"
- SRCS
- "CUDATarget.cpp"
- "NoLoopUnrollPass.cpp"
- DEPS
- ::cuda_libdevice
- LLVMAnalysis
- LLVMBitReader
- LLVMCore
- LLVMipo
- LLVMLinker
- LLVMNVPTXCodeGen
- LLVMSupport
- LLVMTarget
- MLIRGPUOps
- MLIRLLVMIR
- MLIRLLVMToLLVMIRTranslation
- MLIRNVVMIR
- MLIRNVVMToLLVMIRTranslation
- MLIRPass
- MLIRSupport
- MLIRTargetLLVMIRExport
- iree::base::internal::flatcc::building
- iree::compiler::Codegen::LLVMGPU
- iree::compiler::Dialect::HAL::Target
- iree::compiler::Utils
- iree::schemas::cuda_executable_def_c_fbs
- PUBLIC
-)
-
-### BAZEL_TO_CMAKE_PRESERVES_ALL_CONTENT_BELOW_THIS_LINE ###
+# iree_cc_library(
+# NAME
+# CUDA
+# HDRS
+# "CUDATarget.h"
+# "LLVMPasses.h"
+# SRCS
+# "CUDATarget.cpp"
+# "NoLoopUnrollPass.cpp"
+# DEPS
+# LLVMAnalysis
+# LLVMBitReader
+# LLVMCore
+# LLVMipo
+# LLVMLinker
+# LLVMNVPTXCodeGen
+# LLVMSupport
+# LLVMTarget
+# MLIRGPUOps
+# MLIRLLVMIR
+# MLIRLLVMToLLVMIRTranslation
+# MLIRNVVMIR
+# MLIRNVVMToLLVMIRTranslation
+# MLIRPass
+# MLIRSupport
+# MLIRTargetLLVMIRExport
+# iree::base::internal::flatcc::building
+# iree::compiler::Codegen::LLVMGPU
+# iree::compiler::Dialect::HAL::Target
+# iree::compiler::Utils
+# iree::schemas::cuda_executable_def_c_fbs
+# PUBLIC
+# )
diff --git a/iree/compiler/Dialect/HAL/Target/CUDA/CUDATarget.cpp b/iree/compiler/Dialect/HAL/Target/CUDA/CUDATarget.cpp
index 2541cbc..41d10ad 100644
--- a/iree/compiler/Dialect/HAL/Target/CUDA/CUDATarget.cpp
+++ b/iree/compiler/Dialect/HAL/Target/CUDA/CUDATarget.cpp
@@ -9,7 +9,6 @@
#include "iree/compiler/Codegen/Dialect/IREECodegenDialect.h"
#include "iree/compiler/Codegen/Passes.h"
#include "iree/compiler/Dialect/HAL/Target/CUDA/LLVMPasses.h"
-#include "iree/compiler/Dialect/HAL/Target/CUDA/libdevice.h"
#include "iree/compiler/Dialect/HAL/Target/TargetRegistry.h"
#include "iree/compiler/Utils/FlatbufferUtils.h"
#include "iree/schemas/cuda_executable_def_builder.h"
diff --git a/iree/compiler/Dialect/HAL/Target/CUDA/test/BUILD b/iree/compiler/Dialect/HAL/Target/CUDA/test/BUILD
index 43a44f4..ac1f0e0 100644
--- a/iree/compiler/Dialect/HAL/Target/CUDA/test/BUILD
+++ b/iree/compiler/Dialect/HAL/Target/CUDA/test/BUILD
@@ -4,8 +4,8 @@
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-load("//build_tools/bazel:iree_lit_test.bzl", "iree_lit_test_suite")
-load("//build_tools/bazel:enforce_glob.bzl", "enforce_glob")
+# load("//build_tools/bazel:iree_lit_test.bzl", "iree_lit_test_suite")
+# load("//build_tools/bazel:enforce_glob.bzl", "enforce_glob")
package(
default_visibility = ["//visibility:public"],
@@ -13,14 +13,14 @@
licenses = ["notice"], # Apache 2.0
)
-iree_lit_test_suite(
- name = "lit",
- srcs = enforce_glob(
- ["smoketest.mlir"],
- include = ["*.mlir"],
- ),
- tools = [
- "//iree/tools:iree-opt",
- "@llvm-project//llvm:FileCheck",
- ],
-)
+# iree_lit_test_suite(
+# name = "lit",
+# srcs = enforce_glob(
+# ["smoketest.mlir"],
+# include = ["*.mlir"],
+# ),
+# tools = [
+# "//iree/tools:iree-opt",
+# "@llvm-project//llvm:FileCheck",
+# ],
+# )
diff --git a/iree/compiler/Dialect/HAL/Target/CUDA/test/CMakeLists.txt b/iree/compiler/Dialect/HAL/Target/CUDA/test/CMakeLists.txt
index fa0a3a8..0b6559b 100644
--- a/iree/compiler/Dialect/HAL/Target/CUDA/test/CMakeLists.txt
+++ b/iree/compiler/Dialect/HAL/Target/CUDA/test/CMakeLists.txt
@@ -10,14 +10,4 @@
iree_add_all_subdirs()
-iree_lit_test_suite(
- NAME
- lit
- SRCS
- "smoketest.mlir"
- TOOLS
- FileCheck
- iree::tools::iree-opt
-)
-
### BAZEL_TO_CMAKE_PRESERVES_ALL_CONTENT_BELOW_THIS_LINE ###
diff --git a/iree/compiler/Dialect/HAL/Target/LLVM/BUILD b/iree/compiler/Dialect/HAL/Target/LLVM/BUILD
index 045cea0..7755e97 100644
--- a/iree/compiler/Dialect/HAL/Target/LLVM/BUILD
+++ b/iree/compiler/Dialect/HAL/Target/LLVM/BUILD
@@ -57,6 +57,7 @@
"@llvm-project//llvm:WebAssemblyCodeGen",
"@llvm-project//llvm:X86AsmParser",
"@llvm-project//llvm:X86CodeGen",
+ "@llvm-project//mlir:ArmNeon",
"@llvm-project//mlir:LLVMDialect",
"@llvm-project//mlir:LLVMToLLVMIRTranslation",
"@llvm-project//mlir:ToLLVMIRTranslation",
diff --git a/iree/compiler/Dialect/HAL/Target/LLVM/CMakeLists.txt b/iree/compiler/Dialect/HAL/Target/LLVM/CMakeLists.txt
index 6455ef5..d73ca81 100644
--- a/iree/compiler/Dialect/HAL/Target/LLVM/CMakeLists.txt
+++ b/iree/compiler/Dialect/HAL/Target/LLVM/CMakeLists.txt
@@ -43,6 +43,7 @@
LLVMWebAssemblyCodeGen
LLVMX86AsmParser
LLVMX86CodeGen
+ MLIRArmNeon
MLIRLLVMIR
MLIRLLVMToLLVMIRTranslation
MLIRTargetLLVMIRExport
diff --git a/iree/compiler/Dialect/HAL/Target/LLVM/LLVMAOTTarget.cpp b/iree/compiler/Dialect/HAL/Target/LLVM/LLVMAOTTarget.cpp
index e4ef1f2..0d927e2 100644
--- a/iree/compiler/Dialect/HAL/Target/LLVM/LLVMAOTTarget.cpp
+++ b/iree/compiler/Dialect/HAL/Target/LLVM/LLVMAOTTarget.cpp
@@ -24,6 +24,7 @@
#include "llvm/Linker/Linker.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/TargetSelect.h"
+#include "mlir/Dialect/ArmNeon/ArmNeonDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Export.h"
@@ -143,7 +144,9 @@
void getDependentDialects(DialectRegistry ®istry) const override {
mlir::registerLLVMDialectTranslation(registry);
- registry.insert<IREE::Codegen::IREECodegenDialect>();
+ // TODO: make inclusion of ArmNeon conditional?
+ registry
+ .insert<IREE::Codegen::IREECodegenDialect, arm_neon::ArmNeonDialect>();
}
IREE::HAL::DeviceTargetAttr getDefaultDeviceTarget(
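One way the TODO above could be resolved, sketched with a hypothetical flag; the flag name and default are illustrative and not part of this change:

#include "llvm/Support/CommandLine.h"

static llvm::cl::opt<bool> clEnableArmNeon(
    "iree-llvmaot-enable-arm-neon",  // hypothetical flag name
    llvm::cl::desc("Register the ArmNeon dialect for LLVM AOT targets"),
    llvm::cl::init(true));

void getDependentDialects(DialectRegistry &registry) const override {
  mlir::registerLLVMDialectTranslation(registry);
  registry.insert<IREE::Codegen::IREECodegenDialect>();
  if (clEnableArmNeon) registry.insert<arm_neon::ArmNeonDialect>();
}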
diff --git a/iree/compiler/Dialect/HAL/Target/VMVX/test/linking.mlir b/iree/compiler/Dialect/HAL/Target/VMVX/test/linking.mlir
index 5291970..d81e3ca 100644
--- a/iree/compiler/Dialect/HAL/Target/VMVX/test/linking.mlir
+++ b/iree/compiler/Dialect/HAL/Target/VMVX/test/linking.mlir
@@ -111,114 +111,6 @@
// -----
-#cuda_target = #hal.executable.target<"cuda", "cuda-nvptx-fb">
-#vmvx_target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb">
-#executable_layout = #hal.executable.layout<push_constants = 1, sets = [
- #hal.descriptor_set.layout<0, bindings = [
- #hal.descriptor_set.binding<0, storage_buffer>,
- #hal.descriptor_set.binding<1, storage_buffer>
- ]>
-]>
-
-hal.executable private @dispatch_0 {
- hal.executable.variant @vmvx, target = #vmvx_target {
- hal.executable.entry_point @dispatch_0 ordinal(0) layout(#executable_layout)
- builtin.module {
- vm.module @module {
- vm.func @dispatch_0() {
- vm.return
- }
- vm.export @dispatch_0
- }
- }
- }
- hal.executable.variant @cuda, target = #cuda_target {
- builtin.module {
- }
- }
-}
-hal.executable private @dispatch_1 {
- hal.executable.variant @vmvx, target = #vmvx_target {
- hal.executable.entry_point @dispatch_1 ordinal(0) layout(#executable_layout)
- builtin.module {
- vm.module @module {
- vm.func @dispatch_1() {
- vm.return
- }
- vm.export @dispatch_1
- }
- }
- }
- hal.executable.variant @cuda, target = #cuda_target {
- builtin.module {
- }
- }
-}
-func @other_targets() -> () {
- %device = hal.ex.shared_device : !hal.device
- %cmd = hal.command_buffer.create device(%device : !hal.device) mode("OneShot") categories("Transfer|Dispatch") : !hal.command_buffer
- hal.device.switch<%device : !hal.device>
- #hal.device.match.executable.format<"vmvx-bytecode-fb"> {
- %c1 = arith.constant 1 : index
- hal.command_buffer.dispatch.symbol<%cmd : !hal.command_buffer> target(@dispatch_0::@vmvx::@dispatch_0) workgroups([%c1, %c1, %c1])
- hal.command_buffer.dispatch.symbol<%cmd : !hal.command_buffer> target(@dispatch_1::@vmvx::@dispatch_1) workgroups([%c1, %c1, %c1])
- hal.return
- },
- #hal.device.match.executable.format<"cuda-nvptx-fb"> {
- %c1 = arith.constant 1 : index
- hal.command_buffer.dispatch.symbol<%cmd : !hal.command_buffer> target(@dispatch_0::@otherdispatch::@dispatch_0) workgroups([%c1, %c1, %c1])
- hal.command_buffer.dispatch.symbol<%cmd : !hal.command_buffer> target(@dispatch_1::@otherdispatch::@dispatch_1) workgroups([%c1, %c1, %c1])
- hal.return
- }
- return
-}
-
-// VMVX target should be pulled out from both executables leaving the originals
-// untouched.
-// CHECK: hal.executable private @vmvx_linked {
-// CHECK-NEXT: hal.executable.variant public @vmvx_bytecode_fb, target = #executable_target_vmvx_bytecode_fb {
-// CHECK-NEXT: hal.executable.entry_point public @dispatch_0 ordinal(0)
-// CHECK-NEXT: hal.executable.entry_point public @dispatch_1 ordinal(1)
-// CHECK-NEXT: module {
-// CHECK-NEXT: vm.module public @linked_module {
-// CHECK-NEXT: vm.func @dispatch_0() {
-// CHECK-NEXT: vm.return
-// CHECK-NEXT: }
-// CHECK-NEXT: vm.export @dispatch_0
-// CHECK-NEXT: vm.func @dispatch_1() {
-// CHECK-NEXT: vm.return
-// CHECK-NEXT: }
-// CHECK-NEXT: vm.export @dispatch_1
-// CHECK-NEXT: }
-// CHECK-NEXT: }
-// CHECK-NEXT: }
-// CHECK-NEXT: }
-//
-// @dispatch_0/1 should remain, with just @cuda
-// CHECK: hal.executable private @dispatch_0 {
-// CHECK: hal.executable.variant public @cuda, target = #executable_target_cuda
-// CHECK: hal.executable private @dispatch_1 {
-// CHECK: hal.executable.variant public @cuda, target = #executable_target_cuda
-//
-// CHECK: func @other_targets() {
-// CHECK: hal.device.switch<%device : !hal.device>
-// CHECK-NEXT: #hal.device.match.executable.format<"vmvx-bytecode-fb"> {
-// CHECK-NEXT: %c1 = arith.constant 1 : index
-// CHECK-NEXT: hal.command_buffer.dispatch.symbol<%cmd : !hal.command_buffer> target(@vmvx_linked::@vmvx_bytecode_fb::@dispatch_0) workgroups([%c1, %c1, %c1])
-// CHECK-NEXT: hal.command_buffer.dispatch.symbol<%cmd : !hal.command_buffer> target(@vmvx_linked::@vmvx_bytecode_fb::@dispatch_1) workgroups([%c1, %c1, %c1])
-// CHECK-NEXT: hal.return
-// CHECK-NEXT: },
-// CHECK-NEXT: #hal.device.match.executable.format<"cuda-nvptx-fb"> {
-// CHECK-NEXT: %c1 = arith.constant 1 : index
-// CHECK-NEXT: hal.command_buffer.dispatch.symbol<%cmd : !hal.command_buffer> target(@dispatch_0::@otherdispatch::@dispatch_0) workgroups([%c1, %c1, %c1])
-// CHECK-NEXT: hal.command_buffer.dispatch.symbol<%cmd : !hal.command_buffer> target(@dispatch_1::@otherdispatch::@dispatch_1) workgroups([%c1, %c1, %c1])
-// CHECK-NEXT: hal.return
-// CHECK-NEXT: }
-// CHECK-NEXT: return
-// CHECK-NEXT: }
-
-// -----
-
#vmvx_target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb">
#executable_layout = #hal.executable.layout<push_constants = 1, sets = [
#hal.descriptor_set.layout<0, bindings = [
diff --git a/iree/compiler/Dialect/HAL/Transforms/ConvertToHAL.cpp b/iree/compiler/Dialect/HAL/Transforms/ConvertToHAL.cpp
index 990ac2e..89183a1 100644
--- a/iree/compiler/Dialect/HAL/Transforms/ConvertToHAL.cpp
+++ b/iree/compiler/Dialect/HAL/Transforms/ConvertToHAL.cpp
@@ -70,7 +70,7 @@
HALTypeConverter typeConverter(conversionInterfaces);
HALConversionTarget conversionTarget(context, typeConverter);
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
populateUtilToHALPatterns(context, conversionTarget, typeConverter,
patterns);
diff --git a/iree/compiler/Dialect/HAL/Transforms/MaterializeInterfaces.cpp b/iree/compiler/Dialect/HAL/Transforms/MaterializeInterfaces.cpp
index aaccaec..0f1156c 100644
--- a/iree/compiler/Dialect/HAL/Transforms/MaterializeInterfaces.cpp
+++ b/iree/compiler/Dialect/HAL/Transforms/MaterializeInterfaces.cpp
@@ -316,7 +316,7 @@
} // namespace
static LogicalResult convertFlowInfoOps(IREE::HAL::ExecutableOp executableOp) {
- OwningRewritePatternList patterns(executableOp.getContext());
+ RewritePatternSet patterns(executableOp.getContext());
patterns.insert<
ConvertDispatchWorkgroupInfoPattern<IREE::Flow::DispatchWorkgroupIDOp,
IREE::HAL::InterfaceWorkgroupIDOp>,
diff --git a/iree/compiler/Dialect/HAL/Transforms/ResolveEntryPointOrdinals.cpp b/iree/compiler/Dialect/HAL/Transforms/ResolveEntryPointOrdinals.cpp
index f4b4699..9ed0a03 100644
--- a/iree/compiler/Dialect/HAL/Transforms/ResolveEntryPointOrdinals.cpp
+++ b/iree/compiler/Dialect/HAL/Transforms/ResolveEntryPointOrdinals.cpp
@@ -86,7 +86,7 @@
void runOnOperation() override {
MLIRContext *context = &getContext();
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
patterns.insert<ResolveCommandBufferDispatchOrdinals>(context);
patterns.insert<ResolveCommandBufferDispatchIndirectOrdinals>(context);
if (failed(applyPatternsAndFoldGreedily(getOperation(),
diff --git a/iree/compiler/Dialect/HAL/Transforms/test/convert_to_hal.mlir b/iree/compiler/Dialect/HAL/Transforms/test/convert_to_hal.mlir
index 908caa1..cc85089 100644
--- a/iree/compiler/Dialect/HAL/Transforms/test/convert_to_hal.mlir
+++ b/iree/compiler/Dialect/HAL/Transforms/test/convert_to_hal.mlir
@@ -67,7 +67,7 @@
// CHECK: %[[RESULT_BUFFER:.+]] = hal.allocator.allocate<%[[ALLOCATOR]] : !hal.allocator>
// CHECK-SAME: type("HostVisible|DeviceVisible|DeviceLocal")
- // CHECK-SAME: usage("Transfer|Mapping|Dispatch")
+ // CHECK-SAME: usage("Transfer|Mapping|Dispatch|All")
// CHECK-SAME: : !hal.buffer{%c16}
%result_resource = stream.resource.alloc uninitialized : !stream.resource<external>{%c16}
diff --git a/iree/compiler/Dialect/Modules/Check/Conversion/ConversionPatterns.cpp b/iree/compiler/Dialect/Modules/Check/Conversion/ConversionPatterns.cpp
index cc049b5..4382a52 100644
--- a/iree/compiler/Dialect/Modules/Check/Conversion/ConversionPatterns.cpp
+++ b/iree/compiler/Dialect/Modules/Check/Conversion/ConversionPatterns.cpp
@@ -18,7 +18,7 @@
namespace Check {
void populateCheckToVMPatterns(MLIRContext *context, SymbolTable &importSymbols,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter) {
patterns.insert<VMImportOpConversion<IREE::Check::ExpectTrueOp>>(
context, importSymbols, typeConverter, "check.expect_true");
@@ -33,7 +33,7 @@
}
void populateCheckToHALPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter) {
// The same op handles both tensors and buffer views.
patterns
diff --git a/iree/compiler/Dialect/Modules/Check/Conversion/ConversionPatterns.h b/iree/compiler/Dialect/Modules/Check/Conversion/ConversionPatterns.h
index c42a16c..a9fec56 100644
--- a/iree/compiler/Dialect/Modules/Check/Conversion/ConversionPatterns.h
+++ b/iree/compiler/Dialect/Modules/Check/Conversion/ConversionPatterns.h
@@ -17,13 +17,13 @@
// Populates conversion patterns from the Check dialect to the VM dialect.
void populateCheckToVMPatterns(MLIRContext *context, SymbolTable &importSymbols,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter);
// Populates conversion patterns from the Check dialect to the HAL dialect.
// Mostly lowers tensors to buffer views.
void populateCheckToHALPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter);
} // namespace Check
diff --git a/iree/compiler/Dialect/Modules/Check/IR/CheckDialect.cpp b/iree/compiler/Dialect/Modules/Check/IR/CheckDialect.cpp
index 9b912db..4d0da79 100644
--- a/iree/compiler/Dialect/Modules/Check/IR/CheckDialect.cpp
+++ b/iree/compiler/Dialect/Modules/Check/IR/CheckDialect.cpp
@@ -32,7 +32,7 @@
}
void populateVMConversionPatterns(
- SymbolTable &importSymbols, OwningRewritePatternList &patterns,
+ SymbolTable &importSymbols, RewritePatternSet &patterns,
TypeConverter &typeConverter) const override {
populateCheckToVMPatterns(getDialect()->getContext(), importSymbols,
patterns, typeConverter);
@@ -44,7 +44,7 @@
using HALConversionDialectInterface::HALConversionDialectInterface;
void setupConversionTarget(ConversionTarget &target,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter) const override {
populateCheckToHALPatterns(getDialect()->getContext(), patterns,
typeConverter);
diff --git a/iree/compiler/Dialect/Modules/Check/IR/CheckOps.cpp b/iree/compiler/Dialect/Modules/Check/IR/CheckOps.cpp
index ed43857..9e1fc33 100644
--- a/iree/compiler/Dialect/Modules/Check/IR/CheckOps.cpp
+++ b/iree/compiler/Dialect/Modules/Check/IR/CheckOps.cpp
@@ -29,13 +29,13 @@
};
} // namespace
-void ExpectEqConstOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void ExpectEqConstOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<ExpandAttributeToConst<ExpectEqConstOp, ExpectEqOp>>(context);
}
void ExpectAlmostEqConstOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results
.insert<ExpandAttributeToConst<ExpectAlmostEqConstOp, ExpectAlmostEqOp>>(
context);
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.cpp b/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.cpp
index 7f89683..07d53f3 100644
--- a/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.cpp
+++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.cpp
@@ -260,7 +260,7 @@
} // namespace
void populateHALToVMVXPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter) {
patterns.insert<ConvertHALInterfaceWorkgroupIDOp>(typeConverter, context);
patterns.insert<ConvertHALInterfaceWorkgroupSizeOp>(typeConverter, context);
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.h b/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.h
index 3b1faa1..8cfc8ed 100644
--- a/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.h
+++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/HALToVMVX/ConvertHALToVMVX.h
@@ -21,7 +21,7 @@
// Populates conversion patterns from the IREE HAL dialect interface to the
// VMVX dialect interface.
void populateHALToVMVXPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter);
} // namespace iree_compiler
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/ConvertStandardToVMVX.cpp b/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/ConvertStandardToVMVX.cpp
index ca6597c..8d49eae 100644
--- a/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/ConvertStandardToVMVX.cpp
+++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/ConvertStandardToVMVX.cpp
@@ -63,7 +63,7 @@
} // namespace
void populateStandardToVMVXPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter) {
// We type/shape erase memrefs as we lower so there is no need for reshapes.
patterns.insert<FoldAsNoOp<memref::CollapseShapeOp>>(typeConverter, context);
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/ConvertStandardToVMVX.h b/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/ConvertStandardToVMVX.h
index 06467de..e083a12 100644
--- a/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/ConvertStandardToVMVX.h
+++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/StandardToVMVX/ConvertStandardToVMVX.h
@@ -15,7 +15,7 @@
// Populates conversion patterns from the std dialect to the VMVX dialect.
void populateStandardToVMVXPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter);
} // namespace iree_compiler
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/ConvertVMVXToVM.cpp b/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/ConvertVMVXToVM.cpp
index 5f47f56..5c6d11a 100644
--- a/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/ConvertVMVXToVM.cpp
+++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/ConvertVMVXToVM.cpp
@@ -120,7 +120,7 @@
void populateVMVXToVMPatterns(MLIRContext *context,
TypeConverter &typeConverter,
SymbolTable &importSymbols,
- OwningRewritePatternList &patterns) {}
+ RewritePatternSet &patterns) {}
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/ConvertVMVXToVM.h b/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/ConvertVMVXToVM.h
index 94334a4..5803f00 100644
--- a/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/ConvertVMVXToVM.h
+++ b/iree/compiler/Dialect/Modules/VMVX/Conversion/VMVXToVM/ConvertVMVXToVM.h
@@ -18,7 +18,7 @@
void populateVMVXToVMPatterns(MLIRContext *context,
TypeConverter &typeConverter,
SymbolTable &importSymbols,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Dialect/Modules/VMVX/IR/VMVXDialect.cpp b/iree/compiler/Dialect/Modules/VMVX/IR/VMVXDialect.cpp
index 1c5b158..511a280 100644
--- a/iree/compiler/Dialect/Modules/VMVX/IR/VMVXDialect.cpp
+++ b/iree/compiler/Dialect/Modules/VMVX/IR/VMVXDialect.cpp
@@ -34,7 +34,7 @@
}
void populateVMConversionPatterns(
- SymbolTable &importSymbols, OwningRewritePatternList &patterns,
+ SymbolTable &importSymbols, RewritePatternSet &patterns,
TypeConverter &typeConverter) const override {
populateVMVXToVMPatterns(getDialect()->getContext(), typeConverter,
importSymbols, patterns);
diff --git a/iree/compiler/Dialect/Modules/VMVX/Transforms/Conversion.cpp b/iree/compiler/Dialect/Modules/VMVX/Transforms/Conversion.cpp
index 5b6775a..eb380fe 100644
--- a/iree/compiler/Dialect/Modules/VMVX/Transforms/Conversion.cpp
+++ b/iree/compiler/Dialect/Modules/VMVX/Transforms/Conversion.cpp
@@ -73,7 +73,7 @@
conversionTarget.addLegalDialect<memref::MemRefDialect>();
conversionTarget.addLegalOp<mlir::UnrealizedConversionCastOp>();
- OwningRewritePatternList conversionPatterns(&getContext());
+ RewritePatternSet conversionPatterns(&getContext());
populateHALToVMVXPatterns(context, conversionPatterns, typeConverter);
populateStandardToVMVXPatterns(context, conversionPatterns, typeConverter);
diff --git a/iree/compiler/Dialect/Modules/VMVX/Transforms/Passes.cpp b/iree/compiler/Dialect/Modules/VMVX/Transforms/Passes.cpp
index 7804fe0..c4ab9bc 100644
--- a/iree/compiler/Dialect/Modules/VMVX/Transforms/Passes.cpp
+++ b/iree/compiler/Dialect/Modules/VMVX/Transforms/Passes.cpp
@@ -28,6 +28,8 @@
namespace VMVX {
static void buildVectorVMVXTransformPassPipeline(OpPassManager &passManager) {
+ passManager.nest<ModuleOp>().nest<FuncOp>().addPass(
+ createTypePropagationPass());
passManager.addPass(createLLVMCPULowerExecutableTargetPass());
OpPassManager &nestedModulePM = passManager.nest<ModuleOp>();
@@ -52,7 +54,7 @@
nestedModulePM.addNestedPass<FuncOp>(createConvertVectorToSCFPass());
nestedModulePM.addNestedPass<FuncOp>(createCanonicalizerPass());
nestedModulePM.addNestedPass<FuncOp>(arith::createArithmeticExpandOpsPass());
- nestedModulePM.addNestedPass<FuncOp>(createStdExpandOpsPass());
+ nestedModulePM.addNestedPass<FuncOp>(memref::createExpandOpsPass());
// Handle tensor-type constants.
nestedModulePM.addPass(createTensorConstantBufferizePass());
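
The VMVX pipeline change above nests the new type propagation pass under every function inside the inner module before lowering the executable target. A rough, self-contained sketch of that nesting idiom, substituting upstream canonicalization for IREE's `createTypePropagationPass` so the example builds on its own (the IREE pass names are taken from the diff, everything else here is an assumption):

```cpp
// Sketch of OpPassManager nesting; the canonicalizer stands in for the
// IREE-specific passes added above.
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"

void buildExampleNestedPipeline(mlir::OpPassManager &pm) {
  // Mirrors passManager.nest<ModuleOp>().nest<FuncOp>().addPass(...) above:
  // the pass runs on each func.func nested inside the inner module.
  pm.nest<mlir::ModuleOp>().nest<mlir::FuncOp>().addPass(
      mlir::createCanonicalizerPass());
}
```
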
diff --git a/iree/compiler/Dialect/Stream/Conversion/FlowToStream/ConvertFlowToStream.cpp b/iree/compiler/Dialect/Stream/Conversion/FlowToStream/ConvertFlowToStream.cpp
index 4055d49..8058f18 100644
--- a/iree/compiler/Dialect/Stream/Conversion/FlowToStream/ConvertFlowToStream.cpp
+++ b/iree/compiler/Dialect/Stream/Conversion/FlowToStream/ConvertFlowToStream.cpp
@@ -415,9 +415,9 @@
} // namespace
-void populateFlowToStreamConversionPatterns(
- MLIRContext *context, TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+void populateFlowToStreamConversionPatterns(MLIRContext *context,
+ TypeConverter &typeConverter,
+ RewritePatternSet &patterns) {
patterns
.insert<ConvertTensorReshapeOp, ConvertTensorSplatOp,
ConvertTensorCloneOp, ConvertTensorSliceOp, ConvertTensorUpdateOp,
@@ -427,9 +427,10 @@
patterns.insert<ConvertExecutableOp>(typeConverter, context);
}
-void populateFlowToStreamConversionPatterns(
- MLIRContext *context, ConversionTarget &conversionTarget,
- TypeConverter &typeConverter, OwningRewritePatternList &patterns) {
+void populateFlowToStreamConversionPatterns(MLIRContext *context,
+ ConversionTarget &conversionTarget,
+ TypeConverter &typeConverter,
+ RewritePatternSet &patterns) {
// Disallow all flow ops besides the ones we pass through (today).
// We don't have a stream-equivalent of several of the dispatch-level flow
// ops as the codegen backends directly touch them and so long as we have both
diff --git a/iree/compiler/Dialect/Stream/Conversion/FlowToStream/ConvertFlowToStream.h b/iree/compiler/Dialect/Stream/Conversion/FlowToStream/ConvertFlowToStream.h
index 0ea90e6..a451965 100644
--- a/iree/compiler/Dialect/Stream/Conversion/FlowToStream/ConvertFlowToStream.h
+++ b/iree/compiler/Dialect/Stream/Conversion/FlowToStream/ConvertFlowToStream.h
@@ -19,11 +19,11 @@
// |typeConverter|.
void populateFlowToStreamConversionPatterns(MLIRContext *context,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
void populateFlowToStreamConversionPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Dialect/Stream/Conversion/HALToStream/ConvertHALToStream.cpp b/iree/compiler/Dialect/Stream/Conversion/HALToStream/ConvertHALToStream.cpp
index d8128c8..474a186 100644
--- a/iree/compiler/Dialect/Stream/Conversion/HALToStream/ConvertHALToStream.cpp
+++ b/iree/compiler/Dialect/Stream/Conversion/HALToStream/ConvertHALToStream.cpp
@@ -197,7 +197,7 @@
void populateHALToStreamConversionPatterns(MLIRContext *context,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
typeConverter.addConversion(
[](IREE::HAL::BufferViewType type) { return type; });
patterns.insert<ConvertTensorImportOp>(typeConverter, context);
@@ -207,7 +207,7 @@
void populateHALToStreamConversionPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
conversionTarget.addDynamicallyLegalOp<IREE::HAL::TensorImportOp>(
[&](IREE::HAL::TensorImportOp op) {
return typeConverter.isLegal(op.source().getType()) &&
diff --git a/iree/compiler/Dialect/Stream/Conversion/HALToStream/ConvertHALToStream.h b/iree/compiler/Dialect/Stream/Conversion/HALToStream/ConvertHALToStream.h
index 71c2bff..099f421 100644
--- a/iree/compiler/Dialect/Stream/Conversion/HALToStream/ConvertHALToStream.h
+++ b/iree/compiler/Dialect/Stream/Conversion/HALToStream/ConvertHALToStream.h
@@ -19,11 +19,11 @@
// |typeConverter|.
void populateHALToStreamConversionPatterns(MLIRContext *context,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
void populateHALToStreamConversionPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertConstantOps.cpp b/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertConstantOps.cpp
index 3127319..6b11a05 100644
--- a/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertConstantOps.cpp
+++ b/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertConstantOps.cpp
@@ -52,7 +52,7 @@
void populateStandardConstantToStreamPatterns(
MLIRContext *context, ConversionTarget &conversionTarget,
- TypeConverter &typeConverter, OwningRewritePatternList &patterns) {
+ TypeConverter &typeConverter, RewritePatternSet &patterns) {
conversionTarget.addDynamicallyLegalOp<arith::ConstantOp>(
[](arith::ConstantOp op) { return !op.getType().isa<TensorType>(); });
diff --git a/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertStandardToStream.cpp b/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertStandardToStream.cpp
index 1498906..839a18a 100644
--- a/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertStandardToStream.cpp
+++ b/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertStandardToStream.cpp
@@ -19,15 +19,15 @@
void populateStandardConstantToStreamPatterns(
MLIRContext *context, ConversionTarget &conversionTarget,
- TypeConverter &typeConverter, OwningRewritePatternList &patterns);
+ TypeConverter &typeConverter, RewritePatternSet &patterns);
void populateStandardStructuralToStreamPatterns(
MLIRContext *context, ConversionTarget &conversionTarget,
- TypeConverter &typeConverter, OwningRewritePatternList &patterns);
+ TypeConverter &typeConverter, RewritePatternSet &patterns);
void populateStandardToStreamConversionPatterns(
MLIRContext *context, ConversionTarget &conversionTarget,
- TypeConverter &typeConverter, OwningRewritePatternList &patterns) {
+ TypeConverter &typeConverter, RewritePatternSet &patterns) {
typeConverter.addConversion([](IndexType type) { return type; });
typeConverter.addConversion([](IntegerType type) { return type; });
typeConverter.addConversion([](FloatType type) { return type; });
diff --git a/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertStandardToStream.h b/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertStandardToStream.h
index 12997c6..5c050c7 100644
--- a/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertStandardToStream.h
+++ b/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertStandardToStream.h
@@ -18,7 +18,7 @@
// provided |typeConverter|.
void populateStandardToStreamConversionPatterns(
MLIRContext *context, ConversionTarget &conversionTarget,
- TypeConverter &typeConverter, OwningRewritePatternList &patterns);
+ TypeConverter &typeConverter, RewritePatternSet &patterns);
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertStructuralOps.cpp b/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertStructuralOps.cpp
index 239c9a3..4dcfb6c 100644
--- a/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertStructuralOps.cpp
+++ b/iree/compiler/Dialect/Stream/Conversion/StandardToStream/ConvertStructuralOps.cpp
@@ -212,7 +212,7 @@
void populateStandardStructuralToStreamPatterns(
MLIRContext *context, ConversionTarget &conversionTarget,
- TypeConverter &typeConverter, OwningRewritePatternList &patterns) {
+ TypeConverter &typeConverter, RewritePatternSet &patterns) {
conversionTarget.addLegalOp<mlir::ModuleOp>();
// We need to rewrite certain types on operands/results so use the default
diff --git a/iree/compiler/Dialect/Stream/Conversion/UtilToStream/ConvertUtilToStream.cpp b/iree/compiler/Dialect/Stream/Conversion/UtilToStream/ConvertUtilToStream.cpp
index 22c6cdf..7993743 100644
--- a/iree/compiler/Dialect/Stream/Conversion/UtilToStream/ConvertUtilToStream.cpp
+++ b/iree/compiler/Dialect/Stream/Conversion/UtilToStream/ConvertUtilToStream.cpp
@@ -204,9 +204,9 @@
} // namespace
-void populateUtilToStreamConversionPatterns(
- MLIRContext *context, TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+void populateUtilToStreamConversionPatterns(MLIRContext *context,
+ TypeConverter &typeConverter,
+ RewritePatternSet &patterns) {
auto expansionState = std::make_shared<GlobalExpansionState>();
// TODO(#7432): add indirect global expansion support to streams.
patterns
@@ -214,9 +214,10 @@
expansionState, typeConverter, context);
}
-void populateUtilToStreamConversionPatterns(
- MLIRContext *context, ConversionTarget &conversionTarget,
- TypeConverter &typeConverter, OwningRewritePatternList &patterns) {
+void populateUtilToStreamConversionPatterns(MLIRContext *context,
+ ConversionTarget &conversionTarget,
+ TypeConverter &typeConverter,
+ RewritePatternSet &patterns) {
typeConverter.addConversion([=](IREE::Util::PtrType type,
SmallVectorImpl<Type> &resultTypes) {
// Expand pointers to tensors to [resource, sizeof resource] pointers.
diff --git a/iree/compiler/Dialect/Stream/Conversion/UtilToStream/ConvertUtilToStream.h b/iree/compiler/Dialect/Stream/Conversion/UtilToStream/ConvertUtilToStream.h
index c43912d..96004f8 100644
--- a/iree/compiler/Dialect/Stream/Conversion/UtilToStream/ConvertUtilToStream.h
+++ b/iree/compiler/Dialect/Stream/Conversion/UtilToStream/ConvertUtilToStream.h
@@ -19,11 +19,11 @@
// |typeConverter|.
void populateUtilToStreamConversionPatterns(MLIRContext *context,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
void populateUtilToStreamConversionPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Dialect/Stream/IR/StreamOpFolders.cpp b/iree/compiler/Dialect/Stream/IR/StreamOpFolders.cpp
index b84cce7..d7a0d9b 100644
--- a/iree/compiler/Dialect/Stream/IR/StreamOpFolders.cpp
+++ b/iree/compiler/Dialect/Stream/IR/StreamOpFolders.cpp
@@ -274,8 +274,8 @@
// stream.resource.alloc
//===----------------------------------------------------------------------===//
-void ResourceAllocOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void ResourceAllocOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): sink to first user.
}
@@ -283,8 +283,8 @@
// stream.resource.alloca
//===----------------------------------------------------------------------===//
-void ResourceAllocaOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void ResourceAllocaOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): sink to first user.
// TODO(benvanik): elide if only user is dealloc.
}
@@ -293,8 +293,8 @@
// stream.resource.dealloca
//===----------------------------------------------------------------------===//
-void ResourceDeallocaOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void ResourceDeallocaOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): move up to producer of timepoint.
}
@@ -341,8 +341,8 @@
} // namespace
-void ResourceSizeOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void ResourceSizeOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<SelectResourceSizeOp>(context);
}
@@ -350,8 +350,8 @@
// stream.resource.map
//===----------------------------------------------------------------------===//
-void ResourceMapOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void ResourceMapOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): fold subviews up into maps to limit range.
results.insert<ElideUnusedOp<ResourceMapOp>>(context);
}
@@ -360,8 +360,8 @@
// stream.resource.try_map
//===----------------------------------------------------------------------===//
-void ResourceTryMapOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void ResourceTryMapOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): fold subviews up into maps to limit range.
// TODO(benvanik): if mapping for staging then turn into a map?
results.insert<ElideUnusedOp<ResourceTryMapOp>>(context);
@@ -401,8 +401,8 @@
} // namespace
-void ResourceLoadOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void ResourceLoadOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): if staging resource comes from splat (through transfers)
// then pull splat value.
// TODO(benvanik): combine multiple loads from the same target if contiguous.
@@ -445,8 +445,8 @@
} // namespace
-void ResourceStoreOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void ResourceStoreOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): combine multiple stores to the same target if contiguous.
// TODO(benvanik): if value is a constant splat then turn into fill?
results.insert<FoldSubviewIntoStoreOp>(context);
@@ -587,8 +587,8 @@
} // namespace
-void ResourcePackOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void ResourcePackOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<PropagateResourcePackBaseOffset>(context);
results.insert<CanonicalizeResourcePackIntervals>(context);
}
@@ -663,8 +663,8 @@
} // namespace
-void ResourceSubviewOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void ResourceSubviewOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<FoldResourceSubviewOps>(context);
results.insert<SinkSubviewAcrossSelectOps>(context);
}
@@ -686,8 +686,8 @@
return {};
}
-void TensorImportOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TensorImportOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): check operand and dedupe imports.
}
@@ -708,8 +708,8 @@
return {};
}
-void TensorExportOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TensorExportOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): check operand and dedupe exports.
}
@@ -757,8 +757,8 @@
} // namespace
-void TensorConstantOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TensorConstantOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): if value is _mostly_ a splat, turn into splat + updates.
results.insert<TensorConstantToSplat>(context);
}
@@ -902,8 +902,8 @@
} // namespace
-void TensorSplatOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TensorSplatOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<ElideUnusedOp<TensorSplatOp>>(context);
results.insert<NarrowSplatPattern>(context);
}
@@ -938,8 +938,8 @@
} // namespace
-void TensorCloneOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TensorCloneOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): splat -> clone duplicates splat.
// TODO(benvanik): some way to reduce deep clone->clone->clone chains.
// TODO(benvanik): clone + slice => slice.
@@ -957,8 +957,8 @@
return {};
}
-void TensorSliceOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TensorSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): turn into a transfer if target_size == update_size and
// affinity/lifetime differ.
// TODO(benvanik): splat->slice -> splat.
@@ -995,8 +995,8 @@
} // namespace
-void TensorFillOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TensorFillOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): if target_size == sizeof(value) turn into splat.
results.insert<NarrowFillPattern>(context);
}
@@ -1010,8 +1010,8 @@
return {};
}
-void TensorUpdateOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TensorUpdateOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): turn into a transfer if target_size == update_size and
// affinity/lifetime differ.
// TODO(benvanik): turn into fill if source is a splat.
@@ -1021,8 +1021,8 @@
// stream.tensor.load
//===----------------------------------------------------------------------===//
-void TensorLoadOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TensorLoadOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): splat + load -> splat value.
// TODO(benvanik): clone + ex load -> slice (ranged) + load.
// TODO(benvanik): slice + ex load -> slice (ranged) + load.
@@ -1034,8 +1034,8 @@
// stream.tensor.store
//===----------------------------------------------------------------------===//
-void TensorStoreOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TensorStoreOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): if value is a constant splat then turn into fill.
// TODO(benvanik): combine multiple stores to the same target if contiguous.
}
@@ -1044,8 +1044,8 @@
// stream.async.alloca
//===----------------------------------------------------------------------===//
-void AsyncAllocaOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void AsyncAllocaOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): alloca (staging) -> non-staging change to target.
// TODO(benvanik): alloca (non-staging) -> staging change to target.
// TODO(benvanik): sink to first user.
@@ -1079,8 +1079,8 @@
} // namespace
-void AsyncConstantOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void AsyncConstantOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<ConvertSplatConstantsIntoSplats>(context);
// TODO(benvanik): if value is _mostly_ a splat, turn into splat + updates.
}
@@ -1149,8 +1149,8 @@
} // namespace
-void AsyncSplatOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void AsyncSplatOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(#6972): find splat+update-from and turn into fill.
// TODO(#6972): find splat+copy-from and turn into fill.
// TODO(#6972): find splat+update-into and turn into alloca+fill+update.
@@ -1202,8 +1202,8 @@
} // namespace
-void AsyncCloneOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void AsyncCloneOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): some way to reduce deep clone->clone->clone chains.
results.insert<PropagateClonableOps>(context);
results.insert<ElideUnusedOp<AsyncCloneOp>>(context);
@@ -1247,8 +1247,8 @@
} // namespace
-void AsyncSliceOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void AsyncSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): turn into a transfer if target_size == update_size and
// affinity/lifetime differ.
results.insert<PropagateSplatsThroughSlices>(context);
@@ -1285,7 +1285,7 @@
} // namespace
-void AsyncFillOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void AsyncFillOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<FlattenFullFillToSplat>(context);
results.insert<ElideUnusedOp<AsyncFillOp>>(context);
@@ -1369,8 +1369,8 @@
} // namespace
-void AsyncUpdateOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void AsyncUpdateOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): turn into a transfer if target_size == update_size and
// affinity/lifetime differ.
// TODO(#6972): updates into splats could become alloca + fill exclusive
@@ -1412,7 +1412,7 @@
} // namespace
-void AsyncCopyOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void AsyncCopyOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<AsyncCopyFullSourceToUpdate>(context);
results.insert<ElideUnusedOp<AsyncCopyOp>>(context);
@@ -1452,8 +1452,8 @@
} // namespace
-void AsyncTransferOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void AsyncTransferOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): staging propagation (fill of staging -> fill on device).
results.insert<RedundantTransferElision>(context);
results.insert<ElideUnusedOp<AsyncTransferOp>>(context);
@@ -1463,7 +1463,7 @@
// stream.async.load
//===----------------------------------------------------------------------===//
-void AsyncLoadOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void AsyncLoadOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
// TODO(benvanik): splat + load -> splat value.
// TODO(benvanik): clone + ex load -> slice (ranged) + load.
@@ -1476,8 +1476,8 @@
// stream.async.store
//===----------------------------------------------------------------------===//
-void AsyncStoreOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void AsyncStoreOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): if value is a constant splat then turn into fill.
// TODO(benvanik): combine multiple stores to the same target if contiguous.
}
@@ -1486,8 +1486,8 @@
// stream.async.dispatch
//===----------------------------------------------------------------------===//
-void AsyncDispatchOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void AsyncDispatchOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): nothing? maybe tied type/lifetime updates?
results.insert<ElideUnusedOp<AsyncDispatchOp>>(context);
}
@@ -1670,8 +1670,8 @@
} // namespace
-void AsyncExecuteOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void AsyncExecuteOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<ElideImmediateAsyncExecuteWaits>(context);
results.insert<ChainAsyncExecuteWaits>(context);
results.insert<CloneCapturedAsyncExecuteSubviewOps>(context);
@@ -1686,8 +1686,8 @@
// stream.async.concurrent
//===----------------------------------------------------------------------===//
-void AsyncConcurrentOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void AsyncConcurrentOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<IREE::Util::ClosureOptimizationPattern<AsyncConcurrentOp>>(
context);
results.insert<TieRegionResults<AsyncConcurrentOp>>(context);
@@ -1729,7 +1729,7 @@
} // namespace
-void CmdFlushOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmdFlushOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<FoldSubviewsIntoCmdFlushOp>(context);
}
@@ -1770,8 +1770,8 @@
} // namespace
-void CmdInvalidateOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmdInvalidateOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<FoldSubviewsIntoCmdInvalidateOp>(context);
}
@@ -1810,8 +1810,8 @@
} // namespace
-void CmdDiscardOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmdDiscardOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<FoldSubviewsIntoCmdDiscardOp>(context);
}
@@ -1850,7 +1850,7 @@
} // namespace
-void CmdFillOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmdFillOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<FoldSubviewsIntoCmdFillOp>(context);
}
@@ -1906,7 +1906,7 @@
} // namespace
-void CmdCopyOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmdCopyOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<FoldSubviewsIntoCmdCopyOp>(context);
}
@@ -1967,8 +1967,8 @@
} // namespace
-void CmdDispatchOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmdDispatchOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<FoldSubviewsIntoCmdDispatchOp>(context);
}
@@ -2116,8 +2116,8 @@
} // namespace
-void CmdExecuteOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmdExecuteOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<ElideImmediateCmdExecuteWaits>(context);
results.insert<ChainCmdExecuteWaits>(context);
results.insert<CloneCapturedCmdExecuteSubviewOps>(context);
@@ -2152,7 +2152,7 @@
} // namespace
-void CmdSerialOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmdSerialOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<ElideEmptyCmdRegionOp<CmdSerialOp>>(context);
}
@@ -2161,8 +2161,8 @@
// stream.cmd.concurrent
//===----------------------------------------------------------------------===//
-void CmdConcurrentOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmdConcurrentOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<ElideEmptyCmdRegionOp<CmdConcurrentOp>>(context);
}
@@ -2283,8 +2283,8 @@
} // namespace
-void TimepointJoinOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TimepointJoinOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): elide operands if timepoint must be satisfied in use-def.
// TODO(benvanik): sink and pull in other timepoints (join on all needed).
results.insert<ElideImmediateTimepointJoinOperands>(context);
@@ -2537,8 +2537,8 @@
} // namespace
-void TimepointAwaitOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TimepointAwaitOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): elide waits if timepoint must be satisfied in use-def.
results.insert<ElideImmediateAwaits>(context);
results.insert<SinkAwaitToFirstConsumer>(context);
diff --git a/iree/compiler/Dialect/Stream/Transforms/ConvertToStream.cpp b/iree/compiler/Dialect/Stream/Transforms/ConvertToStream.cpp
index c8821d7..3d11c9c 100644
--- a/iree/compiler/Dialect/Stream/Transforms/ConvertToStream.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/ConvertToStream.cpp
@@ -193,7 +193,7 @@
TypeConverter typeConverter;
ConversionTarget conversionTarget(getContext());
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
// Always allow lowering target dialects and reasonable types.
conversionTarget.addLegalDialect<IREE::Stream::StreamDialect>();
diff --git a/iree/compiler/Dialect/Stream/Transforms/EncodeTensors.cpp b/iree/compiler/Dialect/Stream/Transforms/EncodeTensors.cpp
index 35d22f1..832b39d 100644
--- a/iree/compiler/Dialect/Stream/Transforms/EncodeTensors.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/EncodeTensors.cpp
@@ -49,13 +49,6 @@
return success();
}
-// Returns an 8-bit aligned element byte count.
-static int64_t getElementByteSize(Type elementType) {
- int64_t bitCount = elementType.getIntOrFloatBitWidth();
- int64_t byteCount = (bitCount + 8 - 1) / 8;
- return byteCount;
-}
-
// Aligns an element type to a byte-aligned power of 2 bit width.
//
// Examples:
@@ -69,7 +62,8 @@
if (!elementType) return originalType;
// Align the element type to a power of two byte size.
- auto alignedBitWidth = getElementByteSize(elementType) * 8;
+ auto alignedBitWidth =
+ IREE::Util::getRoundedElementByteWidth(elementType) * 8;
if (elementType.getIntOrFloatBitWidth() == alignedBitWidth) {
// Already aligned.
return originalType;
@@ -155,7 +149,8 @@
loc,
calculateElementOffset(loc, tensorType, dynamicDims, indices, rewriter),
rewriter.create<arith::ConstantIndexOp>(
- loc, getElementByteSize(tensorType.getElementType())));
+ loc,
+ IREE::Util::getRoundedElementByteWidth(tensorType.getElementType())));
}
//===----------------------------------------------------------------------===//
@@ -220,7 +215,8 @@
}
// Dense: element count * element size.
- auto elementByteSize = getElementByteSize(encodingType.getElementType());
+ auto elementByteSize =
+ IREE::Util::getRoundedElementByteWidth(encodingType.getElementType());
auto totalSize = calculateElementCount(
op.getLoc(), encodingType, encodingDims, elementByteSize, rewriter);
rewriter.replaceOp(op, totalSize);
@@ -278,7 +274,8 @@
// Dense:
auto resultSize = calculateElementCount(
op.getLoc(), alignedType, resultDims,
- getElementByteSize(alignedType.getElementType()), rewriter);
+ IREE::Util::getRoundedElementByteWidth(alignedType.getElementType()),
+ rewriter);
rewriter.replaceOpWithNewOp<IREE::Stream::AsyncConstantOp>(
op, op.result().getType(), encodedAttr, resultSize, op.affinityAttr());
@@ -567,7 +564,7 @@
}
void runOnOperation() override {
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
patterns.insert<
EncodeTensorImportOp, EncodeTensorExportOp, EncodeTensorSizeOfOp,
EncodeTensorConstantOp, EncodeTensorSplatOp, EncodeTensorCloneOp,
@@ -703,7 +700,7 @@
}
void runOnOperation() override {
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
patterns.insert<EncodeBindingSubspanOp, EncodeDispatchTensorLoadOp,
EncodeDispatchTensorStoreOp>(&getContext());
FrozenRewritePatternSet frozenPatterns(std::move(patterns));
diff --git a/iree/compiler/Dialect/Stream/Transforms/FoldUniformOperands.cpp b/iree/compiler/Dialect/Stream/Transforms/FoldUniformOperands.cpp
index 034d569..1157df2 100644
--- a/iree/compiler/Dialect/Stream/Transforms/FoldUniformOperands.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/FoldUniformOperands.cpp
@@ -127,13 +127,11 @@
});
// Replace uses of the duplicate arguments with their base arguments.
- SmallVector<unsigned> deadArgs;
llvm::BitVector deadArgMap(funcOp.getNumArguments());
for (auto replacement : llvm::enumerate(argReplacementMap)) {
unsigned deadIdx = replacement.index();
unsigned liveIdx = replacement.value();
if (deadIdx == liveIdx) continue;
- deadArgs.push_back(deadIdx);
deadArgMap.set(deadIdx);
entryBlock.getArgument(deadIdx).replaceAllUsesWith(
entryBlock.getArgument(liveIdx));
@@ -150,7 +148,7 @@
// Update the function signature.
// Lame we need two data structures to do this.
- funcOp.setType(funcOp.getTypeWithoutArgsAndResults(deadArgs, {}));
+ funcOp.setType(funcOp.getTypeWithoutArgsAndResults(deadArgMap, {}));
entryBlock.eraseArguments(
[&](BlockArgument arg) { return deadArgMap.test(arg.getArgNumber()); });
}
@@ -218,13 +216,11 @@
IREE::Stream::CmdDispatchOp::makeOperandToArgMap(funcOp);
// Replace uses of the uniform arguments with a constant value.
- SmallVector<unsigned> deadArgs;
llvm::BitVector deadArgMap(funcOp.getNumArguments());
auto builder = OpBuilder::atBlockBegin(&entryBlock);
for (auto operandIdx : uniformOperandMap.set_bits()) {
unsigned argIdx = operandToArgMap[operandIdx];
auto arg = entryBlock.getArgument(argIdx);
- deadArgs.push_back(argIdx);
deadArgMap.set(argIdx);
auto constantOp = builder.create<arith::ConstantOp>(
builder.getFusedLoc(operandLocs[operandIdx]),
@@ -243,7 +239,7 @@
}
// Fixup function signature.
- funcOp.setType(funcOp.getTypeWithoutArgsAndResults(deadArgs, {}));
+ funcOp.setType(funcOp.getTypeWithoutArgsAndResults(deadArgMap, {}));
entryBlock.eraseArguments(
[&](BlockArgument arg) { return deadArgMap.test(arg.getArgNumber()); });
}
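
This hunk (and the matching one in SpecializeDispatches.cpp below) drops the parallel `SmallVector<unsigned>` of dead argument indices and keys the signature rewrite off a single `llvm::BitVector`, which is what the updated `getTypeWithoutArgsAndResults` call now consumes. A minimal standalone sketch of the BitVector bookkeeping, independent of the MLIR function APIs:

```cpp
// Sketch: tracking "dead" argument slots with llvm::BitVector, the single
// structure the updated signature-rewriting code is built around.
#include "llvm/ADT/BitVector.h"
#include <cassert>

int main() {
  unsigned numArgs = 6;
  llvm::BitVector deadArgMap(numArgs);  // all bits start cleared (live)

  // Suppose arguments 1 and 4 were found to duplicate earlier arguments.
  deadArgMap.set(1);
  deadArgMap.set(4);

  // Membership tests need no second index vector.
  assert(deadArgMap.test(1) && !deadArgMap.test(0));

  // Iterate the dead slots, e.g. to erase them from a signature.
  for (unsigned idx : deadArgMap.set_bits()) {
    (void)idx;  // erase argument `idx` here
  }
  return 0;
}
```
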
diff --git a/iree/compiler/Dialect/Stream/Transforms/Passes.cpp b/iree/compiler/Dialect/Stream/Transforms/Passes.cpp
index bd69d45..7ecf05a 100644
--- a/iree/compiler/Dialect/Stream/Transforms/Passes.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/Passes.cpp
@@ -111,9 +111,8 @@
IREE::Stream::createEncodeHostTensorsPass());
passManager.addNestedPass<mlir::FuncOp>(
IREE::Stream::createEncodeHostTensorsPass());
- // TODO(ravishankarm): enable when codegen can handle extui/trunc ops.
- // passManager.addNestedPass<IREE::Stream::ExecutableOp>(
- // IREE::Stream::createEncodeDeviceTensorsPass());
+ passManager.addNestedPass<IREE::Stream::ExecutableOp>(
+ IREE::Stream::createEncodeDeviceTensorsPass());
// Expand builtins to dispatches. This may introduce new executables.
passManager.addPass(IREE::Stream::createMaterializeBuiltinsPass());
diff --git a/iree/compiler/Dialect/Stream/Transforms/RefineUsage.cpp b/iree/compiler/Dialect/Stream/Transforms/RefineUsage.cpp
index fca1be7..1a4e526 100644
--- a/iree/compiler/Dialect/Stream/Transforms/RefineUsage.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/RefineUsage.cpp
@@ -325,7 +325,7 @@
static void insertUsageRefinementPatterns(MLIRContext *context,
ResourceUsageAnalysis &analysis,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
patterns.insert<ApplyInitializerOp, ApplyFuncOp>(context, analysis);
patterns.insert<ApplyGenericOp<IREE::Util::DoNotOptimizeOp>,
ApplyGenericOp<mlir::SelectOp>, ApplyGenericOp<mlir::CallOp>>(
@@ -376,7 +376,7 @@
}
// Query and apply analysis results to all resources in the program.
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
insertUsageRefinementPatterns(&getContext(), analysis, patterns);
FrozenRewritePatternSet frozenPatterns(std::move(patterns));
if (failed(applyPatternsAndFoldGreedily(moduleOp, frozenPatterns))) {
diff --git a/iree/compiler/Dialect/Stream/Transforms/ScheduleExecution.cpp b/iree/compiler/Dialect/Stream/Transforms/ScheduleExecution.cpp
index 1ce64d2..cff4868 100644
--- a/iree/compiler/Dialect/Stream/Transforms/ScheduleExecution.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/ScheduleExecution.cpp
@@ -303,7 +303,7 @@
// Cleanup the dead ops.
// TODO(benvanik): less work here - maybe no patterns to just force folding?
- OwningRewritePatternList patterns(context);
+ RewritePatternSet patterns(context);
for (auto *dialect : context->getLoadedDialects()) {
dialect->getCanonicalizationPatterns(patterns);
}
diff --git a/iree/compiler/Dialect/Stream/Transforms/SpecializeDispatches.cpp b/iree/compiler/Dialect/Stream/Transforms/SpecializeDispatches.cpp
index 338b3a3..c974919 100644
--- a/iree/compiler/Dialect/Stream/Transforms/SpecializeDispatches.cpp
+++ b/iree/compiler/Dialect/Stream/Transforms/SpecializeDispatches.cpp
@@ -211,14 +211,12 @@
}
// Fixup function signature.
- SmallVector<unsigned> deadArgs;
llvm::BitVector deadArgMap(funcOp.getNumArguments() + 1);
for (auto operandIdx : constantTable.coveredOperands.set_bits()) {
unsigned argIdx = operandToArgMap[operandIdx];
- deadArgs.push_back(argIdx);
deadArgMap.set(argIdx);
}
- funcOp.setType(funcOp.getTypeWithoutArgsAndResults(deadArgs, {}));
+ funcOp.setType(funcOp.getTypeWithoutArgsAndResults(deadArgMap, {}));
funcOp.setType(funcOp.getTypeWithArgsAndResults(
{funcOp.getNumArguments()}, {builder.getIndexType()}, {}, {}));
entryBlock.eraseArguments(
diff --git a/iree/compiler/Dialect/Stream/Transforms/test/encode_device_tensors.mlir b/iree/compiler/Dialect/Stream/Transforms/test/encode_device_tensors.mlir
index d3e905c..6923325 100644
--- a/iree/compiler/Dialect/Stream/Transforms/test/encode_device_tensors.mlir
+++ b/iree/compiler/Dialect/Stream/Transforms/test/encode_device_tensors.mlir
@@ -64,3 +64,23 @@
}
}
}
+
+// -----
+
+// CHECK-LABEL: @convert_load_i33
+stream.executable private @convert_load_i33 {
+ stream.executable.export public @dispatch
+ builtin.module {
+ func @dispatch(%arg0: !stream.binding) {
+ %c0 = arith.constant 0 : index
+ // CHECK: %[[BINDING:.+]] = stream.binding.subspan {{.+}} -> !flow.dispatch.tensor<readonly:4xi64>
+ %binding = stream.binding.subspan %arg0[%c0] : !stream.binding -> !flow.dispatch.tensor<readonly:4xi33>
+ // CHECK: %[[TILE_I8:.+]] = flow.dispatch.tensor.load %[[BINDING]], {{.+}} : !flow.dispatch.tensor<readonly:4xi64> -> tensor<?xi64>
+ // CHECK: %[[TILE_I1:.+]] = arith.trunci %[[TILE_I8]] : tensor<?xi64> to tensor<?xi33>
+ %tile = flow.dispatch.tensor.load %binding, offsets = [0], sizes = [4], strides = [1] : !flow.dispatch.tensor<readonly:4xi33> -> tensor<?xi33>
+ // CHECK: do_not_optimize(%[[TILE_I1]])
+ util.do_not_optimize(%tile) : tensor<?xi33>
+ return
+ }
+ }
+}
\ No newline at end of file
diff --git a/iree/compiler/Dialect/Util/Conversion/ConversionPatterns.cpp b/iree/compiler/Dialect/Util/Conversion/ConversionPatterns.cpp
index 029f0ee..039f4f3 100644
--- a/iree/compiler/Dialect/Util/Conversion/ConversionPatterns.cpp
+++ b/iree/compiler/Dialect/Util/Conversion/ConversionPatterns.cpp
@@ -26,7 +26,7 @@
void populateUtilConversionPatterns(MLIRContext *context,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
patterns.insert<GenericConvertTypesPattern<IREE::Util::DoNotOptimizeOp>>(
typeConverter, context);
@@ -56,7 +56,7 @@
void populateUtilConversionPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
addGenericLegalOp<IREE::Util::DoNotOptimizeOp>(conversionTarget,
typeConverter);
addGenericLegalOp<IREE::Util::ListCreateOp>(conversionTarget, typeConverter);
diff --git a/iree/compiler/Dialect/Util/Conversion/ConversionPatterns.h b/iree/compiler/Dialect/Util/Conversion/ConversionPatterns.h
index db83bb9..aada228 100644
--- a/iree/compiler/Dialect/Util/Conversion/ConversionPatterns.h
+++ b/iree/compiler/Dialect/Util/Conversion/ConversionPatterns.h
@@ -54,11 +54,11 @@
// |typeConverter|.
void populateUtilConversionPatterns(MLIRContext *context,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
void populateUtilConversionPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Dialect/Util/IR/ClosureOpUtils.cpp b/iree/compiler/Dialect/Util/IR/ClosureOpUtils.cpp
index 8b41693..6336a75 100644
--- a/iree/compiler/Dialect/Util/IR/ClosureOpUtils.cpp
+++ b/iree/compiler/Dialect/Util/IR/ClosureOpUtils.cpp
@@ -137,7 +137,8 @@
// Smallish constants are worth moving inside.
auto shapedType = constantType.cast<ShapedType>();
uint64_t estimatedByteLength =
- (shapedType.getNumElements() * shapedType.getElementTypeBitWidth()) / 8;
+ shapedType.getNumElements() *
+ getRoundedElementByteWidth(shapedType.getElementType());
return denseAttr.isSplat() ||
estimatedByteLength <= kMaxInlinedConstantBytes;
} else if (constantType.isIntOrIndexOrFloat()) {
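
The inlining-size heuristic above previously computed `(numElements * bitWidth) / 8`, which under-counts sub-byte element types; charging `getRoundedElementByteWidth` per element (see the sketch after the UtilTypes.h hunk below) makes the estimate whole-byte and power-of-two aligned per element. A quick worked comparison with hypothetical shapes, not taken from this PR:

```cpp
// Worked comparison (hypothetical shapes; per-element byte widths follow
// the getRoundedElementByteWidth semantics documented in UtilTypes.h).
#include <cstdint>
#include <cstdio>

int main() {
  // tensor<64xi1>:  old (64 * 1) / 8 = 8 bytes;   new 64 * 1 = 64 bytes.
  uint64_t oldI1 = (64 * 1) / 8, newI1 = 64 * 1;
  // tensor<16xi33>: old (16 * 33) / 8 = 66 bytes; new 16 * 8 = 128 bytes.
  uint64_t oldI33 = (16 * 33) / 8, newI33 = 16 * 8;
  std::printf("i1 x64 : old=%llu new=%llu\n",
              (unsigned long long)oldI1, (unsigned long long)newI1);
  std::printf("i33x16 : old=%llu new=%llu\n",
              (unsigned long long)oldI33, (unsigned long long)newI33);
  return 0;
}
```
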
diff --git a/iree/compiler/Dialect/Util/IR/UtilOpFolders.cpp b/iree/compiler/Dialect/Util/IR/UtilOpFolders.cpp
index 8f5f633..1b74216 100644
--- a/iree/compiler/Dialect/Util/IR/UtilOpFolders.cpp
+++ b/iree/compiler/Dialect/Util/IR/UtilOpFolders.cpp
@@ -145,13 +145,13 @@
} // namespace
-void RangeMinOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void RangeMinOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<ExpandSimpleRangeOp<RangeMinOp, arith::MinUIOp>>(context);
results.insert<SimplifyUniformRangeOp<RangeMinOp, INT64_MAX, xmin>>(context);
}
-void RangeMaxOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void RangeMaxOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<ExpandSimpleRangeOp<RangeMaxOp, arith::MaxUIOp>>(context);
results.insert<SimplifyUniformRangeOp<RangeMaxOp, INT64_MIN, xmax>>(context);
@@ -299,8 +299,8 @@
} // namespace
-void RangeExtentsOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void RangeExtentsOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
// TODO(benvanik): extract ranges with common offsets or lengths and move them
// to min/max ops where they have a better chance of folding.
results.insert<FoldConstantRanges>(context);
@@ -407,7 +407,7 @@
} // namespace
void UnfoldableConstantOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<ExpandUnfoldableConstantOp>(context);
}
@@ -464,13 +464,13 @@
} // namespace
-void InitializerOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void InitializerOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<DropEmptyInitializerOp, InlineConstantGlobalInitializer>(
context);
}
-void GlobalOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void GlobalOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {}
namespace {
@@ -496,7 +496,7 @@
} // namespace
void GlobalLoadIndirectOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<PropagateGlobalLoadAddress>(context);
}
@@ -524,8 +524,8 @@
} // namespace
-void GlobalStoreOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void GlobalStoreOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<EraseUnusedGlobalStoreOp>(context);
}
@@ -552,7 +552,7 @@
} // namespace
void GlobalStoreIndirectOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<PropagateGlobalStoreAddress>(context);
}
diff --git a/iree/compiler/Dialect/Util/IR/UtilTypes.h b/iree/compiler/Dialect/Util/IR/UtilTypes.h
index 6527a37..f68318c 100644
--- a/iree/compiler/Dialect/Util/IR/UtilTypes.h
+++ b/iree/compiler/Dialect/Util/IR/UtilTypes.h
@@ -231,9 +231,12 @@
// getRoundedElementByteWidth(i23) = 4
// getRoundedElementByteWidth(i32) = 4
// getRoundedElementByteWidth(bf16) = 2
+// getRoundedElementByteWidth(i33) = 8
static inline int32_t getRoundedElementByteWidth(Type type) {
+ unsigned bitsUnaligned = type.getIntOrFloatBitWidth();
+ assert(bitsUnaligned > 0 && "0-width types unsupported");
// Round up to 8-bit aligned bytes.
- unsigned byteAligned = (type.getIntOrFloatBitWidth() + 8 - 1) / 8;
+ unsigned byteAligned = (bitsUnaligned + 8 - 1) / 8;
// Round up to the next power of two (unless already a power of two).
return llvm::PowerOf2Ceil(byteAligned);
}
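
The header now documents the i33 case and asserts against zero-width types. A tiny standalone check of the documented values, re-deriving the rounding with the same `llvm::PowerOf2Ceil` primitive the helper uses (the expected results are copied from the comment above):

```cpp
// Sketch: reproduces the helper's rounding behaviour for the documented cases.
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

static int32_t roundedElementByteWidth(unsigned bitWidth) {
  assert(bitWidth > 0 && "0-width types unsupported");
  unsigned byteAligned = (bitWidth + 8 - 1) / 8;                   // bits -> whole bytes
  return static_cast<int32_t>(llvm::PowerOf2Ceil(byteAligned));    // next power of two
}

int main() {
  assert(roundedElementByteWidth(1) == 1);   // i1
  assert(roundedElementByteWidth(23) == 4);  // i23
  assert(roundedElementByteWidth(32) == 4);  // i32
  assert(roundedElementByteWidth(16) == 2);  // bf16
  assert(roundedElementByteWidth(33) == 8);  // i33
  return 0;
}
```
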
diff --git a/iree/compiler/Dialect/Util/Transforms/ApplyPatterns.cpp b/iree/compiler/Dialect/Util/Transforms/ApplyPatterns.cpp
index 69d41cb..966724d 100644
--- a/iree/compiler/Dialect/Util/Transforms/ApplyPatterns.cpp
+++ b/iree/compiler/Dialect/Util/Transforms/ApplyPatterns.cpp
@@ -36,7 +36,7 @@
void runOnOperation() override {
auto *context = &getContext();
- OwningRewritePatternList patterns(context);
+ RewritePatternSet patterns(context);
for (auto *dialect : context->getLoadedDialects()) {
dialect->getCanonicalizationPatterns(patterns);
diff --git a/iree/compiler/Dialect/Util/Transforms/FoldGlobals.cpp b/iree/compiler/Dialect/Util/Transforms/FoldGlobals.cpp
index 2ee4692..6e03236 100644
--- a/iree/compiler/Dialect/Util/Transforms/FoldGlobals.cpp
+++ b/iree/compiler/Dialect/Util/Transforms/FoldGlobals.cpp
@@ -401,7 +401,7 @@
void runOnOperation() override {
auto *context = &getContext();
- OwningRewritePatternList patterns(context);
+ RewritePatternSet patterns(context);
for (auto *dialect : context->getLoadedDialects()) {
dialect->getCanonicalizationPatterns(patterns);
diff --git a/iree/compiler/Dialect/Util/Transforms/Patterns.cpp b/iree/compiler/Dialect/Util/Transforms/Patterns.cpp
index 0a493c3..42dbe1f 100644
--- a/iree/compiler/Dialect/Util/Transforms/Patterns.cpp
+++ b/iree/compiler/Dialect/Util/Transforms/Patterns.cpp
@@ -293,8 +293,7 @@
} // namespace
-void populateCommonPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns) {
+void populateCommonPatterns(MLIRContext *context, RewritePatternSet &patterns) {
context->getOrLoadDialect<IREE::Util::UtilDialect>()
->getCanonicalizationPatterns(patterns);
diff --git a/iree/compiler/Dialect/Util/Transforms/Patterns.h b/iree/compiler/Dialect/Util/Transforms/Patterns.h
index 26becd8..6669e36 100644
--- a/iree/compiler/Dialect/Util/Transforms/Patterns.h
+++ b/iree/compiler/Dialect/Util/Transforms/Patterns.h
@@ -17,8 +17,7 @@
// Populates |patterns| with some risky/IREE-specific canonicalization patterns.
// Some of these apply to other dialects (such as std/builtin) and could be
// upstreamed after some more exhaustive investigation.
-void populateCommonPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns);
+void populateCommonPatterns(MLIRContext *context, RewritePatternSet &patterns);
} // namespace Util
} // namespace IREE
diff --git a/iree/compiler/Dialect/VM/Analysis/RegisterAllocation.cpp b/iree/compiler/Dialect/VM/Analysis/RegisterAllocation.cpp
index 7302c60..bcd0370 100644
--- a/iree/compiler/Dialect/VM/Analysis/RegisterAllocation.cpp
+++ b/iree/compiler/Dialect/VM/Analysis/RegisterAllocation.cpp
@@ -115,7 +115,7 @@
rangeAvailable &= !intRegisters.test(ordinal);
}
if (rangeAvailable) {
- return ordinalStart;
+ return static_cast<int>(ordinalStart);
}
ordinalStart = intRegisters.find_next_unset(ordinalEnd);
}
@@ -124,7 +124,7 @@
Optional<Register> allocateRegister(Type type) {
if (type.isIntOrFloat()) {
- size_t byteWidth = type.getIntOrFloatBitWidth() / 8;
+ size_t byteWidth = IREE::Util::getRoundedElementByteWidth(type);
auto ordinalStartOr = findFirstUnsetIntOrdinalSpan(byteWidth);
if (!ordinalStartOr.hasValue()) {
return llvm::None;
diff --git a/iree/compiler/Dialect/VM/Analysis/RegisterAllocation.h b/iree/compiler/Dialect/VM/Analysis/RegisterAllocation.h
index 9e24814..8ae7eff 100644
--- a/iree/compiler/Dialect/VM/Analysis/RegisterAllocation.h
+++ b/iree/compiler/Dialect/VM/Analysis/RegisterAllocation.h
@@ -49,7 +49,7 @@
assert(type.getIntOrFloatBitWidth() % 8 == 0 &&
"require 8-bit aligned value types");
assert(ordinal < kInt32RegisterCount);
- size_t byteWidth = type.getIntOrFloatBitWidth() / 8;
+ size_t byteWidth = IREE::Util::getRoundedElementByteWidth(type);
return {/*isRef=*/false, /*isMove=*/false, byteWidth, ordinal};
}
diff --git a/iree/compiler/Dialect/VM/Conversion/ConversionDialectInterface.h b/iree/compiler/Dialect/VM/Conversion/ConversionDialectInterface.h
index d84a23e..465b199 100644
--- a/iree/compiler/Dialect/VM/Conversion/ConversionDialectInterface.h
+++ b/iree/compiler/Dialect/VM/Conversion/ConversionDialectInterface.h
@@ -41,7 +41,7 @@
// |importSymbols| contains all vm.imports that have been queried from all
// used dialects, not just this dialect.
virtual void populateVMConversionPatterns(
- SymbolTable &importSymbols, OwningRewritePatternList &patterns,
+ SymbolTable &importSymbols, RewritePatternSet &patterns,
TypeConverter &typeConverter) const = 0;
// Walks all child attributes defined within a custom dialect attribute.
diff --git a/iree/compiler/Dialect/VM/Conversion/MathToVM/ConvertMathToVM.cpp b/iree/compiler/Dialect/VM/Conversion/MathToVM/ConvertMathToVM.cpp
index 0f3fc00..1c6fa4b 100644
--- a/iree/compiler/Dialect/VM/Conversion/MathToVM/ConvertMathToVM.cpp
+++ b/iree/compiler/Dialect/VM/Conversion/MathToVM/ConvertMathToVM.cpp
@@ -84,7 +84,7 @@
void populateMathToVMPatterns(MLIRContext *context,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
patterns
.insert<UnaryArithmeticOpConversion<math::AtanOp, IREE::VM::AtanF32Op,
IREE::VM::AtanF64Op>,
diff --git a/iree/compiler/Dialect/VM/Conversion/MathToVM/ConvertMathToVM.h b/iree/compiler/Dialect/VM/Conversion/MathToVM/ConvertMathToVM.h
index c29acbe..6bc6f9e 100644
--- a/iree/compiler/Dialect/VM/Conversion/MathToVM/ConvertMathToVM.h
+++ b/iree/compiler/Dialect/VM/Conversion/MathToVM/ConvertMathToVM.h
@@ -16,7 +16,7 @@
// Appends math dialect to vm dialect patterns to the given pattern list.
void populateMathToVMPatterns(MLIRContext *context,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Dialect/VM/Conversion/MemRefToVM/ConvertMemRefToVM.cpp b/iree/compiler/Dialect/VM/Conversion/MemRefToVM/ConvertMemRefToVM.cpp
index defc7c7..cadcaf5 100644
--- a/iree/compiler/Dialect/VM/Conversion/MemRefToVM/ConvertMemRefToVM.cpp
+++ b/iree/compiler/Dialect/VM/Conversion/MemRefToVM/ConvertMemRefToVM.cpp
@@ -223,7 +223,7 @@
void populateMemRefToVMPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
conversionTarget.addIllegalDialect<memref::MemRefDialect>();
typeConverter.addConversion([&](MemRefType type) -> llvm::Optional<Type> {
diff --git a/iree/compiler/Dialect/VM/Conversion/MemRefToVM/ConvertMemRefToVM.h b/iree/compiler/Dialect/VM/Conversion/MemRefToVM/ConvertMemRefToVM.h
index 47df742..084f7f7 100644
--- a/iree/compiler/Dialect/VM/Conversion/MemRefToVM/ConvertMemRefToVM.h
+++ b/iree/compiler/Dialect/VM/Conversion/MemRefToVM/ConvertMemRefToVM.h
@@ -17,7 +17,7 @@
void populateMemRefToVMPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.cpp b/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.cpp
index e5bb31f..a3bca87 100644
--- a/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.cpp
+++ b/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.cpp
@@ -824,7 +824,7 @@
void populateStandardToVMPatterns(MLIRContext *context,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
patterns.insert<AssertOpConversion, BranchOpConversion, CallOpConversion,
CmpI32OpConversion, CmpI64OpConversion, CmpF32OpConversion,
CondBranchOpConversion, ModuleOpConversion, FuncOpConversion,
diff --git a/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.h b/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.h
index c2f5238..f4542a8 100644
--- a/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.h
+++ b/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVM.h
@@ -16,7 +16,7 @@
// Appends standard dialect to vm dialect patterns to the given pattern list.
void populateStandardToVMPatterns(MLIRContext *context,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVMTest.cpp b/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVMTest.cpp
index dc755da..f0fe130 100644
--- a/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVMTest.cpp
+++ b/iree/compiler/Dialect/VM/Conversion/StandardToVM/ConvertStandardToVMTest.cpp
@@ -42,7 +42,7 @@
IREE::VM::TypeConverter typeConverter(
IREE::VM::TargetOptions::FromFlags::get());
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
populateStandardToVMPatterns(&getContext(), typeConverter, patterns);
// NOTE: we allow other dialects besides just VM during this pass as we are
diff --git a/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertAlignmentOps.cpp b/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertAlignmentOps.cpp
index c4f83a6..52aadbe 100644
--- a/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertAlignmentOps.cpp
+++ b/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertAlignmentOps.cpp
@@ -73,7 +73,7 @@
void populateUtilAlignmentToVMPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
conversionTarget.addIllegalOp<IREE::Util::AlignOp>();
patterns.insert<AlignOpConversion>(typeConverter, context);
diff --git a/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertGlobalOps.cpp b/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertGlobalOps.cpp
index b24cd34..3939b12 100644
--- a/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertGlobalOps.cpp
+++ b/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertGlobalOps.cpp
@@ -278,7 +278,7 @@
void populateUtilGlobalToVMPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
conversionTarget.addIllegalOp<IREE::Util::InitializerOp,
IREE::Util::InitializerReturnOp>();
patterns.insert<InitializerOpConversion, InitializerReturnOpConversion>(
diff --git a/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertListOps.cpp b/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertListOps.cpp
index 3762842..6a2ff3e 100644
--- a/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertListOps.cpp
+++ b/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertListOps.cpp
@@ -124,7 +124,7 @@
void populateUtilListToVMPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
typeConverter.addConversion(
[&typeConverter](IREE::Util::ListType type) -> Optional<Type> {
Type elementType;
diff --git a/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertStatusOps.cpp b/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertStatusOps.cpp
index 26f1b9c..28b7da4 100644
--- a/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertStatusOps.cpp
+++ b/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertStatusOps.cpp
@@ -30,7 +30,7 @@
void populateUtilStatusToVMPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
conversionTarget.addIllegalOp<IREE::Util::StatusCheckOkOp>();
patterns.insert<StatusCheckOkOpConversion>(context, typeConverter);
}
diff --git a/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertUtilToVM.cpp b/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertUtilToVM.cpp
index 2e1d0fa..9bab556 100644
--- a/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertUtilToVM.cpp
+++ b/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertUtilToVM.cpp
@@ -23,19 +23,19 @@
void populateUtilGlobalToVMPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
void populateUtilListToVMPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
void populateUtilStatusToVMPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
void populateUtilAlignmentToVMPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
namespace {
@@ -117,7 +117,7 @@
void populateUtilToVMPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
patterns.insert<NullOpConversion>(typeConverter, context);
patterns.insert<CmpEQOpConversion>(typeConverter, context);
patterns.insert<ByteBufferConstantOpConversion>(typeConverter, context);
diff --git a/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertUtilToVM.h b/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertUtilToVM.h
index df82ffc..a6cdd40 100644
--- a/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertUtilToVM.h
+++ b/iree/compiler/Dialect/VM/Conversion/UtilToVM/ConvertUtilToVM.h
@@ -17,7 +17,7 @@
void populateUtilToVMPatterns(MLIRContext *context,
ConversionTarget &conversionTarget,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
} // namespace iree_compiler
} // namespace mlir
diff --git a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.cpp b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.cpp
index dd187b8..60ed16d 100644
--- a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.cpp
+++ b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.cpp
@@ -4587,7 +4587,7 @@
void populateVMToEmitCPatterns(ConversionTarget &conversionTarget,
IREE::VM::EmitCTypeConverter &typeConverter,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
SmallVector<Operation *> &visitedExports,
SmallVector<std::string> &importShims) {
auto context = patterns.getContext();
@@ -4973,7 +4973,7 @@
SmallVector<Operation *> visitedExports;
SmallVector<std::string> importShims;
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
populateVMToEmitCPatterns(target, typeConverter, patterns, visitedExports,
importShims);
diff --git a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.h b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.h
index 60eb440..67b0ac9 100644
--- a/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.h
+++ b/iree/compiler/Dialect/VM/Conversion/VMToEmitC/ConvertVMToEmitC.h
@@ -17,7 +17,7 @@
void populateVMToEmitCPatterns(ConversionTarget &conversionTarget,
IREE::VM::EmitCTypeConverter &typeConverter,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
SmallVector<Operation *> &visitedExports,
SmallVector<std::string> &importShims);
diff --git a/iree/compiler/Dialect/VM/IR/VMBase.td b/iree/compiler/Dialect/VM/IR/VMBase.td
index b58482a..466fdf2 100644
--- a/iree/compiler/Dialect/VM/IR/VMBase.td
+++ b/iree/compiler/Dialect/VM/IR/VMBase.td
@@ -214,7 +214,7 @@
auto storageType = $_self.getStorageType();
assert(storageType.isIntOrFloat());
assert(storageType.getIntOrFloatBitWidth() % 8 == 0);
- return storageType.getIntOrFloatBitWidth() / 8;
+ return IREE::Util::getRoundedElementByteWidth(storageType);
}]>,
InterfaceMethod<[{}], "StringRef", "getSymbolName", (ins)>,
InterfaceMethod<[{}], "bool", "isMutable", (ins)>,
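
The hunk above replaces the truncating storageType.getIntOrFloatBitWidth() / 8 with IREE::Util::getRoundedElementByteWidth. A minimal standalone sketch of the arithmetic difference, assuming the helper rounds a bit width up to whole bytes (the helper name comes from the hunk; the round-up formula and the widths checked below are illustrative, not copied from the IREE sources):

// rounded_byte_width_sketch.cc -- hypothetical illustration, not IREE code.
#include <cassert>
#include <cstdint>

// Truncating division: sub-byte widths (i1, i4, ...) collapse to zero bytes.
static uint32_t truncatedByteWidth(uint32_t bitWidth) { return bitWidth / 8; }

// Assumed behavior of a "rounded" byte width: round up to the next whole byte.
static uint32_t roundedByteWidth(uint32_t bitWidth) { return (bitWidth + 7) / 8; }

int main() {
  assert(truncatedByteWidth(1) == 0);  // an i1 element would report zero bytes
  assert(roundedByteWidth(1) == 1);    // rounding keeps at least one byte
  assert(roundedByteWidth(32) == 4);   // byte-aligned widths are unchanged
  return 0;
}
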
diff --git a/iree/compiler/Dialect/VM/IR/VMDialect.h b/iree/compiler/Dialect/VM/IR/VMDialect.h
index 1eafa51..1c60296 100644
--- a/iree/compiler/Dialect/VM/IR/VMDialect.h
+++ b/iree/compiler/Dialect/VM/IR/VMDialect.h
@@ -7,6 +7,7 @@
#ifndef IREE_COMPILER_DIALECT_VM_IR_VMDIALECT_H_
#define IREE_COMPILER_DIALECT_VM_IR_VMDIALECT_H_
+#include "iree/compiler/Dialect/Util/IR/UtilTypes.h"
#include "iree/compiler/Dialect/VM/IR/VMFuncEncoder.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
diff --git a/iree/compiler/Dialect/VM/IR/VMOpFolders.cpp b/iree/compiler/Dialect/VM/IR/VMOpFolders.cpp
index edf6647..e12afd9 100644
--- a/iree/compiler/Dialect/VM/IR/VMOpFolders.cpp
+++ b/iree/compiler/Dialect/VM/IR/VMOpFolders.cpp
@@ -119,8 +119,8 @@
} // namespace
-void InitializerOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void InitializerOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<DropEmptyInitializerOp, InlineConstGlobalInitializer>(context);
}
@@ -153,27 +153,27 @@
} // namespace
-void GlobalI32Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void GlobalI32Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<DropDefaultConstGlobalOpInitializer<GlobalI32Op>>(context);
}
-void GlobalI64Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void GlobalI64Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<DropDefaultConstGlobalOpInitializer<GlobalI64Op>>(context);
}
-void GlobalF32Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void GlobalF32Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<DropDefaultConstGlobalOpInitializer<GlobalF32Op>>(context);
}
-void GlobalF64Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void GlobalF64Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<DropDefaultConstGlobalOpInitializer<GlobalF64Op>>(context);
}
-void GlobalRefOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void GlobalRefOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {}
namespace {
@@ -203,29 +203,29 @@
} // namespace
-void GlobalLoadI32Op::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void GlobalLoadI32Op::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<InlineConstGlobalLoadIntegerOp<GlobalLoadI32Op, GlobalI32Op,
ConstI32Op, ConstI32ZeroOp>>(
context);
}
-void GlobalLoadI64Op::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void GlobalLoadI64Op::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<InlineConstGlobalLoadIntegerOp<GlobalLoadI64Op, GlobalI64Op,
ConstI64Op, ConstI64ZeroOp>>(
context);
}
-void GlobalLoadF32Op::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void GlobalLoadF32Op::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<InlineConstGlobalLoadIntegerOp<GlobalLoadF32Op, GlobalF32Op,
ConstF32Op, ConstF32ZeroOp>>(
context);
}
-void GlobalLoadF64Op::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void GlobalLoadF64Op::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<InlineConstGlobalLoadIntegerOp<GlobalLoadF64Op, GlobalF64Op,
ConstF64Op, ConstF64ZeroOp>>(
context);
@@ -251,8 +251,8 @@
} // namespace
-void GlobalLoadRefOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void GlobalLoadRefOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<InlineConstGlobalLoadRefOp>(context);
}
@@ -277,35 +277,35 @@
} // namespace
void GlobalLoadIndirectI32Op::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<
PropagateGlobalLoadAddress<GlobalLoadIndirectI32Op, GlobalLoadI32Op>>(
context);
}
void GlobalLoadIndirectI64Op::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<
PropagateGlobalLoadAddress<GlobalLoadIndirectI64Op, GlobalLoadI64Op>>(
context);
}
void GlobalLoadIndirectF32Op::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<
PropagateGlobalLoadAddress<GlobalLoadIndirectF32Op, GlobalLoadF32Op>>(
context);
}
void GlobalLoadIndirectF64Op::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<
PropagateGlobalLoadAddress<GlobalLoadIndirectF64Op, GlobalLoadF64Op>>(
context);
}
void GlobalLoadIndirectRefOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<
PropagateGlobalLoadAddress<GlobalLoadIndirectRefOp, GlobalLoadRefOp>>(
context);
@@ -331,35 +331,35 @@
} // namespace
void GlobalStoreIndirectI32Op::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<
PropagateGlobalStoreAddress<GlobalStoreIndirectI32Op, GlobalStoreI32Op>>(
context);
}
void GlobalStoreIndirectI64Op::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<
PropagateGlobalStoreAddress<GlobalStoreIndirectI64Op, GlobalStoreI64Op>>(
context);
}
void GlobalStoreIndirectF32Op::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<
PropagateGlobalStoreAddress<GlobalStoreIndirectF32Op, GlobalStoreF32Op>>(
context);
}
void GlobalStoreIndirectF64Op::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<
PropagateGlobalStoreAddress<GlobalStoreIndirectF64Op, GlobalStoreF64Op>>(
context);
}
void GlobalStoreIndirectRefOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+ RewritePatternSet &results, MLIRContext *context) {
results.insert<
PropagateGlobalStoreAddress<GlobalStoreIndirectRefOp, GlobalStoreRefOp>>(
context);
@@ -389,28 +389,28 @@
OpFoldResult ConstI32Op::fold(ArrayRef<Attribute> operands) { return value(); }
-void ConstI32Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void ConstI32Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<FoldZeroConstPrimitive<ConstI32Op, ConstI32ZeroOp>>(context);
}
OpFoldResult ConstI64Op::fold(ArrayRef<Attribute> operands) { return value(); }
-void ConstI64Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void ConstI64Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<FoldZeroConstPrimitive<ConstI64Op, ConstI64ZeroOp>>(context);
}
OpFoldResult ConstF32Op::fold(ArrayRef<Attribute> operands) { return value(); }
-void ConstF32Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void ConstF32Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<FoldZeroConstPrimitive<ConstF32Op, ConstF32ZeroOp>>(context);
}
OpFoldResult ConstF64Op::fold(ArrayRef<Attribute> operands) { return value(); }
-void ConstF64Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void ConstF64Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<FoldZeroConstPrimitive<ConstF64Op, ConstF64ZeroOp>>(context);
}
@@ -770,7 +770,7 @@
return foldMulOp<IntegerAttr>(*this, operands);
}
-void MulI32Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void MulI32Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<FoldConstantMulOperand<IntegerAttr, MulI32Op, ConstI32Op>>(
context);
@@ -780,7 +780,7 @@
return foldMulOp<IntegerAttr>(*this, operands);
}
-void MulI64Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void MulI64Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<FoldConstantMulOperand<IntegerAttr, MulI64Op, ConstI64Op>>(
context);
@@ -932,12 +932,12 @@
return foldFMAOp(*this, operands);
}
-void FMAI32Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void FMAI32Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<CanonicalizeFMA<FMAI32Op, MulI32Op, AddI32Op>>(context);
}
-void FMAI64Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void FMAI64Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<CanonicalizeFMA<FMAI64Op, MulI64Op, AddI64Op>>(context);
}
@@ -966,7 +966,7 @@
return foldMulOp<FloatAttr>(*this, operands);
}
-void MulF32Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void MulF32Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<FoldConstantMulOperand<FloatAttr, MulF32Op, ConstF32Op>>(
context);
@@ -976,7 +976,7 @@
return foldMulOp<FloatAttr>(*this, operands);
}
-void MulF64Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void MulF64Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<FoldConstantMulOperand<FloatAttr, MulF64Op, ConstF64Op>>(
context);
@@ -1061,12 +1061,12 @@
return foldFMAFOp(*this, operands);
}
-void FMAF32Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void FMAF32Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<CanonicalizeFMA<FMAF32Op, MulF32Op, AddF32Op>>(context);
}
-void FMAF64Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void FMAF64Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<CanonicalizeFMA<FMAF64Op, MulF64Op, AddF64Op>>(context);
}
@@ -1440,38 +1440,38 @@
} // namespace
-void TruncI64I8Op::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TruncI64I8Op::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<PseudoIntegerConversionToSplitConversionOp<
TruncI64I8Op, TruncI64I32Op, 32, TruncI32I8Op>>(context);
}
-void TruncI64I16Op::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void TruncI64I16Op::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<PseudoIntegerConversionToSplitConversionOp<
TruncI64I16Op, TruncI64I32Op, 32, TruncI32I16Op>>(context);
}
-void ExtI8I64SOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void ExtI8I64SOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<PseudoIntegerConversionToSplitConversionOp<
ExtI8I64SOp, ExtI8I32SOp, 32, ExtI32I64SOp>>(context);
}
-void ExtI8I64UOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void ExtI8I64UOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<PseudoIntegerConversionToSplitConversionOp<
ExtI8I64UOp, ExtI8I32UOp, 32, ExtI32I64UOp>>(context);
}
-void ExtI16I64SOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void ExtI16I64SOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<PseudoIntegerConversionToSplitConversionOp<
ExtI16I64SOp, ExtI16I32SOp, 32, ExtI32I64SOp>>(context);
}
-void ExtI16I64UOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void ExtI16I64UOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<PseudoIntegerConversionToSplitConversionOp<
ExtI16I64UOp, ExtI16I32UOp, 32, ExtI32I64UOp>>(context);
}
@@ -1604,7 +1604,7 @@
return foldCmpEQOp(*this, operands);
}
-void CmpEQI32Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpEQI32Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpEQI32Op, CmpNEI32Op>>(context);
}
@@ -1613,7 +1613,7 @@
return foldCmpEQOp(*this, operands);
}
-void CmpEQI64Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpEQI64Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpEQI64Op, CmpNEI64Op>>(context);
}
@@ -1654,13 +1654,13 @@
} // namespace
-void CmpNEI32Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpNEI32Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpNEI32Op, CmpEQI32Op>,
CmpNEZeroToCmpNZ<CmpNEI32Op, CmpNZI32Op>>(context);
}
-void CmpNEI64Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpNEI64Op::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpNEI64Op, CmpEQI64Op>,
CmpNEZeroToCmpNZ<CmpNEI64Op, CmpNZI64Op>>(context);
@@ -1684,10 +1684,10 @@
return foldCmpLTSOp(*this, operands);
}
-void CmpLTI32SOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpLTI32SOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {}
-void CmpLTI64SOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpLTI64SOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {}
template <typename T>
@@ -1708,10 +1708,10 @@
return foldCmpLTUOp(*this, operands);
}
-void CmpLTI32UOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpLTI32UOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {}
-void CmpLTI64UOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpLTI64UOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {}
namespace {
@@ -1752,14 +1752,14 @@
return foldCmpLTESOp(*this, operands);
}
-void CmpLTEI32SOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmpLTEI32SOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpLTEI32SOp, CmpGTI32SOp>>(context);
results.insert<RewritePseudoCmpLTEToLT<CmpLTEI32SOp, CmpLTI32SOp>>(context);
}
-void CmpLTEI64SOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmpLTEI64SOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpLTEI64SOp, CmpGTI64SOp>>(context);
results.insert<RewritePseudoCmpLTEToLT<CmpLTEI64SOp, CmpLTI64SOp>>(context);
}
@@ -1782,14 +1782,14 @@
return foldCmpLTEUOp(*this, operands);
}
-void CmpLTEI32UOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmpLTEI32UOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpLTEI32UOp, CmpGTI32UOp>>(context);
results.insert<RewritePseudoCmpLTEToLT<CmpLTEI32UOp, CmpLTI32UOp>>(context);
}
-void CmpLTEI64UOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmpLTEI64UOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpLTEI64UOp, CmpGTI64UOp>>(context);
results.insert<RewritePseudoCmpLTEToLT<CmpLTEI64UOp, CmpLTI64UOp>>(context);
}
@@ -1828,13 +1828,13 @@
return foldCmpGTSOp(*this, operands);
}
-void CmpGTI32SOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpGTI32SOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpGTI32SOp, CmpLTEI32SOp>>(context);
results.insert<RewritePseudoCmpGTToLT<CmpGTI32SOp, CmpLTI32SOp>>(context);
}
-void CmpGTI64SOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpGTI64SOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpGTI64SOp, CmpLTEI64SOp>>(context);
results.insert<RewritePseudoCmpGTToLT<CmpGTI64SOp, CmpLTI64SOp>>(context);
@@ -1858,13 +1858,13 @@
return foldCmpGTUOp(*this, operands);
}
-void CmpGTI32UOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpGTI32UOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpGTI32UOp, CmpLTEI32UOp>>(context);
results.insert<RewritePseudoCmpGTToLT<CmpGTI32UOp, CmpLTI32UOp>>(context);
}
-void CmpGTI64UOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpGTI64UOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpGTI64UOp, CmpLTEI64UOp>>(context);
results.insert<RewritePseudoCmpGTToLT<CmpGTI64UOp, CmpLTI64UOp>>(context);
@@ -1908,14 +1908,14 @@
return foldCmpGTESOp(*this, operands);
}
-void CmpGTEI32SOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmpGTEI32SOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpGTEI32SOp, CmpLTI32SOp>>(context);
results.insert<RewritePseudoCmpGTEToLT<CmpGTEI32SOp, CmpLTI32SOp>>(context);
}
-void CmpGTEI64SOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmpGTEI64SOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpGTEI64SOp, CmpLTI64SOp>>(context);
results.insert<RewritePseudoCmpGTEToLT<CmpGTEI64SOp, CmpLTI64SOp>>(context);
}
@@ -1938,14 +1938,14 @@
return foldCmpGTEUOp(*this, operands);
}
-void CmpGTEI32UOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmpGTEI32UOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpGTEI32UOp, CmpLTI32UOp>>(context);
results.insert<RewritePseudoCmpGTEToLT<CmpGTEI32UOp, CmpLTI32UOp>>(context);
}
-void CmpGTEI64UOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmpGTEI64UOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpGTEI64UOp, CmpLTI64UOp>>(context);
results.insert<RewritePseudoCmpGTEToLT<CmpGTEI64UOp, CmpLTI64UOp>>(context);
}
@@ -2002,22 +2002,22 @@
return foldCmpEQFOp<UNORDERED>(*this, operands);
}
-void CmpEQF32OOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpEQF32OOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpEQF32OOp, CmpNEF32OOp>>(context);
}
-void CmpEQF64OOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpEQF64OOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpEQF64OOp, CmpNEF64OOp>>(context);
}
-void CmpEQF32UOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpEQF32UOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpEQF32UOp, CmpNEF32UOp>>(context);
}
-void CmpEQF64UOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpEQF64UOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpEQF64UOp, CmpNEF64UOp>>(context);
}
@@ -2055,22 +2055,22 @@
return foldCmpNEFOp<UNORDERED>(*this, operands);
}
-void CmpNEF32OOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpNEF32OOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpNEF32OOp, CmpEQF32OOp>>(context);
}
-void CmpNEF64OOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpNEF64OOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpNEF64OOp, CmpEQF64OOp>>(context);
}
-void CmpNEF32UOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpNEF32UOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpNEF32UOp, CmpEQF32UOp>>(context);
}
-void CmpNEF64UOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpNEF64UOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpNEF64UOp, CmpEQF64UOp>>(context);
}
@@ -2108,16 +2108,16 @@
return foldCmpLTFOp<UNORDERED>(*this, operands);
}
-void CmpLTF32OOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpLTF32OOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {}
-void CmpLTF64OOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpLTF64OOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {}
-void CmpLTF32UOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpLTF32UOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {}
-void CmpLTF64UOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpLTF64UOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {}
template <CmpFOrdering ordering, typename T>
@@ -2188,25 +2188,25 @@
return foldCmpGTFOp<UNORDERED>(*this, operands);
}
-void CmpGTF32OOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpGTF32OOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpGTF32OOp, CmpLTEF32OOp>>(context);
results.insert<RewritePseudoCmpGTToLT<CmpGTF32OOp, CmpLTF32OOp>>(context);
}
-void CmpGTF64OOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpGTF64OOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpGTF64OOp, CmpLTEF64OOp>>(context);
results.insert<RewritePseudoCmpGTToLT<CmpGTF64OOp, CmpLTF64OOp>>(context);
}
-void CmpGTF32UOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpGTF32UOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpGTF32UOp, CmpLTEF32UOp>>(context);
results.insert<RewritePseudoCmpGTToLT<CmpGTF32UOp, CmpLTF32UOp>>(context);
}
-void CmpGTF64UOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpGTF64UOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpGTF64UOp, CmpLTEF64UOp>>(context);
results.insert<RewritePseudoCmpGTToLT<CmpGTF64UOp, CmpLTF64UOp>>(context);
@@ -2246,26 +2246,26 @@
return foldCmpGTEFOp<UNORDERED>(*this, operands);
}
-void CmpGTEF32OOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmpGTEF32OOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpGTEF32OOp, CmpLTF32OOp>>(context);
results.insert<RewritePseudoCmpGTEToLT<CmpGTEF32OOp, CmpLTF32OOp>>(context);
}
-void CmpGTEF64OOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmpGTEF64OOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpGTEF64OOp, CmpLTF64OOp>>(context);
results.insert<RewritePseudoCmpGTEToLT<CmpGTEF64OOp, CmpLTF64OOp>>(context);
}
-void CmpGTEF32UOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmpGTEF32UOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpGTEF32UOp, CmpLTF32UOp>>(context);
results.insert<RewritePseudoCmpGTEToLT<CmpGTEF32UOp, CmpLTF32UOp>>(context);
}
-void CmpGTEF64UOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CmpGTEF64UOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<SwapInvertedCmpOps<CmpGTEF64UOp, CmpLTF64UOp>>(context);
results.insert<RewritePseudoCmpGTEToLT<CmpGTEF64UOp, CmpLTF64UOp>>(context);
}
@@ -2308,28 +2308,28 @@
});
}
-void CmpNZF32OOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpNZF32OOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results
.insert<RewritePseudoCmpNZToNE<CmpNZF32OOp, CmpNEF32OOp, ConstF32ZeroOp>>(
context);
}
-void CmpNZF64OOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpNZF64OOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results
.insert<RewritePseudoCmpNZToNE<CmpNZF64OOp, CmpNEF64OOp, ConstF64ZeroOp>>(
context);
}
-void CmpNZF32UOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpNZF32UOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results
.insert<RewritePseudoCmpNZToNE<CmpNZF32UOp, CmpNEF32UOp, ConstF32ZeroOp>>(
context);
}
-void CmpNZF64UOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpNZF64UOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results
.insert<RewritePseudoCmpNZToNE<CmpNZF64UOp, CmpNEF64UOp, ConstF64ZeroOp>>(
@@ -2389,7 +2389,7 @@
} // namespace
-void CmpEQRefOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpEQRefOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<NullCheckCmpEQRefToCmpNZRef>(context);
}
@@ -2420,7 +2420,7 @@
} // namespace
-void CmpNERefOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CmpNERefOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<NullCheckCmpNERefToCmpNZRef>(context);
}
@@ -2542,7 +2542,7 @@
} // namespace
-void BranchOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void BranchOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SimplifyBrToBlockWithSinglePred, SimplifyPassThroughBr>(
context);
@@ -2620,8 +2620,8 @@
} // namespace
-void CondBranchOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CondBranchOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<SimplifyConstCondBranchPred, SimplifySameTargetCondBranchOp,
SwapInvertedCondBranchOpTargets>(context);
}
@@ -2665,7 +2665,7 @@
} // namespace
-void CallOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CallOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<EraseUnusedCallOp<CallOp>>(context);
}
@@ -2692,8 +2692,8 @@
} // namespace
-void CallVariadicOp::getCanonicalizationPatterns(
- OwningRewritePatternList &results, MLIRContext *context) {
+void CallVariadicOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
results.insert<EraseUnusedCallOp<CallVariadicOp>, ConvertNonVariadicToCallOp>(
context);
}
@@ -2732,7 +2732,7 @@
} // namespace
-void CondFailOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CondFailOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<RewriteCondFailToBranchFail>(context);
}
@@ -2785,21 +2785,21 @@
} // namespace
-void CheckEQOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CheckEQOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<RewriteCheckToCondFail<CheckEQOp, CmpEQI32Op, CmpEQI64Op,
CmpEQF32OOp, CmpEQF64OOp, CmpEQRefOp>>(
context);
}
-void CheckNEOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CheckNEOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<RewriteCheckToCondFail<CheckNEOp, CmpNEI32Op, CmpNEI64Op,
CmpNEF32OOp, CmpNEF64OOp, CmpNERefOp>>(
context);
}
-void CheckNZOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CheckNZOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<RewriteCheckToCondFail<CheckNZOp, CmpNZI32Op, CmpNZI64Op,
CmpNZF32OOp, CmpNZF64OOp, CmpNZRefOp>>(
@@ -2859,22 +2859,22 @@
} // namespace
-void TraceOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void TraceOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<RemoveDisabledDebugOp<TraceOp>>(context);
}
-void PrintOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void PrintOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<RemoveDisabledDebugOp<PrintOp>>(context);
}
-void BreakOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void BreakOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<RemoveDisabledDebugAsyncOp<BreakOp>>(context);
}
-void CondBreakOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
+void CondBreakOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<RemoveDisabledDebugAsyncOp<CondBreakOp>,
SimplifyConstCondBreakPred>(context);
diff --git a/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.cpp b/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.cpp
index 0f2cc96..a6b7244 100644
--- a/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.cpp
+++ b/iree/compiler/Dialect/VM/Target/Bytecode/BytecodeModuleTarget.cpp
@@ -379,7 +379,7 @@
// required transformations (such as debug op stripping).
static LogicalResult canonicalizeModule(BytecodeTargetOptions targetOptions,
IREE::VM::ModuleOp moduleOp) {
- OwningRewritePatternList patterns(moduleOp.getContext());
+ RewritePatternSet patterns(moduleOp.getContext());
ConversionTarget target(*moduleOp.getContext());
target.addLegalDialect<IREE::VM::VMDialect>();
target.addLegalOp<IREE::Util::DoNotOptimizeOp>();
diff --git a/iree/compiler/Dialect/VM/Target/C/CModuleTarget.cpp b/iree/compiler/Dialect/VM/Target/C/CModuleTarget.cpp
index 52123b0..135a847 100644
--- a/iree/compiler/Dialect/VM/Target/C/CModuleTarget.cpp
+++ b/iree/compiler/Dialect/VM/Target/C/CModuleTarget.cpp
@@ -255,7 +255,7 @@
/// Adapted from BytecodeModuleTarget and extended by C specific passes
static LogicalResult canonicalizeModule(
IREE::VM::ModuleOp moduleOp, IREE::VM::CTargetOptions targetOptions) {
- OwningRewritePatternList patterns(moduleOp.getContext());
+ RewritePatternSet patterns(moduleOp.getContext());
ConversionTarget target(*moduleOp.getContext());
target.addLegalDialect<IREE::VM::VMDialect>();
target.addLegalOp<IREE::Util::DoNotOptimizeOp>();
diff --git a/iree/compiler/Dialect/VM/Transforms/Conversion.cpp b/iree/compiler/Dialect/VM/Transforms/Conversion.cpp
index cc55193..6a51411 100644
--- a/iree/compiler/Dialect/VM/Transforms/Conversion.cpp
+++ b/iree/compiler/Dialect/VM/Transforms/Conversion.cpp
@@ -115,7 +115,7 @@
}
}
- OwningRewritePatternList conversionPatterns(&getContext());
+ RewritePatternSet conversionPatterns(&getContext());
populateUtilConversionPatterns(context, conversionTarget, typeConverter,
conversionPatterns);
populateUtilToVMPatterns(context, conversionTarget, typeConverter,
diff --git a/iree/compiler/InputConversion/MHLO/BroadcastingToLinalgPatterns.cpp b/iree/compiler/InputConversion/MHLO/BroadcastingToLinalgPatterns.cpp
index cfc2dea..22a1bd2 100644
--- a/iree/compiler/InputConversion/MHLO/BroadcastingToLinalgPatterns.cpp
+++ b/iree/compiler/InputConversion/MHLO/BroadcastingToLinalgPatterns.cpp
@@ -756,7 +756,7 @@
void mlir::iree_compiler::MHLO::populateMHLOBroadcastingToLinalgPatterns(
MLIRContext *context, TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
#define POPULATE_SIMPLE_BCAST(ChloOp, HloOp) \
patterns.insert<ConvertTrivialNonBroadcastBinaryOp>( \
context, typeConverter, 10, \
diff --git a/iree/compiler/InputConversion/MHLO/ConvertComplexToReal.cpp b/iree/compiler/InputConversion/MHLO/ConvertComplexToReal.cpp
index c6c4d1c..8c7c7cd 100644
--- a/iree/compiler/InputConversion/MHLO/ConvertComplexToReal.cpp
+++ b/iree/compiler/InputConversion/MHLO/ConvertComplexToReal.cpp
@@ -308,7 +308,7 @@
void populateMHLOComplexToRealPatterns(MLIRContext *context,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
  // Add and subtract patterns.
patterns.insert<ConvertAddSubOp<mhlo::AddOp>>(typeConverter, context);
patterns.insert<ConvertAddSubOp<mhlo::SubOp>>(typeConverter, context);
@@ -361,7 +361,7 @@
}
void runOnOperation() override {
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
MLIRContext *context = &getContext();
TypeConverter typeConverter;
typeConverter.addConversion([](Type t) { return t; });
diff --git a/iree/compiler/InputConversion/MHLO/ConvertMHLOToFlow.cpp b/iree/compiler/InputConversion/MHLO/ConvertMHLOToFlow.cpp
index 8aec9b1..1d7995f 100644
--- a/iree/compiler/InputConversion/MHLO/ConvertMHLOToFlow.cpp
+++ b/iree/compiler/InputConversion/MHLO/ConvertMHLOToFlow.cpp
@@ -40,7 +40,7 @@
}
void populateMHLOToFlowPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
patterns.insert<ConstOpLowering>(context);
}
diff --git a/iree/compiler/InputConversion/MHLO/ConvertMHLOToFlow.h b/iree/compiler/InputConversion/MHLO/ConvertMHLOToFlow.h
index 17f914b..d81d5b2 100644
--- a/iree/compiler/InputConversion/MHLO/ConvertMHLOToFlow.h
+++ b/iree/compiler/InputConversion/MHLO/ConvertMHLOToFlow.h
@@ -23,7 +23,7 @@
// Appends all patterns for converting MHLO ops to flow ops.
void populateMHLOToFlowPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
} // namespace MHLO
} // namespace iree_compiler
diff --git a/iree/compiler/InputConversion/MHLO/ConvertMHLOToLinalgExt.cpp b/iree/compiler/InputConversion/MHLO/ConvertMHLOToLinalgExt.cpp
index dabfa9b..fe001d0 100644
--- a/iree/compiler/InputConversion/MHLO/ConvertMHLOToLinalgExt.cpp
+++ b/iree/compiler/InputConversion/MHLO/ConvertMHLOToLinalgExt.cpp
@@ -47,39 +47,33 @@
return elementType;
}
-static Optional<Value> materializeCastToSignless(OpBuilder &builder,
- IntegerType toType,
- ValueRange inputs,
- Location loc) {
- assert(inputs.size() == 1 && "too many inputs to type conversion");
- Value fromValue = inputs[0];
- auto fromType = fromValue.getType();
- if (fromType.isSignlessInteger() || !toType.isSignlessInteger())
- return llvm::None;
- // Use unrealized conversion casts to do signful->signless conversion.
- return builder.create<UnrealizedConversionCastOp>(loc, toType, fromValue)
- ->getResult(0);
+static Type convertShapedToSignless(ShapedType shapedType) {
+ if (auto intType = shapedType.getElementType().dyn_cast<IntegerType>())
+ return shapedType.clone(convertIntegerToSignless(intType));
+ return shapedType;
}
-static Optional<Value> materializeCastToScalar(OpBuilder &builder, Type toType,
- ValueRange inputs,
- Location loc) {
+static Optional<Value> materializeCast(OpBuilder &builder, Type toType,
+ ValueRange inputs, Location loc) {
assert(inputs.size() == 1 && "too many inputs to type conversion");
Value fromValue = inputs[0];
auto fromType = fromValue.getType().dyn_cast<RankedTensorType>();
- if (!fromType || fromType.getRank() != 0) return llvm::None;
+ if (!fromType) return llvm::None;
if (auto intFromType = fromType.getElementType().dyn_cast<IntegerType>()) {
- if (!intFromType.isSignlessInteger()) {
- if (!toType.isSignlessInteger()) return llvm::None;
- fromType = fromType.clone(toType).cast<RankedTensorType>();
+ Type castType = getElementTypeOrSelf(toType);
+ if (auto shapedType = fromType.dyn_cast<ShapedType>())
+ castType = shapedType.clone(castType);
+
+ if (castType != fromType)
fromValue =
- builder.create<UnrealizedConversionCastOp>(loc, fromType, fromValue)
+ builder.create<UnrealizedConversionCastOp>(loc, castType, fromValue)
->getResult(0);
- }
}
- Type extractType = fromType.getElementType();
+ if (fromType.getRank() != 0) return fromValue;
+
+ Type extractType = getElementTypeOrSelf(toType);
return builder.createOrFold<tensor::ExtractOp>(loc, extractType, fromValue);
}
@@ -90,11 +84,13 @@
MhloToStdTypeConverter() {
addConversion([](Type type) { return type; });
+ addConversion(convertShapedToSignless);
addConversion(convertRank0TensorToScalar);
addConversion(convertIntegerToSignless);
- addArgumentMaterialization(materializeCastToScalar);
- addTargetMaterialization(materializeCastToScalar);
+ addArgumentMaterialization(materializeCast);
+ addSourceMaterialization(materializeCast);
+ addTargetMaterialization(materializeCast);
}
};
@@ -163,8 +159,16 @@
LogicalResult matchAndRewrite(
mhlo::SortOp mhloSortOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const final {
+ Location loc = mhloSortOp.getLoc();
+
+ llvm::SmallVector<Type> resultTypes;
+ if (this->typeConverter
+ ->convertTypes(mhloSortOp.getResultTypes(), resultTypes)
+ .failed()) {
+ return failure();
+ };
auto sortOp = rewriter.create<IREE::LinalgExt::SortOp>(
- mhloSortOp.getLoc(), mhloSortOp.getResultTypes(),
+ loc, resultTypes,
/*inputs=*/ValueRange{}, adaptor.getOperands(),
mhloSortOp.dimensionAttr());
rewriter.inlineRegionBefore(mhloSortOp.comparator(), sortOp.region(),
@@ -174,8 +178,9 @@
TypeConverter::SignatureConversion signature_converter(
block.getNumArguments());
for (auto en : llvm::enumerate(block.getArguments())) {
- signature_converter.addInputs(en.index(),
- getElementTypeOrSelf(en.value().getType()));
+ signature_converter.addInputs(
+ en.index(), this->typeConverter->convertType(
+ getElementTypeOrSelf(en.value().getType())));
}
    rewriter.applySignatureConversion(&region, signature_converter);
@@ -487,7 +492,7 @@
}
void runOnOperation() override {
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
MLIRContext *context = &getContext();
MhloToStdTypeConverter typeConverter;
diff --git a/iree/compiler/InputConversion/MHLO/MHLOToLinalgOnTensors.cpp b/iree/compiler/InputConversion/MHLO/MHLOToLinalgOnTensors.cpp
index 5f6dd9b..70908ed 100644
--- a/iree/compiler/InputConversion/MHLO/MHLOToLinalgOnTensors.cpp
+++ b/iree/compiler/InputConversion/MHLO/MHLOToLinalgOnTensors.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include <memory>
+#include "iree-dialects/Dialect/LinalgExt/IR/LinalgExtDialect.h"
#include "iree/compiler/Dialect/Flow/IR/FlowOps.h"
#include "iree/compiler/InputConversion/MHLO/ConvertMHLOToFlow.h"
#include "iree/compiler/InputConversion/MHLO/PassDetail.h"
@@ -303,7 +304,7 @@
}
void runOnOperation() override {
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
MLIRContext *context = &getContext();
auto typeConverter = mhlo::createHloToLinalgSignedIntegerConverter();
@@ -362,6 +363,7 @@
// Let the rest fall through.
target.addLegalDialect<BuiltinDialect>();
+ target.addLegalDialect<IREE::LinalgExt::IREELinalgExtDialect>();
target.markUnknownOpDynamicallyLegal(isLegallyTypedOp);
if (failed(applyPartialConversion(getOperation(), target,
@@ -375,7 +377,7 @@
void populateMHLOToLinalgOnTensorsConversionPatterns(
MLIRContext *context, TypeConverter &typeConverter,
- OwningRewritePatternList &patterns) {
+ RewritePatternSet &patterns) {
mhlo::populateHLOToLinalgConversionPattern(context, typeConverter, &patterns);
// TODO(#5809): Drop ConcatenateOp lowering in favor of the upstream version
// then remove the PatternBenefit here
diff --git a/iree/compiler/InputConversion/MHLO/MHLOToMHLOPreprocessing.cpp b/iree/compiler/InputConversion/MHLO/MHLOToMHLOPreprocessing.cpp
index 8e457eb..a7516cf 100644
--- a/iree/compiler/InputConversion/MHLO/MHLOToMHLOPreprocessing.cpp
+++ b/iree/compiler/InputConversion/MHLO/MHLOToMHLOPreprocessing.cpp
@@ -79,52 +79,6 @@
.getResult();
}
-static Value getF32SplatConst(ImplicitLocOpBuilder b, ArrayRef<int64_t> shapes,
- float value) {
- return getF32Const(b, shapes, {value});
-}
-
-class DecomposeLog1PPattern : public OpRewritePattern<mhlo::Log1pOp> {
- public:
- using OpRewritePattern<mhlo::Log1pOp>::OpRewritePattern;
-
- LogicalResult matchAndRewrite(mhlo::Log1pOp op,
- PatternRewriter &rewriter) const override {
- Location loc = op.getLoc();
- auto type = op.operand().getType().cast<TensorType>();
- // https://github.com/google/iree/issues/8083
- if (!type.hasStaticShape()) {
- return rewriter.notifyMatchFailure(op, "TODO: Support dynamic shape");
- }
- DenseElementsAttr attr =
- DenseElementsAttr::get(type, rewriter.getF32FloatAttr(1.0));
- auto one = rewriter.create<arith::ConstantOp>(loc, attr);
- auto x = rewriter.create<mhlo::AddOp>(loc, op.operand(), one);
- rewriter.replaceOpWithNewOp<mhlo::LogOp>(op, x);
- return success();
- }
-};
-
-class DecomposeExpM1Pattern : public OpRewritePattern<mhlo::Expm1Op> {
- public:
- using OpRewritePattern<mhlo::Expm1Op>::OpRewritePattern;
-
- LogicalResult matchAndRewrite(mhlo::Expm1Op op,
- PatternRewriter &rewriter) const override {
- Location loc = op.getLoc();
- auto type = op.operand().getType().cast<TensorType>();
- // https://github.com/google/iree/issues/8083
- if (!type.hasStaticShape()) {
- return rewriter.notifyMatchFailure(op, "TODO: Support dynamic shape");
- }
- DenseElementsAttr attr =
- DenseElementsAttr::get(type, rewriter.getF32FloatAttr(1.0));
- auto one = rewriter.create<arith::ConstantOp>(loc, attr);
- auto x = rewriter.create<mhlo::ExpOp>(loc, op.operand());
- rewriter.replaceOpWithNewOp<mhlo::SubOp>(op, x, one);
- return success();
- }
-};
class ExtractConvOpPaddingAttributes : public OpRewritePattern<mhlo::ConvOp> {
public:
@@ -805,7 +759,7 @@
void runOnOperation() override {
MLIRContext *context = &getContext();
ConversionTarget conversionTarget(*context);
- OwningRewritePatternList conversionPatterns(&getContext());
+ RewritePatternSet conversionPatterns(&getContext());
// Note that various input modalities may do their own legalization of
// CHLO. Converting here allows IREE to accept CHLO dialect regardless of
// whether it was legalized away at a higher level.
@@ -820,15 +774,14 @@
return signalPassFailure();
}
- OwningRewritePatternList patterns(&getContext());
+ RewritePatternSet patterns(&getContext());
// TODO: Remove once we have a general contraction to matmul pass.
mhlo::PopulateEinsumToDotGeneralPatterns(context, &patterns);
mhlo::PopulateUnfuseBatchNormPatterns(context, &patterns);
mhlo::PopulateComplexLoweringPatterns(context, &patterns);
mhlo::PopulateGatherToTorchIndexSelectPatterns(context, &patterns);
patterns.insert<ExtractReduceWindowOpPaddingAttributes,
- AdjustDepthwiseFilterShape, DecomposeLog1PPattern,
- DecomposeExpM1Pattern, ExpandRngNormal>(context);
+ AdjustDepthwiseFilterShape, ExpandRngNormal>(context);
  // dot_general canonicalization patterns.
mhlo::PopulateGeneralDotOpLoweringPatterns(&patterns, context);
diff --git a/iree/compiler/InputConversion/MHLO/Rewriters.h b/iree/compiler/InputConversion/MHLO/Rewriters.h
index f7cfb11..ac996f0 100644
--- a/iree/compiler/InputConversion/MHLO/Rewriters.h
+++ b/iree/compiler/InputConversion/MHLO/Rewriters.h
@@ -17,21 +17,21 @@
/// patterns from XLA, as well as some IREE specific modifications.
void populateMHLOToLinalgOnTensorsConversionPatterns(
MLIRContext *context, TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
/// Populates IREE specific patterns to convert HLO broadcasting ops to Linalg.
/// These are being maintained separately because they are a standalone unit
/// that is both intricate and possible to upstream, should there be alignment
/// to do so.
-void populateMHLOBroadcastingToLinalgPatterns(
- MLIRContext *context, TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+void populateMHLOBroadcastingToLinalgPatterns(MLIRContext *context,
+ TypeConverter &typeConverter,
+ RewritePatternSet &patterns);
/// Populates patterns to convert MHLO/CHLO arithmetic on complex tensors to
/// equivalent HLO level real arithmetic.
void populateMHLOComplexToRealPatterns(MLIRContext *context,
TypeConverter &typeConverter,
- OwningRewritePatternList &patterns);
+ RewritePatternSet &patterns);
} // namespace MHLO
} // namespace iree_compiler
diff --git a/iree/compiler/InputConversion/MHLO/VerifyCompilerMHLOInputLegality.cpp b/iree/compiler/InputConversion/MHLO/VerifyCompilerMHLOInputLegality.cpp
index 3c0ec3a..3959345 100644
--- a/iree/compiler/InputConversion/MHLO/VerifyCompilerMHLOInputLegality.cpp
+++ b/iree/compiler/InputConversion/MHLO/VerifyCompilerMHLOInputLegality.cpp
@@ -23,7 +23,7 @@
void runOnOperation() override {
auto *context = &getContext();
ConversionTarget conversionTarget(*context);
- OwningRewritePatternList conversionPatterns(&getContext());
+ RewritePatternSet conversionPatterns(&getContext());
// Note that we would prefer allow-lists of what we positively support.
// However, it is so common to sneak input-level ops into the pipeline
diff --git a/iree/compiler/InputConversion/MHLO/test/convert_mhlo_to_linalg_ext.mlir b/iree/compiler/InputConversion/MHLO/test/convert_mhlo_to_linalg_ext.mlir
index 280ae7b..20d9595 100644
--- a/iree/compiler/InputConversion/MHLO/test/convert_mhlo_to_linalg_ext.mlir
+++ b/iree/compiler/InputConversion/MHLO/test/convert_mhlo_to_linalg_ext.mlir
@@ -23,6 +23,29 @@
// -----
+func @sort_1d_ui(%arg0: tensor<128xui32>) -> (tensor<128xui32>) {
+ %0 = "mhlo.sort"(%arg0) ( {
+ ^bb0(%arg2: tensor<ui32>, %arg3: tensor<ui32>): // no predecessors
+ %1 = "mhlo.compare"(%arg2, %arg3) {comparison_direction = "GT"} : (tensor<ui32>, tensor<ui32>) -> tensor<i1>
+ "mhlo.return"(%1) : (tensor<i1>) -> ()
+ }) {dimension = 0 : i64, is_stable = false} : (tensor<128xui32>) -> (tensor<128xui32>)
+ return %0 : tensor<128xui32>
+}
+// CHECK-LABEL: func @sort_1d_ui(
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK-SAME: )
+// CHECK: %[[CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]] : tensor<128xui32> to tensor<128xi32>
+// CHECK: %[[SORT:.+]] = iree_linalg_ext.sort
+// CHECK-SAME: dimension(0)
+// CHECK-SAME: outs(%[[CAST]] : tensor<128xi32>)
+// CHECK: ^bb0(%[[ARG1:.+]]: i32, %[[ARG2:.+]]: i32)
+// CHECK: %[[CMP:.+]] = arith.cmpi ugt, %[[ARG1]], %[[ARG2]]
+// CHECK: iree_linalg_ext.yield %[[CMP]]
+// CHECK: %[[RESULT:.+]] = builtin.unrealized_conversion_cast %[[SORT]] : tensor<128xi32> to tensor<128xui32>
+// CHECK: return %[[RESULT]]
+
+// -----
+
func @sort_cst_capture(%arg0: tensor<1x10xi32>) -> tensor<1x10xi32> {
%0 = mhlo.constant dense<0> : tensor<i32>
%1 = "mhlo.sort"(%arg0) ( {
diff --git a/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing.mlir b/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing.mlir
index 0ca9057..55d378c 100644
--- a/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing.mlir
+++ b/iree/compiler/InputConversion/MHLO/test/mhlo_to_mhlo_preprocessing.mlir
@@ -111,24 +111,6 @@
// -----
-func @log_plus_one(%input: tensor<4xf32>) -> tensor<4xf32> {
- // CHECK: mhlo.add
- // CHECK: mhlo.log
- %0 = "mhlo.log_plus_one"(%input) : (tensor<4xf32>) -> tensor<4xf32>
- return %0: tensor<4xf32>
-}
-
-// -----
-
-func @exponential_minus_one(%input: tensor<4xf32>) -> tensor<4xf32> {
- // CHECK: mhlo.exponential
- // CHECK: mhlo.subtract
- %0 = "mhlo.exponential_minus_one"(%input) : (tensor<4xf32>) -> tensor<4xf32>
- return %0: tensor<4xf32>
-}
-
-// -----
-
// CHECK: @reorder_broadcast_in_dim_scalar_binary(%[[ARG0:.*]]: tensor<f32>, %[[ARG1:.*]]: tensor<f32>, %[[ARG2:.*]]: tensor<i32>, %[[ARG3:.*]]: tensor<i32>)
func @reorder_broadcast_in_dim_scalar_binary(%arg0: tensor<f32>, %arg1: tensor<f32>, %arg2: tensor<i32>, %arg3: tensor<i32>) -> (tensor<1x8x8x64xf32>, tensor<1x8x8x64xf32>, tensor<1x8x8x64xf32>, tensor<1x8x8x64xf32>, tensor<1x8x8x64xf32>, tensor<1x8x8x64xf32>, tensor<1x8x8x64xf32>, tensor<1x8x8x64xf32>, tensor<1x8x8x64xf32>, tensor<1x8x8x64xf32>, tensor<1x8x8x64xf32>, tensor<1x8x8x64xf32>, tensor<1x8x8x64xi32>, tensor<1x8x8x64xi32>, tensor<1x8x8x64xi32>) {
// CHECK: %[[ADD:.*]] = mhlo.add %[[ARG0]], %[[ARG1]] : tensor<f32>
diff --git a/iree/compiler/InputConversion/TOSA/Passes.cpp b/iree/compiler/InputConversion/TOSA/Passes.cpp
index 0ee308f..5029f16 100644
--- a/iree/compiler/InputConversion/TOSA/Passes.cpp
+++ b/iree/compiler/InputConversion/TOSA/Passes.cpp
@@ -43,7 +43,6 @@
passManager.addNestedPass<FuncOp>(tosa::createTosaMakeBroadcastablePass());
passManager.addNestedPass<FuncOp>(tosa::createTosaToStandard());
passManager.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
- passManager.addNestedPass<FuncOp>(IREE::Flow::createPromoteI1ToI8Pass());
tosa::addTosaToLinalgPasses(passManager);
passManager.addNestedPass<FuncOp>(tosa::createTosaToStandard());
diff --git a/iree/compiler/InputConversion/TOSA/VerifyCompilerTOSAInputLegality.cpp b/iree/compiler/InputConversion/TOSA/VerifyCompilerTOSAInputLegality.cpp
index 252906d..aa063ae 100644
--- a/iree/compiler/InputConversion/TOSA/VerifyCompilerTOSAInputLegality.cpp
+++ b/iree/compiler/InputConversion/TOSA/VerifyCompilerTOSAInputLegality.cpp
@@ -20,7 +20,7 @@
void runOnOperation() override {
auto *context = &getContext();
ConversionTarget conversionTarget(*context);
- OwningRewritePatternList conversionPatterns(&getContext());
+ RewritePatternSet conversionPatterns(&getContext());
// Note that we would prefer allow-lists of what we positively support.
// However, it is so common to sneak input-level ops into the pipeline
diff --git a/iree/compiler/Utils/PatternUtils.h b/iree/compiler/Utils/PatternUtils.h
index 50b0e9f..bac5cf4 100644
--- a/iree/compiler/Utils/PatternUtils.h
+++ b/iree/compiler/Utils/PatternUtils.h
@@ -30,7 +30,7 @@
OpTy op, typename OpTy::Adaptor operands, PatternRewriter &rewriter);
template <typename OpTy>
-static void insertGreedyPattern(OwningRewritePatternList &patterns,
+static void insertGreedyPattern(RewritePatternSet &patterns,
MLIRContext *context,
GenericOpRewritePattern<OpTy> f,
PatternBenefit benefit = 1) {
@@ -57,7 +57,7 @@
}
template <typename OpTy>
-static void insertConversionPattern(OwningRewritePatternList &patterns,
+static void insertConversionPattern(RewritePatternSet &patterns,
MLIRContext *context,
GenericOpRewritePattern<OpTy> f,
PatternBenefit benefit = 1) {
diff --git a/iree/hal/buffer_view.h b/iree/hal/buffer_view.h
index a6ee1d6..5a483e1 100644
--- a/iree/hal/buffer_view.h
+++ b/iree/hal/buffer_view.h
@@ -74,6 +74,7 @@
iree_all_bits_set(iree_hal_element_numerical_type(element_type), \
IREE_HAL_NUMERICAL_TYPE_FLOAT)
+// TODO(#8193): split out logical and physical bit widths.
// Returns the bit width of each element.
#define iree_hal_element_bit_count(element_type) (size_t)((element_type)&0xFF)
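The TODO above refers to element types whose logical width differs from their storage width (for example sub-byte types packed into bytes); today the macro reads the width directly from the low byte of the element type value. A standalone sketch of that encoding (the enum values here are illustrative only, not IREE's real constants):
```
// Standalone sketch, not IREE code: mirrors the encoding that
// iree_hal_element_bit_count relies on, where the low byte of the element
// type value stores the bit width.
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative values only; the real constants live in iree/hal/buffer_view.h.
enum : uint32_t {
  SKETCH_ELEMENT_TYPE_FLOAT_32 = (1u << 24) | 32,
  SKETCH_ELEMENT_TYPE_INT_8 = (2u << 24) | 8,
};

#define sketch_element_bit_count(element_type) (size_t)((element_type)&0xFF)

int main() {
  std::printf("f32: %zu bits\n", sketch_element_bit_count(SKETCH_ELEMENT_TYPE_FLOAT_32));  // 32
  std::printf("i8:  %zu bits\n", sketch_element_bit_count(SKETCH_ELEMENT_TYPE_INT_8));     // 8
  return 0;
}
```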
diff --git a/iree/hal/cuda/BUILD b/iree/hal/cuda/BUILD
index 3ab575b..a7161fb 100644
--- a/iree/hal/cuda/BUILD
+++ b/iree/hal/cuda/BUILD
@@ -20,86 +20,87 @@
""",
)
-cc_library(
- name = "cuda",
- srcs = [
- "api.h",
- "context_wrapper.h",
- "cuda_allocator.c",
- "cuda_allocator.h",
- "cuda_buffer.c",
- "cuda_buffer.h",
- "cuda_device.c",
- "cuda_device.h",
- "cuda_driver.c",
- "cuda_event.c",
- "cuda_event.h",
- "descriptor_set_layout.c",
- "descriptor_set_layout.h",
- "event_semaphore.c",
- "event_semaphore.h",
- "executable_layout.c",
- "executable_layout.h",
- "graph_command_buffer.c",
- "graph_command_buffer.h",
- "native_executable.c",
- "native_executable.h",
- "nop_executable_cache.c",
- "nop_executable_cache.h",
- "status_util.c",
- "status_util.h",
- "stream_command_buffer.c",
- "stream_command_buffer.h",
- ],
- hdrs = [
- "api.h",
- ],
- visibility = ["//visibility:public"],
- deps = [
- ":dynamic_symbols",
- "//iree/base",
- "//iree/base:core_headers",
- "//iree/base:tracing",
- "//iree/base/internal",
- "//iree/base/internal:arena",
- "//iree/base/internal:synchronization",
- "//iree/base/internal/flatcc:parsing",
- "//iree/hal",
- "//iree/hal/utils:buffer_transfer",
- "//iree/hal/utils:deferred_command_buffer",
- "//iree/hal/utils:resource_set",
- "//iree/schemas:cuda_executable_def_c_fbs",
- ],
-)
+# Temporarily disabled pending build system changes.
-cc_library(
- name = "dynamic_symbols",
- srcs = [
- "cuda_headers.h",
- "dynamic_symbols.c",
- ],
- hdrs = [
- "dynamic_symbols.h",
- ],
- textual_hdrs = [
- "dynamic_symbol_tables.h",
- ],
- deps = [
- "//iree/base:core_headers",
- "//iree/base:tracing",
- "//iree/base/internal:dynamic_library",
- "@cuda//:cuda_headers",
- ],
-)
+# cc_library(
+# name = "cuda",
+# srcs = [
+# "api.h",
+# "context_wrapper.h",
+# "cuda_allocator.c",
+# "cuda_allocator.h",
+# "cuda_buffer.c",
+# "cuda_buffer.h",
+# "cuda_device.c",
+# "cuda_device.h",
+# "cuda_driver.c",
+# "cuda_event.c",
+# "cuda_event.h",
+# "descriptor_set_layout.c",
+# "descriptor_set_layout.h",
+# "event_semaphore.c",
+# "event_semaphore.h",
+# "executable_layout.c",
+# "executable_layout.h",
+# "graph_command_buffer.c",
+# "graph_command_buffer.h",
+# "native_executable.c",
+# "native_executable.h",
+# "nop_executable_cache.c",
+# "nop_executable_cache.h",
+# "status_util.c",
+# "status_util.h",
+# "stream_command_buffer.c",
+# "stream_command_buffer.h",
+# ],
+# hdrs = [
+# "api.h",
+# ],
+# visibility = ["//visibility:public"],
+# deps = [
+# ":dynamic_symbols",
+# "//iree/base",
+# "//iree/base:core_headers",
+# "//iree/base:tracing",
+# "//iree/base/internal",
+# "//iree/base/internal:arena",
+# "//iree/base/internal:synchronization",
+# "//iree/base/internal/flatcc:parsing",
+# "//iree/hal",
+# "//iree/hal/utils:buffer_transfer",
+# "//iree/hal/utils:deferred_command_buffer",
+# "//iree/hal/utils:resource_set",
+# "//iree/schemas:cuda_executable_def_c_fbs",
+# ],
+# )
-cc_test(
- name = "dynamic_symbols_test",
- srcs = ["dynamic_symbols_test.cc"],
- tags = ["driver=cuda"],
- deps = [
- ":dynamic_symbols",
- "//iree/base",
- "//iree/testing:gtest",
- "//iree/testing:gtest_main",
- ],
-)
+# cc_library(
+# name = "dynamic_symbols",
+# srcs = [
+# "cuda_headers.h",
+# "dynamic_symbols.c",
+# ],
+# hdrs = [
+# "dynamic_symbols.h",
+# ],
+# textual_hdrs = [
+# "dynamic_symbol_tables.h",
+# ],
+# deps = [
+# "//iree/base:core_headers",
+# "//iree/base:tracing",
+# "//iree/base/internal:dynamic_library",
+# ],
+# )
+
+# cc_test(
+# name = "dynamic_symbols_test",
+# srcs = ["dynamic_symbols_test.cc"],
+# tags = ["driver=cuda"],
+# deps = [
+# ":dynamic_symbols",
+# "//iree/base",
+# "//iree/testing:gtest",
+# "//iree/testing:gtest_main",
+# ],
+# )
diff --git a/iree/hal/cuda/CMakeLists.txt b/iree/hal/cuda/CMakeLists.txt
index 6fe645f..709da11 100644
--- a/iree/hal/cuda/CMakeLists.txt
+++ b/iree/hal/cuda/CMakeLists.txt
@@ -14,86 +14,4 @@
iree_add_all_subdirs()
-iree_cc_library(
- NAME
- cuda
- HDRS
- "api.h"
- SRCS
- "api.h"
- "context_wrapper.h"
- "cuda_allocator.c"
- "cuda_allocator.h"
- "cuda_buffer.c"
- "cuda_buffer.h"
- "cuda_device.c"
- "cuda_device.h"
- "cuda_driver.c"
- "cuda_event.c"
- "cuda_event.h"
- "descriptor_set_layout.c"
- "descriptor_set_layout.h"
- "event_semaphore.c"
- "event_semaphore.h"
- "executable_layout.c"
- "executable_layout.h"
- "graph_command_buffer.c"
- "graph_command_buffer.h"
- "native_executable.c"
- "native_executable.h"
- "nop_executable_cache.c"
- "nop_executable_cache.h"
- "status_util.c"
- "status_util.h"
- "stream_command_buffer.c"
- "stream_command_buffer.h"
- DEPS
- ::dynamic_symbols
- iree::base
- iree::base::core_headers
- iree::base::internal
- iree::base::internal::arena
- iree::base::internal::flatcc::parsing
- iree::base::internal::synchronization
- iree::base::tracing
- iree::hal
- iree::hal::utils::buffer_transfer
- iree::hal::utils::deferred_command_buffer
- iree::hal::utils::resource_set
- iree::schemas::cuda_executable_def_c_fbs
- PUBLIC
-)
-
-iree_cc_library(
- NAME
- dynamic_symbols
- HDRS
- "dynamic_symbols.h"
- TEXTUAL_HDRS
- "dynamic_symbol_tables.h"
- SRCS
- "cuda_headers.h"
- "dynamic_symbols.c"
- DEPS
- cuda_headers
- iree::base::core_headers
- iree::base::internal::dynamic_library
- iree::base::tracing
- PUBLIC
-)
-
-iree_cc_test(
- NAME
- dynamic_symbols_test
- SRCS
- "dynamic_symbols_test.cc"
- DEPS
- ::dynamic_symbols
- iree::base
- iree::testing::gtest
- iree::testing::gtest_main
- LABELS
- "driver=cuda"
-)
-
### BAZEL_TO_CMAKE_PRESERVES_ALL_CONTENT_BELOW_THIS_LINE ###
diff --git a/iree/hal/cuda/cts/CMakeLists.txt b/iree/hal/cuda/cts/CMakeLists.txt
index e2d6e72..72c2269 100644
--- a/iree/hal/cuda/cts/CMakeLists.txt
+++ b/iree/hal/cuda/cts/CMakeLists.txt
@@ -4,26 +4,27 @@
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-iree_hal_cts_test_suite(
- DRIVER_NAME
- cuda
- DRIVER_REGISTRATION_HDR
- "iree/hal/cuda/registration/driver_module.h"
- DRIVER_REGISTRATION_FN
- "iree_hal_cuda_driver_module_register"
- COMPILER_TARGET_BACKEND
- "cuda"
- EXECUTABLE_FORMAT
- "\"PTXE\""
- DEPS
- iree::hal::cuda::registration
- EXCLUDED_TESTS
- # This test depends on iree_hal_cuda_stream_command_buffer_update_buffer
- # via iree_hal_buffer_view_allocate_buffer, which is not implemented yet.
- "command_buffer_dispatch"
- # Non-push descriptor sets are not implemented in the CUDA backend yet.
- "descriptor_set"
- # Semaphores are not implemented in the CUDA backend yet.
- "semaphore_submission"
- "semaphore"
-)
+# Temporarily disabled pending build system changes.
+# iree_hal_cts_test_suite(
+# DRIVER_NAME
+# cuda
+# DRIVER_REGISTRATION_HDR
+# "iree/hal/cuda/registration/driver_module.h"
+# DRIVER_REGISTRATION_FN
+# "iree_hal_cuda_driver_module_register"
+# COMPILER_TARGET_BACKEND
+# "cuda"
+# EXECUTABLE_FORMAT
+# "\"PTXE\""
+# DEPS
+# iree::hal::cuda::registration
+# EXCLUDED_TESTS
+# # This test depends on iree_hal_cuda_stream_command_buffer_update_buffer
+# # via iree_hal_buffer_view_allocate_buffer, which is not implemented yet.
+# "command_buffer_dispatch"
+# # Non-push descriptor sets are not implemented in the CUDA backend yet.
+# "descriptor_set"
+# # Semaphores are not implemented in the CUDA backend yet.
+# "semaphore_submission"
+# "semaphore"
+# )
diff --git a/iree/hal/cuda/registration/BUILD b/iree/hal/cuda/registration/BUILD
index 10bd574..cf162ed 100644
--- a/iree/hal/cuda/registration/BUILD
+++ b/iree/hal/cuda/registration/BUILD
@@ -19,23 +19,25 @@
inline = True,
)
-cc_library(
- name = "registration",
- srcs = ["driver_module.c"],
- hdrs = ["driver_module.h"],
- defines = [
- "IREE_HAL_HAVE_CUDA_DRIVER_MODULE=1",
- ],
- deps = [
- "//iree/base",
- "//iree/base:cc",
- "//iree/base:core_headers",
- "//iree/base:tracing",
- "//iree/base/internal:flags",
- "//iree/hal",
- "//iree/hal/cuda",
- ],
-)
+# Temporarily disabled pending build system changes.
+
+# cc_library(
+# name = "registration",
+# srcs = ["driver_module.c"],
+# hdrs = ["driver_module.h"],
+# defines = [
+# "IREE_HAL_HAVE_CUDA_DRIVER_MODULE=1",
+# ],
+# deps = [
+# "//iree/base",
+# "//iree/base:cc",
+# "//iree/base:core_headers",
+# "//iree/base:tracing",
+# "//iree/base/internal:flags",
+# "//iree/hal",
+# "//iree/hal/cuda",
+# ],
+# )
iree_cmake_extra_content(
content = """
diff --git a/iree/hal/cuda/registration/CMakeLists.txt b/iree/hal/cuda/registration/CMakeLists.txt
index 7d43d3c..95b06cb 100644
--- a/iree/hal/cuda/registration/CMakeLists.txt
+++ b/iree/hal/cuda/registration/CMakeLists.txt
@@ -12,26 +12,6 @@
if(${IREE_HAL_DRIVER_CUDA})
-iree_cc_library(
- NAME
- registration
- HDRS
- "driver_module.h"
- SRCS
- "driver_module.c"
- DEPS
- iree::base
- iree::base::cc
- iree::base::core_headers
- iree::base::internal::flags
- iree::base::tracing
- iree::hal
- iree::hal::cuda
- DEFINES
- "IREE_HAL_HAVE_CUDA_DRIVER_MODULE=1"
- PUBLIC
-)
-
endif()
### BAZEL_TO_CMAKE_PRESERVES_ALL_CONTENT_BELOW_THIS_LINE ###
diff --git a/iree/hal/drivers/CMakeLists.txt b/iree/hal/drivers/CMakeLists.txt
index 015f064..5836c3e 100644
--- a/iree/hal/drivers/CMakeLists.txt
+++ b/iree/hal/drivers/CMakeLists.txt
@@ -7,9 +7,10 @@
# Doesn't use bazel_to_cmake because of custom configuration vars
set(IREE_HAL_DRIVER_MODULES)
-if(${IREE_HAL_DRIVER_CUDA})
- list(APPEND IREE_HAL_DRIVER_MODULES iree::hal::cuda::registration)
-endif()
+# Temporarily disabled pending build system changes.
+# if(${IREE_HAL_DRIVER_CUDA})
+# list(APPEND IREE_HAL_DRIVER_MODULES iree::hal::cuda::registration)
+# endif()
if(${IREE_HAL_DRIVER_DYLIB})
list(APPEND IREE_HAL_DRIVER_MODULES iree::hal::dylib::registration)
endif()
diff --git a/iree/runtime/CMakeLists.txt b/iree/runtime/CMakeLists.txt
index 71f6556..143fd67 100644
--- a/iree/runtime/CMakeLists.txt
+++ b/iree/runtime/CMakeLists.txt
@@ -47,3 +47,10 @@
)
### BAZEL_TO_CMAKE_PRESERVES_ALL_CONTENT_BELOW_THIS_LINE ###
+
+iree_cc_unified_library(
+ NAME
+ unified
+ ROOT
+ ::impl
+)
diff --git a/iree/runtime/demo/CMakeLists.txt b/iree/runtime/demo/CMakeLists.txt
index 4500f97..54c8c5d 100644
--- a/iree/runtime/demo/CMakeLists.txt
+++ b/iree/runtime/demo/CMakeLists.txt
@@ -1,15 +1,3 @@
-################################################################################
-# Autogenerated by build_tools/bazel_to_cmake/bazel_to_cmake.py from #
-# iree/runtime/demo/BUILD #
-# #
-# Use iree_cmake_extra_content from iree/build_defs.oss.bzl to add arbitrary #
-# CMake-only content. #
-# #
-# To disable autogeneration for this file entirely, delete this header. #
-################################################################################
-
-iree_add_all_subdirs()
-
if (NOT ${IREE_HAL_DRIVER_VMVX} OR NOT ${IREE_TARGET_BACKEND_VMVX})
return()
endif()
@@ -22,7 +10,7 @@
DEFINES
"IREE_RUNTIME_DEMO_LOAD_FILE_FROM_EMBEDDED_DATA"
DEPS
- iree::runtime
+ iree::runtime::unified
iree::runtime::testdata::simple_mul_module_c
)
@@ -41,7 +29,7 @@
DEFINES
"IREE_RUNTIME_DEMO_LOAD_FILE_FROM_COMMAND_LINE_ARG"
DEPS
- iree::runtime
+ iree::runtime::unified
)
iree_cc_binary(
@@ -50,7 +38,7 @@
SRCS
"hello_world_terse.c"
DEPS
- iree::runtime
+ iree::runtime::unified
iree::runtime::testdata::simple_mul_module_c
)
@@ -60,5 +48,3 @@
SRC
::hello_world_terse
)
-
-### BAZEL_TO_CMAKE_PRESERVES_ALL_CONTENT_BELOW_THIS_LINE ###
diff --git a/iree/samples/custom_modules/dialect/conversion_patterns.cc b/iree/samples/custom_modules/dialect/conversion_patterns.cc
index 27f0fbe..99de453 100644
--- a/iree/samples/custom_modules/dialect/conversion_patterns.cc
+++ b/iree/samples/custom_modules/dialect/conversion_patterns.cc
@@ -16,7 +16,7 @@
namespace Custom {
void populateCustomToHALPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter) {
// We can use the HAL conversion handler for this tensor->buffer conversion
// as we just want the simple form. If we wanted to perform additional
@@ -30,7 +30,7 @@
void populateCustomToVMPatterns(MLIRContext *context,
SymbolTable &importSymbols,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter) {
// We can use the VM conversion handler for all of these as they are simple
// 1:1 mappings. More complex mappings can provide their own conversions
diff --git a/iree/samples/custom_modules/dialect/conversion_patterns.h b/iree/samples/custom_modules/dialect/conversion_patterns.h
index 33a05a2..f265c2c 100644
--- a/iree/samples/custom_modules/dialect/conversion_patterns.h
+++ b/iree/samples/custom_modules/dialect/conversion_patterns.h
@@ -18,13 +18,13 @@
// Populates conversion patterns from the tensor-based custom dialect ops to the
// HAL buffer-based ones.
void populateCustomToHALPatterns(MLIRContext *context,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter);
// Populates conversion patterns from the custom dialect to the VM dialect.
void populateCustomToVMPatterns(MLIRContext *context,
SymbolTable &importSymbols,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter);
} // namespace Custom
diff --git a/iree/samples/custom_modules/dialect/custom_dialect.cc b/iree/samples/custom_modules/dialect/custom_dialect.cc
index 35b048c..19620d4 100644
--- a/iree/samples/custom_modules/dialect/custom_dialect.cc
+++ b/iree/samples/custom_modules/dialect/custom_dialect.cc
@@ -31,7 +31,7 @@
using HALConversionDialectInterface::HALConversionDialectInterface;
void setupConversionTarget(ConversionTarget &target,
- OwningRewritePatternList &patterns,
+ RewritePatternSet &patterns,
TypeConverter &typeConverter) const override {
populateCustomToHALPatterns(getDialect()->getContext(), patterns,
typeConverter);
@@ -52,7 +52,7 @@
}
void populateVMConversionPatterns(
- SymbolTable &importSymbols, OwningRewritePatternList &patterns,
+ SymbolTable &importSymbols, RewritePatternSet &patterns,
TypeConverter &typeConverter) const override {
populateCustomToVMPatterns(getDialect()->getContext(), importSymbols,
patterns, typeConverter);
diff --git a/iree/samples/simple_embedding/BUILD b/iree/samples/simple_embedding/BUILD
index 1c38f8c..4914245 100644
--- a/iree/samples/simple_embedding/BUILD
+++ b/iree/samples/simple_embedding/BUILD
@@ -269,40 +269,46 @@
iree_cmake_extra_content(
content = """
endif()
-
-if(${IREE_HAL_DRIVER_CUDA} AND (${IREE_TARGET_BACKEND_CUDA} OR DEFINED IREE_HOST_BINARY_ROOT))
""",
inline = True,
)
-cc_binary(
- name = "simple_embedding_cuda",
- srcs = [
- "device_cuda.c",
- "simple_embedding.c",
- ],
- deps = [
- ":simple_embedding_test_bytecode_module_cuda_c",
- "//iree/base",
- "//iree/hal",
- "//iree/hal/cuda/registration",
- "//iree/modules/hal",
- "//iree/vm",
- "//iree/vm:bytecode_module",
- ],
-)
+# Temporarily disabled pending build system changes.
+# iree_cmake_extra_content(
+# content = """
+# if(${IREE_HAL_DRIVER_CUDA} AND (${IREE_TARGET_BACKEND_CUDA} OR DEFINED IREE_HOST_BINARY_ROOT))
+# """,
+# inline = True,
+# )
-iree_bytecode_module(
- name = "simple_embedding_test_bytecode_module_cuda",
- src = "simple_embedding_test.mlir",
- c_identifier = "iree_samples_simple_embedding_test_module_cuda",
- flags = [
- "-iree-input-type=mhlo",
- "-iree-mlir-to-vm-bytecode-module",
- "-iree-hal-target-backends=cuda",
- "-iree-llvm-debug-symbols=false",
- ],
-)
+# cc_binary(
+# name = "simple_embedding_cuda",
+# srcs = [
+# "device_cuda.c",
+# "simple_embedding.c",
+# ],
+# deps = [
+# ":simple_embedding_test_bytecode_module_cuda_c",
+# "//iree/base",
+# "//iree/hal",
+# "//iree/hal/cuda/registration",
+# "//iree/modules/hal",
+# "//iree/vm",
+# "//iree/vm:bytecode_module",
+# ],
+# )
+
+# iree_bytecode_module(
+# name = "simple_embedding_test_bytecode_module_cuda",
+# src = "simple_embedding_test.mlir",
+# c_identifier = "iree_samples_simple_embedding_test_module_cuda",
+# flags = [
+# "-iree-input-type=mhlo",
+# "-iree-mlir-to-vm-bytecode-module",
+# "-iree-hal-target-backends=cuda",
+# "-iree-llvm-debug-symbols=false",
+# ],
+# )
# Simple embedding is failing in the CI.
# native_test(
@@ -313,18 +319,18 @@
# src = ":simple_embedding_cuda",
# )
-iree_cmake_extra_content(
- content = """
-iree_native_test(
- NAME
- "simple_embedding_cuda_test"
- LABELS
- "driver=cuda"
- SRC
- ::simple_embedding_cuda
-)
+# iree_cmake_extra_content(
+# content = """
+# iree_native_test(
+# NAME
+# "simple_embedding_cuda_test"
+# LABELS
+# "driver=cuda"
+# SRC
+# ::simple_embedding_cuda
+# )
-endif()
-""",
- inline = True,
-)
+# endif()
+# """,
+# inline = True,
+# )
diff --git a/iree/samples/simple_embedding/CMakeLists.txt b/iree/samples/simple_embedding/CMakeLists.txt
index 329566c..e9dfde1 100644
--- a/iree/samples/simple_embedding/CMakeLists.txt
+++ b/iree/samples/simple_embedding/CMakeLists.txt
@@ -259,48 +259,4 @@
endif()
-if(${IREE_HAL_DRIVER_CUDA} AND (${IREE_TARGET_BACKEND_CUDA} OR DEFINED IREE_HOST_BINARY_ROOT))
-
-iree_cc_binary(
- NAME
- simple_embedding_cuda
- SRCS
- "device_cuda.c"
- "simple_embedding.c"
- DEPS
- ::simple_embedding_test_bytecode_module_cuda_c
- iree::base
- iree::hal
- iree::hal::cuda::registration
- iree::modules::hal
- iree::vm
- iree::vm::bytecode_module
-)
-
-iree_bytecode_module(
- NAME
- simple_embedding_test_bytecode_module_cuda
- SRC
- "simple_embedding_test.mlir"
- C_IDENTIFIER
- "iree_samples_simple_embedding_test_module_cuda"
- FLAGS
- "-iree-input-type=mhlo"
- "-iree-mlir-to-vm-bytecode-module"
- "-iree-hal-target-backends=cuda"
- "-iree-llvm-debug-symbols=false"
- PUBLIC
-)
-
-iree_native_test(
- NAME
- "simple_embedding_cuda_test"
- LABELS
- "driver=cuda"
- SRC
- ::simple_embedding_cuda
-)
-
-endif()
-
### BAZEL_TO_CMAKE_PRESERVES_ALL_CONTENT_BELOW_THIS_LINE ###
diff --git a/iree/test/e2e/cuda_specific/BUILD b/iree/test/e2e/cuda_specific/BUILD
index dc53d19..a067545 100644
--- a/iree/test/e2e/cuda_specific/BUILD
+++ b/iree/test/e2e/cuda_specific/BUILD
@@ -7,7 +7,7 @@
# Tests for end-to-end IREE support specific to the CUDA backend to be able to
# incrementally enable features.
-load("//build_tools/bazel:iree_check_test.bzl", "iree_check_single_backend_test_suite")
+# load("//build_tools/bazel:iree_check_test.bzl", "iree_check_single_backend_test_suite")
package(
default_visibility = ["//visibility:public"],
@@ -15,20 +15,21 @@
licenses = ["notice"], # Apache 2.0
)
-iree_check_single_backend_test_suite(
- name = "check_cuda",
- srcs = [
- "dot.mlir",
- ],
- compiler_flags = ["-iree-input-type=mhlo"],
- driver = "cuda",
- tags = [
- # CUDA cuInit fails with sanitizer on.
- "noasan",
- "nomsan",
- "notsan",
- "noubsan",
- "requires-gpu-nvidia",
- ],
- target_backend = "cuda",
-)
+# Temporarily disabled pending build system changes.
+# iree_check_single_backend_test_suite(
+# name = "check_cuda",
+# srcs = [
+# "dot.mlir",
+# ],
+# compiler_flags = ["-iree-input-type=mhlo"],
+# driver = "cuda",
+# tags = [
+# # CUDA cuInit fails with sanitizer on.
+# "noasan",
+# "nomsan",
+# "notsan",
+# "noubsan",
+# "requires-gpu-nvidia",
+# ],
+# target_backend = "cuda",
+# )
diff --git a/iree/test/e2e/cuda_specific/CMakeLists.txt b/iree/test/e2e/cuda_specific/CMakeLists.txt
index 47ca1d3..ab173dc 100644
--- a/iree/test/e2e/cuda_specific/CMakeLists.txt
+++ b/iree/test/e2e/cuda_specific/CMakeLists.txt
@@ -10,23 +10,4 @@
iree_add_all_subdirs()
-iree_check_single_backend_test_suite(
- NAME
- check_cuda
- SRCS
- "dot.mlir"
- TARGET_BACKEND
- "cuda"
- DRIVER
- "cuda"
- COMPILER_FLAGS
- "-iree-input-type=mhlo"
- LABELS
- "noasan"
- "nomsan"
- "notsan"
- "noubsan"
- "requires-gpu-nvidia"
-)
-
### BAZEL_TO_CMAKE_PRESERVES_ALL_CONTENT_BELOW_THIS_LINE ###
diff --git a/iree/test/e2e/linalg_ext_ops/BUILD b/iree/test/e2e/linalg_ext_ops/BUILD
index 824d951..343c8b1 100644
--- a/iree/test/e2e/linalg_ext_ops/BUILD
+++ b/iree/test/e2e/linalg_ext_ops/BUILD
@@ -13,29 +13,30 @@
licenses = ["notice"], # Apache 2.0
)
-iree_check_single_backend_test_suite(
- name = "check_cuda",
- srcs = enforce_glob(
- # keep sorted
- [
- "reverse.mlir",
- "scan.mlir",
- ],
- include = ["*.mlir"],
- exclude = [
- ],
- ),
- driver = "cuda",
- tags = [
- # CUDA cuInit fails with sanitizer on.
- "noasan",
- "nomsan",
- "notsan",
- "noubsan",
- "requires-gpu-nvidia",
- ],
- target_backend = "cuda",
-)
+# Temporarily disabled pending build system changes.
+# iree_check_single_backend_test_suite(
+# name = "check_cuda",
+# srcs = enforce_glob(
+# # keep sorted
+# [
+# "reverse.mlir",
+# "scan.mlir",
+# ],
+# include = ["*.mlir"],
+# exclude = [
+# ],
+# ),
+# driver = "cuda",
+# tags = [
+# # CUDA cuInit fails with sanitizer on.
+# "noasan",
+# "nomsan",
+# "notsan",
+# "noubsan",
+# "requires-gpu-nvidia",
+# ],
+# target_backend = "cuda",
+# )
iree_check_single_backend_test_suite(
name = "check_dylib-llvm-aot_dylib",
diff --git a/iree/test/e2e/linalg_ext_ops/CMakeLists.txt b/iree/test/e2e/linalg_ext_ops/CMakeLists.txt
index 3d9d628..7aed204 100644
--- a/iree/test/e2e/linalg_ext_ops/CMakeLists.txt
+++ b/iree/test/e2e/linalg_ext_ops/CMakeLists.txt
@@ -12,24 +12,6 @@
iree_check_single_backend_test_suite(
NAME
- check_cuda
- SRCS
- "reverse.mlir"
- "scan.mlir"
- TARGET_BACKEND
- "cuda"
- DRIVER
- "cuda"
- LABELS
- "noasan"
- "nomsan"
- "notsan"
- "noubsan"
- "requires-gpu-nvidia"
-)
-
-iree_check_single_backend_test_suite(
- NAME
check_dylib-llvm-aot_dylib
SRCS
"reverse.mlir"
diff --git a/iree/test/e2e/models/BUILD b/iree/test/e2e/models/BUILD
index ae84e50..d0b1107 100644
--- a/iree/test/e2e/models/BUILD
+++ b/iree/test/e2e/models/BUILD
@@ -74,19 +74,20 @@
target_backend = "vulkan-spirv",
)
-iree_check_single_backend_test_suite(
- name = "check_cuda_cuda",
- timeout = "long",
- srcs = CHECK_FRAMEWORK_TESTS,
- compiler_flags = ["-iree-input-type=mhlo"],
- driver = "cuda",
- tags = [
- # CUDA cuInit fails with sanitizer on.
- "noasan",
- "nomsan",
- "notsan",
- "noubsan",
- "requires-gpu-nvidia",
- ],
- target_backend = "cuda",
-)
+# Temporarily disabled pending build system changes.
+# iree_check_single_backend_test_suite(
+# name = "check_cuda_cuda",
+# timeout = "long",
+# srcs = CHECK_FRAMEWORK_TESTS,
+# compiler_flags = ["-iree-input-type=mhlo"],
+# driver = "cuda",
+# tags = [
+# # CUDA cuInit fails with sanitizer on.
+# "noasan",
+# "nomsan",
+# "notsan",
+# "noubsan",
+# "requires-gpu-nvidia",
+# ],
+# target_backend = "cuda",
+# )
diff --git a/iree/test/e2e/models/CMakeLists.txt b/iree/test/e2e/models/CMakeLists.txt
index a15eb09..a668981 100644
--- a/iree/test/e2e/models/CMakeLists.txt
+++ b/iree/test/e2e/models/CMakeLists.txt
@@ -58,24 +58,4 @@
"-iree-input-type=mhlo"
)
-iree_check_single_backend_test_suite(
- NAME
- check_cuda_cuda
- SRCS
- "bert_encoder_unrolled_fake_weights.mlir"
- "mobilenetv3_fake_weights.mlir"
- TARGET_BACKEND
- "cuda"
- DRIVER
- "cuda"
- COMPILER_FLAGS
- "-iree-input-type=mhlo"
- LABELS
- "noasan"
- "nomsan"
- "notsan"
- "noubsan"
- "requires-gpu-nvidia"
-)
-
### BAZEL_TO_CMAKE_PRESERVES_ALL_CONTENT_BELOW_THIS_LINE ###
diff --git a/iree/test/e2e/regression/BUILD b/iree/test/e2e/regression/BUILD
index 0331f46..c57be78 100644
--- a/iree/test/e2e/regression/BUILD
+++ b/iree/test/e2e/regression/BUILD
@@ -91,21 +91,22 @@
target_backend = "vulkan-spirv",
)
-iree_check_single_backend_test_suite(
- name = "check_regression_cuda",
- srcs = BACKEND_TESTS,
- compiler_flags = ["-iree-input-type=mhlo"],
- driver = "cuda",
- tags = [
- # CUDA cuInit fails with sanitizer on.
- "noasan",
- "nomsan",
- "notsan",
- "noubsan",
- "requires-gpu-nvidia",
- ],
- target_backend = "cuda",
-)
+# Temporarily disabled pending build system changes.
+# iree_check_single_backend_test_suite(
+# name = "check_regression_cuda",
+# srcs = BACKEND_TESTS,
+# compiler_flags = ["-iree-input-type=mhlo"],
+# driver = "cuda",
+# tags = [
+# # CUDA cuInit fails with sanitizer on.
+# "noasan",
+# "nomsan",
+# "notsan",
+# "noubsan",
+# "requires-gpu-nvidia",
+# ],
+# target_backend = "cuda",
+# )
py_binary(
name = "generate_e2e_matmul_tests",
@@ -129,6 +130,7 @@
"f32",
]]
+# Test asm
[iree_generated_trace_runner_test(
name = "e2e_matmul_mmt4d_%s_small" % lhs_rhs_type,
generator = ":generate_e2e_matmul_tests",
@@ -176,3 +178,32 @@
"i8",
"f32",
]]
+
+# Test intrinsics. No need to run vmvx again, since it isn't affected by this
+# codegen flag.
+[iree_generated_trace_runner_test(
+ name = "e2e_matmul_mmt4d_%s_intrinsics_%s" % (lhs_rhs_type, size),
+ compiler_flags = ["--iree-codegen-mmt4d-use-intrinsics"],
+ generator = ":generate_e2e_matmul_tests",
+ generator_args = [
+ "--lhs_rhs_type=%s" % lhs_rhs_type,
+ "--shapes=%s" % size,
+ ],
+ opt_flags = [
+ "--iree-flow-convert-linalg-matmul-to-mmt4d=M0=8 K0=%d N0=8" % (4 if lhs_rhs_type == "i8" else 1),
+ ],
+ target_backends_and_drivers = [
+ ("dylib-llvm-aot", "dylib"),
+ ],
+ target_cpu_features_variants = [
+ "default",
+ "aarch64:+dotprod",
+ ],
+ trace_runner = "//iree/tools:iree-e2e-matmul-test",
+) for lhs_rhs_type in [
+ "i8",
+ "f32",
+] for size in [
+ "small",
+ "large",
+]]
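For context on the K0 choice above: the mmt4d tile shape selected by these flags is M0=8, K0=4, N0=8 for i8 (so the aarch64 +dotprod variant can map each K0=4 group onto a dot-product instruction) and K0=1 for f32. A reference sketch of the i8 inner tile these tests exercise (not IREE's generated kernel):
```
// Reference sketch (not IREE's generated kernel): one mmt4d inner tile with
// M0=8, K0=4, N0=8 and i8 inputs accumulating into i32. In mmt4d the RHS tile
// is stored N0xK0 (transposed), so both operands stream K0 elements at a time;
// the aarch64 +dotprod variant maps each K0=4 group onto a dot-product lane.
#include <cstdint>

void mmt4d_tile_i8(const int8_t lhs[8][4], const int8_t rhs[8][4],
                   int32_t acc[8][8]) {
  for (int m = 0; m < 8; ++m)
    for (int n = 0; n < 8; ++n)
      for (int k = 0; k < 4; ++k)
        acc[m][n] += int32_t(lhs[m][k]) * int32_t(rhs[n][k]);
}
```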
diff --git a/iree/test/e2e/regression/CMakeLists.txt b/iree/test/e2e/regression/CMakeLists.txt
index e54c442..d3aedfd 100644
--- a/iree/test/e2e/regression/CMakeLists.txt
+++ b/iree/test/e2e/regression/CMakeLists.txt
@@ -96,34 +96,6 @@
"-iree-input-type=mhlo"
)
-iree_check_single_backend_test_suite(
- NAME
- check_regression_cuda
- SRCS
- "dynamic_abs.mlir"
- "dynamic_add.mlir"
- "dynamic_dot.mlir"
- "dynamic_reduce_min.mlir"
- "dynamic_torch_index_select_high_rank.mlir"
- "dynamic_torch_index_select_negative.mlir"
- "dynamic_torch_index_select_scalar.mlir"
- "dynamic_torch_index_select_vector.mlir"
- "linalg_ext_ops.mlir"
- "linalg_ops.mlir"
- TARGET_BACKEND
- "cuda"
- DRIVER
- "cuda"
- COMPILER_FLAGS
- "-iree-input-type=mhlo"
- LABELS
- "noasan"
- "nomsan"
- "notsan"
- "noubsan"
- "requires-gpu-nvidia"
-)
-
iree_generated_trace_runner_test(
NAME
e2e_matmul_direct_i8_small
@@ -248,4 +220,96 @@
"aarch64:+dotprod"
)
+iree_generated_trace_runner_test(
+ NAME
+ e2e_matmul_mmt4d_i8_intrinsics_small
+ GENERATOR
+ "generate_e2e_matmul_tests.py"
+ GENERATOR_ARGS
+ "--lhs_rhs_type=i8"
+ "--shapes=small"
+ TRACE_RUNNER
+ iree_tools_iree-e2e-matmul-test
+ TARGET_BACKENDS
+ "dylib-llvm-aot"
+ DRIVERS
+ "dylib"
+ COMPILER_FLAGS
+ "--iree-codegen-mmt4d-use-intrinsics"
+ OPT_FLAGS
+ "--iree-flow-convert-linalg-matmul-to-mmt4d=M0=8 K0=4 N0=8"
+ TARGET_CPU_FEATURES_VARIANTS
+ "default"
+ "aarch64:+dotprod"
+)
+
+iree_generated_trace_runner_test(
+ NAME
+ e2e_matmul_mmt4d_i8_intrinsics_large
+ GENERATOR
+ "generate_e2e_matmul_tests.py"
+ GENERATOR_ARGS
+ "--lhs_rhs_type=i8"
+ "--shapes=large"
+ TRACE_RUNNER
+ iree_tools_iree-e2e-matmul-test
+ TARGET_BACKENDS
+ "dylib-llvm-aot"
+ DRIVERS
+ "dylib"
+ COMPILER_FLAGS
+ "--iree-codegen-mmt4d-use-intrinsics"
+ OPT_FLAGS
+ "--iree-flow-convert-linalg-matmul-to-mmt4d=M0=8 K0=4 N0=8"
+ TARGET_CPU_FEATURES_VARIANTS
+ "default"
+ "aarch64:+dotprod"
+)
+
+iree_generated_trace_runner_test(
+ NAME
+ e2e_matmul_mmt4d_f32_intrinsics_small
+ GENERATOR
+ "generate_e2e_matmul_tests.py"
+ GENERATOR_ARGS
+ "--lhs_rhs_type=f32"
+ "--shapes=small"
+ TRACE_RUNNER
+ iree_tools_iree-e2e-matmul-test
+ TARGET_BACKENDS
+ "dylib-llvm-aot"
+ DRIVERS
+ "dylib"
+ COMPILER_FLAGS
+ "--iree-codegen-mmt4d-use-intrinsics"
+ OPT_FLAGS
+ "--iree-flow-convert-linalg-matmul-to-mmt4d=M0=8 K0=1 N0=8"
+ TARGET_CPU_FEATURES_VARIANTS
+ "default"
+ "aarch64:+dotprod"
+)
+
+iree_generated_trace_runner_test(
+ NAME
+ e2e_matmul_mmt4d_f32_intrinsics_large
+ GENERATOR
+ "generate_e2e_matmul_tests.py"
+ GENERATOR_ARGS
+ "--lhs_rhs_type=f32"
+ "--shapes=large"
+ TRACE_RUNNER
+ iree_tools_iree-e2e-matmul-test
+ TARGET_BACKENDS
+ "dylib-llvm-aot"
+ DRIVERS
+ "dylib"
+ COMPILER_FLAGS
+ "--iree-codegen-mmt4d-use-intrinsics"
+ OPT_FLAGS
+ "--iree-flow-convert-linalg-matmul-to-mmt4d=M0=8 K0=1 N0=8"
+ TARGET_CPU_FEATURES_VARIANTS
+ "default"
+ "aarch64:+dotprod"
+)
+
### BAZEL_TO_CMAKE_PRESERVES_ALL_CONTENT_BELOW_THIS_LINE ###
diff --git a/iree/test/e2e/regression/generate_e2e_matmul_tests.py b/iree/test/e2e/regression/generate_e2e_matmul_tests.py
index caffcdf..605c588 100644
--- a/iree/test/e2e/regression/generate_e2e_matmul_tests.py
+++ b/iree/test/e2e/regression/generate_e2e_matmul_tests.py
@@ -91,6 +91,8 @@
TestShape(m=2, k=3, n=4),
#TestShape(m=8, k=7, n=6),
#TestShape(m=15, k=16, n=17),
+ # Exactly the mmt4d kernel size
+ TestShape(m=8, k=4, n=8),
TestShape(m=14, k=19, n=23),
#TestShape(m=31, k=33, n=32),
TestShape(m=25, k=41, n=35),
diff --git a/iree/test/e2e/regression/lowering_config.mlir b/iree/test/e2e/regression/lowering_config.mlir
index ab4857d..2e2adab 100644
--- a/iree/test/e2e/regression/lowering_config.mlir
+++ b/iree/test/e2e/regression/lowering_config.mlir
@@ -1,10 +1,10 @@
#compilation0 = #iree_codegen.compilation.info<
- #iree_codegen.lowering.config<tile_sizes = [[], [32, 32, 32], [4, 4, 4]], native_vector_size = [4, 4, 4]>,
- #iree_codegen.translation.info<"CPUTensorToVectors", workload_per_wg = [32, 32]>,
+ #iree_codegen.lowering.config<tile_sizes = [[], [8, 8, 8]], native_vector_size = [8, 8, 8]>,
+ #iree_codegen.translation.info<"CPUDoubleTilingExpert", workload_per_wg = [32, 32]>,
workgroup_size = []>
#compilation1 = #iree_codegen.compilation.info<
- #iree_codegen.lowering.config<tile_sizes = [[], [64, 64, 64], [16, 16, 16]], native_vector_size = [16, 16, 16]>,
- #iree_codegen.translation.info<"CPUTensorToVectors", workload_per_wg = [64, 64]>,
+ #iree_codegen.lowering.config<tile_sizes = [[], [4, 4, 4]], native_vector_size = [4, 4, 4]>,
+ #iree_codegen.translation.info<"CPUDoubleTilingExpert", workload_per_wg = [64, 64]>,
workgroup_size = []>
func @lowering_config_test() {
%a = util.unfoldable_constant dense<1.0> : tensor<128x256xf32>
diff --git a/iree/test/e2e/tensor_ops/BUILD b/iree/test/e2e/tensor_ops/BUILD
index 1c694b2..ba43404 100644
--- a/iree/test/e2e/tensor_ops/BUILD
+++ b/iree/test/e2e/tensor_ops/BUILD
@@ -51,29 +51,30 @@
target_backend = "dylib-llvm-aot",
)
-iree_check_single_backend_test_suite(
- name = "check_cuda",
- srcs = enforce_glob(
- # keep sorted
- [
- "extract_slice.mlir",
- "tensor_insert_slice.mlir",
- ],
- include = ["*.mlir"],
- exclude = [
- "tensor_cast.mlir",
- ],
- ),
- driver = "cuda",
- tags = [
- "noasan",
- "nomsan",
- "notsan",
- "noubsan",
- "requires-gpu-nvidia",
- ],
- target_backend = "cuda",
-)
+# Temporarily disabled pending build system changes.
+# iree_check_single_backend_test_suite(
+# name = "check_cuda",
+# srcs = enforce_glob(
+# # keep sorted
+# [
+# "extract_slice.mlir",
+# "tensor_insert_slice.mlir",
+# ],
+# include = ["*.mlir"],
+# exclude = [
+# "tensor_cast.mlir",
+# ],
+# ),
+# driver = "cuda",
+# tags = [
+# "noasan",
+# "nomsan",
+# "notsan",
+# "noubsan",
+# "requires-gpu-nvidia",
+# ],
+# target_backend = "cuda",
+# )
iree_check_single_backend_test_suite(
name = "check_vulkan-spirv_vulkan",
diff --git a/iree/test/e2e/tensor_ops/CMakeLists.txt b/iree/test/e2e/tensor_ops/CMakeLists.txt
index 2acc931..279a5c4 100644
--- a/iree/test/e2e/tensor_ops/CMakeLists.txt
+++ b/iree/test/e2e/tensor_ops/CMakeLists.txt
@@ -38,24 +38,6 @@
iree_check_single_backend_test_suite(
NAME
- check_cuda
- SRCS
- "extract_slice.mlir"
- "tensor_insert_slice.mlir"
- TARGET_BACKEND
- "cuda"
- DRIVER
- "cuda"
- LABELS
- "noasan"
- "nomsan"
- "notsan"
- "noubsan"
- "requires-gpu-nvidia"
-)
-
-iree_check_single_backend_test_suite(
- NAME
check_vulkan-spirv_vulkan
SRCS
"extract_slice.mlir"
diff --git a/iree/test/e2e/xla_ops/BUILD b/iree/test/e2e/xla_ops/BUILD
index 6a2c909..50f0354 100644
--- a/iree/test/e2e/xla_ops/BUILD
+++ b/iree/test/e2e/xla_ops/BUILD
@@ -19,166 +19,167 @@
licenses = ["notice"], # Apache 2.0
)
-iree_check_single_backend_test_suite(
- name = "check_cuda_graph",
- srcs = enforce_glob(
- # keep sorted
- [
- "abs.mlir",
- "add.mlir",
- "batch_norm_inference.mlir",
- "bitcast_convert.mlir",
- "broadcast.mlir",
- "broadcast_add.mlir",
- "broadcast_in_dim.mlir",
- "clamp.mlir",
- "compare.mlir",
- "concatenate.mlir",
- "constant.mlir",
- "convert.mlir",
- "convolution.mlir",
- "cosine.mlir",
- "divide.mlir",
- "dot.mlir",
- "dot_general.mlir",
- "dynamic_slice.mlir",
- "dynamic_update_slice.mlir",
- "exponential.mlir",
- "exponential_fp16.mlir",
- "exponential_minus_one.mlir",
- "fft.mlir",
- "finite.mlir",
- "floor.mlir",
- "gather.mlir",
- "iota.mlir",
- "log.mlir",
- "log_plus_one.mlir",
- "maximum.mlir",
- "minimum.mlir",
- "multiply.mlir",
- "negate.mlir",
- "pad.mlir",
- "pow.mlir",
- "reduce.mlir",
- "reduce_window.mlir",
- "remainder.mlir",
- "reshape.mlir",
- "reverse.mlir",
- "rng_normal.mlir",
- "rng_uniform.mlir",
- "rsqrt.mlir",
- "scatter.mlir",
- "scatter_dynamic.mlir",
- "select.mlir",
- "sine.mlir",
- "slice.mlir",
- "sort.mlir",
- "sqrt.mlir",
- "subtract.mlir",
- "tanh.mlir",
- "torch_index_select.mlir",
- "transpose.mlir",
- "while.mlir",
- ],
- include = ["*.mlir"],
- exclude = [
- "round.mlir",
- ],
- ),
- compiler_flags = ["-iree-input-type=mhlo"],
- driver = "cuda",
- runner_args = ["--cuda_use_streams=false"],
- tags = [
- # CUDA cuInit fails with sanitizer on.
- "noasan",
- "nomsan",
- "notsan",
- "noubsan",
- "requires-gpu-nvidia",
- ],
- target_backend = "cuda",
-)
+# Temporarily disabled pending build system changes.
+# iree_check_single_backend_test_suite(
+# name = "check_cuda_graph",
+# srcs = enforce_glob(
+# # keep sorted
+# [
+# "abs.mlir",
+# "add.mlir",
+# "batch_norm_inference.mlir",
+# "bitcast_convert.mlir",
+# "broadcast.mlir",
+# "broadcast_add.mlir",
+# "broadcast_in_dim.mlir",
+# "clamp.mlir",
+# "compare.mlir",
+# "concatenate.mlir",
+# "constant.mlir",
+# "convert.mlir",
+# "convolution.mlir",
+# "cosine.mlir",
+# "divide.mlir",
+# "dot.mlir",
+# "dot_general.mlir",
+# "dynamic_slice.mlir",
+# "dynamic_update_slice.mlir",
+# "exponential.mlir",
+# "exponential_fp16.mlir",
+# "exponential_minus_one.mlir",
+# "fft.mlir",
+# "finite.mlir",
+# "floor.mlir",
+# "gather.mlir",
+# "iota.mlir",
+# "log.mlir",
+# "log_plus_one.mlir",
+# "maximum.mlir",
+# "minimum.mlir",
+# "multiply.mlir",
+# "negate.mlir",
+# "pad.mlir",
+# "pow.mlir",
+# "reduce.mlir",
+# "reduce_window.mlir",
+# "remainder.mlir",
+# "reshape.mlir",
+# "reverse.mlir",
+# "rng_normal.mlir",
+# "rng_uniform.mlir",
+# "rsqrt.mlir",
+# "scatter.mlir",
+# "scatter_dynamic.mlir",
+# "select.mlir",
+# "sine.mlir",
+# "slice.mlir",
+# "sort.mlir",
+# "sqrt.mlir",
+# "subtract.mlir",
+# "tanh.mlir",
+# "torch_index_select.mlir",
+# "transpose.mlir",
+# "while.mlir",
+# ],
+# include = ["*.mlir"],
+# exclude = [
+# "round.mlir",
+# ],
+# ),
+# compiler_flags = ["-iree-input-type=mhlo"],
+# driver = "cuda",
+# runner_args = ["--cuda_use_streams=false"],
+# tags = [
+# # CUDA cuInit fails with sanitizer on.
+# "noasan",
+# "nomsan",
+# "notsan",
+# "noubsan",
+# "requires-gpu-nvidia",
+# ],
+# target_backend = "cuda",
+# )
-# Run cuda tests using stream command buffer
-iree_check_single_backend_test_suite(
- name = "check_cuda_streams",
- srcs = enforce_glob(
- # keep sorted
- [
- "abs.mlir",
- "add.mlir",
- "batch_norm_inference.mlir",
- "bitcast_convert.mlir",
- "broadcast.mlir",
- "broadcast_add.mlir",
- "broadcast_in_dim.mlir",
- "clamp.mlir",
- "compare.mlir",
- "concatenate.mlir",
- "constant.mlir",
- "convert.mlir",
- "convolution.mlir",
- "cosine.mlir",
- "divide.mlir",
- "dot.mlir",
- "dot_general.mlir",
- "dynamic_slice.mlir",
- "dynamic_update_slice.mlir",
- "exponential.mlir",
- "exponential_fp16.mlir",
- "exponential_minus_one.mlir",
- "fft.mlir",
- "finite.mlir",
- "floor.mlir",
- "gather.mlir",
- "iota.mlir",
- "log.mlir",
- "log_plus_one.mlir",
- "maximum.mlir",
- "minimum.mlir",
- "multiply.mlir",
- "negate.mlir",
- "pad.mlir",
- "pow.mlir",
- "reduce.mlir",
- "reduce_window.mlir",
- "remainder.mlir",
- "reshape.mlir",
- "reverse.mlir",
- "rng_normal.mlir",
- "rng_uniform.mlir",
- "rsqrt.mlir",
- "scatter.mlir",
- "scatter_dynamic.mlir",
- "select.mlir",
- "sine.mlir",
- "slice.mlir",
- "sort.mlir",
- "sqrt.mlir",
- "subtract.mlir",
- "tanh.mlir",
- "torch_index_select.mlir",
- "transpose.mlir",
- "while.mlir",
- ],
- include = ["*.mlir"],
- exclude = [
- "round.mlir",
- ],
- ),
- compiler_flags = ["-iree-input-type=mhlo"],
- driver = "cuda",
- runner_args = ["--cuda_use_streams=true"],
- tags = [
- # CUDA cuInit fails with sanitizer on.
- "noasan",
- "nomsan",
- "notsan",
- "noubsan",
- "requires-gpu-nvidia",
- ],
- target_backend = "cuda",
-)
+# # Run cuda tests using stream command buffer
+# iree_check_single_backend_test_suite(
+# name = "check_cuda_streams",
+# srcs = enforce_glob(
+# # keep sorted
+# [
+# "abs.mlir",
+# "add.mlir",
+# "batch_norm_inference.mlir",
+# "bitcast_convert.mlir",
+# "broadcast.mlir",
+# "broadcast_add.mlir",
+# "broadcast_in_dim.mlir",
+# "clamp.mlir",
+# "compare.mlir",
+# "concatenate.mlir",
+# "constant.mlir",
+# "convert.mlir",
+# "convolution.mlir",
+# "cosine.mlir",
+# "divide.mlir",
+# "dot.mlir",
+# "dot_general.mlir",
+# "dynamic_slice.mlir",
+# "dynamic_update_slice.mlir",
+# "exponential.mlir",
+# "exponential_fp16.mlir",
+# "exponential_minus_one.mlir",
+# "fft.mlir",
+# "finite.mlir",
+# "floor.mlir",
+# "gather.mlir",
+# "iota.mlir",
+# "log.mlir",
+# "log_plus_one.mlir",
+# "maximum.mlir",
+# "minimum.mlir",
+# "multiply.mlir",
+# "negate.mlir",
+# "pad.mlir",
+# "pow.mlir",
+# "reduce.mlir",
+# "reduce_window.mlir",
+# "remainder.mlir",
+# "reshape.mlir",
+# "reverse.mlir",
+# "rng_normal.mlir",
+# "rng_uniform.mlir",
+# "rsqrt.mlir",
+# "scatter.mlir",
+# "scatter_dynamic.mlir",
+# "select.mlir",
+# "sine.mlir",
+# "slice.mlir",
+# "sort.mlir",
+# "sqrt.mlir",
+# "subtract.mlir",
+# "tanh.mlir",
+# "torch_index_select.mlir",
+# "transpose.mlir",
+# "while.mlir",
+# ],
+# include = ["*.mlir"],
+# exclude = [
+# "round.mlir",
+# ],
+# ),
+# compiler_flags = ["-iree-input-type=mhlo"],
+# driver = "cuda",
+# runner_args = ["--cuda_use_streams=true"],
+# tags = [
+# # CUDA cuInit fails with sanitizer on.
+# "noasan",
+# "nomsan",
+# "notsan",
+# "noubsan",
+# "requires-gpu-nvidia",
+# ],
+# target_backend = "cuda",
+# )
iree_check_single_backend_test_suite(
name = "check_dylib-llvm-aot_dylib",
diff --git a/iree/test/e2e/xla_ops/CMakeLists.txt b/iree/test/e2e/xla_ops/CMakeLists.txt
index 05236c9..0b0086e 100644
--- a/iree/test/e2e/xla_ops/CMakeLists.txt
+++ b/iree/test/e2e/xla_ops/CMakeLists.txt
@@ -12,156 +12,6 @@
iree_check_single_backend_test_suite(
NAME
- check_cuda_graph
- SRCS
- "abs.mlir"
- "add.mlir"
- "batch_norm_inference.mlir"
- "bitcast_convert.mlir"
- "broadcast.mlir"
- "broadcast_add.mlir"
- "broadcast_in_dim.mlir"
- "clamp.mlir"
- "compare.mlir"
- "concatenate.mlir"
- "constant.mlir"
- "convert.mlir"
- "convolution.mlir"
- "cosine.mlir"
- "divide.mlir"
- "dot.mlir"
- "dot_general.mlir"
- "dynamic_slice.mlir"
- "dynamic_update_slice.mlir"
- "exponential.mlir"
- "exponential_fp16.mlir"
- "exponential_minus_one.mlir"
- "fft.mlir"
- "finite.mlir"
- "floor.mlir"
- "gather.mlir"
- "iota.mlir"
- "log.mlir"
- "log_plus_one.mlir"
- "maximum.mlir"
- "minimum.mlir"
- "multiply.mlir"
- "negate.mlir"
- "pad.mlir"
- "pow.mlir"
- "reduce.mlir"
- "reduce_window.mlir"
- "remainder.mlir"
- "reshape.mlir"
- "reverse.mlir"
- "rng_normal.mlir"
- "rng_uniform.mlir"
- "rsqrt.mlir"
- "scatter.mlir"
- "scatter_dynamic.mlir"
- "select.mlir"
- "sine.mlir"
- "slice.mlir"
- "sort.mlir"
- "sqrt.mlir"
- "subtract.mlir"
- "tanh.mlir"
- "torch_index_select.mlir"
- "transpose.mlir"
- "while.mlir"
- TARGET_BACKEND
- "cuda"
- DRIVER
- "cuda"
- COMPILER_FLAGS
- "-iree-input-type=mhlo"
- RUNNER_ARGS
- "--cuda_use_streams=false"
- LABELS
- "noasan"
- "nomsan"
- "notsan"
- "noubsan"
- "requires-gpu-nvidia"
-)
-
-iree_check_single_backend_test_suite(
- NAME
- check_cuda_streams
- SRCS
- "abs.mlir"
- "add.mlir"
- "batch_norm_inference.mlir"
- "bitcast_convert.mlir"
- "broadcast.mlir"
- "broadcast_add.mlir"
- "broadcast_in_dim.mlir"
- "clamp.mlir"
- "compare.mlir"
- "concatenate.mlir"
- "constant.mlir"
- "convert.mlir"
- "convolution.mlir"
- "cosine.mlir"
- "divide.mlir"
- "dot.mlir"
- "dot_general.mlir"
- "dynamic_slice.mlir"
- "dynamic_update_slice.mlir"
- "exponential.mlir"
- "exponential_fp16.mlir"
- "exponential_minus_one.mlir"
- "fft.mlir"
- "finite.mlir"
- "floor.mlir"
- "gather.mlir"
- "iota.mlir"
- "log.mlir"
- "log_plus_one.mlir"
- "maximum.mlir"
- "minimum.mlir"
- "multiply.mlir"
- "negate.mlir"
- "pad.mlir"
- "pow.mlir"
- "reduce.mlir"
- "reduce_window.mlir"
- "remainder.mlir"
- "reshape.mlir"
- "reverse.mlir"
- "rng_normal.mlir"
- "rng_uniform.mlir"
- "rsqrt.mlir"
- "scatter.mlir"
- "scatter_dynamic.mlir"
- "select.mlir"
- "sine.mlir"
- "slice.mlir"
- "sort.mlir"
- "sqrt.mlir"
- "subtract.mlir"
- "tanh.mlir"
- "torch_index_select.mlir"
- "transpose.mlir"
- "while.mlir"
- TARGET_BACKEND
- "cuda"
- DRIVER
- "cuda"
- COMPILER_FLAGS
- "-iree-input-type=mhlo"
- RUNNER_ARGS
- "--cuda_use_streams=true"
- LABELS
- "noasan"
- "nomsan"
- "notsan"
- "noubsan"
- "requires-gpu-nvidia"
-)
-
-iree_check_single_backend_test_suite(
- NAME
check_dylib-llvm-aot_dylib
SRCS
"abs.mlir"
diff --git a/iree/tools/BUILD b/iree/tools/BUILD
index 4de58cd..bba42ff 100644
--- a/iree/tools/BUILD
+++ b/iree/tools/BUILD
@@ -135,6 +135,8 @@
deps = [
"@llvm-project//mlir:Affine",
"@llvm-project//mlir:AffineTransforms",
+ "@llvm-project//mlir:ArmNeon",
+ "@llvm-project//mlir:ArmNeon2dToIntr",
"@llvm-project//mlir:BufferizationDialect",
"@llvm-project//mlir:ConversionPasses",
"@llvm-project//mlir:GPUDialect",
@@ -193,7 +195,7 @@
srcs = ["init_targets.cc"],
hdrs = ["init_targets.h"],
local_defines = [
- "IREE_HAVE_CUDA_TARGET",
+ # "IREE_HAVE_CUDA_TARGET",
"IREE_HAVE_LLVMAOT_TARGET",
"IREE_HAVE_METALSPIRV_TARGET",
"IREE_HAVE_ROCM_TARGET",
@@ -201,7 +203,7 @@
"IREE_HAVE_VULKANSPIRV_TARGET",
],
deps = [
- "//iree/compiler/Dialect/HAL/Target/CUDA",
+ # "//iree/compiler/Dialect/HAL/Target/CUDA",
"//iree/compiler/Dialect/HAL/Target/LLVM",
"//iree/compiler/Dialect/HAL/Target/MetalSPIRV",
"//iree/compiler/Dialect/HAL/Target/ROCM",
@@ -389,6 +391,7 @@
"//iree/compiler/Translation:HALExecutable",
"//iree/compiler/Translation:IREEVM",
"@llvm-project//llvm:Support",
+ "@llvm-project//mlir:ArmNeonToLLVMIRTranslation",
"@llvm-project//mlir:IR",
"@llvm-project//mlir:LLVMToLLVMIRTranslation",
"@llvm-project//mlir:Pass",
diff --git a/iree/tools/CMakeLists.txt b/iree/tools/CMakeLists.txt
index eca2269..3c6caff 100644
--- a/iree/tools/CMakeLists.txt
+++ b/iree/tools/CMakeLists.txt
@@ -39,10 +39,10 @@
list(APPEND IREE_COMPILER_TARGETS iree::compiler::Dialect::HAL::Target::WebGPU)
list(APPEND IREE_COMPILER_TARGET_COPTS "-DIREE_HAVE_WEBGPU_TARGET")
endif()
-if("${IREE_TARGET_BACKEND_CUDA}")
- list(APPEND IREE_COMPILER_TARGETS iree::compiler::Dialect::HAL::Target::CUDA)
- list(APPEND IREE_COMPILER_TARGET_COPTS "-DIREE_HAVE_CUDA_TARGET")
-endif()
+# if("${IREE_TARGET_BACKEND_CUDA}")
+# list(APPEND IREE_COMPILER_TARGETS iree::compiler::Dialect::HAL::Target::CUDA)
+# list(APPEND IREE_COMPILER_TARGET_COPTS "-DIREE_HAVE_CUDA_TARGET")
+# endif()
if("${IREE_TARGET_BACKEND_ROCM}")
list(APPEND IREE_COMPILER_TARGETS iree::compiler::Dialect::HAL::Target::ROCM)
list(APPEND IREE_COMPILER_TARGET_COPTS "-DIREE_HAVE_ROCM_TARGET")
@@ -275,6 +275,8 @@
MLIRTosa
MLIRTosaTransforms
MLIRTransforms
+ MLIRArmNeon
+ MLIRArmNeon2dToIntr
MLIRVector
PUBLIC
)
@@ -373,6 +375,7 @@
::init_xla_dialects
LLVMSupport
MLIRIR
+ MLIRArmNeonToLLVMIRTranslation
MLIRLLVMToLLVMIRTranslation
MLIRSCFTransforms
MLIRPass
diff --git a/iree/tools/init_mlir_dialects.h b/iree/tools/init_mlir_dialects.h
index af7a9cb..4d9ec16 100644
--- a/iree/tools/init_mlir_dialects.h
+++ b/iree/tools/init_mlir_dialects.h
@@ -13,6 +13,7 @@
#define IREE_TOOLS_INIT_MLIR_DIALECTS_H_
#include "mlir/Dialect/Affine/IR/AffineOps.h"
+#include "mlir/Dialect/ArmNeon/ArmNeonDialect.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
@@ -49,6 +50,7 @@
scf::SCFDialect,
quant::QuantizationDialect,
spirv::SPIRVDialect,
+ arm_neon::ArmNeonDialect,
StandardOpsDialect,
mlir::arith::ArithmeticDialect,
vector::VectorDialect,
diff --git a/iree/tools/init_mlir_passes.h b/iree/tools/init_mlir_passes.h
index 4d6c5f3..5635714 100644
--- a/iree/tools/init_mlir_passes.h
+++ b/iree/tools/init_mlir_passes.h
@@ -62,6 +62,9 @@
// Linalg
registerLinalgPasses();
+ // LLVM
+ registerConvertArmNeon2dToIntrPass();
+
// MemRef
memref::registerMemRefPasses();
diff --git a/iree/tools/iree_translate_lib.cc b/iree/tools/iree_translate_lib.cc
index f12725f..0fe1b8e 100644
--- a/iree/tools/iree_translate_lib.cc
+++ b/iree/tools/iree_translate_lib.cc
@@ -36,6 +36,7 @@
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/Timing.h"
#include "mlir/Support/ToolUtilities.h"
+#include "mlir/Target/LLVMIR/Dialect/ArmNeon/ArmNeonToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h"
#include "mlir/Translation.h"
@@ -44,6 +45,8 @@
mlir::DialectRegistry registry;
mlir::registerMlirDialects(registry);
mlir::registerLLVMDialectTranslation(registry);
+ // TODO: Make this conditional?
+ mlir::registerArmNeonDialectTranslation(registry);
mlir::registerXLADialects(registry);
mlir::iree_compiler::registerAllPasses();
mlir::iree_compiler::registerIreeDialects(registry);
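The ArmNeon hooks added above follow the usual MLIR registration pattern: the dialect is made loadable and its LLVM IR translation interface is attached to the registry, so arm_neon ops emitted by mmt4d codegen can be exported to LLVM IR. Roughly (a sketch assembled from the includes and calls shown in this diff; the wrapper function is hypothetical):
```
// Sketch assembled from the includes and calls shown in this diff; the wrapper
// function itself is hypothetical, and exact header paths may differ across
// MLIR versions.
#include "mlir/Dialect/ArmNeon/ArmNeonDialect.h"
#include "mlir/IR/Dialect.h"
#include "mlir/Target/LLVMIR/Dialect/ArmNeon/ArmNeonToLLVMIRTranslation.h"

void registerArmNeonSupport(mlir::DialectRegistry &registry) {
  // Make the arm_neon dialect loadable so codegen can emit its ops.
  registry.insert<mlir::arm_neon::ArmNeonDialect>();
  // Attach the LLVM IR translation interface so arm_neon intrinsics can be
  // exported when the module is translated to LLVM IR.
  mlir::registerArmNeonDialectTranslation(registry);
}
```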
diff --git a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/ConvertToLoops.cpp b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/ConvertToLoops.cpp
index 985c004..4f9f8c5 100644
--- a/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/ConvertToLoops.cpp
+++ b/llvm-external-projects/iree-dialects/lib/Dialect/LinalgExt/Transforms/ConvertToLoops.cpp
@@ -99,7 +99,7 @@
void runOnOperation() override {
MLIRContext *context = &getContext();
- OwningRewritePatternList patterns(context);
+ RewritePatternSet patterns(context);
patterns.insert<TiledOpInterfaceLowerToLoopsPattern>(context);
if (failed(applyPatternsAndFoldGreedily(getOperation(),
std::move(patterns)))) {
diff --git a/third_party/cuda/LICENSE b/third_party/cuda/LICENSE
deleted file mode 100644
index 093a0ac..0000000
--- a/third_party/cuda/LICENSE
+++ /dev/null
@@ -1,41 +0,0 @@
-Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
-NOTICE TO LICENSEE:
-This source code and/or documentation ("Licensed Deliverables") are
-subject to NVIDIA intellectual property rights under U.S. and
-international Copyright laws.
-These Licensed Deliverables contained herein is PROPRIETARY and
-CONFIDENTIAL to NVIDIA and is being provided under the terms and
-conditions of a form of NVIDIA software license agreement by and
-between NVIDIA and Licensee ("License Agreement") or electronically
-accepted by Licensee. Notwithstanding any terms or conditions to
-the contrary in the License Agreement, reproduction or disclosure
-of the Licensed Deliverables to any third party without the express
-written consent of NVIDIA is prohibited.
-NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
-LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
-SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
-PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
-NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
-DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
-NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
-NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
-LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
-SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
-DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
-ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
-OF THESE LICENSED DELIVERABLES.
-U.S. Government End Users. These Licensed Deliverables are a
-"commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
-1995), consisting of "commercial computer software" and "commercial
-computer software documentation" as such terms are used in 48
-C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
-only as a commercial end item. Consistent with 48 C.F.R.12.212 and
-48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
-U.S. Government End Users acquire the Licensed Deliverables with
-only those rights set forth herein.
-Any use of the Licensed Deliverables in individual and commercial
-software must include, in the user documentation and internal
-comments to the code, the above Disclaimer and U.S. Government End
-Users Notice.
-
diff --git a/third_party/cuda/README.txt b/third_party/cuda/README.txt
deleted file mode 100644
index be1490c..0000000
--- a/third_party/cuda/README.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-This folder contains a subset of CUDA SDK headers needed to build IREE.
-It also contains libdevice.10.bc llvm moddule used to import __nv* function
-during cuda kernel compilation.
\ No newline at end of file
diff --git a/third_party/cuda/UPDATING.md b/third_party/cuda/UPDATING.md
deleted file mode 100644
index 39f9331..0000000
--- a/third_party/cuda/UPDATING.md
+++ /dev/null
@@ -1,13 +0,0 @@
-Those headers come from CUDA SDK.
-
-To update, install CUDA SDK locally:
-```
-sudo apt-get install cuda
-```
-
-Copy cuda.h, version.txt and libdevice.10.bc:
-```
-cp /usr/local/cuda/include/cuda.h ./include/
-cp /usr/local/cuda/version.txt .
-cp /usr/local/cuda/nvvm/libdevice/libdevice.10.bc ./nvvm/libdevice/
-```
diff --git a/third_party/cuda/include/cuda.h b/third_party/cuda/include/cuda.h
deleted file mode 100644
index 456fe0c..0000000
--- a/third_party/cuda/include/cuda.h
+++ /dev/null
@@ -1,15925 +0,0 @@
-/*
- * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
- *
- * NOTICE TO LICENSEE:
- *
- * This source code and/or documentation ("Licensed Deliverables") are
- * subject to NVIDIA intellectual property rights under U.S. and
- * international Copyright laws.
- *
- * These Licensed Deliverables contained herein is PROPRIETARY and
- * CONFIDENTIAL to NVIDIA and is being provided under the terms and
- * conditions of a form of NVIDIA software license agreement by and
- * between NVIDIA and Licensee ("License Agreement") or electronically
- * accepted by Licensee. Notwithstanding any terms or conditions to
- * the contrary in the License Agreement, reproduction or disclosure
- * of the Licensed Deliverables to any third party without the express
- * written consent of NVIDIA is prohibited.
- *
- * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
- * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
- * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
- * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
- * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
- * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
- * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
- * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
- * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
- * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
- * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
- * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
- * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THESE LICENSED DELIVERABLES.
- *
- * U.S. Government End Users. These Licensed Deliverables are a
- * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
- * 1995), consisting of "commercial computer software" and "commercial
- * computer software documentation" as such terms are used in 48
- * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
- * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
- * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
- * U.S. Government End Users acquire the Licensed Deliverables with
- * only those rights set forth herein.
- *
- * Any use of the Licensed Deliverables in individual and commercial
- * software must include, in the user documentation and internal
- * comments to the code, the above Disclaimer and U.S. Government End
- * Users Notice.
- */
-
-#ifndef __cuda_cuda_h__
-#define __cuda_cuda_h__
-
-#include <stdlib.h>
-#ifdef _MSC_VER
-typedef unsigned __int32 cuuint32_t;
-typedef unsigned __int64 cuuint64_t;
-#else
-#include <stdint.h>
-typedef uint32_t cuuint32_t;
-typedef uint64_t cuuint64_t;
-#endif
-
-#if defined(__CUDA_API_VERSION_INTERNAL) || defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED)
-#define __CUDA_DEPRECATED
-#elif defined(_MSC_VER)
-#define __CUDA_DEPRECATED __declspec(deprecated)
-#elif defined(__GNUC__)
-#define __CUDA_DEPRECATED __attribute__((deprecated))
-#else
-#define __CUDA_DEPRECATED
-#endif
-
-#if defined(CUDA_FORCE_API_VERSION)
-#error "CUDA_FORCE_API_VERSION is no longer supported."
-#endif
-
-#if defined(__CUDA_API_VERSION_INTERNAL) || defined(CUDA_API_PER_THREAD_DEFAULT_STREAM)
- #define __CUDA_API_PER_THREAD_DEFAULT_STREAM
- #define __CUDA_API_PTDS(api) api ## _ptds
- #define __CUDA_API_PTSZ(api) api ## _ptsz
-#else
- #define __CUDA_API_PTDS(api) api
- #define __CUDA_API_PTSZ(api) api
-#endif
-
-#define cuDeviceTotalMem cuDeviceTotalMem_v2
-#define cuCtxCreate cuCtxCreate_v2
-#define cuModuleGetGlobal cuModuleGetGlobal_v2
-#define cuMemGetInfo cuMemGetInfo_v2
-#define cuMemAlloc cuMemAlloc_v2
-#define cuMemAllocPitch cuMemAllocPitch_v2
-#define cuMemFree cuMemFree_v2
-#define cuMemGetAddressRange cuMemGetAddressRange_v2
-#define cuMemAllocHost cuMemAllocHost_v2
-#define cuMemHostGetDevicePointer cuMemHostGetDevicePointer_v2
-#define cuMemcpyHtoD __CUDA_API_PTDS(cuMemcpyHtoD_v2)
-#define cuMemcpyDtoH __CUDA_API_PTDS(cuMemcpyDtoH_v2)
-#define cuMemcpyDtoD __CUDA_API_PTDS(cuMemcpyDtoD_v2)
-#define cuMemcpyDtoA __CUDA_API_PTDS(cuMemcpyDtoA_v2)
-#define cuMemcpyAtoD __CUDA_API_PTDS(cuMemcpyAtoD_v2)
-#define cuMemcpyHtoA __CUDA_API_PTDS(cuMemcpyHtoA_v2)
-#define cuMemcpyAtoH __CUDA_API_PTDS(cuMemcpyAtoH_v2)
-#define cuMemcpyAtoA __CUDA_API_PTDS(cuMemcpyAtoA_v2)
-#define cuMemcpyHtoAAsync __CUDA_API_PTSZ(cuMemcpyHtoAAsync_v2)
-#define cuMemcpyAtoHAsync __CUDA_API_PTSZ(cuMemcpyAtoHAsync_v2)
-#define cuMemcpy2D __CUDA_API_PTDS(cuMemcpy2D_v2)
-#define cuMemcpy2DUnaligned __CUDA_API_PTDS(cuMemcpy2DUnaligned_v2)
-#define cuMemcpy3D __CUDA_API_PTDS(cuMemcpy3D_v2)
-#define cuMemcpyHtoDAsync __CUDA_API_PTSZ(cuMemcpyHtoDAsync_v2)
-#define cuMemcpyDtoHAsync __CUDA_API_PTSZ(cuMemcpyDtoHAsync_v2)
-#define cuMemcpyDtoDAsync __CUDA_API_PTSZ(cuMemcpyDtoDAsync_v2)
-#define cuMemcpy2DAsync __CUDA_API_PTSZ(cuMemcpy2DAsync_v2)
-#define cuMemcpy3DAsync __CUDA_API_PTSZ(cuMemcpy3DAsync_v2)
-#define cuMemsetD8 __CUDA_API_PTDS(cuMemsetD8_v2)
-#define cuMemsetD16 __CUDA_API_PTDS(cuMemsetD16_v2)
-#define cuMemsetD32 __CUDA_API_PTDS(cuMemsetD32_v2)
-#define cuMemsetD2D8 __CUDA_API_PTDS(cuMemsetD2D8_v2)
-#define cuMemsetD2D16 __CUDA_API_PTDS(cuMemsetD2D16_v2)
-#define cuMemsetD2D32 __CUDA_API_PTDS(cuMemsetD2D32_v2)
-#define cuArrayCreate cuArrayCreate_v2
-#define cuArrayGetDescriptor cuArrayGetDescriptor_v2
-#define cuArray3DCreate cuArray3DCreate_v2
-#define cuArray3DGetDescriptor cuArray3DGetDescriptor_v2
-#define cuTexRefSetAddress cuTexRefSetAddress_v2
-#define cuTexRefGetAddress cuTexRefGetAddress_v2
-#define cuGraphicsResourceGetMappedPointer cuGraphicsResourceGetMappedPointer_v2
-#define cuCtxDestroy cuCtxDestroy_v2
-#define cuCtxPopCurrent cuCtxPopCurrent_v2
-#define cuCtxPushCurrent cuCtxPushCurrent_v2
-#define cuStreamDestroy cuStreamDestroy_v2
-#define cuEventDestroy cuEventDestroy_v2
-#define cuTexRefSetAddress2D cuTexRefSetAddress2D_v3
-#define cuLinkCreate cuLinkCreate_v2
-#define cuLinkAddData cuLinkAddData_v2
-#define cuLinkAddFile cuLinkAddFile_v2
-#define cuMemHostRegister cuMemHostRegister_v2
-#define cuGraphicsResourceSetMapFlags cuGraphicsResourceSetMapFlags_v2
-#define cuStreamBeginCapture __CUDA_API_PTSZ(cuStreamBeginCapture_v2)
-#define cuDevicePrimaryCtxRelease cuDevicePrimaryCtxRelease_v2
-#define cuDevicePrimaryCtxReset cuDevicePrimaryCtxReset_v2
-#define cuDevicePrimaryCtxSetFlags cuDevicePrimaryCtxSetFlags_v2
-#define cuGraphInstantiate cuGraphInstantiate_v2
-
-#if defined(__CUDA_API_PER_THREAD_DEFAULT_STREAM)
- #define cuMemcpy __CUDA_API_PTDS(cuMemcpy)
- #define cuMemcpyAsync __CUDA_API_PTSZ(cuMemcpyAsync)
- #define cuMemcpyPeer __CUDA_API_PTDS(cuMemcpyPeer)
- #define cuMemcpyPeerAsync __CUDA_API_PTSZ(cuMemcpyPeerAsync)
- #define cuMemcpy3DPeer __CUDA_API_PTDS(cuMemcpy3DPeer)
- #define cuMemcpy3DPeerAsync __CUDA_API_PTSZ(cuMemcpy3DPeerAsync)
- #define cuMemPrefetchAsync __CUDA_API_PTSZ(cuMemPrefetchAsync)
-
- #define cuMemsetD8Async __CUDA_API_PTSZ(cuMemsetD8Async)
- #define cuMemsetD16Async __CUDA_API_PTSZ(cuMemsetD16Async)
- #define cuMemsetD32Async __CUDA_API_PTSZ(cuMemsetD32Async)
- #define cuMemsetD2D8Async __CUDA_API_PTSZ(cuMemsetD2D8Async)
- #define cuMemsetD2D16Async __CUDA_API_PTSZ(cuMemsetD2D16Async)
- #define cuMemsetD2D32Async __CUDA_API_PTSZ(cuMemsetD2D32Async)
-
- #define cuStreamGetPriority __CUDA_API_PTSZ(cuStreamGetPriority)
- #define cuStreamGetFlags __CUDA_API_PTSZ(cuStreamGetFlags)
- #define cuStreamGetCtx __CUDA_API_PTSZ(cuStreamGetCtx)
- #define cuStreamWaitEvent __CUDA_API_PTSZ(cuStreamWaitEvent)
- #define cuStreamEndCapture __CUDA_API_PTSZ(cuStreamEndCapture)
- #define cuStreamIsCapturing __CUDA_API_PTSZ(cuStreamIsCapturing)
- #define cuStreamGetCaptureInfo __CUDA_API_PTSZ(cuStreamGetCaptureInfo)
- #define cuStreamAddCallback __CUDA_API_PTSZ(cuStreamAddCallback)
- #define cuStreamAttachMemAsync __CUDA_API_PTSZ(cuStreamAttachMemAsync)
- #define cuStreamQuery __CUDA_API_PTSZ(cuStreamQuery)
- #define cuStreamSynchronize __CUDA_API_PTSZ(cuStreamSynchronize)
- #define cuEventRecord __CUDA_API_PTSZ(cuEventRecord)
- #define cuLaunchKernel __CUDA_API_PTSZ(cuLaunchKernel)
- #define cuLaunchHostFunc __CUDA_API_PTSZ(cuLaunchHostFunc)
- #define cuGraphicsMapResources __CUDA_API_PTSZ(cuGraphicsMapResources)
- #define cuGraphicsUnmapResources __CUDA_API_PTSZ(cuGraphicsUnmapResources)
-
- #define cuStreamWriteValue32 __CUDA_API_PTSZ(cuStreamWriteValue32)
- #define cuStreamWaitValue32 __CUDA_API_PTSZ(cuStreamWaitValue32)
- #define cuStreamWriteValue64 __CUDA_API_PTSZ(cuStreamWriteValue64)
- #define cuStreamWaitValue64 __CUDA_API_PTSZ(cuStreamWaitValue64)
- #define cuStreamBatchMemOp __CUDA_API_PTSZ(cuStreamBatchMemOp)
-
- #define cuLaunchCooperativeKernel __CUDA_API_PTSZ(cuLaunchCooperativeKernel)
-
- #define cuSignalExternalSemaphoresAsync __CUDA_API_PTSZ(cuSignalExternalSemaphoresAsync)
- #define cuWaitExternalSemaphoresAsync __CUDA_API_PTSZ(cuWaitExternalSemaphoresAsync)
-
- #define cuGraphLaunch __CUDA_API_PTSZ(cuGraphLaunch)
- #define cuStreamCopyAttributes __CUDA_API_PTSZ(cuStreamCopyAttributes)
- #define cuStreamGetAttribute __CUDA_API_PTSZ(cuStreamGetAttribute)
- #define cuStreamSetAttribute __CUDA_API_PTSZ(cuStreamSetAttribute)
-#endif
-
-/**
- * \file cuda.h
- * \brief Header file for the CUDA Toolkit application programming interface.
- *
- * \file cudaGL.h
- * \brief Header file for the OpenGL interoperability functions of the
- * low-level CUDA driver application programming interface.
- *
- * \file cudaD3D9.h
- * \brief Header file for the Direct3D 9 interoperability functions of the
- * low-level CUDA driver application programming interface.
- */
-
-/**
- * \defgroup CUDA_TYPES Data types used by CUDA driver
- * @{
- */
-
-/**
- * CUDA API version number
- */
-#define CUDA_VERSION 11000
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * CUDA device pointer
- * CUdeviceptr is defined as an unsigned integer type whose size matches the size of a pointer on the target platform.
- */
-#if defined(_WIN64) || defined(__LP64__)
-typedef unsigned long long CUdeviceptr;
-#else
-typedef unsigned int CUdeviceptr;
-#endif
-
-typedef int CUdevice; /**< CUDA device */
-typedef struct CUctx_st *CUcontext; /**< CUDA context */
-typedef struct CUmod_st *CUmodule; /**< CUDA module */
-typedef struct CUfunc_st *CUfunction; /**< CUDA function */
-typedef struct CUarray_st *CUarray; /**< CUDA array */
-typedef struct CUmipmappedArray_st *CUmipmappedArray; /**< CUDA mipmapped array */
-typedef struct CUtexref_st *CUtexref; /**< CUDA texture reference */
-typedef struct CUsurfref_st *CUsurfref; /**< CUDA surface reference */
-typedef struct CUevent_st *CUevent; /**< CUDA event */
-typedef struct CUstream_st *CUstream; /**< CUDA stream */
-typedef struct CUgraphicsResource_st *CUgraphicsResource; /**< CUDA graphics interop resource */
-typedef unsigned long long CUtexObject; /**< An opaque value that represents a CUDA texture object */
-typedef unsigned long long CUsurfObject; /**< An opaque value that represents a CUDA surface object */
-typedef struct CUextMemory_st *CUexternalMemory; /**< CUDA external memory */
-typedef struct CUextSemaphore_st *CUexternalSemaphore; /**< CUDA external semaphore */
-typedef struct CUgraph_st *CUgraph; /**< CUDA graph */
-typedef struct CUgraphNode_st *CUgraphNode; /**< CUDA graph node */
-typedef struct CUgraphExec_st *CUgraphExec; /**< CUDA executable graph */
-
-#ifndef CU_UUID_HAS_BEEN_DEFINED
-#define CU_UUID_HAS_BEEN_DEFINED
-typedef struct CUuuid_st { /**< CUDA definition of UUID */
- char bytes[16];
-} CUuuid;
-#endif
-
-/**
- * CUDA IPC handle size
- */
-#define CU_IPC_HANDLE_SIZE 64
-
-/**
- * CUDA IPC event handle
- */
-typedef struct CUipcEventHandle_st {
- char reserved[CU_IPC_HANDLE_SIZE];
-} CUipcEventHandle;
-
-/**
- * CUDA IPC mem handle
- */
-typedef struct CUipcMemHandle_st {
- char reserved[CU_IPC_HANDLE_SIZE];
-} CUipcMemHandle;
-
-/**
- * CUDA Ipc Mem Flags
- */
-typedef enum CUipcMem_flags_enum {
- CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS = 0x1 /**< Automatically enable peer access between remote devices as needed */
-} CUipcMem_flags;
-
-
-/**
- * CUDA Mem Attach Flags
- */
-typedef enum CUmemAttach_flags_enum {
- CU_MEM_ATTACH_GLOBAL = 0x1, /**< Memory can be accessed by any stream on any device */
- CU_MEM_ATTACH_HOST = 0x2, /**< Memory cannot be accessed by any stream on any device */
- CU_MEM_ATTACH_SINGLE = 0x4 /**< Memory can only be accessed by a single stream on the associated device */
-} CUmemAttach_flags;
-
-/**
- * Context creation flags
- */
-typedef enum CUctx_flags_enum {
- CU_CTX_SCHED_AUTO = 0x00, /**< Automatic scheduling */
- CU_CTX_SCHED_SPIN = 0x01, /**< Set spin as default scheduling */
- CU_CTX_SCHED_YIELD = 0x02, /**< Set yield as default scheduling */
- CU_CTX_SCHED_BLOCKING_SYNC = 0x04, /**< Set blocking synchronization as default scheduling */
- CU_CTX_BLOCKING_SYNC = 0x04, /**< Set blocking synchronization as default scheduling
- * \deprecated This flag was deprecated as of CUDA 4.0
- * and was replaced with ::CU_CTX_SCHED_BLOCKING_SYNC. */
- CU_CTX_SCHED_MASK = 0x07,
- CU_CTX_MAP_HOST = 0x08, /**< Support mapped pinned allocations */
- CU_CTX_LMEM_RESIZE_TO_MAX = 0x10, /**< Keep local memory allocation after launch */
- CU_CTX_FLAGS_MASK = 0x1f
-} CUctx_flags;
-
-/**
- * Stream creation flags
- */
-typedef enum CUstream_flags_enum {
- CU_STREAM_DEFAULT = 0x0, /**< Default stream flag */
- CU_STREAM_NON_BLOCKING = 0x1 /**< Stream does not synchronize with stream 0 (the NULL stream) */
-} CUstream_flags;
-
-/**
- * Legacy stream handle
- *
- * Stream handle that can be passed as a CUstream to use an implicit stream
- * with legacy synchronization behavior.
- *
- * See details of the \link_sync_behavior
- */
-#define CU_STREAM_LEGACY ((CUstream)0x1)
-
-/**
- * Per-thread stream handle
- *
- * Stream handle that can be passed as a CUstream to use an implicit stream
- * with per-thread synchronization behavior.
- *
- * See details of the \link_sync_behavior
- */
-#define CU_STREAM_PER_THREAD ((CUstream)0x2)
-
-/**
- * Event creation flags
- */
-typedef enum CUevent_flags_enum {
- CU_EVENT_DEFAULT = 0x0, /**< Default event flag */
- CU_EVENT_BLOCKING_SYNC = 0x1, /**< Event uses blocking synchronization */
- CU_EVENT_DISABLE_TIMING = 0x2, /**< Event will not record timing data */
- CU_EVENT_INTERPROCESS = 0x4 /**< Event is suitable for interprocess use. CU_EVENT_DISABLE_TIMING must be set */
-} CUevent_flags;
-
-/**
- * Flags for ::cuStreamWaitValue32 and ::cuStreamWaitValue64
- */
-typedef enum CUstreamWaitValue_flags_enum {
- CU_STREAM_WAIT_VALUE_GEQ = 0x0, /**< Wait until (int32_t)(*addr - value) >= 0 (or int64_t for 64 bit
- values). Note this is a cyclic comparison which ignores wraparound.
- (Default behavior.) */
- CU_STREAM_WAIT_VALUE_EQ = 0x1, /**< Wait until *addr == value. */
- CU_STREAM_WAIT_VALUE_AND = 0x2, /**< Wait until (*addr & value) != 0. */
- CU_STREAM_WAIT_VALUE_NOR = 0x3, /**< Wait until ~(*addr | value) != 0. Support for this operation can be
- queried with ::cuDeviceGetAttribute() and
- ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR.*/
- CU_STREAM_WAIT_VALUE_FLUSH = 1<<30 /**< Follow the wait operation with a flush of outstanding remote writes. This
- means that, if a remote write operation is guaranteed to have reached the
- device before the wait can be satisfied, that write is guaranteed to be
- visible to downstream device work. The device is permitted to reorder
- remote writes internally. For example, this flag would be required if
- two remote writes arrive in a defined order, the wait is satisfied by the
- second write, and downstream work needs to observe the first write.
- Support for this operation is restricted to selected platforms and can be
- queried with ::CU_DEVICE_ATTRIBUTE_CAN_USE_WAIT_VALUE_FLUSH.*/
-} CUstreamWaitValue_flags;
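Editor's note: a minimal usage sketch (not part of the removed header), assuming a created `stream` and a device address `sem_addr` that some other agent advances; it issues the default GEQ-style wait described by the flags above, using only the driver API declared in this header.

// Hypothetical sketch: hold back later work on `stream` until the 32-bit
// value at `sem_addr` is >= `target` (CU_STREAM_WAIT_VALUE_GEQ semantics).
CUresult block_stream_until(CUstream stream, CUdeviceptr sem_addr,
                            cuuint32_t target) {
  return cuStreamWaitValue32(stream, sem_addr, target,
                             CU_STREAM_WAIT_VALUE_GEQ);
}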
-
-/**
- * Flags for ::cuStreamWriteValue32
- */
-typedef enum CUstreamWriteValue_flags_enum {
- CU_STREAM_WRITE_VALUE_DEFAULT = 0x0, /**< Default behavior */
- CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER = 0x1 /**< Permits the write to be reordered with writes which were issued
- before it, as a performance optimization. Normally,
- ::cuStreamWriteValue32 will provide a memory fence before the
- write, which has similar semantics to
- __threadfence_system() but is scoped to the stream
- rather than a CUDA thread. */
-} CUstreamWriteValue_flags;
-
-/**
- * Operations for ::cuStreamBatchMemOp
- */
-typedef enum CUstreamBatchMemOpType_enum {
- CU_STREAM_MEM_OP_WAIT_VALUE_32 = 1, /**< Represents a ::cuStreamWaitValue32 operation */
- CU_STREAM_MEM_OP_WRITE_VALUE_32 = 2, /**< Represents a ::cuStreamWriteValue32 operation */
- CU_STREAM_MEM_OP_WAIT_VALUE_64 = 4, /**< Represents a ::cuStreamWaitValue64 operation */
- CU_STREAM_MEM_OP_WRITE_VALUE_64 = 5, /**< Represents a ::cuStreamWriteValue64 operation */
- CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES = 3 /**< This has the same effect as ::CU_STREAM_WAIT_VALUE_FLUSH, but as a
- standalone operation. */
-} CUstreamBatchMemOpType;
-
-/**
- * Per-operation parameters for ::cuStreamBatchMemOp
- */
-typedef union CUstreamBatchMemOpParams_union {
- CUstreamBatchMemOpType operation;
- struct CUstreamMemOpWaitValueParams_st {
- CUstreamBatchMemOpType operation;
- CUdeviceptr address;
- union {
- cuuint32_t value;
- cuuint64_t value64;
- };
- unsigned int flags;
- CUdeviceptr alias; /**< For driver internal use. Initial value is unimportant. */
- } waitValue;
- struct CUstreamMemOpWriteValueParams_st {
- CUstreamBatchMemOpType operation;
- CUdeviceptr address;
- union {
- cuuint32_t value;
- cuuint64_t value64;
- };
- unsigned int flags;
- CUdeviceptr alias; /**< For driver internal use. Initial value is unimportant. */
- } writeValue;
- struct CUstreamMemOpFlushRemoteWritesParams_st {
- CUstreamBatchMemOpType operation;
- unsigned int flags;
- } flushRemoteWrites;
- cuuint64_t pad[6];
-} CUstreamBatchMemOpParams;
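Editor's note: a hedged sketch of how these per-operation parameters might be batched into one submission; the helper name and the `wait_addr`/`signal_addr` arguments are illustrative only.

// Hypothetical sketch: wait on one 32-bit location, then write another, in a
// single cuStreamBatchMemOp call. The `alias` fields may be left
// uninitialized per the comments above.
CUresult wait_then_signal(CUstream stream,
                          CUdeviceptr wait_addr, cuuint32_t wait_value,
                          CUdeviceptr signal_addr, cuuint32_t signal_value) {
  CUstreamBatchMemOpParams ops[2];
  ops[0].waitValue.operation = CU_STREAM_MEM_OP_WAIT_VALUE_32;
  ops[0].waitValue.address = wait_addr;
  ops[0].waitValue.value = wait_value;
  ops[0].waitValue.flags = CU_STREAM_WAIT_VALUE_GEQ;
  ops[1].writeValue.operation = CU_STREAM_MEM_OP_WRITE_VALUE_32;
  ops[1].writeValue.address = signal_addr;
  ops[1].writeValue.value = signal_value;
  ops[1].writeValue.flags = CU_STREAM_WRITE_VALUE_DEFAULT;
  return cuStreamBatchMemOp(stream, 2, ops, /*flags=*/0);
}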
-
-/**
- * Occupancy calculator flag
- */
-typedef enum CUoccupancy_flags_enum {
- CU_OCCUPANCY_DEFAULT = 0x0, /**< Default behavior */
- CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE = 0x1 /**< Assume global caching is enabled and cannot be automatically turned off */
-} CUoccupancy_flags;
-
-/**
- * Array formats
- */
-typedef enum CUarray_format_enum {
- CU_AD_FORMAT_UNSIGNED_INT8 = 0x01, /**< Unsigned 8-bit integers */
- CU_AD_FORMAT_UNSIGNED_INT16 = 0x02, /**< Unsigned 16-bit integers */
- CU_AD_FORMAT_UNSIGNED_INT32 = 0x03, /**< Unsigned 32-bit integers */
- CU_AD_FORMAT_SIGNED_INT8 = 0x08, /**< Signed 8-bit integers */
- CU_AD_FORMAT_SIGNED_INT16 = 0x09, /**< Signed 16-bit integers */
- CU_AD_FORMAT_SIGNED_INT32 = 0x0a, /**< Signed 32-bit integers */
- CU_AD_FORMAT_HALF = 0x10, /**< 16-bit floating point */
- CU_AD_FORMAT_FLOAT = 0x20 /**< 32-bit floating point */
-} CUarray_format;
-
-/**
- * Texture reference addressing modes
- */
-typedef enum CUaddress_mode_enum {
- CU_TR_ADDRESS_MODE_WRAP = 0, /**< Wrapping address mode */
- CU_TR_ADDRESS_MODE_CLAMP = 1, /**< Clamp to edge address mode */
- CU_TR_ADDRESS_MODE_MIRROR = 2, /**< Mirror address mode */
- CU_TR_ADDRESS_MODE_BORDER = 3 /**< Border address mode */
-} CUaddress_mode;
-
-/**
- * Texture reference filtering modes
- */
-typedef enum CUfilter_mode_enum {
- CU_TR_FILTER_MODE_POINT = 0, /**< Point filter mode */
- CU_TR_FILTER_MODE_LINEAR = 1 /**< Linear filter mode */
-} CUfilter_mode;
-
-/**
- * Device properties
- */
-typedef enum CUdevice_attribute_enum {
- CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK = 1, /**< Maximum number of threads per block */
- CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X = 2, /**< Maximum block dimension X */
- CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y = 3, /**< Maximum block dimension Y */
- CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z = 4, /**< Maximum block dimension Z */
- CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X = 5, /**< Maximum grid dimension X */
- CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y = 6, /**< Maximum grid dimension Y */
- CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z = 7, /**< Maximum grid dimension Z */
- CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK = 8, /**< Maximum shared memory available per block in bytes */
- CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK = 8, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK */
- CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY = 9, /**< Memory available on device for __constant__ variables in a CUDA C kernel in bytes */
- CU_DEVICE_ATTRIBUTE_WARP_SIZE = 10, /**< Warp size in threads */
- CU_DEVICE_ATTRIBUTE_MAX_PITCH = 11, /**< Maximum pitch in bytes allowed by memory copies */
- CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK = 12, /**< Maximum number of 32-bit registers available per block */
- CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK = 12, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK */
- CU_DEVICE_ATTRIBUTE_CLOCK_RATE = 13, /**< Typical clock frequency in kilohertz */
- CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT = 14, /**< Alignment requirement for textures */
- CU_DEVICE_ATTRIBUTE_GPU_OVERLAP = 15, /**< Device can possibly copy memory and execute a kernel concurrently. Deprecated. Use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT. */
- CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16, /**< Number of multiprocessors on device */
- CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT = 17, /**< Specifies whether there is a run time limit on kernels */
- CU_DEVICE_ATTRIBUTE_INTEGRATED = 18, /**< Device is integrated with host memory */
- CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY = 19, /**< Device can map host memory into CUDA address space */
- CU_DEVICE_ATTRIBUTE_COMPUTE_MODE = 20, /**< Compute mode (See ::CUcomputemode for details) */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH = 21, /**< Maximum 1D texture width */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH = 22, /**< Maximum 2D texture width */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT = 23, /**< Maximum 2D texture height */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH = 24, /**< Maximum 3D texture width */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT = 25, /**< Maximum 3D texture height */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH = 26, /**< Maximum 3D texture depth */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH = 27, /**< Maximum 2D layered texture width */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT = 28, /**< Maximum 2D layered texture height */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS = 29, /**< Maximum layers in a 2D layered texture */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH = 27, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT = 28, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES = 29, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS */
- CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT = 30, /**< Alignment requirement for surfaces */
- CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS = 31, /**< Device can possibly execute multiple kernels concurrently */
- CU_DEVICE_ATTRIBUTE_ECC_ENABLED = 32, /**< Device has ECC support enabled */
- CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33, /**< PCI bus ID of the device */
- CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34, /**< PCI device ID of the device */
- CU_DEVICE_ATTRIBUTE_TCC_DRIVER = 35, /**< Device is using TCC driver model */
- CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36, /**< Peak memory clock frequency in kilohertz */
- CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH = 37, /**< Global memory bus width in bits */
- CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE = 38, /**< Size of L2 cache in bytes */
- CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = 39, /**< Maximum resident threads per multiprocessor */
- CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT = 40, /**< Number of asynchronous engines */
- CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING = 41, /**< Device shares a unified address space with the host */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH = 42, /**< Maximum 1D layered texture width */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS = 43, /**< Maximum layers in a 1D layered texture */
- CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER = 44, /**< Deprecated, do not use. */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH = 45, /**< Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT = 46, /**< Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE = 47, /**< Alternate maximum 3D texture width */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE = 48, /**< Alternate maximum 3D texture height */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE = 49, /**< Alternate maximum 3D texture depth */
- CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID = 50, /**< PCI domain ID of the device */
- CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT = 51, /**< Pitch alignment requirement for textures */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH = 52, /**< Maximum cubemap texture width/height */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH = 53, /**< Maximum cubemap layered texture width/height */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS = 54, /**< Maximum layers in a cubemap layered texture */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH = 55, /**< Maximum 1D surface width */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH = 56, /**< Maximum 2D surface width */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT = 57, /**< Maximum 2D surface height */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH = 58, /**< Maximum 3D surface width */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT = 59, /**< Maximum 3D surface height */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH = 60, /**< Maximum 3D surface depth */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH = 61, /**< Maximum 1D layered surface width */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS = 62, /**< Maximum layers in a 1D layered surface */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH = 63, /**< Maximum 2D layered surface width */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT = 64, /**< Maximum 2D layered surface height */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS = 65, /**< Maximum layers in a 2D layered surface */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH = 66, /**< Maximum cubemap surface width */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH = 67, /**< Maximum cubemap layered surface width */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS = 68, /**< Maximum layers in a cubemap layered surface */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH = 69, /**< Maximum 1D linear texture width */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH = 70, /**< Maximum 2D linear texture width */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT = 71, /**< Maximum 2D linear texture height */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH = 72, /**< Maximum 2D linear texture pitch in bytes */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH = 73, /**< Maximum mipmapped 2D texture width */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT = 74, /**< Maximum mipmapped 2D texture height */
- CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR = 75, /**< Major compute capability version number */
- CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR = 76, /**< Minor compute capability version number */
- CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH = 77, /**< Maximum mipmapped 1D texture width */
- CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED = 78, /**< Device supports stream priorities */
- CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED = 79, /**< Device supports caching globals in L1 */
- CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED = 80, /**< Device supports caching locals in L1 */
- CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR = 81, /**< Maximum shared memory available per multiprocessor in bytes */
- CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR = 82, /**< Maximum number of 32-bit registers available per multiprocessor */
- CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY = 83, /**< Device can allocate managed memory on this system */
- CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD = 84, /**< Device is on a multi-GPU board */
- CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID = 85, /**< Unique id for a group of devices on the same multi-GPU board */
- CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED = 86, /**< Link between the device and the host supports native atomic operations (this is a placeholder attribute, and is not supported on any current hardware)*/
- CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO = 87, /**< Ratio of single precision performance (in floating-point operations per second) to double precision performance */
- CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS = 88, /**< Device supports coherently accessing pageable memory without calling cudaHostRegister on it */
- CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS = 89, /**< Device can coherently access managed memory concurrently with the CPU */
- CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED = 90, /**< Device supports compute preemption. */
- CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM = 91, /**< Device can access host registered memory at the same virtual address as the CPU */
- CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS = 92, /**< ::cuStreamBatchMemOp and related APIs are supported. */
- CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS = 93, /**< 64-bit operations are supported in ::cuStreamBatchMemOp and related APIs. */
- CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR = 94, /**< ::CU_STREAM_WAIT_VALUE_NOR is supported. */
- CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH = 95, /**< Device supports launching cooperative kernels via ::cuLaunchCooperativeKernel */
- CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH = 96, /**< Device can participate in cooperative kernels launched via ::cuLaunchCooperativeKernelMultiDevice */
- CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN = 97, /**< Maximum optin shared memory per block */
- CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES = 98, /**< Both the ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device. See \ref CUDA_MEMOP for additional details. */
- CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED = 99, /**< Device supports host memory registration via ::cudaHostRegister. */
- CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES = 100, /**< Device accesses pageable memory via the host's page tables. */
- CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST = 101, /**< The host can directly access managed memory on the device without migration. */
- CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED = 102, /**< Device supports virtual address management APIs like ::cuMemAddressReserve, ::cuMemCreate, ::cuMemMap and related APIs */
- CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED = 103, /**< Device supports exporting memory to a posix file descriptor with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate */
- CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED = 104, /**< Device supports exporting memory to a Win32 NT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate */
- CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED = 105, /**< Device supports exporting memory to a Win32 KMT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate */

- CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR = 106, /**< Maximum number of blocks per multiprocessor */
- CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED = 107, /**< Device supports compression of memory */
- CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE = 108, /**< Device's maximum L2 persisting lines capacity setting in bytes */
- CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE = 109, /**< The maximum value of CUaccessPolicyWindow::num_bytes. */
- CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED = 110, /**< Device supports specifying the GPUDirect RDMA flag with ::cuMemCreate */
- CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK = 111, /**< Shared memory reserved by CUDA driver per block in bytes */
- CU_DEVICE_ATTRIBUTE_MAX
-} CUdevice_attribute;
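Editor's note: a small illustrative query (helper name hypothetical) showing how one of these attributes is typically read back before relying on the corresponding feature.

// Hypothetical sketch: check whether cuStreamWaitValue32/cuStreamBatchMemOp
// are usable on `device`; returns 0 on query failure or lack of support.
int device_supports_stream_mem_ops(CUdevice device) {
  int supported = 0;
  if (cuDeviceGetAttribute(&supported,
                           CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS,
                           device) != CUDA_SUCCESS) {
    return 0;
  }
  return supported;
}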
-
-/**
- * Legacy device properties
- */
-typedef struct CUdevprop_st {
- int maxThreadsPerBlock; /**< Maximum number of threads per block */
- int maxThreadsDim[3]; /**< Maximum size of each dimension of a block */
- int maxGridSize[3]; /**< Maximum size of each dimension of a grid */
- int sharedMemPerBlock; /**< Shared memory available per block in bytes */
- int totalConstantMemory; /**< Constant memory available on device in bytes */
- int SIMDWidth; /**< Warp size in threads */
- int memPitch; /**< Maximum pitch in bytes allowed by memory copies */
- int regsPerBlock; /**< 32-bit registers available per block */
- int clockRate; /**< Clock frequency in kilohertz */
- int textureAlign; /**< Alignment requirement for textures */
-} CUdevprop;
-
-/**
- * Pointer information
- */
-typedef enum CUpointer_attribute_enum {
- CU_POINTER_ATTRIBUTE_CONTEXT = 1, /**< The ::CUcontext on which a pointer was allocated or registered */
- CU_POINTER_ATTRIBUTE_MEMORY_TYPE = 2, /**< The ::CUmemorytype describing the physical location of a pointer */
- CU_POINTER_ATTRIBUTE_DEVICE_POINTER = 3, /**< The address at which a pointer's memory may be accessed on the device */
- CU_POINTER_ATTRIBUTE_HOST_POINTER = 4, /**< The address at which a pointer's memory may be accessed on the host */
- CU_POINTER_ATTRIBUTE_P2P_TOKENS = 5, /**< A pair of tokens for use with the nv-p2p.h Linux kernel interface */
- CU_POINTER_ATTRIBUTE_SYNC_MEMOPS = 6, /**< Synchronize every synchronous memory operation initiated on this region */
- CU_POINTER_ATTRIBUTE_BUFFER_ID = 7, /**< A process-wide unique ID for an allocated memory region*/
- CU_POINTER_ATTRIBUTE_IS_MANAGED = 8, /**< Indicates if the pointer points to managed memory */
- CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL = 9, /**< A device ordinal of a device on which a pointer was allocated or registered */
- CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE = 10, /**< 1 if this pointer maps to an allocation that is suitable for ::cudaIpcGetMemHandle, 0 otherwise **/
- CU_POINTER_ATTRIBUTE_RANGE_START_ADDR = 11, /**< Starting address for this requested pointer */
- CU_POINTER_ATTRIBUTE_RANGE_SIZE = 12, /**< Size of the address range for this requested pointer */
- CU_POINTER_ATTRIBUTE_MAPPED = 13, /**< 1 if this pointer is in a valid address range that is mapped to a backing allocation, 0 otherwise **/
- CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES = 14, /**< Bitmask of allowed ::CUmemAllocationHandleType for this allocation **/
- CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE = 15 /**< 1 if the memory this pointer is referencing can be used with the GPUDirect RDMA API **/
-} CUpointer_attribute;
-
-/**
- * Function properties
- */
-typedef enum CUfunction_attribute_enum {
- /**
- * The maximum number of threads per block, beyond which a launch of the
- * function would fail. This number depends on both the function and the
- * device on which the function is currently loaded.
- */
- CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK = 0,
-
- /**
- * The size in bytes of statically-allocated shared memory required by
- * this function. This does not include dynamically-allocated shared
- * memory requested by the user at runtime.
- */
- CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES = 1,
-
- /**
- * The size in bytes of user-allocated constant memory required by this
- * function.
- */
- CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES = 2,
-
- /**
- * The size in bytes of local memory used by each thread of this function.
- */
- CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES = 3,
-
- /**
- * The number of registers used by each thread of this function.
- */
- CU_FUNC_ATTRIBUTE_NUM_REGS = 4,
-
- /**
- * The PTX virtual architecture version for which the function was
- * compiled. This value is the major PTX version * 10 + the minor PTX
- * version, so a PTX version 1.3 function would return the value 13.
- * Note that this may return the undefined value of 0 for cubins
- * compiled prior to CUDA 3.0.
- */
- CU_FUNC_ATTRIBUTE_PTX_VERSION = 5,
-
- /**
- * The binary architecture version for which the function was compiled.
- * This value is the major binary version * 10 + the minor binary version,
- * so a binary version 1.3 function would return the value 13. Note that
- * this will return a value of 10 for legacy cubins that do not have a
- * properly-encoded binary architecture version.
- */
- CU_FUNC_ATTRIBUTE_BINARY_VERSION = 6,
-
- /**
- * The attribute to indicate whether the function has been compiled with
- * user-specified option "-Xptxas --dlcm=ca" set.
- */
- CU_FUNC_ATTRIBUTE_CACHE_MODE_CA = 7,
-
- /**
- * The maximum size in bytes of dynamically-allocated shared memory that can be used by
- * this function. If the user-specified dynamic shared memory size is larger than this
- * value, the launch will fail.
- * See ::cuFuncSetAttribute
- */
- CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES = 8,
-
- /**
- * On devices where the L1 cache and shared memory use the same hardware resources,
- * this sets the shared memory carveout preference, in percent of the total shared memory.
- * Refer to ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR.
- * This is only a hint, and the driver can choose a different ratio if required to execute the function.
- * See ::cuFuncSetAttribute
- */
- CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = 9,
-
- CU_FUNC_ATTRIBUTE_MAX
-} CUfunction_attribute;
-
-/**
- * Function cache configurations
- */
-typedef enum CUfunc_cache_enum {
- CU_FUNC_CACHE_PREFER_NONE = 0x00, /**< no preference for shared memory or L1 (default) */
- CU_FUNC_CACHE_PREFER_SHARED = 0x01, /**< prefer larger shared memory and smaller L1 cache */
- CU_FUNC_CACHE_PREFER_L1 = 0x02, /**< prefer larger L1 cache and smaller shared memory */
- CU_FUNC_CACHE_PREFER_EQUAL = 0x03 /**< prefer equal sized L1 cache and shared memory */
-} CUfunc_cache;
-
-/**
- * Shared memory configurations
- */
-typedef enum CUsharedconfig_enum {
- CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE = 0x00, /**< set default shared memory bank size */
- CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE = 0x01, /**< set shared memory bank width to four bytes */
- CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE = 0x02 /**< set shared memory bank width to eight bytes */
-} CUsharedconfig;
-
-/**
- * Shared memory carveout configurations. These may be passed to ::cuFuncSetAttribute
- */
-typedef enum CUshared_carveout_enum {
- CU_SHAREDMEM_CARVEOUT_DEFAULT = -1, /**< No preference for shared memory or L1 (default) */
- CU_SHAREDMEM_CARVEOUT_MAX_SHARED = 100, /**< Prefer maximum available shared memory, minimum L1 cache */
- CU_SHAREDMEM_CARVEOUT_MAX_L1 = 0 /**< Prefer maximum available L1 cache, minimum shared memory */
-} CUshared_carveout;
-
-/**
- * Memory types
- */
-typedef enum CUmemorytype_enum {
- CU_MEMORYTYPE_HOST = 0x01, /**< Host memory */
- CU_MEMORYTYPE_DEVICE = 0x02, /**< Device memory */
- CU_MEMORYTYPE_ARRAY = 0x03, /**< Array memory */
- CU_MEMORYTYPE_UNIFIED = 0x04 /**< Unified device or host memory */
-} CUmemorytype;
-
-/**
- * Compute Modes
- */
-typedef enum CUcomputemode_enum {
- CU_COMPUTEMODE_DEFAULT = 0, /**< Default compute mode (Multiple contexts allowed per device) */
- CU_COMPUTEMODE_PROHIBITED = 2, /**< Compute-prohibited mode (No contexts can be created on this device at this time) */
- CU_COMPUTEMODE_EXCLUSIVE_PROCESS = 3 /**< Compute-exclusive-process mode (Only one context used by a single process can be present on this device at a time) */
-} CUcomputemode;
-
-/**
- * Memory advise values
- */
-typedef enum CUmem_advise_enum {
- CU_MEM_ADVISE_SET_READ_MOSTLY = 1, /**< Data will mostly be read and only occasionally be written to */
- CU_MEM_ADVISE_UNSET_READ_MOSTLY = 2, /**< Undo the effect of ::CU_MEM_ADVISE_SET_READ_MOSTLY */
- CU_MEM_ADVISE_SET_PREFERRED_LOCATION = 3, /**< Set the preferred location for the data as the specified device */
- CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION = 4, /**< Clear the preferred location for the data */
- CU_MEM_ADVISE_SET_ACCESSED_BY = 5, /**< Data will be accessed by the specified device, so prevent page faults as much as possible */
- CU_MEM_ADVISE_UNSET_ACCESSED_BY = 6 /**< Let the Unified Memory subsystem decide on the page faulting policy for the specified device */
-} CUmem_advise;
-
-typedef enum CUmem_range_attribute_enum {
- CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY = 1, /**< Whether the range will mostly be read and only occasionally be written to */
- CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION = 2, /**< The preferred location of the range */
- CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY = 3, /**< Memory range has ::CU_MEM_ADVISE_SET_ACCESSED_BY set for specified device */
- CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION = 4 /**< The last location to which the range was prefetched */
-} CUmem_range_attribute;
-
-/**
- * Online compiler and linker options
- */
-typedef enum CUjit_option_enum
-{
- /**
- * Max number of registers that a thread may use.\n
- * Option type: unsigned int\n
- * Applies to: compiler only
- */
- CU_JIT_MAX_REGISTERS = 0,
-
- /**
- * IN: Specifies minimum number of threads per block to target compilation
- * for\n
- * OUT: Returns the number of threads the compiler actually targeted.
- * This restricts the resource utilization of the compiler (e.g. max
- * registers) such that a block with the given number of threads should be
- * able to launch based on register limitations. Note, this option does not
- * currently take into account any other resource limitations, such as
- * shared memory utilization.\n
- * Cannot be combined with ::CU_JIT_TARGET.\n
- * Option type: unsigned int\n
- * Applies to: compiler only
- */
- CU_JIT_THREADS_PER_BLOCK,
-
- /**
- * Overwrites the option value with the total wall clock time, in
- * milliseconds, spent in the compiler and linker\n
- * Option type: float\n
- * Applies to: compiler and linker
- */
- CU_JIT_WALL_TIME,
-
- /**
- * Pointer to a buffer in which to print any log messages
- * that are informational in nature (the buffer size is specified via
- * option ::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES)\n
- * Option type: char *\n
- * Applies to: compiler and linker
- */
- CU_JIT_INFO_LOG_BUFFER,
-
- /**
- * IN: Log buffer size in bytes. Log messages will be capped at this size
- * (including null terminator)\n
- * OUT: Amount of log buffer filled with messages\n
- * Option type: unsigned int\n
- * Applies to: compiler and linker
- */
- CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES,
-
- /**
- * Pointer to a buffer in which to print any log messages that
- * reflect errors (the buffer size is specified via option
- * ::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES)\n
- * Option type: char *\n
- * Applies to: compiler and linker
- */
- CU_JIT_ERROR_LOG_BUFFER,
-
- /**
- * IN: Log buffer size in bytes. Log messages will be capped at this size
- * (including null terminator)\n
- * OUT: Amount of log buffer filled with messages\n
- * Option type: unsigned int\n
- * Applies to: compiler and linker
- */
- CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES,
-
- /**
- * Level of optimizations to apply to generated code (0 - 4), with 4
- * being the default and highest level of optimizations.\n
- * Option type: unsigned int\n
- * Applies to: compiler only
- */
- CU_JIT_OPTIMIZATION_LEVEL,
-
- /**
- * No option value required. Determines the target based on the current
- * attached context (default)\n
- * Option type: No option value needed\n
- * Applies to: compiler and linker
- */
- CU_JIT_TARGET_FROM_CUCONTEXT,
-
- /**
- * Target is chosen based on supplied ::CUjit_target. Cannot be
- * combined with ::CU_JIT_THREADS_PER_BLOCK.\n
- * Option type: unsigned int for enumerated type ::CUjit_target\n
- * Applies to: compiler and linker
- */
- CU_JIT_TARGET,
-
- /**
- * Specifies choice of fallback strategy if matching cubin is not found.
- * Choice is based on supplied ::CUjit_fallback. This option cannot be
- * used with cuLink* APIs as the linker requires exact matches.\n
- * Option type: unsigned int for enumerated type ::CUjit_fallback\n
- * Applies to: compiler only
- */
- CU_JIT_FALLBACK_STRATEGY,
-
- /**
- * Specifies whether to create debug information in output (-g)
- * (0: false, default)\n
- * Option type: int\n
- * Applies to: compiler and linker
- */
- CU_JIT_GENERATE_DEBUG_INFO,
-
- /**
- * Generate verbose log messages (0: false, default)\n
- * Option type: int\n
- * Applies to: compiler and linker
- */
- CU_JIT_LOG_VERBOSE,
-
- /**
- * Generate line number information (-lineinfo) (0: false, default)\n
- * Option type: int\n
- * Applies to: compiler only
- */
- CU_JIT_GENERATE_LINE_INFO,
-
- /**
- * Specifies whether to enable caching explicitly (-dlcm) \n
- * Choice is based on supplied ::CUjit_cacheMode_enum.\n
- * Option type: unsigned int for enumerated type ::CUjit_cacheMode_enum\n
- * Applies to: compiler only
- */
- CU_JIT_CACHE_MODE,
-
- /**
- * The following JIT options are used for internal purposes only in this version of CUDA
- */
- CU_JIT_NEW_SM3X_OPT,
- CU_JIT_FAST_COMPILE,
-
- /**
- * Array of device symbol names that will be relocated to the corresponding
- * host addresses stored in ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES.\n
- * Must contain ::CU_JIT_GLOBAL_SYMBOL_COUNT entries.\n
- * When loading a device module, the driver will relocate all encountered
- * unresolved symbols to the host addresses.\n
- * It is only allowed to register symbols that correspond to unresolved
- * global variables.\n
- * It is illegal to register the same device symbol at multiple addresses.\n
- * Option type: const char **\n
- * Applies to: dynamic linker only
- */
- CU_JIT_GLOBAL_SYMBOL_NAMES,
-
- /**
- * Array of host addresses that will be used to relocate corresponding
- * device symbols stored in ::CU_JIT_GLOBAL_SYMBOL_NAMES.\n
- * Must contain ::CU_JIT_GLOBAL_SYMBOL_COUNT entries.\n
- * Option type: void **\n
- * Applies to: dynamic linker only
- */
- CU_JIT_GLOBAL_SYMBOL_ADDRESSES,
-
- /**
- * Number of entries in ::CU_JIT_GLOBAL_SYMBOL_NAMES and
- * ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES arrays.\n
- * Option type: unsigned int\n
- * Applies to: dynamic linker only
- */
- CU_JIT_GLOBAL_SYMBOL_COUNT,
-
- CU_JIT_NUM_OPTIONS
-
-} CUjit_option;
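Editor's note: a hedged sketch (assuming <stdint.h> for uintptr_t) of wiring an error-log buffer through these JIT options when loading PTX with cuModuleLoadDataEx, which is declared elsewhere in this header; the helper name and buffer are illustrative.

// Hypothetical sketch: load a NUL-terminated PTX image and capture JIT
// diagnostics into a caller-provided buffer. Option values are passed as
// void* per the driver API convention (the size is cast through uintptr_t).
CUresult load_module_with_log(CUmodule* out_module, const void* ptx_image,
                              char* log_buffer, unsigned int log_size) {
  CUjit_option options[2] = {CU_JIT_ERROR_LOG_BUFFER,
                             CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES};
  void* values[2] = {log_buffer, (void*)(uintptr_t)log_size};
  return cuModuleLoadDataEx(out_module, ptx_image, 2, options, values);
}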
-
-/**
- * Online compilation targets
- */
-typedef enum CUjit_target_enum
-{
- CU_TARGET_COMPUTE_20 = 20, /**< Compute device class 2.0 */
- CU_TARGET_COMPUTE_21 = 21, /**< Compute device class 2.1 */
- CU_TARGET_COMPUTE_30 = 30, /**< Compute device class 3.0 */
- CU_TARGET_COMPUTE_32 = 32, /**< Compute device class 3.2 */
- CU_TARGET_COMPUTE_35 = 35, /**< Compute device class 3.5 */
- CU_TARGET_COMPUTE_37 = 37, /**< Compute device class 3.7 */
- CU_TARGET_COMPUTE_50 = 50, /**< Compute device class 5.0 */
- CU_TARGET_COMPUTE_52 = 52, /**< Compute device class 5.2 */
- CU_TARGET_COMPUTE_53 = 53, /**< Compute device class 5.3 */
- CU_TARGET_COMPUTE_60 = 60, /**< Compute device class 6.0.*/
- CU_TARGET_COMPUTE_61 = 61, /**< Compute device class 6.1.*/
- CU_TARGET_COMPUTE_62 = 62, /**< Compute device class 6.2.*/
- CU_TARGET_COMPUTE_70 = 70, /**< Compute device class 7.0.*/
- CU_TARGET_COMPUTE_72 = 72, /**< Compute device class 7.2.*/
- CU_TARGET_COMPUTE_75 = 75, /**< Compute device class 7.5.*/
- CU_TARGET_COMPUTE_80 = 80 /**< Compute device class 8.0.*/
-} CUjit_target;
-
-/**
- * Cubin matching fallback strategies
- */
-typedef enum CUjit_fallback_enum
-{
- CU_PREFER_PTX = 0, /**< Prefer to compile ptx if exact binary match not found */
-
- CU_PREFER_BINARY /**< Prefer to fall back to compatible binary code if exact match not found */
-
-} CUjit_fallback;
-
-/**
- * Caching modes for dlcm
- */
-typedef enum CUjit_cacheMode_enum
-{
- CU_JIT_CACHE_OPTION_NONE = 0, /**< Compile with no -dlcm flag specified */
- CU_JIT_CACHE_OPTION_CG, /**< Compile with L1 cache disabled */
- CU_JIT_CACHE_OPTION_CA /**< Compile with L1 cache enabled */
-} CUjit_cacheMode;
-
-/**
- * Device code formats
- */
-typedef enum CUjitInputType_enum
-{
- /**
- * Compiled device-class-specific device code\n
- * Applicable options: none
- */
- CU_JIT_INPUT_CUBIN = 0,
-
- /**
- * PTX source code\n
- * Applicable options: PTX compiler options
- */
- CU_JIT_INPUT_PTX,
-
- /**
- * Bundle of multiple cubins and/or PTX of some device code\n
- * Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY
- */
- CU_JIT_INPUT_FATBINARY,
-
- /**
- * Host object with embedded device code\n
- * Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY
- */
- CU_JIT_INPUT_OBJECT,
-
- /**
- * Archive of host objects with embedded device code\n
- * Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY
- */
- CU_JIT_INPUT_LIBRARY,
-
- CU_JIT_NUM_INPUT_TYPES
-} CUjitInputType;
-
-typedef struct CUlinkState_st *CUlinkState;
-
-/**
- * Flags to register a graphics resource
- */
-typedef enum CUgraphicsRegisterFlags_enum {
- CU_GRAPHICS_REGISTER_FLAGS_NONE = 0x00,
- CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY = 0x01,
- CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD = 0x02,
- CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST = 0x04,
- CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER = 0x08
-} CUgraphicsRegisterFlags;
-
-/**
- * Flags for mapping and unmapping interop resources
- */
-typedef enum CUgraphicsMapResourceFlags_enum {
- CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE = 0x00,
- CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY = 0x01,
- CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD = 0x02
-} CUgraphicsMapResourceFlags;
-
-/**
- * Array indices for cube faces
- */
-typedef enum CUarray_cubemap_face_enum {
- CU_CUBEMAP_FACE_POSITIVE_X = 0x00, /**< Positive X face of cubemap */
- CU_CUBEMAP_FACE_NEGATIVE_X = 0x01, /**< Negative X face of cubemap */
- CU_CUBEMAP_FACE_POSITIVE_Y = 0x02, /**< Positive Y face of cubemap */
- CU_CUBEMAP_FACE_NEGATIVE_Y = 0x03, /**< Negative Y face of cubemap */
- CU_CUBEMAP_FACE_POSITIVE_Z = 0x04, /**< Positive Z face of cubemap */
- CU_CUBEMAP_FACE_NEGATIVE_Z = 0x05 /**< Negative Z face of cubemap */
-} CUarray_cubemap_face;
-
-/**
- * Limits
- */
-typedef enum CUlimit_enum {
- CU_LIMIT_STACK_SIZE = 0x00, /**< GPU thread stack size */
- CU_LIMIT_PRINTF_FIFO_SIZE = 0x01, /**< GPU printf FIFO size */
- CU_LIMIT_MALLOC_HEAP_SIZE = 0x02, /**< GPU malloc heap size */
- CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH = 0x03, /**< GPU device runtime launch synchronize depth */
- CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT = 0x04, /**< GPU device runtime pending launch count */
- CU_LIMIT_MAX_L2_FETCH_GRANULARITY = 0x05, /**< A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint */
- CU_LIMIT_PERSISTING_L2_CACHE_SIZE = 0x06, /**< A size in bytes for L2 persisting lines cache size */
- CU_LIMIT_MAX
-} CUlimit;
-
-/**
- * Resource types
- */
-typedef enum CUresourcetype_enum {
- CU_RESOURCE_TYPE_ARRAY = 0x00, /**< Array resource */
- CU_RESOURCE_TYPE_MIPMAPPED_ARRAY = 0x01, /**< Mipmapped array resource */
- CU_RESOURCE_TYPE_LINEAR = 0x02, /**< Linear resource */
- CU_RESOURCE_TYPE_PITCH2D = 0x03 /**< Pitch 2D resource */
-} CUresourcetype;
-
-#ifdef _WIN32
-#define CUDA_CB __stdcall
-#else
-#define CUDA_CB
-#endif
-
-/**
- * CUDA host function
- * \param userData Argument value passed to the function
- */
-typedef void (CUDA_CB *CUhostFn)(void *userData);
-
-/**
- * Specifies performance hint with ::CUaccessPolicyWindow for hitProp and missProp members
- */
-typedef enum CUaccessProperty_enum {
- CU_ACCESS_PROPERTY_NORMAL = 0, /**< Normal cache persistence. */
- CU_ACCESS_PROPERTY_STREAMING = 1, /**< Streaming access is less likely to persist in the cache. */
- CU_ACCESS_PROPERTY_PERSISTING = 2 /**< Persisting access is more likely to persist in cache.*/
-} CUaccessProperty;
-
-/**
- * Specifies an access policy for a window, a contiguous extent of memory
- * beginning at base_ptr and ending at base_ptr + num_bytes.
- * num_bytes is limited by CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE.
- * Partition into many segments and assign segments such that:
- * sum of "hit segments" / window == approx. ratio.
- * sum of "miss segments" / window == approx 1-ratio.
- * Segments and ratio specifications are fitted to the capabilities of
- * the architecture.
- * Accesses in a hit segment apply the hitProp access policy.
- * Accesses in a miss segment apply the missProp access policy.
- */
-typedef struct CUaccessPolicyWindow_st {
- void *base_ptr; /**< Starting address of the access policy window. CUDA driver may align it. */
- size_t num_bytes; /**< Size in bytes of the window policy. CUDA driver may restrict the maximum size and alignment. */
- float hitRatio; /**< hitRatio specifies percentage of lines assigned hitProp, rest are assigned missProp. */
- CUaccessProperty hitProp; /**< ::CUaccessProperty set for hit. */
- CUaccessProperty missProp; /**< ::CUaccessProperty set for miss. Must be either NORMAL or STREAMING */
-} CUaccessPolicyWindow;
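Editor's note: a hedged sketch of attaching such a window to a stream via cuStreamSetAttribute and the CUstreamAttrValue union defined further below; the helper name and the 0.75 ratio are illustrative.

// Hypothetical sketch: request that ~75% of cache lines touched in
// [base, base + num_bytes) be treated as persisting for work on `stream`.
CUresult set_persisting_window(CUstream stream, void* base, size_t num_bytes) {
  CUstreamAttrValue value;
  value.accessPolicyWindow.base_ptr = base;
  value.accessPolicyWindow.num_bytes = num_bytes;
  value.accessPolicyWindow.hitRatio = 0.75f;
  value.accessPolicyWindow.hitProp = CU_ACCESS_PROPERTY_PERSISTING;
  value.accessPolicyWindow.missProp = CU_ACCESS_PROPERTY_STREAMING;
  return cuStreamSetAttribute(stream, CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW,
                              &value);
}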
-
-/**
- * GPU kernel node parameters
- */
-typedef struct CUDA_KERNEL_NODE_PARAMS_st {
- CUfunction func; /**< Kernel to launch */
- unsigned int gridDimX; /**< Width of grid in blocks */
- unsigned int gridDimY; /**< Height of grid in blocks */
- unsigned int gridDimZ; /**< Depth of grid in blocks */
- unsigned int blockDimX; /**< X dimension of each thread block */
- unsigned int blockDimY; /**< Y dimension of each thread block */
- unsigned int blockDimZ; /**< Z dimension of each thread block */
- unsigned int sharedMemBytes; /**< Dynamic shared-memory size per thread block in bytes */
- void **kernelParams; /**< Array of pointers to kernel parameters */
- void **extra; /**< Extra options */
-} CUDA_KERNEL_NODE_PARAMS;
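Editor's note: a hedged sketch of filling these parameters for a 1-D launch and adding them as a graph node with cuGraphAddKernelNode; `kernel` and `args` are assumed to be prepared elsewhere, and the helper name is hypothetical.

// Hypothetical sketch: add a dependency-free 1-D kernel launch to `graph`.
CUresult add_kernel_node(CUgraph graph, CUgraphNode* out_node,
                         CUfunction kernel, unsigned int grid_x,
                         unsigned int block_x, void** args) {
  CUDA_KERNEL_NODE_PARAMS params;
  params.func = kernel;
  params.gridDimX = grid_x;
  params.gridDimY = 1;
  params.gridDimZ = 1;
  params.blockDimX = block_x;
  params.blockDimY = 1;
  params.blockDimZ = 1;
  params.sharedMemBytes = 0;
  params.kernelParams = args;
  params.extra = NULL;
  return cuGraphAddKernelNode(out_node, graph, /*dependencies=*/NULL,
                              /*numDependencies=*/0, &params);
}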
-
-/**
- * Memset node parameters
- */
-typedef struct CUDA_MEMSET_NODE_PARAMS_st {
- CUdeviceptr dst; /**< Destination device pointer */
- size_t pitch; /**< Pitch of destination device pointer. Unused if height is 1 */
- unsigned int value; /**< Value to be set */
- unsigned int elementSize; /**< Size of each element in bytes. Must be 1, 2, or 4. */
- size_t width; /**< Width in bytes, of the row */
- size_t height; /**< Number of rows */
-} CUDA_MEMSET_NODE_PARAMS;
-
-/**
- * Host node parameters
- */
-typedef struct CUDA_HOST_NODE_PARAMS_st {
- CUhostFn fn; /**< The function to call when the node executes */
- void* userData; /**< Argument to pass to the function */
-} CUDA_HOST_NODE_PARAMS;
-
-/**
- * Graph node types
- */
-typedef enum CUgraphNodeType_enum {
- CU_GRAPH_NODE_TYPE_KERNEL = 0, /**< GPU kernel node */
- CU_GRAPH_NODE_TYPE_MEMCPY = 1, /**< Memcpy node */
- CU_GRAPH_NODE_TYPE_MEMSET = 2, /**< Memset node */
- CU_GRAPH_NODE_TYPE_HOST = 3, /**< Host (executable) node */
- CU_GRAPH_NODE_TYPE_GRAPH = 4, /**< Node which executes an embedded graph */
- CU_GRAPH_NODE_TYPE_EMPTY = 5 /**< Empty (no-op) node */
-} CUgraphNodeType;
-
-typedef enum CUsynchronizationPolicy_enum {
- CU_SYNC_POLICY_AUTO = 1,
- CU_SYNC_POLICY_SPIN = 2,
- CU_SYNC_POLICY_YIELD = 3,
- CU_SYNC_POLICY_BLOCKING_SYNC = 4
-} CUsynchronizationPolicy;
-
-/**
- * Graph kernel node Attributes
- */
-typedef enum CUkernelNodeAttrID_enum {
- CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW = 1, /**< Identifier for ::CUkernelNodeAttrValue::accessPolicyWindow. */
- CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE = 2 /**< Allows a kernel node to be cooperative (see ::cuLaunchCooperativeKernel). */
-} CUkernelNodeAttrID;
-
-/**
- * Graph attributes union, used with ::cuKernelNodeSetAttribute/::cuKernelNodeGetAttribute
- */
-typedef union CUkernelNodeAttrValue_union {
- CUaccessPolicyWindow accessPolicyWindow; /**< Attribute ::CUaccessPolicyWindow. */
- int cooperative; /**< Nonzero indicates a cooperative kernel (see ::cuLaunchCooperativeKernel). */
-} CUkernelNodeAttrValue;
-
-/**
- * Possible stream capture statuses returned by ::cuStreamIsCapturing
- */
-typedef enum CUstreamCaptureStatus_enum {
- CU_STREAM_CAPTURE_STATUS_NONE = 0, /**< Stream is not capturing */
- CU_STREAM_CAPTURE_STATUS_ACTIVE = 1, /**< Stream is actively capturing */
- CU_STREAM_CAPTURE_STATUS_INVALIDATED = 2 /**< Stream is part of a capture sequence that
- has been invalidated, but not terminated */
-} CUstreamCaptureStatus;
-
-/**
- * Possible modes for stream capture thread interactions. For more details see
- * ::cuStreamBeginCapture and ::cuThreadExchangeStreamCaptureMode
- */
-typedef enum CUstreamCaptureMode_enum {
- CU_STREAM_CAPTURE_MODE_GLOBAL = 0,
- CU_STREAM_CAPTURE_MODE_THREAD_LOCAL = 1,
- CU_STREAM_CAPTURE_MODE_RELAXED = 2
-} CUstreamCaptureMode;
-
-/**
- * Stream Attributes
- */
-typedef enum CUstreamAttrID_enum {
- CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW = 1, /**< Identifier for ::CUstreamAttrValue::accessPolicyWindow. */
- CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY = 3 /**< ::CUsynchronizationPolicy for work queued up in this stream */
-} CUstreamAttrID;
-
-/**
- * Stream attributes union, used with ::cuStreamSetAttribute/::cuStreamGetAttribute
- */
-typedef union CUstreamAttrValue_union {
- CUaccessPolicyWindow accessPolicyWindow; /**< Attribute ::CUaccessPolicyWindow. */
- CUsynchronizationPolicy syncPolicy; /**< Value for ::CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY. */
-} CUstreamAttrValue;
-
-/**
- * Error codes
- */
-typedef enum cudaError_enum {
- /**
- * The API call returned with no errors. In the case of query calls, this
- * also means that the operation being queried is complete (see
- * ::cuEventQuery() and ::cuStreamQuery()).
- */
- CUDA_SUCCESS = 0,
-
- /**
- * This indicates that one or more of the parameters passed to the API call
- * is not within an acceptable range of values.
- */
- CUDA_ERROR_INVALID_VALUE = 1,
-
- /**
- * The API call failed because it was unable to allocate enough memory to
- * perform the requested operation.
- */
- CUDA_ERROR_OUT_OF_MEMORY = 2,
-
- /**
- * This indicates that the CUDA driver has not been initialized with
- * ::cuInit() or that initialization has failed.
- */
- CUDA_ERROR_NOT_INITIALIZED = 3,
-
- /**
- * This indicates that the CUDA driver is in the process of shutting down.
- */
- CUDA_ERROR_DEINITIALIZED = 4,
-
- /**
- * This indicates profiler is not initialized for this run. This can
- * happen when the application is running with external profiling tools
- * like visual profiler.
- */
- CUDA_ERROR_PROFILER_DISABLED = 5,
-
- /**
- * \deprecated
- * This error return is deprecated as of CUDA 5.0. It is no longer an error
- * to attempt to enable/disable the profiling via ::cuProfilerStart or
- * ::cuProfilerStop without initialization.
- */
- CUDA_ERROR_PROFILER_NOT_INITIALIZED = 6,
-
- /**
- * \deprecated
- * This error return is deprecated as of CUDA 5.0. It is no longer an error
- * to call cuProfilerStart() when profiling is already enabled.
- */
- CUDA_ERROR_PROFILER_ALREADY_STARTED = 7,
-
- /**
- * \deprecated
- * This error return is deprecated as of CUDA 5.0. It is no longer an error
- * to call cuProfilerStop() when profiling is already disabled.
- */
- CUDA_ERROR_PROFILER_ALREADY_STOPPED = 8,
-
- /**
- * This indicates that no CUDA-capable devices were detected by the installed
- * CUDA driver.
- */
- CUDA_ERROR_NO_DEVICE = 100,
-
- /**
- * This indicates that the device ordinal supplied by the user does not
- * correspond to a valid CUDA device.
- */
- CUDA_ERROR_INVALID_DEVICE = 101,
-
-
- /**
- * This indicates that the device kernel image is invalid. This can also
- * indicate an invalid CUDA module.
- */
- CUDA_ERROR_INVALID_IMAGE = 200,
-
- /**
- * This most frequently indicates that there is no context bound to the
- * current thread. This can also be returned if the context passed to an
- * API call is not a valid handle (such as a context that has had
- * ::cuCtxDestroy() invoked on it). This can also be returned if a user
- * mixes different API versions (i.e. 3010 context with 3020 API calls).
- * See ::cuCtxGetApiVersion() for more details.
- */
- CUDA_ERROR_INVALID_CONTEXT = 201,
-
- /**
- * This indicated that the context being supplied as a parameter to the
- * API call was already the active context.
- * \deprecated
- * This error return is deprecated as of CUDA 3.2. It is no longer an
- * error to attempt to push the active context via ::cuCtxPushCurrent().
- */
- CUDA_ERROR_CONTEXT_ALREADY_CURRENT = 202,
-
- /**
- * This indicates that a map or register operation has failed.
- */
- CUDA_ERROR_MAP_FAILED = 205,
-
- /**
- * This indicates that an unmap or unregister operation has failed.
- */
- CUDA_ERROR_UNMAP_FAILED = 206,
-
- /**
- * This indicates that the specified array is currently mapped and thus
- * cannot be destroyed.
- */
- CUDA_ERROR_ARRAY_IS_MAPPED = 207,
-
- /**
- * This indicates that the resource is already mapped.
- */
- CUDA_ERROR_ALREADY_MAPPED = 208,
-
- /**
- * This indicates that there is no kernel image available that is suitable
- * for the device. This can occur when a user specifies code generation
- * options for a particular CUDA source file that do not include the
- * corresponding device configuration.
- */
- CUDA_ERROR_NO_BINARY_FOR_GPU = 209,
-
- /**
- * This indicates that a resource has already been acquired.
- */
- CUDA_ERROR_ALREADY_ACQUIRED = 210,
-
- /**
- * This indicates that a resource is not mapped.
- */
- CUDA_ERROR_NOT_MAPPED = 211,
-
- /**
- * This indicates that a mapped resource is not available for access as an
- * array.
- */
- CUDA_ERROR_NOT_MAPPED_AS_ARRAY = 212,
-
- /**
- * This indicates that a mapped resource is not available for access as a
- * pointer.
- */
- CUDA_ERROR_NOT_MAPPED_AS_POINTER = 213,
-
- /**
- * This indicates that an uncorrectable ECC error was detected during
- * execution.
- */
- CUDA_ERROR_ECC_UNCORRECTABLE = 214,
-
- /**
- * This indicates that the ::CUlimit passed to the API call is not
- * supported by the active device.
- */
- CUDA_ERROR_UNSUPPORTED_LIMIT = 215,
-
- /**
- * This indicates that the ::CUcontext passed to the API call can
- * only be bound to a single CPU thread at a time but is already
- * bound to a CPU thread.
- */
- CUDA_ERROR_CONTEXT_ALREADY_IN_USE = 216,
-
- /**
- * This indicates that peer access is not supported across the given
- * devices.
- */
- CUDA_ERROR_PEER_ACCESS_UNSUPPORTED = 217,
-
- /**
- * This indicates that a PTX JIT compilation failed.
- */
- CUDA_ERROR_INVALID_PTX = 218,
-
- /**
- * This indicates an error with OpenGL or DirectX context.
- */
- CUDA_ERROR_INVALID_GRAPHICS_CONTEXT = 219,
-
- /**
- * This indicates that an uncorrectable NVLink error was detected during the
- * execution.
- */
- CUDA_ERROR_NVLINK_UNCORRECTABLE = 220,
-
- /**
- * This indicates that the PTX JIT compiler library was not found.
- */
- CUDA_ERROR_JIT_COMPILER_NOT_FOUND = 221,
-
- /**
- * This indicates that the device kernel source is invalid.
- */
- CUDA_ERROR_INVALID_SOURCE = 300,
-
- /**
- * This indicates that the file specified was not found.
- */
- CUDA_ERROR_FILE_NOT_FOUND = 301,
-
- /**
- * This indicates that a link to a shared object failed to resolve.
- */
- CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND = 302,
-
- /**
- * This indicates that initialization of a shared object failed.
- */
- CUDA_ERROR_SHARED_OBJECT_INIT_FAILED = 303,
-
- /**
- * This indicates that an OS call failed.
- */
- CUDA_ERROR_OPERATING_SYSTEM = 304,
-
- /**
- * This indicates that a resource handle passed to the API call was not
- * valid. Resource handles are opaque types like ::CUstream and ::CUevent.
- */
- CUDA_ERROR_INVALID_HANDLE = 400,
-
- /**
- * This indicates that a resource required by the API call is not in a
- * valid state to perform the requested operation.
- */
- CUDA_ERROR_ILLEGAL_STATE = 401,
-
- /**
- * This indicates that a named symbol was not found. Examples of symbols
- * are global/constant variable names, texture names, and surface names.
- */
- CUDA_ERROR_NOT_FOUND = 500,
-
- /**
- * This indicates that asynchronous operations issued previously have not
- * completed yet. This result is not actually an error, but must be indicated
- * differently than ::CUDA_SUCCESS (which indicates completion). Calls that
- * may return this value include ::cuEventQuery() and ::cuStreamQuery().
- */
- CUDA_ERROR_NOT_READY = 600,
-
- /**
- * While executing a kernel, the device encountered a
- * load or store instruction on an invalid memory address.
- * This leaves the process in an inconsistent state and any further CUDA work
- * will return the same error. To continue using CUDA, the process must be terminated
- * and relaunched.
- */
- CUDA_ERROR_ILLEGAL_ADDRESS = 700,
-
- /**
- * This indicates that a launch did not occur because it did not have
- * appropriate resources. This error usually indicates that the user has
- * attempted to pass too many arguments to the device kernel, or the
- * kernel launch specifies too many threads for the kernel's register
- * count. Passing arguments of the wrong size (i.e. a 64-bit pointer
- * when a 32-bit int is expected) is equivalent to passing too many
- * arguments and can also result in this error.
- */
- CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES = 701,
-
- /**
- * This indicates that the device kernel took too long to execute. This can
- * only occur if timeouts are enabled - see the device attribute
- * ::CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT for more information.
- * This leaves the process in an inconsistent state and any further CUDA work
- * will return the same error. To continue using CUDA, the process must be terminated
- * and relaunched.
- */
- CUDA_ERROR_LAUNCH_TIMEOUT = 702,
-
- /**
- * This error indicates a kernel launch that uses an incompatible texturing
- * mode.
- */
- CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING = 703,
-
- /**
- * This error indicates that a call to ::cuCtxEnablePeerAccess() is
- * trying to re-enable peer access to a context which has already
- * had peer access to it enabled.
- */
- CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED = 704,
-
- /**
- * This error indicates that ::cuCtxDisablePeerAccess() is
- * trying to disable peer access which has not been enabled yet
- * via ::cuCtxEnablePeerAccess().
- */
- CUDA_ERROR_PEER_ACCESS_NOT_ENABLED = 705,
-
- /**
- * This error indicates that the primary context for the specified device
- * has already been initialized.
- */
- CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE = 708,
-
- /**
- * This error indicates that the context current to the calling thread
- * has been destroyed using ::cuCtxDestroy, or is a primary context which
- * has not yet been initialized.
- */
- CUDA_ERROR_CONTEXT_IS_DESTROYED = 709,
-
- /**
- * A device-side assert triggered during kernel execution. The context
- * cannot be used anymore, and must be destroyed. All existing device
- * memory allocations from this context are invalid and must be
- * reconstructed if the program is to continue using CUDA.
- */
- CUDA_ERROR_ASSERT = 710,
-
- /**
- * This error indicates that the hardware resources required to enable
- * peer access have been exhausted for one or more of the devices
- * passed to ::cuCtxEnablePeerAccess().
- */
- CUDA_ERROR_TOO_MANY_PEERS = 711,
-
- /**
- * This error indicates that the memory range passed to ::cuMemHostRegister()
- * has already been registered.
- */
- CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED = 712,
-
- /**
- * This error indicates that the pointer passed to ::cuMemHostUnregister()
- * does not correspond to any currently registered memory region.
- */
- CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED = 713,
-
- /**
- * While executing a kernel, the device encountered a stack error.
- * This can be due to stack corruption or exceeding the stack size limit.
- * This leaves the process in an inconsistent state and any further CUDA work
- * will return the same error. To continue using CUDA, the process must be terminated
- * and relaunched.
- */
- CUDA_ERROR_HARDWARE_STACK_ERROR = 714,
-
- /**
- * While executing a kernel, the device encountered an illegal instruction.
- * This leaves the process in an inconsistent state and any further CUDA work
- * will return the same error. To continue using CUDA, the process must be terminated
- * and relaunched.
- */
- CUDA_ERROR_ILLEGAL_INSTRUCTION = 715,
-
- /**
- * While executing a kernel, the device encountered a load or store instruction
- * on a memory address which is not aligned.
- * This leaves the process in an inconsistent state and any further CUDA work
- * will return the same error. To continue using CUDA, the process must be terminated
- * and relaunched.
- */
- CUDA_ERROR_MISALIGNED_ADDRESS = 716,
-
- /**
- * While executing a kernel, the device encountered an instruction
- * which can only operate on memory locations in certain address spaces
- * (global, shared, or local), but was supplied a memory address not
- * belonging to an allowed address space.
- * This leaves the process in an inconsistent state and any further CUDA work
- * will return the same error. To continue using CUDA, the process must be terminated
- * and relaunched.
- */
- CUDA_ERROR_INVALID_ADDRESS_SPACE = 717,
-
- /**
- * While executing a kernel, the device program counter wrapped its address space.
- * This leaves the process in an inconsistent state and any further CUDA work
- * will return the same error. To continue using CUDA, the process must be terminated
- * and relaunched.
- */
- CUDA_ERROR_INVALID_PC = 718,
-
- /**
- * An exception occurred on the device while executing a kernel. Common
- * causes include dereferencing an invalid device pointer and accessing
- * out of bounds shared memory. Less common cases can be system specific - more
- * information about these cases can be found in the system specific user guide.
- * This leaves the process in an inconsistent state and any further CUDA work
- * will return the same error. To continue using CUDA, the process must be terminated
- * and relaunched.
- */
- CUDA_ERROR_LAUNCH_FAILED = 719,
-
- /**
- * This error indicates that the number of blocks launched per grid for a kernel that was
- * launched via either ::cuLaunchCooperativeKernel or ::cuLaunchCooperativeKernelMultiDevice
- * exceeds the maximum number of blocks as allowed by ::cuOccupancyMaxActiveBlocksPerMultiprocessor
- * or ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number of multiprocessors
- * as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT.
- */
- CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE = 720,
-
- /**
- * This error indicates that the attempted operation is not permitted.
- */
- CUDA_ERROR_NOT_PERMITTED = 800,
-
- /**
- * This error indicates that the attempted operation is not supported
- * on the current system or device.
- */
- CUDA_ERROR_NOT_SUPPORTED = 801,
-
- /**
- * This error indicates that the system is not yet ready to start any CUDA
- * work. To continue using CUDA, verify the system configuration is in a
- * valid state and all required driver daemons are actively running.
- * More information about this error can be found in the system specific
- * user guide.
- */
- CUDA_ERROR_SYSTEM_NOT_READY = 802,
-
- /**
- * This error indicates that there is a mismatch between the versions of
- * the display driver and the CUDA driver. Refer to the compatibility documentation
- * for supported versions.
- */
- CUDA_ERROR_SYSTEM_DRIVER_MISMATCH = 803,
-
- /**
- * This error indicates that the system was upgraded to run with forward compatibility
- * but the visible hardware detected by CUDA does not support this configuration.
- * Refer to the compatibility documentation for the supported hardware matrix or ensure
- * that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES
- * environment variable.
- */
- CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE = 804,
-
- /**
- * This error indicates that the operation is not permitted when
- * the stream is capturing.
- */
- CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED = 900,
-
- /**
- * This error indicates that the current capture sequence on the stream
- * has been invalidated due to a previous error.
- */
- CUDA_ERROR_STREAM_CAPTURE_INVALIDATED = 901,
-
- /**
- * This error indicates that the operation would have resulted in a merge
- * of two independent capture sequences.
- */
- CUDA_ERROR_STREAM_CAPTURE_MERGE = 902,
-
- /**
- * This error indicates that the capture was not initiated in this stream.
- */
- CUDA_ERROR_STREAM_CAPTURE_UNMATCHED = 903,
-
- /**
- * This error indicates that the capture sequence contains a fork that was
- * not joined to the primary stream.
- */
- CUDA_ERROR_STREAM_CAPTURE_UNJOINED = 904,
-
- /**
- * This error indicates that a dependency would have been created which
- * crosses the capture sequence boundary. Only implicit in-stream ordering
- * dependencies are allowed to cross the boundary.
- */
- CUDA_ERROR_STREAM_CAPTURE_ISOLATION = 905,
-
- /**
- * This error indicates a disallowed implicit dependency on a current capture
- * sequence from cudaStreamLegacy.
- */
- CUDA_ERROR_STREAM_CAPTURE_IMPLICIT = 906,
-
- /**
- * This error indicates that the operation is not permitted on an event which
- * was last recorded in a capturing stream.
- */
- CUDA_ERROR_CAPTURED_EVENT = 907,
-
- /**
- * A stream capture sequence not initiated with the ::CU_STREAM_CAPTURE_MODE_RELAXED
- * argument to ::cuStreamBeginCapture was passed to ::cuStreamEndCapture in a
- * different thread.
- */
- CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD = 908,
-
- /**
- * This error indicates that the timeout specified for the wait operation has lapsed.
- */
- CUDA_ERROR_TIMEOUT = 909,
-
- /**
- * This error indicates that the graph update was not performed because it included
- * changes which violated constraints specific to instantiated graph update.
- */
- CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE = 910,
-
- /**
- * This indicates that an unknown internal error has occurred.
- */
- CUDA_ERROR_UNKNOWN = 999
-} CUresult;
-
-/**
- * P2P Attributes
- */
-typedef enum CUdevice_P2PAttribute_enum {
- CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK = 0x01, /**< A relative value indicating the performance of the link between two devices */
- CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED = 0x02, /**< P2P Access is enabled */
- CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED = 0x03, /**< Atomic operation over the link supported */
- CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED = 0x04, /**< \deprecated use CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED instead */
- CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED = 0x04 /**< Accessing CUDA arrays over the link supported */
-} CUdevice_P2PAttribute;
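/*
 * Illustrative sketch: querying a P2P attribute between two devices.
 * cuDeviceGet() and cuDeviceGetP2PAttribute() are assumed to be declared
 * elsewhere in this driver API header; error handling is omitted.
 */
static int exampleQueryP2PAccess(void) {
  CUdevice dev0, dev1;
  int accessSupported = 0;
  cuDeviceGet(&dev0, 0);
  cuDeviceGet(&dev1, 1);
  cuDeviceGetP2PAttribute(&accessSupported,
                          CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED, dev0, dev1);
  return accessSupported;  /* 1 if dev0 can access dev1's memory over the link */
}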
-
-/**
- * CUDA stream callback
- * \param hStream The stream the callback was added to, as passed to ::cuStreamAddCallback. May be NULL.
- * \param status ::CUDA_SUCCESS or any persistent error on the stream.
- * \param userData User parameter provided at registration.
- */
-typedef void (CUDA_CB *CUstreamCallback)(CUstream hStream, CUresult status, void *userData);
-
-/**
- * Block size to per-block dynamic shared memory mapping for a certain kernel.
- *
- * \param blockSize Block size of the kernel.
- * \return The dynamic shared memory needed by a block.
- */
-typedef size_t (CUDA_CB *CUoccupancyB2DSize)(int blockSize);
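/*
 * Illustrative sketch: host functions matching the CUstreamCallback and
 * CUoccupancyB2DSize signatures above. How they are registered (for example
 * via the stream or occupancy APIs) is outside this excerpt.
 */
static void CUDA_CB exampleStreamCallback(CUstream hStream, CUresult status,
                                          void *userData) {
  /* status is CUDA_SUCCESS or the first persistent error on the stream. */
  (void)hStream;
  if (status != CUDA_SUCCESS && userData) {
    *(CUresult *)userData = status;  /* record the error for the host code */
  }
}

static size_t CUDA_CB exampleBlockToDynamicSmem(int blockSize) {
  /* Dynamic shared memory needed by a block of the given size, in bytes. */
  return (size_t)blockSize * sizeof(float);
}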
-
-/**
- * If set, host memory is portable between CUDA contexts.
- * Flag for ::cuMemHostAlloc()
- */
-#define CU_MEMHOSTALLOC_PORTABLE 0x01
-
-/**
- * If set, host memory is mapped into CUDA address space and
- * ::cuMemHostGetDevicePointer() may be called on the host pointer.
- * Flag for ::cuMemHostAlloc()
- */
-#define CU_MEMHOSTALLOC_DEVICEMAP 0x02
-
-/**
- * If set, host memory is allocated as write-combined - fast to write,
- * faster to DMA, slow to read except via SSE4 streaming load instruction
- * (MOVNTDQA).
- * Flag for ::cuMemHostAlloc()
- */
-#define CU_MEMHOSTALLOC_WRITECOMBINED 0x04
-
-/**
- * If set, host memory is portable between CUDA contexts.
- * Flag for ::cuMemHostRegister()
- */
-#define CU_MEMHOSTREGISTER_PORTABLE 0x01
-
-/**
- * If set, host memory is mapped into CUDA address space and
- * ::cuMemHostGetDevicePointer() may be called on the host pointer.
- * Flag for ::cuMemHostRegister()
- */
-#define CU_MEMHOSTREGISTER_DEVICEMAP 0x02
-
-/**
- * If set, the passed memory pointer is treated as pointing to some
- * memory-mapped I/O space, e.g. belonging to a third-party PCIe device.
- * On Windows the flag is a no-op.
- * On Linux that memory is marked as non cache-coherent for the GPU and
- * is expected to be physically contiguous. It may return
- * CUDA_ERROR_NOT_PERMITTED if run as an unprivileged user,
- * CUDA_ERROR_NOT_SUPPORTED on older Linux kernel versions.
- * On all other platforms, it is not supported and CUDA_ERROR_NOT_SUPPORTED
- * is returned.
- * Flag for ::cuMemHostRegister()
- */
-#define CU_MEMHOSTREGISTER_IOMEMORY 0x04
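/*
 * Illustrative sketch: allocating mapped, portable pinned host memory with
 * the flags above. cuMemHostAlloc() and cuMemHostGetDevicePointer() are
 * assumed to be declared elsewhere in this driver API header; error handling
 * is omitted.
 */
static void *exampleAllocMappedHostMemory(size_t bytes, CUdeviceptr *outDevPtr) {
  void *hostPtr = NULL;
  cuMemHostAlloc(&hostPtr, bytes,
                 CU_MEMHOSTALLOC_PORTABLE | CU_MEMHOSTALLOC_DEVICEMAP);
  cuMemHostGetDevicePointer(outDevPtr, hostPtr, /*Flags=*/0);
  return hostPtr;
}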
-
-/**
- * 2D memory copy parameters
- */
-typedef struct CUDA_MEMCPY2D_st {
- size_t srcXInBytes; /**< Source X in bytes */
- size_t srcY; /**< Source Y */
-
- CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */
- const void *srcHost; /**< Source host pointer */
- CUdeviceptr srcDevice; /**< Source device pointer */
- CUarray srcArray; /**< Source array reference */
- size_t srcPitch; /**< Source pitch (ignored when src is array) */
-
- size_t dstXInBytes; /**< Destination X in bytes */
- size_t dstY; /**< Destination Y */
-
- CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */
- void *dstHost; /**< Destination host pointer */
- CUdeviceptr dstDevice; /**< Destination device pointer */
- CUarray dstArray; /**< Destination array reference */
- size_t dstPitch; /**< Destination pitch (ignored when dst is array) */
-
- size_t WidthInBytes; /**< Width of 2D memory copy in bytes */
- size_t Height; /**< Height of 2D memory copy */
-} CUDA_MEMCPY2D;
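/*
 * Illustrative sketch: a host-to-device pitched 2D copy described with the
 * struct above. cuMemcpy2D() and the CU_MEMORYTYPE_* enum values are assumed
 * to be declared elsewhere in this driver API header; memset requires
 * <string.h> and error handling is omitted.
 */
static void exampleCopyHostToDevice2D(const void *hostRows, size_t hostPitch,
                                      CUdeviceptr devBuffer, size_t devPitch,
                                      size_t widthInBytes, size_t height) {
  CUDA_MEMCPY2D copy2d;
  memset(&copy2d, 0, sizeof(copy2d));
  copy2d.srcMemoryType = CU_MEMORYTYPE_HOST;
  copy2d.srcHost       = hostRows;
  copy2d.srcPitch      = hostPitch;
  copy2d.dstMemoryType = CU_MEMORYTYPE_DEVICE;
  copy2d.dstDevice     = devBuffer;
  copy2d.dstPitch      = devPitch;      /* e.g. the pitch from cuMemAllocPitch() */
  copy2d.WidthInBytes  = widthInBytes;
  copy2d.Height        = height;
  cuMemcpy2D(&copy2d);
}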
-
-/**
- * 3D memory copy parameters
- */
-typedef struct CUDA_MEMCPY3D_st {
- size_t srcXInBytes; /**< Source X in bytes */
- size_t srcY; /**< Source Y */
- size_t srcZ; /**< Source Z */
- size_t srcLOD; /**< Source LOD */
- CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */
- const void *srcHost; /**< Source host pointer */
- CUdeviceptr srcDevice; /**< Source device pointer */
- CUarray srcArray; /**< Source array reference */
- void *reserved0; /**< Must be NULL */
- size_t srcPitch; /**< Source pitch (ignored when src is array) */
- size_t srcHeight; /**< Source height (ignored when src is array; may be 0 if Depth==1) */
-
- size_t dstXInBytes; /**< Destination X in bytes */
- size_t dstY; /**< Destination Y */
- size_t dstZ; /**< Destination Z */
- size_t dstLOD; /**< Destination LOD */
- CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */
- void *dstHost; /**< Destination host pointer */
- CUdeviceptr dstDevice; /**< Destination device pointer */
- CUarray dstArray; /**< Destination array reference */
- void *reserved1; /**< Must be NULL */
- size_t dstPitch; /**< Destination pitch (ignored when dst is array) */
- size_t dstHeight; /**< Destination height (ignored when dst is array; may be 0 if Depth==1) */
-
- size_t WidthInBytes; /**< Width of 3D memory copy in bytes */
- size_t Height; /**< Height of 3D memory copy */
- size_t Depth; /**< Depth of 3D memory copy */
-} CUDA_MEMCPY3D;
-
-/**
- * 3D memory cross-context copy parameters
- */
-typedef struct CUDA_MEMCPY3D_PEER_st {
- size_t srcXInBytes; /**< Source X in bytes */
- size_t srcY; /**< Source Y */
- size_t srcZ; /**< Source Z */
- size_t srcLOD; /**< Source LOD */
- CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */
- const void *srcHost; /**< Source host pointer */
- CUdeviceptr srcDevice; /**< Source device pointer */
- CUarray srcArray; /**< Source array reference */
- CUcontext srcContext;        /**< Source context (ignored when srcMemoryType is ::CU_MEMORYTYPE_ARRAY) */
- size_t srcPitch; /**< Source pitch (ignored when src is array) */
- size_t srcHeight; /**< Source height (ignored when src is array; may be 0 if Depth==1) */
-
- size_t dstXInBytes; /**< Destination X in bytes */
- size_t dstY; /**< Destination Y */
- size_t dstZ; /**< Destination Z */
- size_t dstLOD; /**< Destination LOD */
- CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */
- void *dstHost; /**< Destination host pointer */
- CUdeviceptr dstDevice; /**< Destination device pointer */
- CUarray dstArray; /**< Destination array reference */
- CUcontext dstContext;        /**< Destination context (ignored when dstMemoryType is ::CU_MEMORYTYPE_ARRAY) */
- size_t dstPitch; /**< Destination pitch (ignored when dst is array) */
- size_t dstHeight; /**< Destination height (ignored when dst is array; may be 0 if Depth==1) */
-
- size_t WidthInBytes; /**< Width of 3D memory copy in bytes */
- size_t Height; /**< Height of 3D memory copy */
- size_t Depth; /**< Depth of 3D memory copy */
-} CUDA_MEMCPY3D_PEER;
-
-/**
- * Array descriptor
- */
-typedef struct CUDA_ARRAY_DESCRIPTOR_st
-{
- size_t Width; /**< Width of array */
- size_t Height; /**< Height of array */
-
- CUarray_format Format; /**< Array format */
- unsigned int NumChannels; /**< Channels per array element */
-} CUDA_ARRAY_DESCRIPTOR;
-
-/**
- * 3D array descriptor
- */
-typedef struct CUDA_ARRAY3D_DESCRIPTOR_st
-{
- size_t Width; /**< Width of 3D array */
- size_t Height; /**< Height of 3D array */
- size_t Depth; /**< Depth of 3D array */
-
- CUarray_format Format; /**< Array format */
- unsigned int NumChannels; /**< Channels per array element */
- unsigned int Flags; /**< Flags */
-} CUDA_ARRAY3D_DESCRIPTOR;
-
-/**
- * CUDA Resource descriptor
- */
-typedef struct CUDA_RESOURCE_DESC_st
-{
- CUresourcetype resType; /**< Resource type */
-
- union {
- struct {
- CUarray hArray; /**< CUDA array */
- } array;
- struct {
- CUmipmappedArray hMipmappedArray; /**< CUDA mipmapped array */
- } mipmap;
- struct {
- CUdeviceptr devPtr; /**< Device pointer */
- CUarray_format format; /**< Array format */
- unsigned int numChannels; /**< Channels per array element */
- size_t sizeInBytes; /**< Size in bytes */
- } linear;
- struct {
- CUdeviceptr devPtr; /**< Device pointer */
- CUarray_format format; /**< Array format */
- unsigned int numChannels; /**< Channels per array element */
- size_t width; /**< Width of the array in elements */
- size_t height; /**< Height of the array in elements */
- size_t pitchInBytes; /**< Pitch between two rows in bytes */
- } pitch2D;
- struct {
- int reserved[32];
- } reserved;
- } res;
-
- unsigned int flags; /**< Flags (must be zero) */
-} CUDA_RESOURCE_DESC;
-
-/**
- * Texture descriptor
- */
-typedef struct CUDA_TEXTURE_DESC_st {
- CUaddress_mode addressMode[3]; /**< Address modes */
- CUfilter_mode filterMode; /**< Filter mode */
- unsigned int flags; /**< Flags */
- unsigned int maxAnisotropy; /**< Maximum anisotropy ratio */
- CUfilter_mode mipmapFilterMode; /**< Mipmap filter mode */
- float mipmapLevelBias; /**< Mipmap level bias */
- float minMipmapLevelClamp; /**< Mipmap minimum level clamp */
- float maxMipmapLevelClamp; /**< Mipmap maximum level clamp */
- float borderColor[4]; /**< Border Color */
- int reserved[12];
-} CUDA_TEXTURE_DESC;
-
-/**
- * Resource view format
- */
-typedef enum CUresourceViewFormat_enum
-{
- CU_RES_VIEW_FORMAT_NONE = 0x00, /**< No resource view format (use underlying resource format) */
- CU_RES_VIEW_FORMAT_UINT_1X8 = 0x01, /**< 1 channel unsigned 8-bit integers */
- CU_RES_VIEW_FORMAT_UINT_2X8 = 0x02, /**< 2 channel unsigned 8-bit integers */
- CU_RES_VIEW_FORMAT_UINT_4X8 = 0x03, /**< 4 channel unsigned 8-bit integers */
- CU_RES_VIEW_FORMAT_SINT_1X8 = 0x04, /**< 1 channel signed 8-bit integers */
- CU_RES_VIEW_FORMAT_SINT_2X8 = 0x05, /**< 2 channel signed 8-bit integers */
- CU_RES_VIEW_FORMAT_SINT_4X8 = 0x06, /**< 4 channel signed 8-bit integers */
- CU_RES_VIEW_FORMAT_UINT_1X16 = 0x07, /**< 1 channel unsigned 16-bit integers */
- CU_RES_VIEW_FORMAT_UINT_2X16 = 0x08, /**< 2 channel unsigned 16-bit integers */
- CU_RES_VIEW_FORMAT_UINT_4X16 = 0x09, /**< 4 channel unsigned 16-bit integers */
- CU_RES_VIEW_FORMAT_SINT_1X16 = 0x0a, /**< 1 channel signed 16-bit integers */
- CU_RES_VIEW_FORMAT_SINT_2X16 = 0x0b, /**< 2 channel signed 16-bit integers */
- CU_RES_VIEW_FORMAT_SINT_4X16 = 0x0c, /**< 4 channel signed 16-bit integers */
- CU_RES_VIEW_FORMAT_UINT_1X32 = 0x0d, /**< 1 channel unsigned 32-bit integers */
- CU_RES_VIEW_FORMAT_UINT_2X32 = 0x0e, /**< 2 channel unsigned 32-bit integers */
- CU_RES_VIEW_FORMAT_UINT_4X32 = 0x0f, /**< 4 channel unsigned 32-bit integers */
- CU_RES_VIEW_FORMAT_SINT_1X32 = 0x10, /**< 1 channel signed 32-bit integers */
- CU_RES_VIEW_FORMAT_SINT_2X32 = 0x11, /**< 2 channel signed 32-bit integers */
- CU_RES_VIEW_FORMAT_SINT_4X32 = 0x12, /**< 4 channel signed 32-bit integers */
- CU_RES_VIEW_FORMAT_FLOAT_1X16 = 0x13, /**< 1 channel 16-bit floating point */
- CU_RES_VIEW_FORMAT_FLOAT_2X16 = 0x14, /**< 2 channel 16-bit floating point */
- CU_RES_VIEW_FORMAT_FLOAT_4X16 = 0x15, /**< 4 channel 16-bit floating point */
- CU_RES_VIEW_FORMAT_FLOAT_1X32 = 0x16, /**< 1 channel 32-bit floating point */
- CU_RES_VIEW_FORMAT_FLOAT_2X32 = 0x17, /**< 2 channel 32-bit floating point */
- CU_RES_VIEW_FORMAT_FLOAT_4X32 = 0x18, /**< 4 channel 32-bit floating point */
- CU_RES_VIEW_FORMAT_UNSIGNED_BC1 = 0x19, /**< Block compressed 1 */
- CU_RES_VIEW_FORMAT_UNSIGNED_BC2 = 0x1a, /**< Block compressed 2 */
- CU_RES_VIEW_FORMAT_UNSIGNED_BC3 = 0x1b, /**< Block compressed 3 */
- CU_RES_VIEW_FORMAT_UNSIGNED_BC4 = 0x1c, /**< Block compressed 4 unsigned */
- CU_RES_VIEW_FORMAT_SIGNED_BC4 = 0x1d, /**< Block compressed 4 signed */
- CU_RES_VIEW_FORMAT_UNSIGNED_BC5 = 0x1e, /**< Block compressed 5 unsigned */
- CU_RES_VIEW_FORMAT_SIGNED_BC5 = 0x1f, /**< Block compressed 5 signed */
- CU_RES_VIEW_FORMAT_UNSIGNED_BC6H = 0x20, /**< Block compressed 6 unsigned half-float */
- CU_RES_VIEW_FORMAT_SIGNED_BC6H = 0x21, /**< Block compressed 6 signed half-float */
- CU_RES_VIEW_FORMAT_UNSIGNED_BC7 = 0x22 /**< Block compressed 7 */
-} CUresourceViewFormat;
-
-/**
- * Resource view descriptor
- */
-typedef struct CUDA_RESOURCE_VIEW_DESC_st
-{
- CUresourceViewFormat format; /**< Resource view format */
- size_t width; /**< Width of the resource view */
- size_t height; /**< Height of the resource view */
- size_t depth; /**< Depth of the resource view */
- unsigned int firstMipmapLevel; /**< First defined mipmap level */
- unsigned int lastMipmapLevel; /**< Last defined mipmap level */
- unsigned int firstLayer; /**< First layer index */
- unsigned int lastLayer; /**< Last layer index */
- unsigned int reserved[16];
-} CUDA_RESOURCE_VIEW_DESC;
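/*
 * Illustrative sketch: creating a texture object from a CUDA array with the
 * descriptors above. cuTexObjectCreate(), the CUtexObject type, and the
 * CU_RESOURCETYPE_* / CU_TR_* enums are assumed to be declared elsewhere in
 * this driver API header; memset requires <string.h> and error handling is
 * omitted.
 */
static CUtexObject exampleCreateTexObject(CUarray hArray) {
  CUDA_RESOURCE_DESC resDesc;
  CUDA_TEXTURE_DESC texDesc;
  CUtexObject texObj = 0;
  memset(&resDesc, 0, sizeof(resDesc));
  memset(&texDesc, 0, sizeof(texDesc));
  resDesc.resType = CU_RESOURCETYPE_ARRAY;
  resDesc.res.array.hArray = hArray;
  texDesc.addressMode[0] = CU_TR_ADDRESS_MODE_CLAMP;
  texDesc.filterMode     = CU_TR_FILTER_MODE_LINEAR;
  texDesc.flags          = CU_TRSF_NORMALIZED_COORDINATES;
  cuTexObjectCreate(&texObj, &resDesc, &texDesc, /*pResViewDesc=*/NULL);
  return texObj;
}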
-
-/**
- * GPU Direct v3 tokens
- */
-typedef struct CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st {
- unsigned long long p2pToken;
- unsigned int vaSpaceToken;
-} CUDA_POINTER_ATTRIBUTE_P2P_TOKENS;
-
-/**
- * Kernel launch parameters
- */
-typedef struct CUDA_LAUNCH_PARAMS_st {
- CUfunction function; /**< Kernel to launch */
- unsigned int gridDimX; /**< Width of grid in blocks */
- unsigned int gridDimY; /**< Height of grid in blocks */
- unsigned int gridDimZ; /**< Depth of grid in blocks */
- unsigned int blockDimX; /**< X dimension of each thread block */
- unsigned int blockDimY; /**< Y dimension of each thread block */
- unsigned int blockDimZ; /**< Z dimension of each thread block */
- unsigned int sharedMemBytes; /**< Dynamic shared-memory size per thread block in bytes */
- CUstream hStream; /**< Stream identifier */
- void **kernelParams; /**< Array of pointers to kernel parameters */
-} CUDA_LAUNCH_PARAMS;
-
-/**
- * External memory handle types
- */
-typedef enum CUexternalMemoryHandleType_enum {
- /**
- * Handle is an opaque file descriptor
- */
- CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD = 1,
- /**
- * Handle is an opaque shared NT handle
- */
- CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 = 2,
- /**
- * Handle is an opaque, globally shared handle
- */
- CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3,
- /**
- * Handle is a D3D12 heap object
- */
- CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP = 4,
- /**
- * Handle is a D3D12 committed resource
- */
- CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE = 5,
- /**
- * Handle is a shared NT handle to a D3D11 resource
- */
- CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE = 6,
- /**
- * Handle is a globally shared handle to a D3D11 resource
- */
- CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT = 7,
- /**
- * Handle is an NvSciBuf object
- */
- CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF = 8
-} CUexternalMemoryHandleType;
-
-/**
- * Indicates that the external memory object is a dedicated resource
- */
-#define CUDA_EXTERNAL_MEMORY_DEDICATED 0x1
-
-/** When the \p flags parameter of ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS
- * contains this flag, it indicates that signaling an external semaphore object
- * should skip performing appropriate memory synchronization operations over all
- * the external memory objects that are imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF,
- * which otherwise are performed by default to ensure data coherency with other
- * importers of the same NvSciBuf memory objects.
- */
-#define CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC 0x01
-
-/** When the \p flags parameter of ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS
- * contains this flag, it indicates that waiting on an external semaphore object
- * should skip performing appropriate memory synchronization operations over all
- * the external memory objects that are imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF,
- * which otherwise are performed by default to ensure data coherency with other
- * importers of the same NvSciBuf memory objects.
- */
-#define CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC 0x02
-
-/**
- * When \p flags of ::cuDeviceGetNvSciSyncAttributes is set to this,
- * it indicates that application needs signaler specific NvSciSyncAttr
- * to be filled by ::cuDeviceGetNvSciSyncAttributes.
- */
-#define CUDA_NVSCISYNC_ATTR_SIGNAL 0x1
-
-/**
- * When \p flags of ::cuDeviceGetNvSciSyncAttributes is set to this,
- * it indicates that application needs waiter specific NvSciSyncAttr
- * to be filled by ::cuDeviceGetNvSciSyncAttributes.
- */
-#define CUDA_NVSCISYNC_ATTR_WAIT 0x2
-/**
- * External memory handle descriptor
- */
-typedef struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st {
- /**
- * Type of the handle
- */
- CUexternalMemoryHandleType type;
- union {
- /**
- * File descriptor referencing the memory object. Valid
- * when type is
- * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD
- */
- int fd;
- /**
- * Win32 handle referencing the semaphore object. Valid when
- * type is one of the following:
- * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32
- * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT
- * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP
- * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE
- * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE
- * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT
- * Exactly one of 'handle' and 'name' must be non-NULL. If
- * type is one of the following:
- * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT
- * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT
- * then 'name' must be NULL.
- */
- struct {
- /**
- * Valid NT handle. Must be NULL if 'name' is non-NULL
- */
- void *handle;
- /**
- * Name of a valid memory object.
- * Must be NULL if 'handle' is non-NULL.
- */
- const void *name;
- } win32;
- /**
- * A handle representing an NvSciBuf Object. Valid when type
- * is ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF
- */
- const void *nvSciBufObject;
- } handle;
- /**
- * Size of the memory allocation
- */
- unsigned long long size;
- /**
- * Flags must either be zero or ::CUDA_EXTERNAL_MEMORY_DEDICATED
- */
- unsigned int flags;
- unsigned int reserved[16];
-} CUDA_EXTERNAL_MEMORY_HANDLE_DESC;
-
-/**
- * External memory buffer descriptor
- */
-typedef struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st {
- /**
- * Offset into the memory object where the buffer's base is
- */
- unsigned long long offset;
- /**
- * Size of the buffer
- */
- unsigned long long size;
- /**
- * Flags reserved for future use. Must be zero.
- */
- unsigned int flags;
- unsigned int reserved[16];
-} CUDA_EXTERNAL_MEMORY_BUFFER_DESC;
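/*
 * Illustrative sketch: importing external memory from an opaque file
 * descriptor and mapping a device buffer from it. cuImportExternalMemory(),
 * cuExternalMemoryGetMappedBuffer(), and the CUexternalMemory type are
 * assumed to be declared elsewhere in this driver API header; memset requires
 * <string.h> and error handling is omitted.
 */
static CUdeviceptr exampleImportExternalBuffer(int fd, unsigned long long size) {
  CUDA_EXTERNAL_MEMORY_HANDLE_DESC memDesc;
  CUDA_EXTERNAL_MEMORY_BUFFER_DESC bufDesc;
  CUexternalMemory extMem = NULL;
  CUdeviceptr devPtr = 0;
  memset(&memDesc, 0, sizeof(memDesc));
  memset(&bufDesc, 0, sizeof(bufDesc));
  memDesc.type      = CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD;
  memDesc.handle.fd = fd;
  memDesc.size      = size;
  cuImportExternalMemory(&extMem, &memDesc);
  bufDesc.offset = 0;
  bufDesc.size   = size;
  cuExternalMemoryGetMappedBuffer(&devPtr, extMem, &bufDesc);
  return devPtr;
}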
-
-/**
- * External memory mipmap descriptor
- */
-typedef struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st {
- /**
- * Offset into the memory object where the base level of the
- * mipmap chain is.
- */
- unsigned long long offset;
- /**
- * Format, dimension and type of base level of the mipmap chain
- */
- CUDA_ARRAY3D_DESCRIPTOR arrayDesc;
- /**
- * Total number of levels in the mipmap chain
- */
- unsigned int numLevels;
- unsigned int reserved[16];
-} CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC;
-
-/**
- * External semaphore handle types
- */
-typedef enum CUexternalSemaphoreHandleType_enum {
- /**
- * Handle is an opaque file descriptor
- */
- CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD = 1,
- /**
- * Handle is an opaque shared NT handle
- */
- CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 = 2,
- /**
- * Handle is an opaque, globally shared handle
- */
- CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3,
- /**
- * Handle is a shared NT handle referencing a D3D12 fence object
- */
- CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE = 4,
- /**
- * Handle is a shared NT handle referencing a D3D11 fence object
- */
- CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE = 5,
- /**
- * Opaque handle to NvSciSync Object
- */
- CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC = 6,
- /**
- * Handle is a shared NT handle referencing a D3D11 keyed mutex object
- */
- CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX = 7,
- /**
- * Handle is a globally shared handle referencing a D3D11 keyed mutex object
- */
- CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT = 8
-} CUexternalSemaphoreHandleType;
-
-/**
- * External semaphore handle descriptor
- */
-typedef struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st {
- /**
- * Type of the handle
- */
- CUexternalSemaphoreHandleType type;
- union {
- /**
- * File descriptor referencing the semaphore object. Valid
- * when type is
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD
- */
- int fd;
- /**
- * Win32 handle referencing the semaphore object. Valid when
- * type is one of the following:
- * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32
- * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT
- * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE
- * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE
- * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX
- * Exactly one of 'handle' and 'name' must be non-NULL. If
- * type is one of the following:
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT
- * then 'name' must be NULL.
- */
- struct {
- /**
- * Valid NT handle. Must be NULL if 'name' is non-NULL
- */
- void *handle;
- /**
- * Name of a valid synchronization primitive.
- * Must be NULL if 'handle' is non-NULL.
- */
- const void *name;
- } win32;
- /**
- * Valid NvSciSyncObj. Must be non NULL
- */
- const void* nvSciSyncObj;
- } handle;
- /**
- * Flags reserved for the future. Must be zero.
- */
- unsigned int flags;
- unsigned int reserved[16];
-} CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC;
-
-/**
- * External semaphore signal parameters
- */
-typedef struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st {
- struct {
- /**
- * Parameters for fence objects
- */
- struct {
- /**
- * Value of fence to be signaled
- */
- unsigned long long value;
- } fence;
- union {
- /**
- * Pointer to NvSciSyncFence. Valid if ::CUexternalSemaphoreHandleType
- * is of type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC.
- */
- void *fence;
- unsigned long long reserved;
- } nvSciSync;
- /**
- * Parameters for keyed mutex objects
- */
- struct {
- /**
- * Value of key to release the mutex with
- */
- unsigned long long key;
- } keyedMutex;
- unsigned int reserved[12];
- } params;
- /**
- * Only when ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS is used to
- * signal a ::CUexternalSemaphore of type
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is
- * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC which indicates
- * that while signaling the ::CUexternalSemaphore, no memory synchronization
- * operations should be performed for any external memory object imported
- * as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF.
- * For all other types of ::CUexternalSemaphore, flags must be zero.
- */
- unsigned int flags;
- unsigned int reserved[16];
-} CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS;
-
-/**
- * External semaphore wait parameters
- */
-typedef struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st {
- struct {
- /**
- * Parameters for fence objects
- */
- struct {
- /**
- * Value of fence to be waited on
- */
- unsigned long long value;
- } fence;
- /**
- * Pointer to NvSciSyncFence. Valid if CUexternalSemaphoreHandleType
- * is of type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC.
- */
- union {
- void *fence;
- unsigned long long reserved;
- } nvSciSync;
- /**
- * Parameters for keyed mutex objects
- */
- struct {
- /**
- * Value of key to acquire the mutex with
- */
- unsigned long long key;
- /**
- * Timeout in milliseconds to wait to acquire the mutex
- */
- unsigned int timeoutMs;
- } keyedMutex;
- unsigned int reserved[10];
- } params;
- /**
- * Only when ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS is used to wait on
- * a ::CUexternalSemaphore of type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC,
- * the valid flag is ::CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC
- * which indicates that while waiting for the ::CUexternalSemaphore, no memory
- * synchronization operations should be performed for any external memory
- * object imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF.
- * For all other types of ::CUexternalSemaphore, flags must be zero.
- */
- unsigned int flags;
- unsigned int reserved[16];
-} CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS;
-
-
-typedef unsigned long long CUmemGenericAllocationHandle;
-
-/**
- * Flags for specifying particular handle types
- */
-typedef enum CUmemAllocationHandleType_enum {
- CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR = 0x1, /**< Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int) */
- CU_MEM_HANDLE_TYPE_WIN32 = 0x2, /**< Allows a Win32 NT handle to be used for exporting. (HANDLE) */
- CU_MEM_HANDLE_TYPE_WIN32_KMT = 0x4, /**< Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE) */
- CU_MEM_HANDLE_TYPE_MAX = 0xFFFFFFFF
-} CUmemAllocationHandleType;
-
-/**
- * Specifies the memory protection flags for mapping.
- */
-typedef enum CUmemAccess_flags_enum {
- CU_MEM_ACCESS_FLAGS_PROT_NONE = 0x0, /**< Default, make the address range not accessible */
- CU_MEM_ACCESS_FLAGS_PROT_READ = 0x1, /**< Make the address range read accessible */
- CU_MEM_ACCESS_FLAGS_PROT_READWRITE = 0x3, /**< Make the address range read-write accessible */
- CU_MEM_ACCESS_FLAGS_PROT_MAX = 0xFFFFFFFF
-} CUmemAccess_flags;
-
-/**
- * Specifies the type of location
- */
-typedef enum CUmemLocationType_enum {
- CU_MEM_LOCATION_TYPE_INVALID = 0x0,
- CU_MEM_LOCATION_TYPE_DEVICE = 0x1, /**< Location is a device location, thus id is a device ordinal */
- CU_MEM_LOCATION_TYPE_MAX = 0xFFFFFFFF
-} CUmemLocationType;
-
-/**
-* Defines the allocation types available
-*/
-typedef enum CUmemAllocationType_enum {
- CU_MEM_ALLOCATION_TYPE_INVALID = 0x0,
-
- /** This allocation type is 'pinned', i.e. cannot migrate from its current
- * location while the application is actively using it
- */
- CU_MEM_ALLOCATION_TYPE_PINNED = 0x1,
- CU_MEM_ALLOCATION_TYPE_MAX = 0xFFFFFFFF
-} CUmemAllocationType;
-
-/**
-* Flag for requesting different optimal and required granularities for an allocation.
-*/
-typedef enum CUmemAllocationGranularity_flags_enum {
- CU_MEM_ALLOC_GRANULARITY_MINIMUM = 0x0, /**< Minimum required granularity for allocation */
- CU_MEM_ALLOC_GRANULARITY_RECOMMENDED = 0x1 /**< Recommended granularity for allocation for best performance */
-} CUmemAllocationGranularity_flags;
-
-/**
- * Specifies a location for an allocation.
- */
-typedef struct CUmemLocation_st {
- CUmemLocationType type; /**< Specifies the location type, which modifies the meaning of id. */
- int id;               /**< Identifier whose meaning depends on this location's ::CUmemLocationType (e.g. a device ordinal). */
-} CUmemLocation;
-
-/**
- * Specifies compression attribute for an allocation.
- */
-typedef enum CUmemAllocationCompType_enum {
- CU_MEM_ALLOCATION_COMP_NONE = 0x0, /**< Allocating non-compressible memory */
- CU_MEM_ALLOCATION_COMP_GENERIC = 0x1 /**< Allocating compressible memory */
-} CUmemAllocationCompType;
-
-/**
-* Specifies the allocation properties for an allocation.
-*/
-typedef struct CUmemAllocationProp_st {
- /** Allocation type */
- CUmemAllocationType type;
- /** requested ::CUmemAllocationHandleType */
- CUmemAllocationHandleType requestedHandleTypes;
- /** Location of allocation */
- CUmemLocation location;
- /**
- * Windows-specific LPSECURITYATTRIBUTES required when
- * ::CU_MEM_HANDLE_TYPE_WIN32 is specified. This security attribute defines
- * the scope of which exported allocations may be transferred to other
- * processes. In all other cases, this field is required to be zero.
- */
- void *win32HandleMetaData;
- struct {
- /**
- * Additional allocation hint for requesting compressible memory.
- * Compressed memory allows higher bandwidth, but may cause
- * compression resource thrashing, and compressed memory may not be
- * mappable on all devices.
- */
- unsigned char compressionType;
- unsigned char gpuDirectRDMACapable;
- unsigned char reserved[6];
- } allocFlags;
-} CUmemAllocationProp;
-
-/**
-* Memory access descriptor
-*/
-typedef struct CUmemAccessDesc_st {
- CUmemLocation location; /**< Location on which the request is to change its accessibility */
- CUmemAccess_flags flags; /**< Accessibility flags (::CUmemAccess_flags) to set on the request */
-} CUmemAccessDesc;
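/*
 * Illustrative sketch: the typical reserve/create/map/set-access flow that
 * consumes the descriptors above. cuMemGetAllocationGranularity, cuMemCreate,
 * cuMemAddressReserve, cuMemMap, and cuMemSetAccess are assumed to be
 * declared elsewhere in this driver API header; `size` must already be a
 * multiple of the reported granularity, memset requires <string.h>, and
 * error handling is omitted.
 */
static CUdeviceptr exampleVirtualAlloc(int deviceOrdinal, size_t size) {
  CUmemAllocationProp prop;
  CUmemAccessDesc access;
  CUmemGenericAllocationHandle handle = 0;
  CUdeviceptr ptr = 0;
  size_t granularity = 0;
  memset(&prop, 0, sizeof(prop));
  memset(&access, 0, sizeof(access));
  prop.type          = CU_MEM_ALLOCATION_TYPE_PINNED;
  prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
  prop.location.id   = deviceOrdinal;
  cuMemGetAllocationGranularity(&granularity, &prop,
                                CU_MEM_ALLOC_GRANULARITY_MINIMUM);
  cuMemCreate(&handle, size, &prop, 0);
  cuMemAddressReserve(&ptr, size, /*alignment=*/0, /*addr=*/0, /*flags=*/0);
  cuMemMap(ptr, size, /*offset=*/0, handle, /*flags=*/0);
  access.location = prop.location;
  access.flags    = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
  cuMemSetAccess(ptr, size, &access, /*count=*/1);
  return ptr;
}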
-
-typedef enum CUgraphExecUpdateResult_enum {
- CU_GRAPH_EXEC_UPDATE_SUCCESS = 0x0, /**< The update succeeded */
- CU_GRAPH_EXEC_UPDATE_ERROR = 0x1, /**< The update failed for an unexpected reason which is described in the return value of the function */
- CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED = 0x2, /**< The update failed because the topology changed */
- CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED = 0x3, /**< The update failed because a node type changed */
- CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED = 0x4, /**< The update failed because the function of a kernel node changed */
- CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED = 0x5, /**< The update failed because the parameters changed in a way that is not supported */
- CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED = 0x6 /**< The update failed because something about the node is not supported */
-} CUgraphExecUpdateResult;
-
-/**
- * If set, each kernel launched as part of ::cuLaunchCooperativeKernelMultiDevice only
- * waits for prior work in the stream corresponding to that GPU to complete before the
- * kernel begins execution.
- */
-#define CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC 0x01
-
-/**
- * If set, any subsequent work pushed in a stream that participated in a call to
- * ::cuLaunchCooperativeKernelMultiDevice will only wait for the kernel launched on
- * the GPU corresponding to that stream to complete before it begins execution.
- */
-#define CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC 0x02
-
-/**
- * If set, the CUDA array is a collection of layers, where each layer is either a 1D
- * or a 2D array and the Depth member of CUDA_ARRAY3D_DESCRIPTOR specifies the number
- * of layers, not the depth of a 3D array.
- */
-#define CUDA_ARRAY3D_LAYERED 0x01
-
-/**
- * Deprecated, use CUDA_ARRAY3D_LAYERED
- */
-#define CUDA_ARRAY3D_2DARRAY 0x01
-
-/**
- * This flag must be set in order to bind a surface reference
- * to the CUDA array
- */
-#define CUDA_ARRAY3D_SURFACE_LDST 0x02
-
-/**
- * If set, the CUDA array is a collection of six 2D arrays, representing faces of a cube. The
- * width of such a CUDA array must be equal to its height, and Depth must be six.
- * If ::CUDA_ARRAY3D_LAYERED flag is also set, then the CUDA array is a collection of cubemaps
- * and Depth must be a multiple of six.
- */
-#define CUDA_ARRAY3D_CUBEMAP 0x04
-
-/**
- * This flag must be set in order to perform texture gather operations
- * on a CUDA array.
- */
-#define CUDA_ARRAY3D_TEXTURE_GATHER 0x08
-
-/**
- * If set, this flag indicates that the CUDA
- * array is a DEPTH_TEXTURE.
- */
-#define CUDA_ARRAY3D_DEPTH_TEXTURE 0x10
-
-/**
- * This flag indicates that the CUDA array may be bound as a color target
- * in an external graphics API
- */
-#define CUDA_ARRAY3D_COLOR_ATTACHMENT 0x20
-
-/**
- * Override the texref format with a format inferred from the array.
- * Flag for ::cuTexRefSetArray()
- */
-#define CU_TRSA_OVERRIDE_FORMAT 0x01
-
-/**
- * Read the texture as integers rather than promoting the values to floats
- * in the range [0,1].
- * Flag for ::cuTexRefSetFlags() and ::cuTexObjectCreate()
- */
-#define CU_TRSF_READ_AS_INTEGER 0x01
-
-/**
- * Use normalized texture coordinates in the range [0,1) instead of [0,dim).
- * Flag for ::cuTexRefSetFlags() and ::cuTexObjectCreate()
- */
-#define CU_TRSF_NORMALIZED_COORDINATES 0x02
-
-/**
- * Perform sRGB->linear conversion during texture read.
- * Flag for ::cuTexRefSetFlags() and ::cuTexObjectCreate()
- */
-#define CU_TRSF_SRGB 0x10
-
- /**
- * Disable any trilinear filtering optimizations.
- * Flag for ::cuTexRefSetFlags() and ::cuTexObjectCreate()
- */
-#define CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION 0x20
-
-/**
- * End of array terminator for the \p extra parameter to
- * ::cuLaunchKernel
- */
-#define CU_LAUNCH_PARAM_END ((void*)0x00)
-
-/**
- * Indicator that the next value in the \p extra parameter to
- * ::cuLaunchKernel will be a pointer to a buffer containing all kernel
- * parameters used for launching kernel \p f. This buffer needs to
- * honor all alignment/padding requirements of the individual parameters.
- * If ::CU_LAUNCH_PARAM_BUFFER_SIZE is not also specified in the
- * \p extra array, then ::CU_LAUNCH_PARAM_BUFFER_POINTER will have no
- * effect.
- */
-#define CU_LAUNCH_PARAM_BUFFER_POINTER ((void*)0x01)
-
-/**
- * Indicator that the next value in the \p extra parameter to
- * ::cuLaunchKernel will be a pointer to a size_t which contains the
- * size of the buffer specified with ::CU_LAUNCH_PARAM_BUFFER_POINTER.
- * It is required that ::CU_LAUNCH_PARAM_BUFFER_POINTER also be specified
- * in the \p extra array if the value associated with
- * ::CU_LAUNCH_PARAM_BUFFER_SIZE is not zero.
- */
-#define CU_LAUNCH_PARAM_BUFFER_SIZE ((void*)0x02)
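/*
 * Illustrative sketch: launching a kernel with all arguments packed into a
 * single buffer via the \p extra mechanism described above. cuLaunchKernel()
 * is assumed to be declared elsewhere in this driver API header; `argBuffer`
 * must honor the alignment/padding of the individual parameters and error
 * handling is omitted.
 */
static void exampleLaunchWithExtra(CUfunction f, void *argBuffer,
                                   size_t argBufferSize, CUstream stream) {
  void *extra[] = {
      CU_LAUNCH_PARAM_BUFFER_POINTER, argBuffer,
      CU_LAUNCH_PARAM_BUFFER_SIZE,    &argBufferSize,
      CU_LAUNCH_PARAM_END,
  };
  cuLaunchKernel(f, /*gridDimX=*/1, 1, 1, /*blockDimX=*/64, 1, 1,
                 /*sharedMemBytes=*/0, stream,
                 /*kernelParams=*/NULL, extra);
}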
-
-/**
- * For texture references loaded into the module, use default texunit from
- * texture reference.
- */
-#define CU_PARAM_TR_DEFAULT -1
-
-/**
- * Device that represents the CPU
- */
-#define CU_DEVICE_CPU ((CUdevice)-1)
-
-/**
- * Device that represents an invalid device
- */
-#define CU_DEVICE_INVALID ((CUdevice)-2)
-
-/** @} */ /* END CUDA_TYPES */
-
-#if defined(__GNUC__)
- #if defined(__CUDA_API_PUSH_VISIBILITY_DEFAULT)
- #pragma GCC visibility push(default)
- #endif
-#endif
-
-#ifdef _WIN32
-#define CUDAAPI __stdcall
-#else
-#define CUDAAPI
-#endif
-
-/**
- * \defgroup CUDA_ERROR Error Handling
- *
- * ___MANBRIEF___ error handling functions of the low-level CUDA driver API
- * (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the error handling functions of the low-level CUDA
- * driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Gets the string description of an error code
- *
- * Sets \p *pStr to the address of a NULL-terminated string description
- * of the error code \p error.
- * If the error code is not recognized, ::CUDA_ERROR_INVALID_VALUE
- * will be returned and \p *pStr will be set to the NULL address.
- *
- * \param error - Error code to convert to string
- * \param pStr - Address of the string pointer.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa
- * ::CUresult,
- * ::cudaGetErrorString
- */
-CUresult CUDAAPI cuGetErrorString(CUresult error, const char **pStr);
-
-/**
- * \brief Gets the string representation of an error code enum name
- *
- * Sets \p *pStr to the address of a NULL-terminated string representation
- * of the name of the enum error code \p error.
- * If the error code is not recognized, ::CUDA_ERROR_INVALID_VALUE
- * will be returned and \p *pStr will be set to the NULL address.
- *
- * \param error - Error code to convert to string
- * \param pStr - Address of the string pointer.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa
- * ::CUresult,
- * ::cudaGetErrorName
- */
-CUresult CUDAAPI cuGetErrorName(CUresult error, const char **pStr);
-
-/** @} */ /* END CUDA_ERROR */
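/*
 * Illustrative sketch: a minimal checking wrapper built on cuGetErrorName()
 * and cuGetErrorString() above; fprintf requires <stdio.h>.
 */
#define CU_EXAMPLE_CHECK(expr)                                       \
  do {                                                               \
    CUresult result_ = (expr);                                       \
    if (result_ != CUDA_SUCCESS) {                                   \
      const char *name_ = NULL;                                      \
      const char *desc_ = NULL;                                      \
      cuGetErrorName(result_, &name_);                               \
      cuGetErrorString(result_, &desc_);                             \
      fprintf(stderr, "%s failed: %s (%s)\n", #expr,                 \
              name_ ? name_ : "?", desc_ ? desc_ : "?");             \
    }                                                                \
  } while (0)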
-
-/**
- * \defgroup CUDA_INITIALIZE Initialization
- *
- * ___MANBRIEF___ initialization functions of the low-level CUDA driver API
- * (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the initialization functions of the low-level CUDA
- * driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Initialize the CUDA driver API
- *
- * Initializes the driver API and must be called before any other function from
- * the driver API. Currently, the \p Flags parameter must be 0. If ::cuInit()
- * has not been called, any function from the driver API will return
- * ::CUDA_ERROR_NOT_INITIALIZED.
- *
- * \param Flags - Initialization flag for CUDA.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE,
- * ::CUDA_ERROR_SYSTEM_DRIVER_MISMATCH,
- * ::CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE
- * \notefnerr
- */
-CUresult CUDAAPI cuInit(unsigned int Flags);
-
-/** @} */ /* END CUDA_INITIALIZE */
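/*
 * Illustrative sketch: driver API usage begins with cuInit(0); any other
 * cu* call made first returns CUDA_ERROR_NOT_INITIALIZED.
 */
static int exampleInitDriver(void) {
  return cuInit(/*Flags=*/0) == CUDA_SUCCESS;
}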
-
-/**
- * \defgroup CUDA_VERSION Version Management
- *
- * ___MANBRIEF___ version management functions of the low-level CUDA driver
- * API (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the version management functions of the low-level
- * CUDA driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Returns the latest CUDA version supported by the driver
- *
- * Returns in \p *driverVersion the version of CUDA supported by
- * the driver. The version is returned as
- * (1000 × major + 10 × minor). For example, CUDA 9.2
- * would be represented by 9020.
- *
- * This function automatically returns ::CUDA_ERROR_INVALID_VALUE if
- * \p driverVersion is NULL.
- *
- * \param driverVersion - Returns the CUDA driver version
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa
- * ::cudaDriverGetVersion,
- * ::cudaRuntimeGetVersion
- */
-CUresult CUDAAPI cuDriverGetVersion(int *driverVersion);
-
-/** @} */ /* END CUDA_VERSION */
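/*
 * Illustrative sketch: decoding the (1000 * major + 10 * minor) encoding
 * documented above, e.g. 9020 -> 9.2.
 */
static void exampleDriverVersion(int *major, int *minor) {
  int driverVersion = 0;
  cuDriverGetVersion(&driverVersion);
  *major = driverVersion / 1000;
  *minor = (driverVersion % 1000) / 10;
}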
-
-/**
- * \defgroup CUDA_DEVICE Device Management
- *
- * ___MANBRIEF___ device management functions of the low-level CUDA driver API
- * (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the device management functions of the low-level
- * CUDA driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Returns a handle to a compute device
- *
- * Returns in \p *device a device handle given an ordinal in the range <b>[0,
- * ::cuDeviceGetCount()-1]</b>.
- *
- * \param device - Returned device handle
- * \param ordinal - Device number to get handle for
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- *
- * \sa
- * ::cuDeviceGetAttribute,
- * ::cuDeviceGetCount,
- * ::cuDeviceGetName,
- * ::cuDeviceGetUuid,
- * ::cuDeviceGetLuid,
- * ::cuDeviceTotalMem
- */
-CUresult CUDAAPI cuDeviceGet(CUdevice *device, int ordinal);
-
-/**
- * \brief Returns the number of compute-capable devices
- *
- * Returns in \p *count the number of devices with compute capability greater
- * than or equal to 2.0 that are available for execution. If there is no such
- * device, ::cuDeviceGetCount() returns 0.
- *
- * \param count - Returned number of compute-capable devices
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa
- * ::cuDeviceGetAttribute,
- * ::cuDeviceGetName,
- * ::cuDeviceGetUuid,
- * ::cuDeviceGetLuid,
- * ::cuDeviceGet,
- * ::cuDeviceTotalMem,
- * ::cudaGetDeviceCount
- */
-CUresult CUDAAPI cuDeviceGetCount(int *count);
-
-/**
- * \brief Returns an identifier string for the device
- *
- * Returns an ASCII string identifying the device \p dev in the NULL-terminated
- * string pointed to by \p name. \p len specifies the maximum length of the
- * string that may be returned.
- *
- * \param name - Returned identifier string for the device
- * \param len - Maximum length of string to store in \p name
- * \param dev - Device to get identifier string for
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- *
- * \sa
- * ::cuDeviceGetAttribute,
- * ::cuDeviceGetUuid,
- * ::cuDeviceGetLuid,
- * ::cuDeviceGetCount,
- * ::cuDeviceGet,
- * ::cuDeviceTotalMem,
- * ::cudaGetDeviceProperties
- */
-CUresult CUDAAPI cuDeviceGetName(char *name, int len, CUdevice dev);
-
-/**
- * \brief Return a UUID for the device
- *
- * Returns a 16-octet UUID identifying the device \p dev in the structure
- * pointed to by \p uuid.
- *
- * \param uuid - Returned UUID
- * \param dev - Device to get identifier string for
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- *
- * \sa
- * ::cuDeviceGetAttribute,
- * ::cuDeviceGetCount,
- * ::cuDeviceGetName,
- * ::cuDeviceGetLuid,
- * ::cuDeviceGet,
- * ::cuDeviceTotalMem,
- * ::cudaGetDeviceProperties
- */
-CUresult CUDAAPI cuDeviceGetUuid(CUuuid *uuid, CUdevice dev);
-
-#if defined(_WIN32)
-/**
- * \brief Return an LUID and device node mask for the device
- *
- * Return identifying information (\p luid and \p deviceNodeMask) to allow
- * matching the device with graphics APIs.
- *
- * \param luid - Returned LUID
- * \param deviceNodeMask - Returned device node mask
- * \param dev - Device to get identifier string for
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- *
- * \sa
- * ::cuDeviceGetAttribute,
- * ::cuDeviceGetCount,
- * ::cuDeviceGetName,
- * ::cuDeviceGet,
- * ::cuDeviceTotalMem,
- * ::cudaGetDeviceProperties
- */
-CUresult CUDAAPI cuDeviceGetLuid(char *luid, unsigned int *deviceNodeMask, CUdevice dev);
-#endif
-
-/**
- * \brief Returns the total amount of memory on the device
- *
- * Returns in \p *bytes the total amount of memory available on the device
- * \p dev in bytes.
- *
- * \param bytes - Returned memory available on device in bytes
- * \param dev - Device handle
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- *
- * \sa
- * ::cuDeviceGetAttribute,
- * ::cuDeviceGetCount,
- * ::cuDeviceGetName,
- * ::cuDeviceGetUuid,
- * ::cuDeviceGet,
- * ::cudaMemGetInfo
- */
-CUresult CUDAAPI cuDeviceTotalMem(size_t *bytes, CUdevice dev);
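/*
 * Illustrative sketch: enumerating devices with the query functions declared
 * above; fprintf requires <stdio.h> and error handling is omitted.
 */
static void exampleListDevices(void) {
  int deviceCount = 0;
  cuDeviceGetCount(&deviceCount);
  for (int ordinal = 0; ordinal < deviceCount; ++ordinal) {
    CUdevice dev;
    char name[128] = {0};
    size_t totalBytes = 0;
    cuDeviceGet(&dev, ordinal);
    cuDeviceGetName(name, (int)sizeof(name), dev);
    cuDeviceTotalMem(&totalBytes, dev);
    fprintf(stderr, "device %d: %s (%zu bytes)\n", ordinal, name, totalBytes);
  }
}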
-
-/**
- * \brief Returns information about the device
- *
- * Returns in \p *pi the integer value of the attribute \p attrib on device
- * \p dev. The supported attributes are:
- * - ::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK: Maximum number of threads per
- * block;
- * - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X: Maximum x-dimension of a block;
- * - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y: Maximum y-dimension of a block;
- * - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z: Maximum z-dimension of a block;
- * - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X: Maximum x-dimension of a grid;
- * - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y: Maximum y-dimension of a grid;
- * - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z: Maximum z-dimension of a grid;
- * - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK: Maximum amount of
- * shared memory available to a thread block in bytes;
- * - ::CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY: Memory available on device for
- * __constant__ variables in a CUDA C kernel in bytes;
- * - ::CU_DEVICE_ATTRIBUTE_WARP_SIZE: Warp size in threads;
- * - ::CU_DEVICE_ATTRIBUTE_MAX_PITCH: Maximum pitch in bytes allowed by the
- * memory copy functions that involve memory regions allocated through
- * ::cuMemAllocPitch();
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH: Maximum 1D
- * texture width;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH: Maximum width
- * for a 1D texture bound to linear memory;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH: Maximum
- * mipmapped 1D texture width;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH: Maximum 2D
- * texture width;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT: Maximum 2D
- * texture height;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH: Maximum width
- * for a 2D texture bound to linear memory;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT: Maximum height
- * for a 2D texture bound to linear memory;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH: Maximum pitch
- * in bytes for a 2D texture bound to linear memory;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH: Maximum
- * mipmapped 2D texture width;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT: Maximum
- * mipmapped 2D texture height;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH: Maximum 3D
- * texture width;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT: Maximum 3D
- * texture height;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH: Maximum 3D
- * texture depth;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE:
- * Alternate maximum 3D texture width, 0 if no alternate
- * maximum 3D texture size is supported;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE:
- * Alternate maximum 3D texture height, 0 if no alternate
- * maximum 3D texture size is supported;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE:
- * Alternate maximum 3D texture depth, 0 if no alternate
- * maximum 3D texture size is supported;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH:
- * Maximum cubemap texture width or height;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH:
- * Maximum 1D layered texture width;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS:
- * Maximum layers in a 1D layered texture;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH:
- * Maximum 2D layered texture width;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT:
- * Maximum 2D layered texture height;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS:
- * Maximum layers in a 2D layered texture;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH:
- * Maximum cubemap layered texture width or height;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS:
- * Maximum layers in a cubemap layered texture;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH:
- * Maximum 1D surface width;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH:
- * Maximum 2D surface width;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT:
- * Maximum 2D surface height;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH:
- * Maximum 3D surface width;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT:
- * Maximum 3D surface height;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH:
- * Maximum 3D surface depth;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH:
- * Maximum 1D layered surface width;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS:
- * Maximum layers in a 1D layered surface;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH:
- * Maximum 2D layered surface width;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT:
- * Maximum 2D layered surface height;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS:
- * Maximum layers in a 2D layered surface;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH:
- * Maximum cubemap surface width;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH:
- * Maximum cubemap layered surface width;
- * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS:
- * Maximum layers in a cubemap layered surface;
- * - ::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK: Maximum number of 32-bit
- * registers available to a thread block;
- * - ::CU_DEVICE_ATTRIBUTE_CLOCK_RATE: The typical clock frequency in kilohertz;
- * - ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT: Alignment requirement; texture
- * base addresses aligned to ::textureAlign bytes do not need an offset
- * applied to texture fetches;
- * - ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT: Pitch alignment requirement
- * for 2D texture references bound to pitched memory;
- * - ::CU_DEVICE_ATTRIBUTE_GPU_OVERLAP: 1 if the device can concurrently copy
- * memory between host and device while executing a kernel, or 0 if not;
- * - ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT: Number of multiprocessors on
- * the device;
- * - ::CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT: 1 if there is a run time limit
- * for kernels executed on the device, or 0 if not;
- * - ::CU_DEVICE_ATTRIBUTE_INTEGRATED: 1 if the device is integrated with the
- * memory subsystem, or 0 if not;
- * - ::CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY: 1 if the device can map host
- * memory into the CUDA address space, or 0 if not;
- * - ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE: Compute mode that the device is currently
- * in. Available modes are as follows:
- * - ::CU_COMPUTEMODE_DEFAULT: Default mode - Device is not restricted and
- * can have multiple CUDA contexts present at a single time.
- * - ::CU_COMPUTEMODE_PROHIBITED: Compute-prohibited mode - Device is
- * prohibited from creating new CUDA contexts.
- * - ::CU_COMPUTEMODE_EXCLUSIVE_PROCESS: Compute-exclusive-process mode - Device
- * can have only one context used by a single process at a time.
- * - ::CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS: 1 if the device supports
- * executing multiple kernels within the same context simultaneously, or 0 if
- * not. It is not guaranteed that multiple kernels will be resident
- * on the device concurrently so this feature should not be relied upon for
- * correctness;
- * - ::CU_DEVICE_ATTRIBUTE_ECC_ENABLED: 1 if error correction is enabled on the
- * device, 0 if error correction is disabled or not supported by the device;
- * - ::CU_DEVICE_ATTRIBUTE_PCI_BUS_ID: PCI bus identifier of the device;
- * - ::CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID: PCI device (also known as slot) identifier
- * of the device;
- * - ::CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID: PCI domain identifier of the device
- * - ::CU_DEVICE_ATTRIBUTE_TCC_DRIVER: 1 if the device is using a TCC driver. TCC
- * is only available on Tesla hardware running Windows Vista or later;
- * - ::CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE: Peak memory clock frequency in kilohertz;
- * - ::CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH: Global memory bus width in bits;
- * - ::CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE: Size of L2 cache in bytes. 0 if the device doesn't have L2 cache;
- * - ::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR: Maximum resident threads per multiprocessor;
- * - ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING: 1 if the device shares a unified address space with
- * the host, or 0 if not;
- * - ::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR: Major compute capability version number;
- * - ::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR: Minor compute capability version number;
- * - ::CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED: 1 if device supports caching globals
- * in L1 cache, 0 if caching globals in L1 cache is not supported by the device;
- * - ::CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED: 1 if device supports caching locals
- * in L1 cache, 0 if caching locals in L1 cache is not supported by the device;
- * - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR: Maximum amount of
- * shared memory available to a multiprocessor in bytes; this amount is shared
- * by all thread blocks simultaneously resident on a multiprocessor;
- * - ::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR: Maximum number of 32-bit
- * registers available to a multiprocessor; this number is shared by all thread
- * blocks simultaneously resident on a multiprocessor;
- * - ::CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY: 1 if device supports allocating managed memory
- * on this system, 0 if allocating managed memory is not supported by the device on this system.
- * - ::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD: 1 if device is on a multi-GPU board, 0 if not.
- * - ::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID: Unique identifier for a group of devices
- * associated with the same board. Devices on the same multi-GPU board will share the same identifier.
- * - ::CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED: 1 if the link between the device and the host
- * supports native atomic operations.
- * - ::CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO: Ratio of single precision performance
- * (in floating-point operations per second) to double precision performance.
- * - ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS: Device supports coherently accessing
- * pageable memory without calling cudaHostRegister on it.
- * - ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS: Device can coherently access managed memory
- * concurrently with the CPU.
- * - ::CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED: Device supports Compute Preemption.
- * - ::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM: Device can access host registered
- * memory at the same virtual address as the CPU.
- * - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN: The maximum per block shared memory size
- * supported on this device. This is the maximum value that can be opted into when using the cuFuncSetAttribute() call.
- * For more details see ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES
- * - ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES: Device accesses pageable memory via the host's
- * page tables.
- * - ::CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST: The host can directly access managed memory on the device without migration.
- * - ::CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED: Device supports virtual address management APIs like ::cuMemAddressReserve, ::cuMemCreate, ::cuMemMap and related APIs
- * - ::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED: Device supports exporting memory to a posix file descriptor with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate
- * - ::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED: Device supports exporting memory to a Win32 NT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate
- * - ::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED: Device supports exporting memory to a Win32 KMT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate
- * - ::CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE: Maximum L2 cache size in bytes for persisting lines
- * - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR: Maximum number of thread blocks that can reside on a multiprocessor.
- * - ::CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED: Device supports compressible memory allocation via ::cuMemCreate
- * - ::CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK: Amount of shared memory per block reserved by CUDA driver in bytes.
- *
- * \param pi - Returned device attribute value
- * \param attrib - Device attribute to query
- * \param dev - Device handle
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- *
- * \sa
- * ::cuDeviceGetCount,
- * ::cuDeviceGetName,
- * ::cuDeviceGetUuid,
- * ::cuDeviceGet,
- * ::cuDeviceTotalMem,
- * ::cudaDeviceGetAttribute,
- * ::cudaGetDeviceProperties
- */
-CUresult CUDAAPI cuDeviceGetAttribute(int *pi, CUdevice_attribute attrib, CUdevice dev);
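For reference, a minimal sketch of querying an attribute through this entry point; it assumes the driver API has already been initialized with cuInit(0) and keeps error handling to early returns.

#include <cuda.h>
#include <stdio.h>

int main(void) {
  CUdevice dev;
  int sm_count = 0;
  if (cuInit(0) != CUDA_SUCCESS) return 1;             /* initialize the driver API */
  if (cuDeviceGet(&dev, 0) != CUDA_SUCCESS) return 1;  /* first device in the system */
  /* Query the number of multiprocessors on the device. */
  if (cuDeviceGetAttribute(&sm_count, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT,
                           dev) != CUDA_SUCCESS) return 1;
  printf("multiprocessors: %d\n", sm_count);
  return 0;
}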
-
-/**
- * \brief Return NvSciSync attributes that this device can support.
- *
- * Returns in \p nvSciSyncAttrList, the properties of NvSciSync that
- * this CUDA device, \p dev can support. The returned \p nvSciSyncAttrList
- * can be used to create an NvSciSync object that matches this device's capabilities.
- *
- * If NvSciSyncAttrKey_RequiredPerm field in \p nvSciSyncAttrList is
- * already set this API will return ::CUDA_ERROR_INVALID_VALUE.
- *
- * The application should set \p nvSciSyncAttrList to a valid
- * NvSciSyncAttrList; otherwise this API will return
- * ::CUDA_ERROR_INVALID_HANDLE.
- *
- * The \p flags parameter controls how the application intends to use
- * the NvSciSync created from the \p nvSciSyncAttrList. The valid flags are:
- * - ::CUDA_NVSCISYNC_ATTR_SIGNAL, specifies that the application intends to
- * signal an NvSciSync on this CUDA device.
- * - ::CUDA_NVSCISYNC_ATTR_WAIT, specifies that the application intends to
- * wait on an NvSciSync on this CUDA device.
- *
- * At least one of these flags must be set; otherwise the API
- * returns ::CUDA_ERROR_INVALID_VALUE. The two flags are orthogonal
- * to one another: a developer may set both flags to allow setting
- * both wait- and signal-specific attributes in the same \p nvSciSyncAttrList.
- *
- * \param nvSciSyncAttrList - Return NvSciSync attributes supported.
- * \param dev - Valid CUDA device to get NvSciSync attributes for.
- * \param flags - flags describing NvSciSync usage.
- *
- * \return
- *
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_INVALID_DEVICE,
- * ::CUDA_ERROR_NOT_SUPPORTED,
- * ::CUDA_ERROR_OUT_OF_MEMORY
- *
- * \sa
- * ::cuImportExternalSemaphore,
- * ::cuDestroyExternalSemaphore,
- * ::cuSignalExternalSemaphoresAsync,
- * ::cuWaitExternalSemaphoresAsync
- */
-CUresult CUDAAPI cuDeviceGetNvSciSyncAttributes(void *nvSciSyncAttrList, CUdevice dev, int flags);
-
-/** @} */ /* END CUDA_DEVICE */
-
-/**
- * \defgroup CUDA_DEVICE_DEPRECATED Device Management [DEPRECATED]
- *
- * ___MANBRIEF___ deprecated device management functions of the low-level CUDA
- * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the device management functions of the low-level
- * CUDA driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Returns properties for a selected device
- *
- * \deprecated
- *
- * This function was deprecated as of CUDA 5.0 and replaced by ::cuDeviceGetAttribute().
- *
- * Returns in \p *prop the properties of device \p dev. The ::CUdevprop
- * structure is defined as:
- *
- * \code
- typedef struct CUdevprop_st {
- int maxThreadsPerBlock;
- int maxThreadsDim[3];
- int maxGridSize[3];
- int sharedMemPerBlock;
- int totalConstantMemory;
- int SIMDWidth;
- int memPitch;
- int regsPerBlock;
- int clockRate;
- int textureAlign;
- } CUdevprop;
- * \endcode
- * where:
- *
- * - ::maxThreadsPerBlock is the maximum number of threads per block;
- * - ::maxThreadsDim[3] is the maximum sizes of each dimension of a block;
- * - ::maxGridSize[3] is the maximum sizes of each dimension of a grid;
- * - ::sharedMemPerBlock is the total amount of shared memory available per
- * block in bytes;
- * - ::totalConstantMemory is the total amount of constant memory available on
- * the device in bytes;
- * - ::SIMDWidth is the warp size;
- * - ::memPitch is the maximum pitch allowed by the memory copy functions that
- * involve memory regions allocated through ::cuMemAllocPitch();
- * - ::regsPerBlock is the total number of registers available per block;
- * - ::clockRate is the clock frequency in kilohertz;
- * - ::textureAlign is the alignment requirement; texture base addresses that
- * are aligned to ::textureAlign bytes do not need an offset applied to
- * texture fetches.
- *
- * \param prop - Returned properties of device
- * \param dev - Device to get properties for
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- *
- * \sa
- * ::cuDeviceGetAttribute,
- * ::cuDeviceGetCount,
- * ::cuDeviceGetName,
- * ::cuDeviceGetUuid,
- * ::cuDeviceGet,
- * ::cuDeviceTotalMem
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuDeviceGetProperties(CUdevprop *prop, CUdevice dev);
-
-/**
- * \brief Returns the compute capability of the device
- *
- * \deprecated
- *
- * This function was deprecated as of CUDA 5.0 and its functionality superseded
- * by ::cuDeviceGetAttribute().
- *
- * Returns in \p *major and \p *minor the major and minor revision numbers that
- * define the compute capability of the device \p dev.
- *
- * \param major - Major revision number
- * \param minor - Minor revision number
- * \param dev - Device handle
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- *
- * \sa
- * ::cuDeviceGetAttribute,
- * ::cuDeviceGetCount,
- * ::cuDeviceGetName,
- * ::cuDeviceGetUuid,
- * ::cuDeviceGet,
- * ::cuDeviceTotalMem
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuDeviceComputeCapability(int *major, int *minor, CUdevice dev);
-
-/** @} */ /* END CUDA_DEVICE_DEPRECATED */
-
-/**
- * \defgroup CUDA_PRIMARY_CTX Primary Context Management
- *
- * ___MANBRIEF___ primary context management functions of the low-level CUDA driver
- * API (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the primary context management functions of the low-level
- * CUDA driver application programming interface.
- *
- * The primary context is unique per device and shared with the CUDA runtime API.
- * These functions allow integration with other libraries using CUDA.
- *
- * @{
- */
-
-/**
- * \brief Retain the primary context on the GPU
- *
- * Retains the primary context on the device, creating it if necessary,
- * increasing its usage count. The caller must call
- * ::cuDevicePrimaryCtxRelease() when done using the context.
- * Unlike ::cuCtxCreate() the newly created context is not pushed onto the stack.
- *
- * Context creation will fail with ::CUDA_ERROR_UNKNOWN if the compute mode of
- * the device is ::CU_COMPUTEMODE_PROHIBITED. The function ::cuDeviceGetAttribute()
- * can be used with ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the compute mode
- * of the device.
- * The <i>nvidia-smi</i> tool can be used to set the compute mode for
- * devices. Documentation for <i>nvidia-smi</i> can be obtained by passing a
- * -h option to it.
- *
- * Please note that the primary context always supports pinned allocations. Other
- * flags can be specified by ::cuDevicePrimaryCtxSetFlags().
- *
- * \param pctx - Returned context handle of the new context
- * \param dev - Device for which primary context is requested
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_DEVICE,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_UNKNOWN
- * \notefnerr
- *
- * \sa ::cuDevicePrimaryCtxRelease,
- * ::cuDevicePrimaryCtxSetFlags,
- * ::cuCtxCreate,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPopCurrent,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetCacheConfig,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize
- */
-CUresult CUDAAPI cuDevicePrimaryCtxRetain(CUcontext *pctx, CUdevice dev);
-
-/**
- * \brief Release the primary context on the GPU
- *
- * Releases the primary context on the device by decreasing the usage
- * count by 1. If the usage drops to 0 the primary context of device \p dev
- * will be destroyed regardless of how many threads it is current to.
- *
- * Please note that unlike ::cuCtxDestroy() this method does not pop the context
- * from the stack under any circumstances.
- *
- * \param dev - Device which primary context is released
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- *
- * \sa ::cuDevicePrimaryCtxRetain,
- * ::cuCtxDestroy,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPopCurrent,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetCacheConfig,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize
- */
-CUresult CUDAAPI cuDevicePrimaryCtxRelease(CUdevice dev);
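A small sketch of the retain/release pattern this pair of functions enables, e.g. for a library that borrows the primary context rather than creating its own; error handling is abbreviated and cuInit(0) is assumed to have succeeded.

#include <cuda.h>

/* Borrow the primary context, run work on it, then release it. */
CUresult run_on_primary_context(CUdevice dev) {
  CUcontext ctx = NULL;
  CUresult result = cuDevicePrimaryCtxRetain(&ctx, dev);  /* create-or-reuse, usage count +1 */
  if (result != CUDA_SUCCESS) return result;
  result = cuCtxPushCurrent(ctx);                         /* make it current on this thread */
  if (result == CUDA_SUCCESS) {
    /* ... launch kernels / issue memory operations here ... */
    result = cuCtxPopCurrent(NULL);                       /* restore the previous context */
  }
  cuDevicePrimaryCtxRelease(dev);                         /* usage count -1; destroyed at 0 */
  return result;
}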
-
-/**
- * \brief Set flags for the primary context
- *
- * Sets the flags for the primary context on the device, overwriting previously
- * set ones.
- *
- * The three LSBs of the \p flags parameter can be used to control how the OS
- * thread, which owns the CUDA context at the time of an API call, interacts
- * with the OS scheduler when waiting for results from the GPU. Only one of
- * the scheduling flags can be set when creating a context.
- *
- * - ::CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for
- * results from the GPU. This can decrease latency when waiting for the GPU,
- * but may lower the performance of CPU threads if they are performing work in
- * parallel with the CUDA thread.
- *
- * - ::CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for
- * results from the GPU. This can increase latency when waiting for the GPU,
- * but can increase the performance of CPU threads performing work in parallel
- * with the GPU.
- *
- * - ::CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a
- * synchronization primitive when waiting for the GPU to finish work.
- *
- * - ::CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a
- * synchronization primitive when waiting for the GPU to finish work. <br>
- * <b>Deprecated:</b> This flag was deprecated as of CUDA 4.0 and was
- * replaced with ::CU_CTX_SCHED_BLOCKING_SYNC.
- *
- * - ::CU_CTX_SCHED_AUTO: The default value if the \p flags parameter is zero,
- * uses a heuristic based on the number of active CUDA contexts in the
- * process \e C and the number of logical processors in the system \e P. If
- * \e C > \e P, then CUDA will yield to other OS threads when waiting for
- * the GPU (::CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while
- * waiting for results and actively spin on the processor (::CU_CTX_SCHED_SPIN).
- * Additionally, on Tegra devices, ::CU_CTX_SCHED_AUTO uses a heuristic based on
- * the power profile of the platform and may choose ::CU_CTX_SCHED_BLOCKING_SYNC
- * for low-powered devices.
- *
- * - ::CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory
- * after resizing local memory for a kernel. This can prevent thrashing by
- * local memory allocations when launching many kernels with high local
- * memory usage at the cost of potentially increased memory usage. <br>
- * <b>Deprecated:</b> This flag is deprecated and the behavior enabled
- * by this flag is now the default and cannot be disabled.
- *
- * \param dev - Device for which the primary context flags are set
- * \param flags - New flags for the device
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_DEVICE,
- * ::CUDA_ERROR_INVALID_VALUE,
- * \notefnerr
- *
- * \sa ::cuDevicePrimaryCtxRetain,
- * ::cuDevicePrimaryCtxGetState,
- * ::cuCtxCreate,
- * ::cuCtxGetFlags,
- * ::cudaSetDeviceFlags
- */
-CUresult CUDAAPI cuDevicePrimaryCtxSetFlags(CUdevice dev, unsigned int flags);
-
-/**
- * \brief Get the state of the primary context
- *
- * Returns in \p *flags the flags for the primary context of \p dev, and in
- * \p *active whether it is active. See ::cuDevicePrimaryCtxSetFlags for flag
- * values.
- *
- * \param dev - Device to get primary context flags for
- * \param flags - Pointer to store flags
- * \param active - Pointer to store context state; 0 = inactive, 1 = active
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_DEVICE,
- * ::CUDA_ERROR_INVALID_VALUE,
- * \notefnerr
- *
- * \sa
- * ::cuDevicePrimaryCtxSetFlags,
- * ::cuCtxGetFlags,
- * ::cudaGetDeviceFlags
- */
-CUresult CUDAAPI cuDevicePrimaryCtxGetState(CUdevice dev, unsigned int *flags, int *active);
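A short sketch combining the two calls above: request blocking synchronization for the primary context (ideally before it is created) and then read back its flags and active state.

#include <cuda.h>
#include <stdio.h>

/* Prefer blocking sync on the primary context, then inspect its state. */
void configure_primary_context(CUdevice dev) {
  unsigned int flags = 0;
  int active = 0;
  /* Request blocking synchronization before the primary context is created. */
  cuDevicePrimaryCtxSetFlags(dev, CU_CTX_SCHED_BLOCKING_SYNC);
  if (cuDevicePrimaryCtxGetState(dev, &flags, &active) == CUDA_SUCCESS) {
    printf("primary context flags=0x%x active=%d\n", flags, active);
  }
}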
-
-/**
- * \brief Destroy all allocations and reset all state on the primary context
- *
- * Explicitly destroys and cleans up all resources associated with the current
- * device in the current process.
- *
- * Note that it is the responsibility of the calling function to ensure that no
- * other module in the process is using the device any more. For that reason
- * it is recommended to use ::cuDevicePrimaryCtxRelease() in most cases.
- * However it is safe for other modules to call ::cuDevicePrimaryCtxRelease()
- * even after resetting the device.
- *
- * \param dev - Device for which primary context is destroyed
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_DEVICE,
- * ::CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE
- * \notefnerr
- *
- * \sa ::cuDevicePrimaryCtxRetain,
- * ::cuDevicePrimaryCtxRelease,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPopCurrent,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetCacheConfig,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize,
- * ::cudaDeviceReset
- */
-CUresult CUDAAPI cuDevicePrimaryCtxReset(CUdevice dev);
-
-/** @} */ /* END CUDA_PRIMARY_CTX */
-
-
-/**
- * \defgroup CUDA_CTX Context Management
- *
- * ___MANBRIEF___ context management functions of the low-level CUDA driver
- * API (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the context management functions of the low-level
- * CUDA driver application programming interface.
- *
- * Please note that some functions are described in
- * \ref CUDA_PRIMARY_CTX "Primary Context Management" section.
- *
- * @{
- */
-
-/**
- * \brief Create a CUDA context
- *
- * \note In most cases it is recommended to use ::cuDevicePrimaryCtxRetain.
- *
- * Creates a new CUDA context and associates it with the calling thread. The
- * \p flags parameter is described below. The context is created with a usage
- * count of 1 and the caller of ::cuCtxCreate() must call ::cuCtxDestroy()
- * when done using the context. If a context is already current to the thread,
- * it is supplanted by the newly created context and may be restored by a subsequent
- * call to ::cuCtxPopCurrent().
- *
- * The three LSBs of the \p flags parameter can be used to control how the OS
- * thread, which owns the CUDA context at the time of an API call, interacts
- * with the OS scheduler when waiting for results from the GPU. Only one of
- * the scheduling flags can be set when creating a context.
- *
- * - ::CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for
- * results from the GPU. This can decrease latency when waiting for the GPU,
- * but may lower the performance of CPU threads if they are performing work in
- * parallel with the CUDA thread.
- *
- * - ::CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for
- * results from the GPU. This can increase latency when waiting for the GPU,
- * but can increase the performance of CPU threads performing work in parallel
- * with the GPU.
- *
- * - ::CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a
- * synchronization primitive when waiting for the GPU to finish work.
- *
- * - ::CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a
- * synchronization primitive when waiting for the GPU to finish work. <br>
- * <b>Deprecated:</b> This flag was deprecated as of CUDA 4.0 and was
- * replaced with ::CU_CTX_SCHED_BLOCKING_SYNC.
- *
- * - ::CU_CTX_SCHED_AUTO: The default value if the \p flags parameter is zero,
- * uses a heuristic based on the number of active CUDA contexts in the
- * process \e C and the number of logical processors in the system \e P. If
- * \e C > \e P, then CUDA will yield to other OS threads when waiting for
- * the GPU (::CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while
- * waiting for results and actively spin on the processor (::CU_CTX_SCHED_SPIN).
- * Additionally, on Tegra devices, ::CU_CTX_SCHED_AUTO uses a heuristic based on
- * the power profile of the platform and may choose ::CU_CTX_SCHED_BLOCKING_SYNC
- * for low-powered devices.
- *
- * - ::CU_CTX_MAP_HOST: Instruct CUDA to support mapped pinned allocations.
- * This flag must be set in order to allocate pinned host memory that is
- * accessible to the GPU.
- *
- * - ::CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory
- * after resizing local memory for a kernel. This can prevent thrashing by
- * local memory allocations when launching many kernels with high local
- * memory usage at the cost of potentially increased memory usage. <br>
- * <b>Deprecated:</b> This flag is deprecated and the behavior enabled
- * by this flag is now the default and cannot be disabled.
- * Instead, the per-thread stack size can be controlled with ::cuCtxSetLimit().
- *
- * Context creation will fail with ::CUDA_ERROR_UNKNOWN if the compute mode of
- * the device is ::CU_COMPUTEMODE_PROHIBITED. The function ::cuDeviceGetAttribute()
- * can be used with ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the
- * compute mode of the device. The <i>nvidia-smi</i> tool can be used to set
- * the compute mode for devices.
- * Documentation for <i>nvidia-smi</i> can be obtained by passing a
- * -h option to it.
- *
- * \param pctx - Returned context handle of the new context
- * \param flags - Context creation flags
- * \param dev - Device to create context on
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_DEVICE,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_UNKNOWN
- * \notefnerr
- *
- * \sa ::cuCtxDestroy,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPopCurrent,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetCacheConfig,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize
- */
-CUresult CUDAAPI cuCtxCreate(CUcontext *pctx, unsigned int flags, CUdevice dev);
-
-/**
- * \brief Destroy a CUDA context
- *
- * Destroys the CUDA context specified by \p ctx. The context \p ctx will be
- * destroyed regardless of how many threads it is current to.
- * It is the responsibility of the calling function to ensure that no API
- * call is issued using \p ctx while ::cuCtxDestroy() is executing.
- *
- * If \p ctx is current to the calling thread then \p ctx will also be
- * popped from the current thread's context stack (as though ::cuCtxPopCurrent()
- * were called). If \p ctx is current to other threads, then \p ctx will
- * remain current to those threads, and attempting to access \p ctx from
- * those threads will result in the error ::CUDA_ERROR_CONTEXT_IS_DESTROYED.
- *
- * \param ctx - Context to destroy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuCtxCreate,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPopCurrent,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetCacheConfig,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize
- */
-CUresult CUDAAPI cuCtxDestroy(CUcontext ctx);
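A minimal sketch of the explicit create/destroy lifetime described above; as the note at the top of ::cuCtxCreate says, ::cuDevicePrimaryCtxRetain is usually preferable.

#include <cuda.h>

/* Explicit context lifetime with cuCtxCreate/cuCtxDestroy. */
CUresult with_private_context(CUdevice dev) {
  CUcontext ctx = NULL;
  CUresult result = cuCtxCreate(&ctx, CU_CTX_SCHED_YIELD, dev);  /* context becomes current */
  if (result != CUDA_SUCCESS) return result;
  /* ... work issued here targets ctx ... */
  result = cuCtxSynchronize();  /* wait for outstanding work before teardown */
  cuCtxDestroy(ctx);            /* also pops ctx from this thread's stack */
  return result;
}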
-
-/**
- * \brief Pushes a context on the current CPU thread
- *
- * Pushes the given context \p ctx onto the CPU thread's stack of current
- * contexts. The specified context becomes the CPU thread's current context, so
- * all CUDA functions that operate on the current context are affected.
- *
- * The previous current context may be made current again by calling
- * ::cuCtxDestroy() or ::cuCtxPopCurrent().
- *
- * \param ctx - Context to push
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuCtxCreate,
- * ::cuCtxDestroy,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPopCurrent,
- * ::cuCtxSetCacheConfig,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize
- */
-CUresult CUDAAPI cuCtxPushCurrent(CUcontext ctx);
-
-/**
- * \brief Pops the current CUDA context from the current CPU thread.
- *
- * Pops the current CUDA context from the CPU thread and passes back the
- * old context handle in \p *pctx. That context may then be made current
- * to a different CPU thread by calling ::cuCtxPushCurrent().
- *
- * If a context was current to the CPU thread before ::cuCtxCreate() or
- * ::cuCtxPushCurrent() was called, this function makes that context current to
- * the CPU thread again.
- *
- * \param pctx - Returned new context handle
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT
- * \notefnerr
- *
- * \sa ::cuCtxCreate,
- * ::cuCtxDestroy,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetCacheConfig,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize
- */
-CUresult CUDAAPI cuCtxPopCurrent(CUcontext *pctx);
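A small sketch of the push/pop pairing: make a context current for a stretch of work, then restore whatever was current before.

#include <cuda.h>

/* Temporarily make `ctx` current, then restore the previous context. */
CUresult with_context(CUcontext ctx) {
  CUcontext popped = NULL;
  CUresult result = cuCtxPushCurrent(ctx);  /* ctx is now current on this thread */
  if (result != CUDA_SUCCESS) return result;
  /* ... issue work against ctx ... */
  result = cuCtxPopCurrent(&popped);        /* popped == ctx; previous context restored */
  return result;
}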
-
-/**
- * \brief Binds the specified CUDA context to the calling CPU thread
- *
- * Binds the specified CUDA context to the calling CPU thread.
- * If \p ctx is NULL then the CUDA context previously bound to the
- * calling CPU thread is unbound and ::CUDA_SUCCESS is returned.
- *
- * If there exists a CUDA context stack on the calling CPU thread, this
- * will replace the top of that stack with \p ctx.
- * If \p ctx is NULL then this will be equivalent to popping the top
- * of the calling CPU thread's CUDA context stack (or a no-op if the
- * calling CPU thread's CUDA context stack is empty).
- *
- * \param ctx - Context to bind to the calling CPU thread
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT
- * \notefnerr
- *
- * \sa
- * ::cuCtxGetCurrent,
- * ::cuCtxCreate,
- * ::cuCtxDestroy,
- * ::cudaSetDevice
- */
-CUresult CUDAAPI cuCtxSetCurrent(CUcontext ctx);
-
-/**
- * \brief Returns the CUDA context bound to the calling CPU thread.
- *
- * Returns in \p *pctx the CUDA context bound to the calling CPU thread.
- * If no context is bound to the calling CPU thread then \p *pctx is
- * set to NULL and ::CUDA_SUCCESS is returned.
- *
- * \param pctx - Returned context handle
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * \notefnerr
- *
- * \sa
- * ::cuCtxSetCurrent,
- * ::cuCtxCreate,
- * ::cuCtxDestroy,
- * ::cudaGetDevice
- */
-CUresult CUDAAPI cuCtxGetCurrent(CUcontext *pctx);
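A short sketch of binding and unbinding with ::cuCtxSetCurrent, using ::cuCtxGetCurrent to confirm which context is bound.

#include <cuda.h>
#include <assert.h>

/* Bind a context to the calling thread, verify it, then unbind it. */
void bind_and_unbind(CUcontext ctx) {
  CUcontext bound = NULL;
  cuCtxSetCurrent(ctx);   /* replace the top of this thread's context stack */
  cuCtxGetCurrent(&bound);
  assert(bound == ctx);
  cuCtxSetCurrent(NULL);  /* unbind; equivalent to popping the top of the stack */
}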
-
-/**
- * \brief Returns the device ID for the current context
- *
- * Returns in \p *device the ordinal of the current context's device.
- *
- * \param device - Returned device ID for the current context
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * \notefnerr
- *
- * \sa ::cuCtxCreate,
- * ::cuCtxDestroy,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPopCurrent,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetCacheConfig,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize,
- * ::cudaGetDevice
- */
-CUresult CUDAAPI cuCtxGetDevice(CUdevice *device);
-
-/**
- * \brief Returns the flags for the current context
- *
- * Returns in \p *flags the flags of the current context. See ::cuCtxCreate
- * for flag values.
- *
- * \param flags - Pointer to store flags of current context
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * \notefnerr
- *
- * \sa ::cuCtxCreate,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetCurrent,
- * ::cuCtxGetDevice,
- * ::cuCtxGetLimit,
- * ::cuCtxGetSharedMemConfig,
- * ::cuCtxGetStreamPriorityRange,
- * ::cudaGetDeviceFlags
- */
-CUresult CUDAAPI cuCtxGetFlags(unsigned int *flags);
-
-/**
- * \brief Block for a context's tasks to complete
- *
- * Blocks until the device has completed all preceding requested tasks.
- * ::cuCtxSynchronize() returns an error if one of the preceding tasks failed.
- * If the context was created with the ::CU_CTX_SCHED_BLOCKING_SYNC flag, the
- * CPU thread will block until the GPU context has finished its work.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT
- * \notefnerr
- *
- * \sa ::cuCtxCreate,
- * ::cuCtxDestroy,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPopCurrent,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetCacheConfig,
- * ::cuCtxSetLimit,
- * ::cudaDeviceSynchronize
- */
-CUresult CUDAAPI cuCtxSynchronize(void);
-
-/**
- * \brief Set resource limits
- *
- * Setting \p limit to \p value is a request by the application to update
- * the current limit maintained by the context. The driver is free to
- * modify the requested value to meet h/w requirements (this could be
- * clamping to minimum or maximum values, rounding up to nearest element
- * size, etc). The application can use ::cuCtxGetLimit() to find out exactly
- * what the limit has been set to.
- *
- * Setting each ::CUlimit has its own specific restrictions, so each is
- * discussed here.
- *
- * - ::CU_LIMIT_STACK_SIZE controls the stack size in bytes of each GPU thread.
- * The driver automatically increases the per-thread stack size
- * for each kernel launch as needed. This size isn't reset back to the
- * original value after each launch. Setting this value will take effect
- * immediately, and if necessary, the device will block until all preceding
- * requested tasks are complete.
- *
- * - ::CU_LIMIT_PRINTF_FIFO_SIZE controls the size in bytes of the FIFO used
- * by the ::printf() device system call. Setting ::CU_LIMIT_PRINTF_FIFO_SIZE
- * must be performed before launching any kernel that uses the ::printf()
- * device system call, otherwise ::CUDA_ERROR_INVALID_VALUE will be returned.
- *
- * - ::CU_LIMIT_MALLOC_HEAP_SIZE controls the size in bytes of the heap used
- * by the ::malloc() and ::free() device system calls. Setting
- * ::CU_LIMIT_MALLOC_HEAP_SIZE must be performed before launching any kernel
- * that uses the ::malloc() or ::free() device system calls, otherwise
- * ::CUDA_ERROR_INVALID_VALUE will be returned.
- *
- * - ::CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH controls the maximum nesting depth of
- * a grid at which a thread can safely call ::cudaDeviceSynchronize(). Setting
- * this limit must be performed before any launch of a kernel that uses the
- * device runtime and calls ::cudaDeviceSynchronize() above the default sync
- * depth, two levels of grids. Calls to ::cudaDeviceSynchronize() will fail
- * with error code ::cudaErrorSyncDepthExceeded if the limitation is
- * violated. This limit can be set smaller than the default or up to the maximum
- * launch depth of 24. When setting this limit, keep in mind that additional
- * levels of sync depth require the driver to reserve large amounts of device
- * memory which can no longer be used for user allocations. If these
- * reservations of device memory fail, ::cuCtxSetLimit() will return
- * ::CUDA_ERROR_OUT_OF_MEMORY, and the limit can be reset to a lower value.
- * This limit is only applicable to devices of compute capability 3.5 and
- * higher. Attempting to set this limit on devices of compute capability less
- * than 3.5 will result in the error ::CUDA_ERROR_UNSUPPORTED_LIMIT being
- * returned.
- *
- * - ::CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT controls the maximum number of
- * outstanding device runtime launches that can be made from the current
- * context. A grid is outstanding from the point of launch up until the grid
- * is known to have been completed. Device runtime launches which violate
- * this limitation fail and return ::cudaErrorLaunchPendingCountExceeded when
- * ::cudaGetLastError() is called after launch. If more pending launches than
- * the default (2048 launches) are needed for a module using the device
- * runtime, this limit can be increased. Keep in mind that being able to
- * sustain additional pending launches will require the driver to reserve
- * larger amounts of device memory upfront which can no longer be used for
- * allocations. If these reservations fail, ::cuCtxSetLimit() will return
- * ::CUDA_ERROR_OUT_OF_MEMORY, and the limit can be reset to a lower value.
- * This limit is only applicable to devices of compute capability 3.5 and
- * higher. Attempting to set this limit on devices of compute capability less
- * than 3.5 will result in the error ::CUDA_ERROR_UNSUPPORTED_LIMIT being
- * returned.
- *
- * - ::CU_LIMIT_MAX_L2_FETCH_GRANULARITY controls the L2 cache fetch granularity.
- * Values can range from 0B to 128B. This is purely a performance hint and
- * it can be ignored or clamped depending on the platform.
- *
- * \param limit - Limit to set
- * \param value - Size of limit
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_UNSUPPORTED_LIMIT,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_INVALID_CONTEXT
- * \notefnerr
- *
- * \sa ::cuCtxCreate,
- * ::cuCtxDestroy,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPopCurrent,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetCacheConfig,
- * ::cuCtxSynchronize,
- * ::cudaDeviceSetLimit
- */
-CUresult CUDAAPI cuCtxSetLimit(CUlimit limit, size_t value);
-
-/**
- * \brief Returns resource limits
- *
- * Returns in \p *pvalue the current size of \p limit. The supported
- * ::CUlimit values are:
- * - ::CU_LIMIT_STACK_SIZE: stack size in bytes of each GPU thread.
- * - ::CU_LIMIT_PRINTF_FIFO_SIZE: size in bytes of the FIFO used by the
- * ::printf() device system call.
- * - ::CU_LIMIT_MALLOC_HEAP_SIZE: size in bytes of the heap used by the
- * ::malloc() and ::free() device system calls.
- * - ::CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH: maximum grid depth at which a thread
- * can issue the device runtime call ::cudaDeviceSynchronize() to wait on
- * child grid launches to complete.
- * - ::CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT: maximum number of outstanding
- * device runtime launches that can be made from this context.
- * - ::CU_LIMIT_MAX_L2_FETCH_GRANULARITY: L2 cache fetch granularity.
- *
- * \param limit - Limit to query
- * \param pvalue - Returned size of limit
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_UNSUPPORTED_LIMIT
- * \notefnerr
- *
- * \sa ::cuCtxCreate,
- * ::cuCtxDestroy,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxPopCurrent,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetCacheConfig,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize,
- * ::cudaDeviceGetLimit
- */
-CUresult CUDAAPI cuCtxGetLimit(size_t *pvalue, CUlimit limit);
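A brief sketch of the set-then-query pattern the limit APIs encourage, since the driver may clamp or round the requested value; the 8 MiB figure is only an illustrative choice.

#include <cuda.h>
#include <stdio.h>

/* Request an 8 MiB device malloc heap, then read back what the driver granted. */
void grow_device_heap(void) {
  size_t granted = 0;
  if (cuCtxSetLimit(CU_LIMIT_MALLOC_HEAP_SIZE, (size_t)8 << 20) == CUDA_SUCCESS &&
      cuCtxGetLimit(&granted, CU_LIMIT_MALLOC_HEAP_SIZE) == CUDA_SUCCESS) {
    printf("device malloc heap: %zu bytes\n", granted);
  }
}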
-
-/**
- * \brief Returns the preferred cache configuration for the current context.
- *
- * On devices where the L1 cache and shared memory use the same hardware
- * resources, this function returns through \p pconfig the preferred cache configuration
- * for the current context. This is only a preference. The driver will use
- * the requested configuration if possible, but it is free to choose a different
- * configuration if required to execute functions.
- *
- * This will return a \p pconfig of ::CU_FUNC_CACHE_PREFER_NONE on devices
- * where the size of the L1 cache and shared memory are fixed.
- *
- * The supported cache configurations are:
- * - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default)
- * - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache
- * - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory
- * - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory
- *
- * \param pconfig - Returned cache configuration
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuCtxCreate,
- * ::cuCtxDestroy,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPopCurrent,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetCacheConfig,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize,
- * ::cuFuncSetCacheConfig,
- * ::cudaDeviceGetCacheConfig
- */
-CUresult CUDAAPI cuCtxGetCacheConfig(CUfunc_cache *pconfig);
-
-/**
- * \brief Sets the preferred cache configuration for the current context.
- *
- * On devices where the L1 cache and shared memory use the same hardware
- * resources, this sets through \p config the preferred cache configuration for
- * the current context. This is only a preference. The driver will use
- * the requested configuration if possible, but it is free to choose a different
- * configuration if required to execute the function. Any function preference
- * set via ::cuFuncSetCacheConfig() will be preferred over this context-wide
- * setting. Setting the context-wide cache configuration to
- * ::CU_FUNC_CACHE_PREFER_NONE will cause subsequent kernel launches to prefer
- * to not change the cache configuration unless required to launch the kernel.
- *
- * This setting does nothing on devices where the size of the L1 cache and
- * shared memory are fixed.
- *
- * Launching a kernel with a different preference than the most recent
- * preference setting may insert a device-side synchronization point.
- *
- * The supported cache configurations are:
- * - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default)
- * - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache
- * - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory
- * - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory
- *
- * \param config - Requested cache configuration
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuCtxCreate,
- * ::cuCtxDestroy,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPopCurrent,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize,
- * ::cuFuncSetCacheConfig,
- * ::cudaDeviceSetCacheConfig
- */
-CUresult CUDAAPI cuCtxSetCacheConfig(CUfunc_cache config);
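A minimal sketch of setting the context-wide cache preference and reading it back; as noted above, this is a hint the driver may override.

#include <cuda.h>

/* Ask the driver to favor shared memory over L1 for this context. */
void prefer_shared_memory(void) {
  CUfunc_cache config = CU_FUNC_CACHE_PREFER_NONE;
  cuCtxSetCacheConfig(CU_FUNC_CACHE_PREFER_SHARED);  /* a hint, not a guarantee */
  cuCtxGetCacheConfig(&config);                      /* read back the current preference */
}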
-
-/**
- * \brief Returns the current shared memory configuration for the current context.
- *
- * This function will return in \p pConfig the current size of shared memory banks
- * in the current context. On devices with configurable shared memory banks,
- * ::cuCtxSetSharedMemConfig can be used to change this setting, so that all
- * subsequent kernel launches will by default use the new bank size. When
- * ::cuCtxGetSharedMemConfig is called on devices without configurable shared
- * memory, it will return the fixed bank size of the hardware.
- *
- * The returned bank configurations can be either:
- * - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: shared memory bank width is
- * four bytes.
- * - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: shared memory bank width is
- * eight bytes.
- *
- * \param pConfig - returned shared memory configuration
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuCtxCreate,
- * ::cuCtxDestroy,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPopCurrent,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize,
- * ::cuCtxGetSharedMemConfig,
- * ::cuFuncSetCacheConfig,
- * ::cudaDeviceGetSharedMemConfig
- */
-CUresult CUDAAPI cuCtxGetSharedMemConfig(CUsharedconfig *pConfig);
-
-/**
- * \brief Sets the shared memory configuration for the current context.
- *
- * On devices with configurable shared memory banks, this function will set
- * the context's shared memory bank size which is used for subsequent kernel
- * launches.
- *
- * Changing the shared memory configuration between launches may insert a
- * device-side synchronization point between those launches.
- *
- * Changing the shared memory bank size will not increase shared memory usage
- * or affect occupancy of kernels, but may have major effects on performance.
- * Larger bank sizes will allow for greater potential bandwidth to shared memory,
- * but will change what kinds of accesses to shared memory will result in bank
- * conflicts.
- *
- * This function will do nothing on devices with fixed shared memory bank size.
- *
- * The supported bank configurations are:
- * - ::CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: set bank width to the default initial
- * setting (currently, four bytes).
- * - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: set shared memory bank width to
- * be natively four bytes.
- * - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: set shared memory bank width to
- * be natively eight bytes.
- *
- * \param config - requested shared memory configuration
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuCtxCreate,
- * ::cuCtxDestroy,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPopCurrent,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize,
- * ::cuCtxGetSharedMemConfig,
- * ::cuFuncSetCacheConfig,
- * ::cudaDeviceSetSharedMemConfig
- */
-CUresult CUDAAPI cuCtxSetSharedMemConfig(CUsharedconfig config);
-
-/**
- * \brief Gets the context's API version.
- *
- * Returns a version number in \p version corresponding to the capabilities of
- * the context (e.g. 3010 or 3020), which library developers can use to direct
- * callers to a specific API version. If \p ctx is NULL, returns the API version
- * used to create the currently bound context.
- *
- * Note that new API versions are only introduced when context capabilities are
- * changed that break binary compatibility, so the API version and driver version
- * may be different. For example, it is valid for the API version to be 3020 while
- * the driver version is 4020.
- *
- * \param ctx - Context to check
- * \param version - Pointer to version
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_UNKNOWN
- * \notefnerr
- *
- * \sa ::cuCtxCreate,
- * ::cuCtxDestroy,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPopCurrent,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetCacheConfig,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize
- */
-CUresult CUDAAPI cuCtxGetApiVersion(CUcontext ctx, unsigned int *version);
-
-/**
- * \brief Returns numerical values that correspond to the least and
- * greatest stream priorities.
- *
- * Returns in \p *leastPriority and \p *greatestPriority the numerical values that correspond
- * to the least and greatest stream priorities respectively. Stream priorities
- * follow a convention where lower numbers imply greater priorities. The range of
- * meaningful stream priorities is given by [\p *greatestPriority, \p *leastPriority].
- * If the user attempts to create a stream with a priority value that is
- * outside the meaningful range as specified by this API, the priority is
- * automatically clamped down or up to either \p *leastPriority or \p *greatestPriority
- * respectively. See ::cuStreamCreateWithPriority for details on creating a
- * priority stream.
- * A NULL may be passed in for \p *leastPriority or \p *greatestPriority if the value
- * is not desired.
- *
- * This function will return '0' in both \p *leastPriority and \p *greatestPriority if
- * the current context's device does not support stream priorities
- * (see ::cuDeviceGetAttribute).
- *
- * \param leastPriority - Pointer to an int in which the numerical value for least
- * stream priority is returned
- * \param greatestPriority - Pointer to an int in which the numerical value for greatest
- * stream priority is returned
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * \notefnerr
- *
- * \sa ::cuStreamCreateWithPriority,
- * ::cuStreamGetPriority,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize,
- * ::cudaDeviceGetStreamPriorityRange
- */
-CUresult CUDAAPI cuCtxGetStreamPriorityRange(int *leastPriority, int *greatestPriority);
-
-/**
- * \brief Resets all persisting lines in cache to normal status.
- *
- * ::cuCtxResetPersistingL2Cache() resets all persisting lines in cache to normal
- * status. Takes effect on function return.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_NOT_SUPPORTED
- * \notefnerr
- *
- * \sa
- * ::CUaccessPolicyWindow
- */
-CUresult CUDAAPI cuCtxResetPersistingL2Cache(void);
-
-/** @} */ /* END CUDA_CTX */
-
-/**
- * \defgroup CUDA_CTX_DEPRECATED Context Management [DEPRECATED]
- *
- * ___MANBRIEF___ deprecated context management functions of the low-level CUDA
- * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the deprecated context management functions of the low-level
- * CUDA driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Increment a context's usage-count
- *
- * \deprecated
- *
- * Note that this function is deprecated and should not be used.
- *
- * Increments the usage count of the context and passes back a context handle
- * in \p *pctx that must be passed to ::cuCtxDetach() when the application is
- * done with the context. ::cuCtxAttach() fails if there is no context current
- * to the thread.
- *
- * Currently, the \p flags parameter must be 0.
- *
- * \param pctx - Returned context handle of the current context
- * \param flags - Context attach flags (must be 0)
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuCtxCreate,
- * ::cuCtxDestroy,
- * ::cuCtxDetach,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPopCurrent,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetCacheConfig,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuCtxAttach(CUcontext *pctx, unsigned int flags);
-
-/**
- * \brief Decrement a context's usage-count
- *
- * \deprecated
- *
- * Note that this function is deprecated and should not be used.
- *
- * Decrements the usage count of the context \p ctx, and destroys the context
- * if the usage count goes to 0. The context must be a handle that was passed
- * back by ::cuCtxCreate() or ::cuCtxAttach(), and must be current to the
- * calling thread.
- *
- * \param ctx - Context to destroy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT
- * \notefnerr
- *
- * \sa ::cuCtxCreate,
- * ::cuCtxDestroy,
- * ::cuCtxGetApiVersion,
- * ::cuCtxGetCacheConfig,
- * ::cuCtxGetDevice,
- * ::cuCtxGetFlags,
- * ::cuCtxGetLimit,
- * ::cuCtxPopCurrent,
- * ::cuCtxPushCurrent,
- * ::cuCtxSetCacheConfig,
- * ::cuCtxSetLimit,
- * ::cuCtxSynchronize
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuCtxDetach(CUcontext ctx);
-
-/** @} */ /* END CUDA_CTX_DEPRECATED */
-
-
-/**
- * \defgroup CUDA_MODULE Module Management
- *
- * ___MANBRIEF___ module management functions of the low-level CUDA driver API
- * (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the module management functions of the low-level CUDA
- * driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Loads a compute module
- *
- * Takes a filename \p fname and loads the corresponding module \p module into
- * the current context. The CUDA driver API does not attempt to lazily
- * allocate the resources needed by a module; if the memory for functions and
- * data (constant and global) needed by the module cannot be allocated,
- * ::cuModuleLoad() fails. The file should be a \e cubin file as output by
- * \b nvcc, or a \e PTX file either as output by \b nvcc or handwritten, or
- * a \e fatbin file as output by \b nvcc from toolchain 4.0 or later.
- *
- * \param module - Returned module
- * \param fname - Filename of module to load
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_PTX,
- * ::CUDA_ERROR_NOT_FOUND,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_FILE_NOT_FOUND,
- * ::CUDA_ERROR_NO_BINARY_FOR_GPU,
- * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND,
- * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED,
- * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND
- * \notefnerr
- *
- * \sa ::cuModuleGetFunction,
- * ::cuModuleGetGlobal,
- * ::cuModuleGetTexRef,
- * ::cuModuleLoadData,
- * ::cuModuleLoadDataEx,
- * ::cuModuleLoadFatBinary,
- * ::cuModuleUnload
- */
-CUresult CUDAAPI cuModuleLoad(CUmodule *module, const char *fname);
-
-/**
- * \brief Load a module's data
- *
- * Takes a pointer \p image and loads the corresponding module \p module into
- * the current context. The pointer may be obtained by mapping a \e cubin or
- * \e PTX or \e fatbin file, passing a \e cubin or \e PTX or \e fatbin file
- * as a NULL-terminated text string, or incorporating a \e cubin or \e fatbin
- * object into the executable resources and using operating system calls such
- * as Windows \c FindResource() to obtain the pointer.
- *
- * \param module - Returned module
- * \param image - Module data to load
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_PTX,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_NO_BINARY_FOR_GPU,
- * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND,
- * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED,
- * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND
- * \notefnerr
- *
- * \sa ::cuModuleGetFunction,
- * ::cuModuleGetGlobal,
- * ::cuModuleGetTexRef,
- * ::cuModuleLoad,
- * ::cuModuleLoadDataEx,
- * ::cuModuleLoadFatBinary,
- * ::cuModuleUnload
- */
-CUresult CUDAAPI cuModuleLoadData(CUmodule *module, const void *image);
-
-/**
- * \brief Load a module's data with options
- *
- * Takes a pointer \p image and loads the corresponding module \p module into
- * the current context. The pointer may be obtained by mapping a \e cubin or
- * \e PTX or \e fatbin file, passing a \e cubin or \e PTX or \e fatbin file
- * as a NULL-terminated text string, or incorporating a \e cubin or \e fatbin
- * object into the executable resources and using operating system calls such
- * as Windows \c FindResource() to obtain the pointer. Options are passed as
- * an array via \p options and any corresponding parameters are passed in
- * \p optionValues. The number of total options is supplied via \p numOptions.
- * Any outputs will be returned via \p optionValues.
- *
- * \param module - Returned module
- * \param image - Module data to load
- * \param numOptions - Number of options
- * \param options - Options for JIT
- * \param optionValues - Option values for JIT
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_PTX,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_NO_BINARY_FOR_GPU,
- * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND,
- * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED,
- * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND
- * \notefnerr
- *
- * \sa ::cuModuleGetFunction,
- * ::cuModuleGetGlobal,
- * ::cuModuleGetTexRef,
- * ::cuModuleLoad,
- * ::cuModuleLoadData,
- * ::cuModuleLoadFatBinary,
- * ::cuModuleUnload
- */
-CUresult CUDAAPI cuModuleLoadDataEx(CUmodule *module, const void *image, unsigned int numOptions, CUjit_option *options, void **optionValues);
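
[Editor's note] A minimal sketch of how these JIT options are typically combined when loading PTX, assuming a current context and a hypothetical NUL-terminated PTX string `ptx_text`; error handling is abbreviated:

    #include <stdint.h>
    #include <stdio.h>
    #include <cuda.h>

    /* Sketch: JIT a NUL-terminated PTX string, capturing compiler errors in a
     * host buffer. Assumes a context is current on the calling thread. */
    static CUresult load_ptx_module(const char *ptx_text, CUmodule *out_module) {
      char error_log[4096] = {0};
      CUjit_option options[] = {
          CU_JIT_ERROR_LOG_BUFFER,
          CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES,
      };
      void *option_values[] = {
          error_log,
          (void *)(uintptr_t)sizeof(error_log),  /* size passed by value */
      };
      CUresult result = cuModuleLoadDataEx(out_module, ptx_text,
                                           2, options, option_values);
      if (result != CUDA_SUCCESS) {
        fprintf(stderr, "PTX JIT failed (%d): %s\n", (int)result, error_log);
      }
      return result;
    }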
-
-/**
- * \brief Load a module's data
- *
- * Takes a pointer \p fatCubin and loads the corresponding module \p module
- * into the current context. The pointer represents a <i>fat binary</i> object,
- * which is a collection of different \e cubin and/or \e PTX files, all
- * representing the same device code, but compiled and optimized for different
- * architectures.
- *
- * Prior to CUDA 4.0, there was no documented API for constructing and using
- * fat binary objects by programmers. Starting with CUDA 4.0, fat binary
- * objects can be constructed by providing the <i>-fatbin option</i> to \b nvcc.
- * More information can be found in the \b nvcc document.
- *
- * \param module - Returned module
- * \param fatCubin - Fat binary to load
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_PTX,
- * ::CUDA_ERROR_NOT_FOUND,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_NO_BINARY_FOR_GPU,
- * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND,
- * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED,
- * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND
- * \notefnerr
- *
- * \sa ::cuModuleGetFunction,
- * ::cuModuleGetGlobal,
- * ::cuModuleGetTexRef,
- * ::cuModuleLoad,
- * ::cuModuleLoadData,
- * ::cuModuleLoadDataEx,
- * ::cuModuleUnload
- */
-CUresult CUDAAPI cuModuleLoadFatBinary(CUmodule *module, const void *fatCubin);
-
-/**
- * \brief Unloads a module
- *
- * Unloads a module \p hmod from the current context.
- *
- * \param hmod - Module to unload
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuModuleGetFunction,
- * ::cuModuleGetGlobal,
- * ::cuModuleGetTexRef,
- * ::cuModuleLoad,
- * ::cuModuleLoadData,
- * ::cuModuleLoadDataEx,
- * ::cuModuleLoadFatBinary
- */
-CUresult CUDAAPI cuModuleUnload(CUmodule hmod);
-
-/**
- * \brief Returns a function handle
- *
- * Returns in \p *hfunc the handle of the function of name \p name located in
- * module \p hmod. If no function of that name exists, ::cuModuleGetFunction()
- * returns ::CUDA_ERROR_NOT_FOUND.
- *
- * \param hfunc - Returned function handle
- * \param hmod - Module to retrieve function from
- * \param name - Name of function to retrieve
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_NOT_FOUND
- * \notefnerr
- *
- * \sa ::cuModuleGetGlobal,
- * ::cuModuleGetTexRef,
- * ::cuModuleLoad,
- * ::cuModuleLoadData,
- * ::cuModuleLoadDataEx,
- * ::cuModuleLoadFatBinary,
- * ::cuModuleUnload
- */
-CUresult CUDAAPI cuModuleGetFunction(CUfunction *hfunc, CUmodule hmod, const char *name);
-
-/**
- * \brief Returns a global pointer from a module
- *
- * Returns in \p *dptr and \p *bytes the base pointer and size of the
- * global of name \p name located in module \p hmod. If no variable of that name
- * exists, ::cuModuleGetGlobal() returns ::CUDA_ERROR_NOT_FOUND. Both
- * parameters \p dptr and \p bytes are optional. If one of them is
- * NULL, it is ignored.
- *
- * \param dptr - Returned global device pointer
- * \param bytes - Returned global size in bytes
- * \param hmod - Module to retrieve global from
- * \param name - Name of global to retrieve
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_NOT_FOUND
- * \notefnerr
- *
- * \sa ::cuModuleGetFunction,
- * ::cuModuleGetTexRef,
- * ::cuModuleLoad,
- * ::cuModuleLoadData,
- * ::cuModuleLoadDataEx,
- * ::cuModuleLoadFatBinary,
- * ::cuModuleUnload,
- * ::cudaGetSymbolAddress,
- * ::cudaGetSymbolSize
- */
-CUresult CUDAAPI cuModuleGetGlobal(CUdeviceptr *dptr, size_t *bytes, CUmodule hmod, const char *name);
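
[Editor's note] A small sketch of the lookup-then-copy pattern, assuming a loaded module that defines a device global hypothetically named "scale"; error handling is abbreviated:

    #include <cuda.h>

    /* Sketch: locate a device global by name and overwrite it from the host. */
    static CUresult set_scale(CUmodule module, float value) {
      CUdeviceptr dptr = 0;
      size_t size = 0;
      CUresult result = cuModuleGetGlobal(&dptr, &size, module, "scale");
      if (result != CUDA_SUCCESS) return result;  /* e.g. CUDA_ERROR_NOT_FOUND */
      if (size < sizeof(value)) return CUDA_ERROR_INVALID_VALUE;
      return cuMemcpyHtoD(dptr, &value, sizeof(value));
    }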
-
-/**
- * \brief Returns a handle to a texture reference
- *
- * Returns in \p *pTexRef the handle of the texture reference of name \p name
- * in the module \p hmod. If no texture reference of that name exists,
- * ::cuModuleGetTexRef() returns ::CUDA_ERROR_NOT_FOUND. This texture reference
- * handle should not be destroyed, since it will be destroyed when the module
- * is unloaded.
- *
- * \param pTexRef - Returned texture reference
- * \param hmod - Module to retrieve texture reference from
- * \param name - Name of texture reference to retrieve
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_NOT_FOUND
- * \notefnerr
- *
- * \sa ::cuModuleGetFunction,
- * ::cuModuleGetGlobal,
- * ::cuModuleGetSurfRef,
- * ::cuModuleLoad,
- * ::cuModuleLoadData,
- * ::cuModuleLoadDataEx,
- * ::cuModuleLoadFatBinary,
- * ::cuModuleUnload,
- * ::cudaGetTextureReference
- */
-CUresult CUDAAPI cuModuleGetTexRef(CUtexref *pTexRef, CUmodule hmod, const char *name);
-
-/**
- * \brief Returns a handle to a surface reference
- *
- * Returns in \p *pSurfRef the handle of the surface reference of name \p name
- * in the module \p hmod. If no surface reference of that name exists,
- * ::cuModuleGetSurfRef() returns ::CUDA_ERROR_NOT_FOUND.
- *
- * \param pSurfRef - Returned surface reference
- * \param hmod - Module to retrieve surface reference from
- * \param name - Name of surface reference to retrieve
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_NOT_FOUND
- * \notefnerr
- *
- * \sa ::cuModuleGetFunction,
- * ::cuModuleGetGlobal,
- * ::cuModuleGetTexRef,
- * ::cuModuleLoad,
- * ::cuModuleLoadData,
- * ::cuModuleLoadDataEx,
- * ::cuModuleLoadFatBinary,
- * ::cuModuleUnload,
- * ::cudaGetSurfaceReference
- */
-CUresult CUDAAPI cuModuleGetSurfRef(CUsurfref *pSurfRef, CUmodule hmod, const char *name);
-
-/**
- * \brief Creates a pending JIT linker invocation.
- *
- * If the call is successful, the caller owns the returned CUlinkState, which
- * should eventually be destroyed with ::cuLinkDestroy. The
- * device code machine size (32 or 64 bit) will match the calling application.
- *
- * Both linker and compiler options may be specified. Compiler options will
- * be applied to inputs to this linker action which must be compiled from PTX.
- * The options ::CU_JIT_WALL_TIME,
- * ::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES, and ::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES
- * will accumulate data until the CUlinkState is destroyed.
- *
- * \p optionValues must remain valid for the life of the CUlinkState if output
- * options are used. No other references to inputs are maintained after this
- * call returns.
- *
- * \param numOptions Size of options arrays
- * \param options Array of linker and compiler options
- * \param optionValues Array of option values, each cast to void *
- * \param stateOut On success, this will contain a CUlinkState to specify
- * and complete this action
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND
- * \notefnerr
- *
- * \sa ::cuLinkAddData,
- * ::cuLinkAddFile,
- * ::cuLinkComplete,
- * ::cuLinkDestroy
- */
-CUresult CUDAAPI
-cuLinkCreate(unsigned int numOptions, CUjit_option *options, void **optionValues, CUlinkState *stateOut);
-
-/**
- * \brief Add an input to a pending linker invocation
- *
- * Ownership of \p data is retained by the caller. No reference is retained to any
- * inputs after this call returns.
- *
- * This method accepts only compiler options, which are used if the data must
- * be compiled from PTX, and does not accept any of
- * ::CU_JIT_WALL_TIME, ::CU_JIT_INFO_LOG_BUFFER, ::CU_JIT_ERROR_LOG_BUFFER,
- * ::CU_JIT_TARGET_FROM_CUCONTEXT, or ::CU_JIT_TARGET.
- *
- * \param state A pending linker action.
- * \param type The type of the input data.
- * \param data The input data. PTX must be NULL-terminated.
- * \param size The length of the input data.
- * \param name An optional name for this input in log messages.
- * \param numOptions Size of options.
- * \param options Options to be applied only for this input (overrides options from ::cuLinkCreate).
- * \param optionValues Array of option values, each cast to void *.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_IMAGE,
- * ::CUDA_ERROR_INVALID_PTX,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_NO_BINARY_FOR_GPU
- *
- * \sa ::cuLinkCreate,
- * ::cuLinkAddFile,
- * ::cuLinkComplete,
- * ::cuLinkDestroy
- */
-CUresult CUDAAPI
-cuLinkAddData(CUlinkState state, CUjitInputType type, void *data, size_t size, const char *name,
- unsigned int numOptions, CUjit_option *options, void **optionValues);
-
-/**
- * \brief Add a file input to a pending linker invocation
- *
- * No reference is retained to any inputs after this call returns.
- *
- * This method accepts only compiler options, which are used if the input
- * must be compiled from PTX, and does not accept any of
- * ::CU_JIT_WALL_TIME, ::CU_JIT_INFO_LOG_BUFFER, ::CU_JIT_ERROR_LOG_BUFFER,
- * ::CU_JIT_TARGET_FROM_CUCONTEXT, or ::CU_JIT_TARGET.
- *
- * This method is equivalent to invoking ::cuLinkAddData on the contents
- * of the file.
- *
- * \param state A pending linker action
- * \param type The type of the input data
- * \param path Path to the input file
- * \param numOptions Size of options
- * \param options Options to be applied only for this input (overrides options from ::cuLinkCreate)
- * \param optionValues Array of option values, each cast to void *
- *
- * \return
- * ::CUDA_SUCCESS,

- * ::CUDA_ERROR_FILE_NOT_FOUND,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_IMAGE,
- * ::CUDA_ERROR_INVALID_PTX,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_NO_BINARY_FOR_GPU
- *
- * \sa ::cuLinkCreate,
- * ::cuLinkAddData,
- * ::cuLinkComplete,
- * ::cuLinkDestroy
- */
-CUresult CUDAAPI
-cuLinkAddFile(CUlinkState state, CUjitInputType type, const char *path,
- unsigned int numOptions, CUjit_option *options, void **optionValues);
-
-/**
- * \brief Complete a pending linker invocation
- *
- * Completes the pending linker action and returns the cubin image for the linked
- * device code, which can be used with ::cuModuleLoadData. The cubin is owned by
- * \p state, so it should be loaded before \p state is destroyed via ::cuLinkDestroy.
- * This call does not destroy \p state.
- *
- * \param state A pending linker invocation
- * \param cubinOut On success, this will point to the output image
- * \param sizeOut Optional parameter to receive the size of the generated image
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_OUT_OF_MEMORY
- *
- * \sa ::cuLinkCreate,
- * ::cuLinkAddData,
- * ::cuLinkAddFile,
- * ::cuLinkDestroy,
- * ::cuModuleLoadData
- */
-CUresult CUDAAPI
-cuLinkComplete(CUlinkState state, void **cubinOut, size_t *sizeOut);
-
-/**
- * \brief Destroys state for a JIT linker invocation.
- *
- * \param state State object for the linker invocation
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_HANDLE
- *
- * \sa ::cuLinkCreate
- */
-CUresult CUDAAPI
-cuLinkDestroy(CUlinkState state);
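
[Editor's note] A sketch of the full cuLink* flow declared above: create a link state, add one PTX input, complete the link, and load the resulting cubin. `ptx_text` and the input name are hypothetical; a current context is assumed and most error handling is elided:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <cuda.h>

    static CUresult link_and_load(const char *ptx_text, CUmodule *out_module) {
      char error_log[4096] = {0};
      CUjit_option options[] = {CU_JIT_ERROR_LOG_BUFFER,
                                CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES};
      void *option_values[] = {error_log, (void *)(uintptr_t)sizeof(error_log)};

      CUlinkState state;
      CUresult result = cuLinkCreate(2, options, option_values, &state);
      if (result != CUDA_SUCCESS) return result;

      /* PTX passed to cuLinkAddData must be NUL-terminated; include the NUL. */
      result = cuLinkAddData(state, CU_JIT_INPUT_PTX, (void *)ptx_text,
                             strlen(ptx_text) + 1, "my_kernels.ptx",
                             0, NULL, NULL);

      void *cubin = NULL;
      size_t cubin_size = 0;
      if (result == CUDA_SUCCESS) {
        result = cuLinkComplete(state, &cubin, &cubin_size);
      }

      /* The cubin is owned by `state`, so load it before destroying the state. */
      if (result == CUDA_SUCCESS) {
        result = cuModuleLoadData(out_module, cubin);
      }
      if (result != CUDA_SUCCESS) {
        fprintf(stderr, "link failed: %s\n", error_log);
      }

      cuLinkDestroy(state);
      return result;
    }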
-
-/** @} */ /* END CUDA_MODULE */
-
-
-/**
- * \defgroup CUDA_MEM Memory Management
- *
- * ___MANBRIEF___ memory management functions of the low-level CUDA driver API
- * (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the memory management functions of the low-level CUDA
- * driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Gets free and total memory
- *
- * Returns in \p *free and \p *total respectively, the free and total amount of
- * memory available for allocation by the CUDA context, in bytes.
- *
- * \param free - Returned free memory in bytes
- * \param total - Returned total memory in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMemGetInfo
- */
-CUresult CUDAAPI cuMemGetInfo(size_t *free, size_t *total);
-
-/**
- * \brief Allocates device memory
- *
- * Allocates \p bytesize bytes of linear memory on the device and returns in
- * \p *dptr a pointer to the allocated memory. The allocated memory is suitably
- * aligned for any kind of variable. The memory is not cleared. If \p bytesize
- * is 0, ::cuMemAlloc() returns ::CUDA_ERROR_INVALID_VALUE.
- *
- * \param dptr - Returned device pointer
- * \param bytesize - Requested allocation size in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY
- * \notefnerr
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMalloc
- */
-CUresult CUDAAPI cuMemAlloc(CUdeviceptr *dptr, size_t bytesize);
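
[Editor's note] As a small illustration of the two calls above, a sketch that checks the reported free memory before allocating. This assumes a current context; the check is advisory only, since other allocations may race with it:

    #include <stdio.h>
    #include <cuda.h>

    static CUresult alloc_if_possible(size_t bytesize, CUdeviceptr *out_ptr) {
      size_t free_bytes = 0, total_bytes = 0;
      CUresult result = cuMemGetInfo(&free_bytes, &total_bytes);
      if (result != CUDA_SUCCESS) return result;
      printf("device memory: %zu free of %zu bytes\n", free_bytes, total_bytes);
      if (bytesize == 0 || bytesize > free_bytes) {
        return CUDA_ERROR_OUT_OF_MEMORY;  /* cuMemAlloc rejects bytesize == 0 */
      }
      return cuMemAlloc(out_ptr, bytesize);
    }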
-
-/**
- * \brief Allocates pitched device memory
- *
- * Allocates at least \p WidthInBytes * \p Height bytes of linear memory on
- * the device and returns in \p *dptr a pointer to the allocated memory. The
- * function may pad the allocation to ensure that corresponding pointers in
- * any given row will continue to meet the alignment requirements for
- * coalescing as the address is updated from row to row. \p ElementSizeBytes
- * specifies the size of the largest reads and writes that will be performed
- * on the memory range. \p ElementSizeBytes may be 4, 8 or 16 (since coalesced
- * memory transactions are not possible on other data sizes). If
- * \p ElementSizeBytes is smaller than the actual read/write size of a kernel,
- * the kernel will run correctly, but possibly at reduced speed. The pitch
- * returned in \p *pPitch by ::cuMemAllocPitch() is the width in bytes of the
- * allocation. The intended usage of pitch is as a separate parameter of the
- * allocation, used to compute addresses within the 2D array. Given the row
- * and column of an array element of type \b T, the address is computed as:
- * \code
- T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;
- * \endcode
- *
- * The pitch returned by ::cuMemAllocPitch() is guaranteed to work with
- * ::cuMemcpy2D() under all circumstances. For allocations of 2D arrays, it is
- * recommended that programmers consider performing pitch allocations using
- * ::cuMemAllocPitch(). Due to alignment restrictions in the hardware, this is
- * especially true if the application will be performing 2D memory copies
- * between different regions of device memory (whether linear memory or CUDA
- * arrays).
- *
- * The byte alignment of the pitch returned by ::cuMemAllocPitch() is guaranteed
- * to match or exceed the alignment requirement for texture binding with
- * ::cuTexRefSetAddress2D().
- *
- * \param dptr - Returned device pointer
- * \param pPitch - Returned pitch of allocation in bytes
- * \param WidthInBytes - Requested allocation width in bytes
- * \param Height - Requested allocation height in rows
- * \param ElementSizeBytes - Size of largest reads/writes for range
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY
- * \notefnerr
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMallocPitch
- */
-CUresult CUDAAPI cuMemAllocPitch(CUdeviceptr *dptr, size_t *pPitch, size_t WidthInBytes, size_t Height, unsigned int ElementSizeBytes);
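
[Editor's note] A minimal sketch of a pitched allocation for a 2D float array, mirroring the addressing formula in the documentation above; a current context is assumed and error handling is elided:

    #include <cuda.h>

    /* Allocate a width x height array of floats with device-chosen pitch.
     * ElementSizeBytes is 4 because the largest access is a 4-byte float. */
    static CUresult alloc_pitched(size_t width, size_t height,
                                  CUdeviceptr *out_base, size_t *out_pitch) {
      return cuMemAllocPitch(out_base, out_pitch,
                             width * sizeof(float), height,
                             (unsigned int)sizeof(float));
    }

    /* Device address of element (row, col), following:
     *   T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column; */
    static CUdeviceptr element_addr(CUdeviceptr base, size_t pitch,
                                    size_t row, size_t col) {
      return base + row * pitch + col * sizeof(float);
    }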
-
-/**
- * \brief Frees device memory
- *
- * Frees the memory space pointed to by \p dptr, which must have been returned
- * by a previous call to ::cuMemAlloc() or ::cuMemAllocPitch().
- *
- * \param dptr - Pointer to memory to free
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaFree
- */
-CUresult CUDAAPI cuMemFree(CUdeviceptr dptr);
-
-/**
- * \brief Get information on memory allocations
- *
- * Returns the base address in \p *pbase and size in \p *psize of the
- * allocation by ::cuMemAlloc() or ::cuMemAllocPitch() that contains the input
- * pointer \p dptr. Both parameters \p pbase and \p psize are optional. If one
- * of them is NULL, it is ignored.
- *
- * \param pbase - Returned base address
- * \param psize - Returned size of device memory allocation
- * \param dptr - Device pointer to query
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_NOT_FOUND,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32
- */
-CUresult CUDAAPI cuMemGetAddressRange(CUdeviceptr *pbase, size_t *psize, CUdeviceptr dptr);
-
-/**
- * \brief Allocates page-locked host memory
- *
- * Allocates \p bytesize bytes of host memory that is page-locked and
- * accessible to the device. The driver tracks the virtual memory ranges
- * allocated with this function and automatically accelerates calls to
- * functions such as ::cuMemcpy(). Since the memory can be accessed directly by
- * the device, it can be read or written with much higher bandwidth than
- * pageable memory obtained with functions such as ::malloc(). Allocating
- * excessive amounts of memory with ::cuMemAllocHost() may degrade system
- * performance, since it reduces the amount of memory available to the system
- * for paging. As a result, this function is best used sparingly to allocate
- * staging areas for data exchange between host and device.
- *
- * Note all host memory allocated using ::cuMemAllocHost() will automatically
- * be immediately accessible to all contexts on all devices which support unified
- * addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING).
- * The device pointer that may be used to access this host memory from those
- * contexts is always equal to the returned host pointer \p *pp.
- * See \ref CUDA_UNIFIED for additional details.
- *
- * \param pp - Returned host pointer to page-locked memory
- * \param bytesize - Requested allocation size in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY
- * \notefnerr
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMallocHost
- */
-CUresult CUDAAPI cuMemAllocHost(void **pp, size_t bytesize);
-
-/**
- * \brief Frees page-locked host memory
- *
- * Frees the memory space pointed to by \p p, which must have been returned by
- * a previous call to ::cuMemAllocHost().
- *
- * \param p - Pointer to memory to free
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaFreeHost
- */
-CUresult CUDAAPI cuMemFreeHost(void *p);
-
-/**
- * \brief Allocates page-locked host memory
- *
- * Allocates \p bytesize bytes of host memory that is page-locked and accessible
- * to the device. The driver tracks the virtual memory ranges allocated with
- * this function and automatically accelerates calls to functions such as
- * ::cuMemcpyHtoD(). Since the memory can be accessed directly by the device,
- * it can be read or written with much higher bandwidth than pageable memory
- * obtained with functions such as ::malloc(). Allocating excessive amounts of
- * pinned memory may degrade system performance, since it reduces the amount
- * of memory available to the system for paging. As a result, this function is
- * best used sparingly to allocate staging areas for data exchange between
- * host and device.
- *
- * The \p Flags parameter enables different options to be specified that
- * affect the allocation, as follows.
- *
- * - ::CU_MEMHOSTALLOC_PORTABLE: The memory returned by this call will be
- * considered as pinned memory by all CUDA contexts, not just the one that
- * performed the allocation.
- *
- * - ::CU_MEMHOSTALLOC_DEVICEMAP: Maps the allocation into the CUDA address
- * space. The device pointer to the memory may be obtained by calling
- * ::cuMemHostGetDevicePointer().
- *
- * - ::CU_MEMHOSTALLOC_WRITECOMBINED: Allocates the memory as write-combined
- * (WC). WC memory can be transferred across the PCI Express bus more
- * quickly on some system configurations, but cannot be read efficiently by
- * most CPUs. WC memory is a good option for buffers that will be written by
- * the CPU and read by the GPU via mapped pinned memory or host->device
- * transfers.
- *
- * All of these flags are orthogonal to one another: a developer may allocate
- * memory that is portable, mapped and/or write-combined with no restrictions.
- *
- * The CUDA context must have been created with the ::CU_CTX_MAP_HOST flag in
- * order for the ::CU_MEMHOSTALLOC_DEVICEMAP flag to have any effect.
- *
- * The ::CU_MEMHOSTALLOC_DEVICEMAP flag may be specified on CUDA contexts for
- * devices that do not support mapped pinned memory. The failure is deferred
- * to ::cuMemHostGetDevicePointer() because the memory may be mapped into
- * other CUDA contexts via the ::CU_MEMHOSTALLOC_PORTABLE flag.
- *
- * The memory allocated by this function must be freed with ::cuMemFreeHost().
- *
- * Note all host memory allocated using ::cuMemHostAlloc() will automatically
- * be immediately accessible to all contexts on all devices which support unified
- * addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING).
- * Unless the flag ::CU_MEMHOSTALLOC_WRITECOMBINED is specified, the device pointer
- * that may be used to access this host memory from those contexts is always equal
- * to the returned host pointer \p *pp. If the flag ::CU_MEMHOSTALLOC_WRITECOMBINED
- * is specified, then the function ::cuMemHostGetDevicePointer() must be used
- * to query the device pointer, even if the context supports unified addressing.
- * See \ref CUDA_UNIFIED for additional details.
- *
- * \param pp - Returned host pointer to page-locked memory
- * \param bytesize - Requested allocation size in bytes
- * \param Flags - Flags for allocation request
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY
- * \notefnerr
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaHostAlloc
- */
-CUresult CUDAAPI cuMemHostAlloc(void **pp, size_t bytesize, unsigned int Flags);
-
-/**
- * \brief Passes back device pointer of mapped pinned memory
- *
- * Passes back the device pointer \p pdptr corresponding to the mapped, pinned
- * host buffer \p p allocated by ::cuMemHostAlloc.
- *
- * ::cuMemHostGetDevicePointer() will fail if the ::CU_MEMHOSTALLOC_DEVICEMAP
- * flag was not specified at the time the memory was allocated, or if the
- * function is called on a GPU that does not support mapped pinned memory.
- *
- * For devices that have a non-zero value for the device attribute
- * ::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM, the memory
- * can also be accessed from the device using the host pointer \p p.
- * The device pointer returned by ::cuMemHostGetDevicePointer() may or may not
- * match the original host pointer \p p and depends on the devices visible to the
- * application. If all devices visible to the application have a non-zero value for the
- * device attribute, the device pointer returned by ::cuMemHostGetDevicePointer()
- * will match the original pointer \p p. If any device visible to the application
- * has a zero value for the device attribute, the device pointer returned by
- * ::cuMemHostGetDevicePointer() will not match the original host pointer \p p,
- * but it will be suitable for use on all devices provided Unified Virtual Addressing
- * is enabled. In such systems, it is valid to access the memory using either pointer
- * on devices that have a non-zero value for the device attribute. Note however that
- * such devices should access the memory using only one of the two pointers and not both.
- *
- * \p Flags is provided for future releases. For now, it must be set to 0.
- *
- * \param pdptr - Returned device pointer
- * \param p - Host pointer
- * \param Flags - Options (must be 0)
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaHostGetDevicePointer
- */
-CUresult CUDAAPI cuMemHostGetDevicePointer(CUdeviceptr *pdptr, void *p, unsigned int Flags);
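
[Editor's note] A sketch combining ::cuMemHostAlloc and ::cuMemHostGetDevicePointer to obtain mapped, page-locked host memory. It assumes the context was created with ::CU_CTX_MAP_HOST (or that the device supports unified addressing); error handling is abbreviated:

    #include <cuda.h>

    static CUresult alloc_mapped(size_t bytesize, void **out_host,
                                 CUdeviceptr *out_device) {
      CUresult result = cuMemHostAlloc(out_host, bytesize,
                                       CU_MEMHOSTALLOC_DEVICEMAP);
      if (result != CUDA_SUCCESS) return result;
      /* Flags must be 0 for cuMemHostGetDevicePointer. */
      result = cuMemHostGetDevicePointer(out_device, *out_host, 0);
      if (result != CUDA_SUCCESS) cuMemFreeHost(*out_host);
      return result;
    }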
-
-/**
- * \brief Passes back flags that were used for a pinned allocation
- *
- * Passes back the flags \p pFlags that were specified when allocating
- * the pinned host buffer \p p allocated by ::cuMemHostAlloc.
- *
- * ::cuMemHostGetFlags() will fail if the pointer does not reside in
- * an allocation performed by ::cuMemAllocHost() or ::cuMemHostAlloc().
- *
- * \param pFlags - Returned flags word
- * \param p - Host pointer
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa
- * ::cuMemAllocHost,
- * ::cuMemHostAlloc,
- * ::cudaHostGetFlags
- */
-CUresult CUDAAPI cuMemHostGetFlags(unsigned int *pFlags, void *p);
-
-/**
- * \brief Allocates memory that will be automatically managed by the Unified Memory system
- *
- * Allocates \p bytesize bytes of managed memory on the device and returns in
- * \p *dptr a pointer to the allocated memory. If the device doesn't support
- * allocating managed memory, ::CUDA_ERROR_NOT_SUPPORTED is returned. Support
- * for managed memory can be queried using the device attribute
- * ::CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY. The allocated memory is suitably
- * aligned for any kind of variable. The memory is not cleared. If \p bytesize
- * is 0, ::cuMemAllocManaged returns ::CUDA_ERROR_INVALID_VALUE. The pointer
- * is valid on the CPU and on all GPUs in the system that support managed memory.
- * All accesses to this pointer must obey the Unified Memory programming model.
- *
- * \p flags specifies the default stream association for this allocation.
- * \p flags must be one of ::CU_MEM_ATTACH_GLOBAL or ::CU_MEM_ATTACH_HOST. If
- * ::CU_MEM_ATTACH_GLOBAL is specified, then this memory is accessible from
- * any stream on any device. If ::CU_MEM_ATTACH_HOST is specified, then the
- * allocation should not be accessed from devices that have a zero value for the
- * device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS; an explicit call to
- * ::cuStreamAttachMemAsync will be required to enable access on such devices.
- *
- * If the association is later changed via ::cuStreamAttachMemAsync to
- * a single stream, the default association as specified during ::cuMemAllocManaged
- * is restored when that stream is destroyed. For __managed__ variables, the
- * default association is always ::CU_MEM_ATTACH_GLOBAL. Note that destroying a
- * stream is an asynchronous operation, and as a result, the change to default
- * association won't happen until all work in the stream has completed.
- *
- * Memory allocated with ::cuMemAllocManaged should be released with ::cuMemFree.
- *
- * Device memory oversubscription is possible for GPUs that have a non-zero value for the
- * device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Managed memory on
- * such GPUs may be evicted from device memory to host memory at any time by the Unified
- * Memory driver in order to make room for other allocations.
- *
- * In a multi-GPU system where all GPUs have a non-zero value for the device attribute
- * ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, managed memory may not be populated when this
- * API returns and instead may be populated on access. In such systems, managed memory can
- * migrate to any processor's memory at any time. The Unified Memory driver will employ heuristics to
- * maintain data locality and prevent excessive page faults to the extent possible. The application
- * can also guide the driver about memory usage patterns via ::cuMemAdvise. The application
- * can also explicitly migrate memory to a desired processor's memory via
- * ::cuMemPrefetchAsync.
- *
- * In a multi-GPU system where all of the GPUs have a zero value for the device attribute
- * ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS and all the GPUs have peer-to-peer support
- * with each other, the physical storage for managed memory is created on the GPU which is active
- * at the time ::cuMemAllocManaged is called. All other GPUs will reference the data at reduced
- * bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate
- * memory among such GPUs.
- *
- * In a multi-GPU system where not all GPUs have peer-to-peer support with each other and
- * where the value of the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS
- * is zero for at least one of those GPUs, the location chosen for physical storage of managed
- * memory is system-dependent.
- * - On Linux, the location chosen will be device memory as long as the current set of active
- * contexts are on devices that either have peer-to-peer support with each other or have a
- * non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS.
- * If there is an active context on a GPU that does not have a non-zero value for that device
- * attribute and it does not have peer-to-peer support with the other devices that have active
- * contexts on them, then the location for physical storage will be 'zero-copy' or host memory.
- * Note that this means that managed memory that is located in device memory is migrated to
- * host memory if a new context is created on a GPU that doesn't have a non-zero value for
- * the device attribute and does not support peer-to-peer with at least one of the other devices
- * that has an active context. This in turn implies that context creation may fail if there is
- * insufficient host memory to migrate all managed allocations.
- * - On Windows, the physical storage is always created in 'zero-copy' or host memory.
- * All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these
- * circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to
- * restrict CUDA to only use those GPUs that have peer-to-peer support.
- * Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a
- * non-zero value to force the driver to always use device memory for physical storage.
- * When this environment variable is set to a non-zero value, all contexts created in
- * that process on devices that support managed memory have to be peer-to-peer compatible
- * with each other. Context creation will fail if a context is created on a device that
- * supports managed memory and is not peer-to-peer compatible with any of the other
- * managed memory supporting devices on which contexts were previously created, even if
- * those contexts have been destroyed. These environment variables are described
- * in the CUDA programming guide under the "CUDA environment variables" section.
- * - On ARM, managed memory is not available on the discrete GPU with Drive PX-2.
- *
- * \param dptr - Returned device pointer
- * \param bytesize - Requested allocation size in bytes
- * \param flags - Must be one of ::CU_MEM_ATTACH_GLOBAL or ::CU_MEM_ATTACH_HOST
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_NOT_SUPPORTED,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY
- * \notefnerr
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cuDeviceGetAttribute, ::cuStreamAttachMemAsync,
- * ::cudaMallocManaged
- */
-CUresult CUDAAPI cuMemAllocManaged(CUdeviceptr *dptr, size_t bytesize, unsigned int flags);
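
[Editor's note] A small sketch of a managed allocation that the host then writes through the same pointer a kernel would use. It assumes a current context on a device that reports ::CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY; error handling is abbreviated:

    #include <stddef.h>
    #include <stdint.h>
    #include <cuda.h>

    static CUresult fill_managed(size_t count, CUdeviceptr *out_ptr) {
      CUresult result = cuMemAllocManaged(out_ptr, count * sizeof(float),
                                          CU_MEM_ATTACH_GLOBAL);
      if (result != CUDA_SUCCESS) return result;
      /* The managed pointer is valid on the CPU as well as on the GPU. */
      float *host_view = (float *)(uintptr_t)*out_ptr;
      for (size_t i = 0; i < count; ++i) host_view[i] = 0.0f;
      return CUDA_SUCCESS;  /* release later with cuMemFree */
    }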
-
-/**
- * \brief Returns a handle to a compute device
- *
- * Returns in \p *device a device handle given a PCI bus ID string.
- *
- * \param dev - Returned device handle
- *
- * \param pciBusId - String in one of the following forms:
- * [domain]:[bus]:[device].[function]
- * [domain]:[bus]:[device]
- * [bus]:[device].[function]
- * where \p domain, \p bus, \p device, and \p function are all hexadecimal values
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- *
- * \sa
- * ::cuDeviceGet,
- * ::cuDeviceGetAttribute,
- * ::cuDeviceGetPCIBusId,
- * ::cudaDeviceGetByPCIBusId
- */
-CUresult CUDAAPI cuDeviceGetByPCIBusId(CUdevice *dev, const char *pciBusId);
-
-/**
- * \brief Returns a PCI Bus Id string for the device
- *
- * Returns an ASCII string identifying the device \p dev in the NULL-terminated
- * string pointed to by \p pciBusId. \p len specifies the maximum length of the
- * string that may be returned.
- *
- * \param pciBusId - Returned identifier string for the device in the following format
- * [domain]:[bus]:[device].[function]
- * where \p domain, \p bus, \p device, and \p function are all hexadecimal values.
- * pciBusId should be large enough to store 13 characters including the NULL-terminator.
- *
- * \param len - Maximum length of string to store in \p pciBusId
- *
- * \param dev - Device to get identifier string for
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- *
- * \sa
- * ::cuDeviceGet,
- * ::cuDeviceGetAttribute,
- * ::cuDeviceGetByPCIBusId,
- * ::cudaDeviceGetPCIBusId
- */
-CUresult CUDAAPI cuDeviceGetPCIBusId(char *pciBusId, int len, CUdevice dev);
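
[Editor's note] A sketch that round-trips a device handle through its PCI bus id string using the two calls above; the 13-byte minimum comes from the documentation, and error handling is abbreviated:

    #include <stdio.h>
    #include <cuda.h>

    static void pci_bus_id_round_trip(CUdevice dev) {
      char bus_id[32] = {0};
      if (cuDeviceGetPCIBusId(bus_id, (int)sizeof(bus_id), dev) != CUDA_SUCCESS) {
        return;
      }
      printf("device PCI bus id: %s\n", bus_id);

      CUdevice same_dev;
      if (cuDeviceGetByPCIBusId(&same_dev, bus_id) == CUDA_SUCCESS) {
        /* `same_dev` now refers to the device identified by `bus_id`. */
      }
    }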
-
-/**
- * \brief Gets an interprocess handle for a previously allocated event
- *
- * Takes as input a previously allocated event. This event must have been
- * created with the ::CU_EVENT_INTERPROCESS and ::CU_EVENT_DISABLE_TIMING
- * flags set. This opaque handle may be copied into other processes and
- * opened with ::cuIpcOpenEventHandle to allow efficient hardware
- * synchronization between GPU work in different processes.
- *
- * After the event has been opened in the importing process,
- * ::cuEventRecord, ::cuEventSynchronize, ::cuStreamWaitEvent and
- * ::cuEventQuery may be used in either process. Performing operations
- * on the imported event after the exported event has been freed
- * with ::cuEventDestroy will result in undefined behavior.
- *
- * IPC functionality is restricted to devices with support for unified
- * addressing on Linux and Windows operating systems.
- * IPC functionality on Windows is restricted to GPUs in TCC mode
- *
- * \param pHandle - Pointer to a user allocated CUipcEventHandle
- * in which to return the opaque event handle
- * \param event - Event allocated with ::CU_EVENT_INTERPROCESS and
- * ::CU_EVENT_DISABLE_TIMING flags.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_MAP_FAILED,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa
- * ::cuEventCreate,
- * ::cuEventDestroy,
- * ::cuEventSynchronize,
- * ::cuEventQuery,
- * ::cuStreamWaitEvent,
- * ::cuIpcOpenEventHandle,
- * ::cuIpcGetMemHandle,
- * ::cuIpcOpenMemHandle,
- * ::cuIpcCloseMemHandle,
- * ::cudaIpcGetEventHandle
- */
-CUresult CUDAAPI cuIpcGetEventHandle(CUipcEventHandle *pHandle, CUevent event);
-
-/**
- * \brief Opens an interprocess event handle for use in the current process
- *
- * Opens an interprocess event handle exported from another process with
- * ::cuIpcGetEventHandle. This function returns a ::CUevent that behaves like
- * a locally created event with the ::CU_EVENT_DISABLE_TIMING flag specified.
- * This event must be freed with ::cuEventDestroy.
- *
- * Performing operations on the imported event after the exported event has
- * been freed with ::cuEventDestroy will result in undefined behavior.
- *
- * IPC functionality is restricted to devices with support for unified
- * addressing on Linux and Windows operating systems.
- * IPC functionality on Windows is restricted to GPUs in TCC mode
- *
- * \param phEvent - Returns the imported event
- * \param handle - Interprocess handle to open
- *
- * \returns
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_MAP_FAILED,
- * ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa
- * ::cuEventCreate,
- * ::cuEventDestroy,
- * ::cuEventSynchronize,
- * ::cuEventQuery,
- * ::cuStreamWaitEvent,
- * ::cuIpcGetEventHandle,
- * ::cuIpcGetMemHandle,
- * ::cuIpcOpenMemHandle,
- * ::cuIpcCloseMemHandle,
- * ::cudaIpcOpenEventHandle
- */
-CUresult CUDAAPI cuIpcOpenEventHandle(CUevent *phEvent, CUipcEventHandle handle);
-
-/**
- * \brief Gets an interprocess memory handle for an existing device memory
- * allocation
- *
- * Takes a pointer to the base of an existing device memory allocation created
- * with ::cuMemAlloc and exports it for use in another process. This is a
- * lightweight operation and may be called multiple times on an allocation
- * without adverse effects.
- *
- * If a region of memory is freed with ::cuMemFree and a subsequent call
- * to ::cuMemAlloc returns memory with the same device address,
- * ::cuIpcGetMemHandle will return a unique handle for the
- * new memory.
- *
- * IPC functionality is restricted to devices with support for unified
- * addressing on Linux and Windows operating systems.
- * IPC functionality on Windows is restricted to GPUs in TCC mode
- *
- * \param pHandle - Pointer to user allocated ::CUipcMemHandle to return
- * the handle in.
- * \param dptr - Base pointer to previously allocated device memory
- *
- * \returns
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_MAP_FAILED,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa
- * ::cuMemAlloc,
- * ::cuMemFree,
- * ::cuIpcGetEventHandle,
- * ::cuIpcOpenEventHandle,
- * ::cuIpcOpenMemHandle,
- * ::cuIpcCloseMemHandle,
- * ::cudaIpcGetMemHandle
- */
-CUresult CUDAAPI cuIpcGetMemHandle(CUipcMemHandle *pHandle, CUdeviceptr dptr);
-
-/**
- * \brief Opens an interprocess memory handle exported from another process
- * and returns a device pointer usable in the local process.
- *
- * Maps memory exported from another process with ::cuIpcGetMemHandle into
- * the current device address space. For contexts on different devices
- * ::cuIpcOpenMemHandle can attempt to enable peer access between the
- * devices as if the user called ::cuCtxEnablePeerAccess. This behavior is
- * controlled by the ::CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS flag.
- * ::cuDeviceCanAccessPeer can determine if a mapping is possible.
- *
- * ::cuIpcOpenMemHandle can open handles to devices that may not be visible
- * in the process calling the API.
- *
- * Contexts that may open ::CUipcMemHandles are restricted in the following way.
- * ::CUipcMemHandles from each ::CUdevice in a given process may only be opened
- * by one ::CUcontext per ::CUdevice per other process.
- *
- * Memory returned from ::cuIpcOpenMemHandle must be freed with
- * ::cuIpcCloseMemHandle.
- *
- * Calling ::cuMemFree on an exported memory region before calling
- * ::cuIpcCloseMemHandle in the importing context will result in undefined
- * behavior.
- *
- * IPC functionality is restricted to devices with support for unified
- * addressing on Linux and Windows operating systems.
- * IPC functionality on Windows is restricted to GPUs in TCC mode
- *
- * \param pdptr - Returned device pointer
- * \param handle - ::CUipcMemHandle to open
- * \param Flags - Flags for this operation. Must be specified as ::CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS
- *
- * \returns
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_MAP_FAILED,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_TOO_MANY_PEERS,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \note No guarantees are made about the address returned in \p *pdptr.
- * In particular, multiple processes may not receive the same address for the same \p handle.
- *
- * \sa
- * ::cuMemAlloc,
- * ::cuMemFree,
- * ::cuIpcGetEventHandle,
- * ::cuIpcOpenEventHandle,
- * ::cuIpcGetMemHandle,
- * ::cuIpcCloseMemHandle,
- * ::cuCtxEnablePeerAccess,
- * ::cuDeviceCanAccessPeer,
- * ::cudaIpcOpenMemHandle
- */
-CUresult CUDAAPI cuIpcOpenMemHandle(CUdeviceptr *pdptr, CUipcMemHandle handle, unsigned int Flags);
-
-/**
- * \brief Close memory mapped with ::cuIpcOpenMemHandle
- *
- * Unmaps memory returned by ::cuIpcOpenMemHandle. The original allocation
- * in the exporting process as well as imported mappings in other processes
- * will be unaffected.
- *
- * Any resources used to enable peer access will be freed if this is the
- * last mapping using them.
- *
- * IPC functionality is restricted to devices with support for unified
- * addressing on Linux and Windows operating systems.
- * IPC functionality on Windows is restricted to GPUs in TCC mode
- *
- * \param dptr - Device pointer returned by ::cuIpcOpenMemHandle
- *
- * \returns
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_MAP_FAILED,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_INVALID_VALUE
- * \sa
- * ::cuMemAlloc,
- * ::cuMemFree,
- * ::cuIpcGetEventHandle,
- * ::cuIpcOpenEventHandle,
- * ::cuIpcGetMemHandle,
- * ::cuIpcOpenMemHandle,
- * ::cudaIpcCloseMemHandle
- */
-CUresult CUDAAPI cuIpcCloseMemHandle(CUdeviceptr dptr);
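
[Editor's note] A sketch of the two halves of the IPC memory flow described above. How the opaque handle is shipped between processes (pipe, shared file, socket, ...) is left to the application; both sides need a context on a device that supports unified addressing, and error handling is abbreviated:

    #include <cuda.h>

    /* Exporting process: allocate device memory and export a handle for it. */
    static CUresult export_buffer(size_t bytesize, CUdeviceptr *out_ptr,
                                  CUipcMemHandle *out_handle) {
      CUresult result = cuMemAlloc(out_ptr, bytesize);
      if (result != CUDA_SUCCESS) return result;
      return cuIpcGetMemHandle(out_handle, *out_ptr);
    }

    /* Importing process: map the exported allocation, use it, then close it. */
    static CUresult import_and_zero(CUipcMemHandle handle, size_t bytesize) {
      CUdeviceptr mapped = 0;
      CUresult result = cuIpcOpenMemHandle(&mapped, handle,
                                           CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS);
      if (result != CUDA_SUCCESS) return result;
      result = cuMemsetD8(mapped, 0, bytesize);
      cuIpcCloseMemHandle(mapped);
      return result;
    }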
-
-/**
- * \brief Registers an existing host memory range for use by CUDA
- *
- * Page-locks the memory range specified by \p p and \p bytesize and maps it
- * for the device(s) as specified by \p Flags. This memory range also is added
- * to the same tracking mechanism as ::cuMemHostAlloc to automatically accelerate
- * calls to functions such as ::cuMemcpyHtoD(). Since the memory can be accessed
- * directly by the device, it can be read or written with much higher bandwidth
- * than pageable memory that has not been registered. Page-locking excessive
- * amounts of memory may degrade system performance, since it reduces the amount
- * of memory available to the system for paging. As a result, this function is
- * best used sparingly to register staging areas for data exchange between
- * host and device.
- *
- * This function has limited support on Mac OS X. OS 10.7 or higher is required.
- *
- * The \p Flags parameter enables different options to be specified that
- * affect the allocation, as follows.
- *
- * - ::CU_MEMHOSTREGISTER_PORTABLE: The memory returned by this call will be
- * considered as pinned memory by all CUDA contexts, not just the one that
- * performed the allocation.
- *
- * - ::CU_MEMHOSTREGISTER_DEVICEMAP: Maps the allocation into the CUDA address
- * space. The device pointer to the memory may be obtained by calling
- * ::cuMemHostGetDevicePointer().
- *
- * - ::CU_MEMHOSTREGISTER_IOMEMORY: The pointer is treated as pointing to some
- * I/O memory space, e.g. the PCI Express resource of a 3rd party device.
- *
- * All of these flags are orthogonal to one another: a developer may page-lock
- * memory that is portable or mapped with no restrictions.
- *
- * The CUDA context must have been created with the ::CU_CTX_MAP_HOST flag in
- * order for the ::CU_MEMHOSTREGISTER_DEVICEMAP flag to have any effect.
- *
- * The ::CU_MEMHOSTREGISTER_DEVICEMAP flag may be specified on CUDA contexts for
- * devices that do not support mapped pinned memory. The failure is deferred
- * to ::cuMemHostGetDevicePointer() because the memory may be mapped into
- * other CUDA contexts via the ::CU_MEMHOSTREGISTER_PORTABLE flag.
- *
- * For devices that have a non-zero value for the device attribute
- * ::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM, the memory
- * can also be accessed from the device using the host pointer \p p.
- * The device pointer returned by ::cuMemHostGetDevicePointer() may or may not
- * match the original host pointer \p p and depends on the devices visible to the
- * application. If all devices visible to the application have a non-zero value for the
- * device attribute, the device pointer returned by ::cuMemHostGetDevicePointer()
- * will match the original pointer \p p. If any device visible to the application
- * has a zero value for the device attribute, the device pointer returned by
- * ::cuMemHostGetDevicePointer() will not match the original host pointer \p p,
- * but it will be suitable for use on all devices provided Unified Virtual Addressing
- * is enabled. In such systems, it is valid to access the memory using either pointer
- * on devices that have a non-zero value for the device attribute. Note however that
- * such devices should access the memory using only one of the two pointers and not both.
- *
- * The memory page-locked by this function must be unregistered with
- * ::cuMemHostUnregister().
- *
- * \param p - Host pointer to memory to page-lock
- * \param bytesize - Size in bytes of the address range to page-lock
- * \param Flags - Flags for allocation request
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED,
- * ::CUDA_ERROR_NOT_PERMITTED,
- * ::CUDA_ERROR_NOT_SUPPORTED
- * \notefnerr
- *
- * \sa
- * ::cuMemHostUnregister,
- * ::cuMemHostGetFlags,
- * ::cuMemHostGetDevicePointer,
- * ::cudaHostRegister
- */
-CUresult CUDAAPI cuMemHostRegister(void *p, size_t bytesize, unsigned int Flags);
-
-/**
- * \brief Unregisters a memory range that was registered with cuMemHostRegister.
- *
- * Unmaps the memory range whose base address is specified by \p p, and makes
- * it pageable again.
- *
- * The base address must be the same one specified to ::cuMemHostRegister().
- *
- * \param p - Host pointer to memory to unregister
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED
- * \notefnerr
- *
- * \sa
- * ::cuMemHostRegister,
- * ::cudaHostUnregister
- */
-CUresult CUDAAPI cuMemHostUnregister(void *p);
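
[Editor's note] A sketch of the register/copy/unregister pattern: page-lock an ordinary malloc'd buffer so the copy from it is accelerated, then undo the registration before freeing. A current context is assumed and error handling is abbreviated:

    #include <stdlib.h>
    #include <cuda.h>

    static CUresult copy_from_registered(CUdeviceptr dst, size_t bytesize) {
      void *staging = malloc(bytesize);
      if (!staging) return CUDA_ERROR_OUT_OF_MEMORY;  /* host allocation failed */

      CUresult result = cuMemHostRegister(staging, bytesize, 0);
      if (result == CUDA_SUCCESS) {
        /* ... fill `staging` with data to upload ... */
        result = cuMemcpyHtoD(dst, staging, bytesize);
        cuMemHostUnregister(staging);  /* base address must match the register call */
      }
      free(staging);
      return result;
    }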
-
-/**
- * \brief Copies memory
- *
- * Copies data between two pointers.
- * \p dst and \p src are base pointers of the destination and source, respectively.
- * \p ByteCount specifies the number of bytes to copy.
- * Note that this function infers the type of the transfer (host to host, host to
- * device, device to device, or device to host) from the pointer values. This
- * function is only allowed in contexts which support unified addressing.
- *
- * \param dst - Destination unified virtual address space pointer
- * \param src - Source unified virtual address space pointer
- * \param ByteCount - Size of memory copy in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_sync
- * \note_memcpy
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMemcpy,
- * ::cudaMemcpyToSymbol,
- * ::cudaMemcpyFromSymbol
- */
-CUresult CUDAAPI cuMemcpy(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount);
-
-/**
- * \brief Copies device memory between two contexts
- *
- * Copies from device memory in one context to device memory in another
- * context. \p dstDevice is the base device pointer of the destination memory
- * and \p dstContext is the destination context. \p srcDevice is the base
- * device pointer of the source memory and \p srcContext is the source context.
- * \p ByteCount specifies the number of bytes to copy.
- *
- * \param dstDevice - Destination device pointer
- * \param dstContext - Destination context
- * \param srcDevice - Source device pointer
- * \param srcContext - Source context
- * \param ByteCount - Size of memory copy in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_sync
- *
- * \sa ::cuMemcpyDtoD, ::cuMemcpy3DPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync,
- * ::cuMemcpy3DPeerAsync,
- * ::cudaMemcpyPeer
- */
-CUresult CUDAAPI cuMemcpyPeer(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount);
-
-/**
- * \brief Copies memory from Host to Device
- *
- * Copies from host memory to device memory. \p dstDevice and \p srcHost are
- * the base addresses of the destination and source, respectively. \p ByteCount
- * specifies the number of bytes to copy.
- *
- * \param dstDevice - Destination device pointer
- * \param srcHost - Source host pointer
- * \param ByteCount - Size of memory copy in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_sync
- * \note_memcpy
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMemcpy,
- * ::cudaMemcpyToSymbol
- */
-CUresult CUDAAPI cuMemcpyHtoD(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount);
-
-/**
- * \brief Copies memory from Device to Host
- *
- * Copies from device to host memory. \p dstHost and \p srcDevice specify the
- * base pointers of the destination and source, respectively. \p ByteCount
- * specifies the number of bytes to copy.
- *
- * \param dstHost - Destination host pointer
- * \param srcDevice - Source device pointer
- * \param ByteCount - Size of memory copy in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_sync
- * \note_memcpy
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMemcpy,
- * ::cudaMemcpyFromSymbol
- */
-CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount);
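-
-/*
- * Illustrative usage sketch (not part of the original header): upload a host
- * buffer with cuMemcpyHtoD and read it back with cuMemcpyDtoH through a
- * scratch device allocation.
- */
-static inline CUresult exampleRoundTrip(const void *srcHost, void *dstHost,
-                                        size_t size) {
-  CUdeviceptr scratch = 0;
-  CUresult result = cuMemAlloc(&scratch, size);
-  if (result != CUDA_SUCCESS) return result;
-  result = cuMemcpyHtoD(scratch, srcHost, size);   /* host -> device */
-  if (result == CUDA_SUCCESS) {
-    result = cuMemcpyDtoH(dstHost, scratch, size); /* device -> host */
-  }
-  cuMemFree(scratch);
-  return result;
-}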
-
-/**
- * \brief Copies memory from Device to Device
- *
- * Copies from device memory to device memory. \p dstDevice and \p srcDevice
- * are the base pointers of the destination and source, respectively.
- * \p ByteCount specifies the number of bytes to copy.
- *
- * \param dstDevice - Destination device pointer
- * \param srcDevice - Source device pointer
- * \param ByteCount - Size of memory copy in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_sync
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMemcpy,
- * ::cudaMemcpyToSymbol,
- * ::cudaMemcpyFromSymbol
- */
-CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount);
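-
-/*
- * Illustrative usage sketch (not part of the original header): duplicate an
- * existing device allocation with cuMemcpyDtoD.
- */
-static inline CUresult exampleCloneDeviceBuffer(CUdeviceptr src, size_t size,
-                                                CUdeviceptr *out_copy) {
-  CUresult result = cuMemAlloc(out_copy, size);
-  if (result != CUDA_SUCCESS) return result;
-  result = cuMemcpyDtoD(*out_copy, src, size);
-  if (result != CUDA_SUCCESS) {
-    cuMemFree(*out_copy);
-    *out_copy = 0;
-  }
-  return result;
-}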
-
-/**
- * \brief Copies memory from Device to Array
- *
- * Copies from device memory to a 1D CUDA array. \p dstArray and \p dstOffset
- * specify the CUDA array handle and starting index of the destination data.
- * \p srcDevice specifies the base pointer of the source. \p ByteCount
- * specifies the number of bytes to copy.
- *
- * \param dstArray - Destination array
- * \param dstOffset - Offset in bytes of destination array
- * \param srcDevice - Source device pointer
- * \param ByteCount - Size of memory copy in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_sync
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMemcpyToArray
- */
-CUresult CUDAAPI cuMemcpyDtoA(CUarray dstArray, size_t dstOffset, CUdeviceptr srcDevice, size_t ByteCount);
-
-/**
- * \brief Copies memory from Array to Device
- *
- * Copies from one 1D CUDA array to device memory. \p dstDevice specifies the
- * base pointer of the destination and must be naturally aligned with the CUDA
- * array elements. \p srcArray and \p srcOffset specify the CUDA array handle
- * and the offset in bytes into the array where the copy is to begin.
- * \p ByteCount specifies the number of bytes to copy and must be evenly
- * divisible by the array element size.
- *
- * \param dstDevice - Destination device pointer
- * \param srcArray - Source array
- * \param srcOffset - Offset in bytes of source array
- * \param ByteCount - Size of memory copy in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_sync
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMemcpyFromArray
- */
-CUresult CUDAAPI cuMemcpyAtoD(CUdeviceptr dstDevice, CUarray srcArray, size_t srcOffset, size_t ByteCount);
-
-/**
- * \brief Copies memory from Host to Array
- *
- * Copies from host memory to a 1D CUDA array. \p dstArray and \p dstOffset
- * specify the CUDA array handle and starting offset in bytes of the destination
- * data. \p srcHost specifies the base address of the source. \p ByteCount specifies
- * the number of bytes to copy.
- *
- * \param dstArray - Destination array
- * \param dstOffset - Offset in bytes of destination array
- * \param srcHost - Source host pointer
- * \param ByteCount - Size of memory copy in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_sync
- * \note_memcpy
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMemcpyToArray
- */
-CUresult CUDAAPI cuMemcpyHtoA(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount);
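-
-/*
- * Illustrative usage sketch (not part of the original header): fill the first
- * |size| bytes of an existing 1D CUDA array from a host staging buffer.
- */
-static inline CUresult exampleUploadToArray(CUarray array, const void *srcHost,
-                                            size_t size) {
-  /* dstOffset of 0: start writing at the beginning of the array. */
-  return cuMemcpyHtoA(array, 0, srcHost, size);
-}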
-
-/**
- * \brief Copies memory from Array to Host
- *
- * Copies from one 1D CUDA array to host memory. \p dstHost specifies the base
- * pointer of the destination. \p srcArray and \p srcOffset specify the CUDA
- * array handle and starting offset in bytes of the source data.
- * \p ByteCount specifies the number of bytes to copy.
- *
- * \param dstHost - Destination host pointer
- * \param srcArray - Source array
- * \param srcOffset - Offset in bytes of source array
- * \param ByteCount - Size of memory copy in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_sync
- * \note_memcpy
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMemcpyFromArray
- */
-CUresult CUDAAPI cuMemcpyAtoH(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount);
-
-/**
- * \brief Copies memory from Array to Array
- *
- * Copies from one 1D CUDA array to another. \p dstArray and \p srcArray
- * specify the handles of the destination and source CUDA arrays for the copy,
- * respectively. \p dstOffset and \p srcOffset specify the destination and
- * source offsets in bytes into the CUDA arrays. \p ByteCount is the number of
- * bytes to be copied. The two CUDA arrays need not have the same element
- * format, but the elements must be the same size; and \p ByteCount must be
- * evenly divisible by that size.
- *
- * \param dstArray - Destination array
- * \param dstOffset - Offset in bytes of destination array
- * \param srcArray - Source array
- * \param srcOffset - Offset in bytes of source array
- * \param ByteCount - Size of memory copy in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_sync
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMemcpyArrayToArray
- */
-CUresult CUDAAPI cuMemcpyAtoA(CUarray dstArray, size_t dstOffset, CUarray srcArray, size_t srcOffset, size_t ByteCount);
-
-/**
- * \brief Copies memory for 2D arrays
- *
- * Perform a 2D memory copy according to the parameters specified in \p pCopy.
- * The ::CUDA_MEMCPY2D structure is defined as:
- *
- * \code
- typedef struct CUDA_MEMCPY2D_st {
- unsigned int srcXInBytes, srcY;
- CUmemorytype srcMemoryType;
- const void *srcHost;
- CUdeviceptr srcDevice;
- CUarray srcArray;
- unsigned int srcPitch;
-
- unsigned int dstXInBytes, dstY;
- CUmemorytype dstMemoryType;
- void *dstHost;
- CUdeviceptr dstDevice;
- CUarray dstArray;
- unsigned int dstPitch;
-
- unsigned int WidthInBytes;
- unsigned int Height;
- } CUDA_MEMCPY2D;
- * \endcode
- * where:
- * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the
- * source and destination, respectively; ::CUmemorytype_enum is defined as:
- *
- * \code
- typedef enum CUmemorytype_enum {
- CU_MEMORYTYPE_HOST = 0x01,
- CU_MEMORYTYPE_DEVICE = 0x02,
- CU_MEMORYTYPE_ARRAY = 0x03,
- CU_MEMORYTYPE_UNIFIED = 0x04
- } CUmemorytype;
- * \endcode
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch
- * specify the (unified virtual address space) base address of the source data
- * and the bytes per row to apply. ::srcArray is ignored.
- * This value may be used only if unified addressing is supported in the calling
- * context.
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost and ::srcPitch
- * specify the (host) base address of the source data and the bytes per row to
- * apply. ::srcArray is ignored.
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice and ::srcPitch
- * specify the (device) base address of the source data and the bytes per row
- * to apply. ::srcArray is ignored.
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the
- * handle of the source data. ::srcHost, ::srcDevice and ::srcPitch are
- * ignored.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch
- * specify the (host) base address of the destination data and the bytes per
- * row to apply. ::dstArray is ignored.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch
- * specify the (unified virtual address space) base address of the source data
- * and the bytes per row to apply. ::dstArray is ignored.
- * This value may be used only if unified addressing is supported in the calling
- * context.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch
- * specify the (device) base address of the destination data and the bytes per
- * row to apply. ::dstArray is ignored.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the
- * handle of the destination data. ::dstHost, ::dstDevice and ::dstPitch are
- * ignored.
- *
- * - ::srcXInBytes and ::srcY specify the base address of the source data for
- * the copy.
- *
- * \par
- * For host pointers, the starting address is
- * \code
- void* Start = (void*)((char*)srcHost+srcY*srcPitch + srcXInBytes);
- * \endcode
- *
- * \par
- * For device pointers, the starting address is
- * \code
- CUdeviceptr Start = srcDevice+srcY*srcPitch+srcXInBytes;
- * \endcode
- *
- * \par
- * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array
- * element size.
- *
- * - ::dstXInBytes and ::dstY specify the base address of the destination data
- * for the copy.
- *
- * \par
- * For host pointers, the base address is
- * \code
- void* dstStart = (void*)((char*)dstHost+dstY*dstPitch + dstXInBytes);
- * \endcode
- *
- * \par
- * For device pointers, the starting address is
- * \code
- CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes;
- * \endcode
- *
- * \par
- * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array
- * element size.
- *
- * - ::WidthInBytes and ::Height specify the width (in bytes) and height of
- * the 2D copy being performed.
- * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes +
- * ::srcXInBytes, and ::dstPitch must be greater than or equal to
- *   ::WidthInBytes + ::dstXInBytes.
- *
- * \par
- * ::cuMemcpy2D() returns an error if any pitch is greater than the maximum
- * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). ::cuMemAllocPitch() passes back
- * pitches that always work with ::cuMemcpy2D(). On intra-device memory copies
- * (device to device, CUDA array to device, CUDA array to CUDA array),
- * ::cuMemcpy2D() may fail for pitches not computed by ::cuMemAllocPitch().
- * ::cuMemcpy2DUnaligned() does not have this restriction, but may run
- * significantly slower in the cases where ::cuMemcpy2D() would have returned
- * an error code.
- *
- * \param pCopy - Parameters for the memory copy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_sync
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMemcpy2D,
- * ::cudaMemcpy2DToArray,
- * ::cudaMemcpy2DFromArray
- */
-CUresult CUDAAPI cuMemcpy2D(const CUDA_MEMCPY2D *pCopy);
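-
-/*
- * Illustrative usage sketch (not part of the original header): copy a
- * |width| x |height| byte region from a tightly packed host buffer into a
- * pitched device allocation obtained from cuMemAllocPitch.
- */
-static inline CUresult exampleCopyHostToPitched(CUdeviceptr dstDevice,
-                                                size_t dstPitch,
-                                                const void *srcHost,
-                                                size_t width, size_t height) {
-  CUDA_MEMCPY2D copy = {0};  /* zero unused offsets and handles */
-  copy.srcMemoryType = CU_MEMORYTYPE_HOST;
-  copy.srcHost = srcHost;
-  copy.srcPitch = width;          /* packed rows on the host side */
-  copy.dstMemoryType = CU_MEMORYTYPE_DEVICE;
-  copy.dstDevice = dstDevice;
-  copy.dstPitch = dstPitch;       /* pitch returned by cuMemAllocPitch */
-  copy.WidthInBytes = width;
-  copy.Height = height;
-  return cuMemcpy2D(&copy);
-}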
-
-/**
- * \brief Copies memory for 2D arrays
- *
- * Perform a 2D memory copy according to the parameters specified in \p pCopy.
- * The ::CUDA_MEMCPY2D structure is defined as:
- *
- * \code
- typedef struct CUDA_MEMCPY2D_st {
- unsigned int srcXInBytes, srcY;
- CUmemorytype srcMemoryType;
- const void *srcHost;
- CUdeviceptr srcDevice;
- CUarray srcArray;
- unsigned int srcPitch;
- unsigned int dstXInBytes, dstY;
- CUmemorytype dstMemoryType;
- void *dstHost;
- CUdeviceptr dstDevice;
- CUarray dstArray;
- unsigned int dstPitch;
- unsigned int WidthInBytes;
- unsigned int Height;
- } CUDA_MEMCPY2D;
- * \endcode
- * where:
- * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the
- * source and destination, respectively; ::CUmemorytype_enum is defined as:
- *
- * \code
- typedef enum CUmemorytype_enum {
- CU_MEMORYTYPE_HOST = 0x01,
- CU_MEMORYTYPE_DEVICE = 0x02,
- CU_MEMORYTYPE_ARRAY = 0x03,
- CU_MEMORYTYPE_UNIFIED = 0x04
- } CUmemorytype;
- * \endcode
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch
- * specify the (unified virtual address space) base address of the source data
- * and the bytes per row to apply. ::srcArray is ignored.
- * This value may be used only if unified addressing is supported in the calling
- * context.
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost and ::srcPitch
- * specify the (host) base address of the source data and the bytes per row to
- * apply. ::srcArray is ignored.
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice and ::srcPitch
- * specify the (device) base address of the source data and the bytes per row
- * to apply. ::srcArray is ignored.
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the
- * handle of the source data. ::srcHost, ::srcDevice and ::srcPitch are
- * ignored.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch
- * specify the (unified virtual address space) base address of the source data
- * and the bytes per row to apply. ::dstArray is ignored.
- * This value may be used only if unified addressing is supported in the calling
- * context.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch
- * specify the (host) base address of the destination data and the bytes per
- * row to apply. ::dstArray is ignored.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch
- * specify the (device) base address of the destination data and the bytes per
- * row to apply. ::dstArray is ignored.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the
- * handle of the destination data. ::dstHost, ::dstDevice and ::dstPitch are
- * ignored.
- *
- * - ::srcXInBytes and ::srcY specify the base address of the source data for
- * the copy.
- *
- * \par
- * For host pointers, the starting address is
- * \code
- void* Start = (void*)((char*)srcHost+srcY*srcPitch + srcXInBytes);
- * \endcode
- *
- * \par
- * For device pointers, the starting address is
- * \code
- CUdeviceptr Start = srcDevice+srcY*srcPitch+srcXInBytes;
- * \endcode
- *
- * \par
- * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array
- * element size.
- *
- * - ::dstXInBytes and ::dstY specify the base address of the destination data
- * for the copy.
- *
- * \par
- * For host pointers, the base address is
- * \code
- void* dstStart = (void*)((char*)dstHost+dstY*dstPitch + dstXInBytes);
- * \endcode
- *
- * \par
- * For device pointers, the starting address is
- * \code
- CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes;
- * \endcode
- *
- * \par
- * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array
- * element size.
- *
- * - ::WidthInBytes and ::Height specify the width (in bytes) and height of
- * the 2D copy being performed.
- * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes +
- * ::srcXInBytes, and ::dstPitch must be greater than or equal to
- *   ::WidthInBytes + ::dstXInBytes.
- *
- * \par
- * ::cuMemcpy2D() returns an error if any pitch is greater than the maximum
- * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). ::cuMemAllocPitch() passes back
- * pitches that always work with ::cuMemcpy2D(). On intra-device memory copies
- * (device to device, CUDA array to device, CUDA array to CUDA array),
- * ::cuMemcpy2D() may fail for pitches not computed by ::cuMemAllocPitch().
- * ::cuMemcpy2DUnaligned() does not have this restriction, but may run
- * significantly slower in the cases where ::cuMemcpy2D() would have returned
- * an error code.
- *
- * \param pCopy - Parameters for the memory copy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_sync
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMemcpy2D,
- * ::cudaMemcpy2DToArray,
- * ::cudaMemcpy2DFromArray
- */
-CUresult CUDAAPI cuMemcpy2DUnaligned(const CUDA_MEMCPY2D *pCopy);
-
-/**
- * \brief Copies memory for 3D arrays
- *
- * Perform a 3D memory copy according to the parameters specified in
- * \p pCopy. The ::CUDA_MEMCPY3D structure is defined as:
- *
- * \code
- typedef struct CUDA_MEMCPY3D_st {
-
- unsigned int srcXInBytes, srcY, srcZ;
- unsigned int srcLOD;
- CUmemorytype srcMemoryType;
- const void *srcHost;
- CUdeviceptr srcDevice;
- CUarray srcArray;
- unsigned int srcPitch; // ignored when src is array
- unsigned int srcHeight; // ignored when src is array; may be 0 if Depth==1
-
- unsigned int dstXInBytes, dstY, dstZ;
- unsigned int dstLOD;
- CUmemorytype dstMemoryType;
- void *dstHost;
- CUdeviceptr dstDevice;
- CUarray dstArray;
- unsigned int dstPitch; // ignored when dst is array
- unsigned int dstHeight; // ignored when dst is array; may be 0 if Depth==1
-
- unsigned int WidthInBytes;
- unsigned int Height;
- unsigned int Depth;
- } CUDA_MEMCPY3D;
- * \endcode
- * where:
- * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the
- * source and destination, respectively; ::CUmemorytype_enum is defined as:
- *
- * \code
- typedef enum CUmemorytype_enum {
- CU_MEMORYTYPE_HOST = 0x01,
- CU_MEMORYTYPE_DEVICE = 0x02,
- CU_MEMORYTYPE_ARRAY = 0x03,
- CU_MEMORYTYPE_UNIFIED = 0x04
- } CUmemorytype;
- * \endcode
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch
- * specify the (unified virtual address space) base address of the source data
- * and the bytes per row to apply. ::srcArray is ignored.
- * This value may be used only if unified addressing is supported in the calling
- * context.
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost, ::srcPitch and
- * ::srcHeight specify the (host) base address of the source data, the bytes
- * per row, and the height of each 2D slice of the 3D array. ::srcArray is
- * ignored.
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice, ::srcPitch and
- * ::srcHeight specify the (device) base address of the source data, the bytes
- * per row, and the height of each 2D slice of the 3D array. ::srcArray is
- * ignored.
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the
- * handle of the source data. ::srcHost, ::srcDevice, ::srcPitch and
- * ::srcHeight are ignored.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch
- * specify the (unified virtual address space) base address of the source data
- * and the bytes per row to apply. ::dstArray is ignored.
- * This value may be used only if unified addressing is supported in the calling
- * context.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost, ::dstPitch and
- * ::dstHeight specify the (host) base address of the destination data, the
- * bytes per row, and the height of each 2D slice of the 3D array. ::dstArray
- * is ignored.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice, ::dstPitch and
- * ::dstHeight specify the (device) base address of the destination data, the
- * bytes per row, and the height of each 2D slice of the 3D array. ::dstArray
- * is ignored.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the
- * handle of the destination data. ::dstHost, ::dstDevice, ::dstPitch and
- * ::dstHeight are ignored.
- *
- * - ::srcXInBytes, ::srcY and ::srcZ specify the base address of the source
- * data for the copy.
- *
- * \par
- * For host pointers, the starting address is
- * \code
- void* Start = (void*)((char*)srcHost+(srcZ*srcHeight+srcY)*srcPitch + srcXInBytes);
- * \endcode
- *
- * \par
- * For device pointers, the starting address is
- * \code
- CUdeviceptr Start = srcDevice+(srcZ*srcHeight+srcY)*srcPitch+srcXInBytes;
- * \endcode
- *
- * \par
- * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array
- * element size.
- *
- * - ::dstXInBytes, ::dstY and ::dstZ specify the base address of the
- * destination data for the copy.
- *
- * \par
- * For host pointers, the base address is
- * \code
- void* dstStart = (void*)((char*)dstHost+(dstZ*dstHeight+dstY)*dstPitch + dstXInBytes);
- * \endcode
- *
- * \par
- * For device pointers, the starting address is
- * \code
- CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
- * \endcode
- *
- * \par
- * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array
- * element size.
- *
- * - ::WidthInBytes, ::Height and ::Depth specify the width (in bytes), height
- * and depth of the 3D copy being performed.
- * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes +
- * ::srcXInBytes, and ::dstPitch must be greater than or equal to
- *   ::WidthInBytes + ::dstXInBytes.
- * - If specified, ::srcHeight must be greater than or equal to ::Height +
- * ::srcY, and ::dstHeight must be greater than or equal to ::Height + ::dstY.
- *
- * \par
- * ::cuMemcpy3D() returns an error if any pitch is greater than the maximum
- * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH).
- *
- * The ::srcLOD and ::dstLOD members of the ::CUDA_MEMCPY3D structure must be
- * set to 0.
- *
- * \param pCopy - Parameters for the memory copy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_sync
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMemcpy3D
- */
-CUresult CUDAAPI cuMemcpy3D(const CUDA_MEMCPY3D *pCopy);
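-
-/*
- * Illustrative usage sketch (not part of the original header): copy a packed
- * |width| x |height| x |depth| byte volume from host memory into a linear
- * device allocation, treating both sides as tightly packed 3D buffers.
- */
-static inline CUresult exampleCopyVolumeHostToDevice(CUdeviceptr dstDevice,
-                                                     const void *srcHost,
-                                                     size_t width,
-                                                     size_t height,
-                                                     size_t depth) {
-  CUDA_MEMCPY3D copy = {0};  /* srcLOD/dstLOD stay 0 as required above */
-  copy.srcMemoryType = CU_MEMORYTYPE_HOST;
-  copy.srcHost = srcHost;
-  copy.srcPitch = width;     /* packed rows */
-  copy.srcHeight = height;   /* packed slices */
-  copy.dstMemoryType = CU_MEMORYTYPE_DEVICE;
-  copy.dstDevice = dstDevice;
-  copy.dstPitch = width;
-  copy.dstHeight = height;
-  copy.WidthInBytes = width;
-  copy.Height = height;
-  copy.Depth = depth;
-  return cuMemcpy3D(&copy);
-}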
-
-/**
- * \brief Copies memory between contexts
- *
- * Perform a 3D memory copy according to the parameters specified in
- * \p pCopy. See the definition of the ::CUDA_MEMCPY3D_PEER structure
- * for documentation of its parameters.
- *
- * \param pCopy - Parameters for the memory copy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_sync
- *
- * \sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync,
- * ::cuMemcpy3DPeerAsync,
- * ::cudaMemcpy3DPeer
- */
-CUresult CUDAAPI cuMemcpy3DPeer(const CUDA_MEMCPY3D_PEER *pCopy);
-
-/**
- * \brief Copies memory asynchronously
- *
- * Copies data between two pointers.
- * \p dst and \p src are base pointers of the destination and source, respectively.
- * \p ByteCount specifies the number of bytes to copy.
- * Note that this function infers the type of the transfer (host to host, host to
- * device, device to device, or device to host) from the pointer values. This
- * function is only allowed in contexts which support unified addressing.
- *
- * \param dst - Destination unified virtual address space pointer
- * \param src - Source unified virtual address space pointer
- * \param ByteCount - Size of memory copy in bytes
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- * \note_async
- * \note_null_stream
- * \note_memcpy
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemcpyAsync,
- * ::cudaMemcpyToSymbolAsync,
- * ::cudaMemcpyFromSymbolAsync
- */
-CUresult CUDAAPI cuMemcpyAsync(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount, CUstream hStream);
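-
-/*
- * Illustrative usage sketch (not part of the original header): enqueue a copy
- * on a caller-provided stream so it can overlap with other work. The caller
- * would typically follow up with a stream synchronize or an event wait.
- */
-static inline CUresult exampleAsyncCopy(CUdeviceptr dst, CUdeviceptr src,
-                                        size_t size, CUstream stream) {
-  /* Returns as soon as the copy is queued; completion is ordered with any
-     other work previously submitted to the same stream. */
-  return cuMemcpyAsync(dst, src, size, stream);
-}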
-
-/**
- * \brief Copies device memory between two contexts asynchronously.
- *
- * Copies from device memory in one context to device memory in another
- * context. \p dstDevice is the base device pointer of the destination memory
- * and \p dstContext is the destination context. \p srcDevice is the base
- * device pointer of the source memory and \p srcContext is the source context.
- * \p ByteCount specifies the number of bytes to copy.
- *
- * \param dstDevice - Destination device pointer
- * \param dstContext - Destination context
- * \param srcDevice - Source device pointer
- * \param srcContext - Source context
- * \param ByteCount - Size of memory copy in bytes
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- * \note_async
- * \note_null_stream
- *
- * \sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpy3DPeer, ::cuMemcpyDtoDAsync,
- * ::cuMemcpy3DPeerAsync,
- * ::cudaMemcpyPeerAsync
- */
-CUresult CUDAAPI cuMemcpyPeerAsync(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount, CUstream hStream);
-
-/**
- * \brief Copies memory from Host to Device
- *
- * Copies from host memory to device memory. \p dstDevice and \p srcHost are
- * the base addresses of the destination and source, respectively. \p ByteCount
- * specifies the number of bytes to copy.
- *
- * \param dstDevice - Destination device pointer
- * \param srcHost - Source host pointer
- * \param ByteCount - Size of memory copy in bytes
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- * \note_async
- * \note_null_stream
- * \note_memcpy
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemcpyAsync,
- * ::cudaMemcpyToSymbolAsync
- */
-CUresult CUDAAPI cuMemcpyHtoDAsync(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream);
-
-/**
- * \brief Copies memory from Device to Host
- *
- * Copies from device to host memory. \p dstHost and \p srcDevice specify the
- * base pointers of the destination and source, respectively. \p ByteCount
- * specifies the number of bytes to copy.
- *
- * \param dstHost - Destination host pointer
- * \param srcDevice - Source device pointer
- * \param ByteCount - Size of memory copy in bytes
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- * \note_async
- * \note_null_stream
- * \note_memcpy
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemcpyAsync,
- * ::cudaMemcpyFromSymbolAsync
- */
-CUresult CUDAAPI cuMemcpyDtoHAsync(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream);
-
-/**
- * \brief Copies memory from Device to Device
- *
- * Copies from device memory to device memory. \p dstDevice and \p srcDevice
- * are the base pointers of the destination and source, respectively.
- * \p ByteCount specifies the number of bytes to copy.
- *
- * \param dstDevice - Destination device pointer
- * \param srcDevice - Source device pointer
- * \param ByteCount - Size of memory copy in bytes
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- * \note_async
- * \note_null_stream
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemcpyAsync,
- * ::cudaMemcpyToSymbolAsync,
- * ::cudaMemcpyFromSymbolAsync
- */
-CUresult CUDAAPI cuMemcpyDtoDAsync(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream);
-
-/**
- * \brief Copies memory from Host to Array
- *
- * Copies from host memory to a 1D CUDA array. \p dstArray and \p dstOffset
- * specify the CUDA array handle and starting offset in bytes of the
- * destination data. \p srcHost specifies the base address of the source.
- * \p ByteCount specifies the number of bytes to copy.
- *
- * \param dstArray - Destination array
- * \param dstOffset - Offset in bytes of destination array
- * \param srcHost - Source host pointer
- * \param ByteCount - Size of memory copy in bytes
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- * \note_async
- * \note_null_stream
- * \note_memcpy
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemcpyToArrayAsync
- */
-CUresult CUDAAPI cuMemcpyHtoAAsync(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount, CUstream hStream);
-
-/**
- * \brief Copies memory from Array to Host
- *
- * Copies from one 1D CUDA array to host memory. \p dstHost specifies the base
- * pointer of the destination. \p srcArray and \p srcOffset specify the CUDA
- * array handle and starting offset in bytes of the source data.
- * \p ByteCount specifies the number of bytes to copy.
- *
- * \param dstHost - Destination pointer
- * \param srcArray - Source array
- * \param srcOffset - Offset in bytes of source array
- * \param ByteCount - Size of memory copy in bytes
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- * \note_async
- * \note_null_stream
- * \note_memcpy
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemcpyFromArrayAsync
- */
-CUresult CUDAAPI cuMemcpyAtoHAsync(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount, CUstream hStream);
-
-/**
- * \brief Copies memory for 2D arrays
- *
- * Perform a 2D memory copy according to the parameters specified in \p pCopy.
- * The ::CUDA_MEMCPY2D structure is defined as:
- *
- * \code
- typedef struct CUDA_MEMCPY2D_st {
- unsigned int srcXInBytes, srcY;
- CUmemorytype srcMemoryType;
- const void *srcHost;
- CUdeviceptr srcDevice;
- CUarray srcArray;
- unsigned int srcPitch;
- unsigned int dstXInBytes, dstY;
- CUmemorytype dstMemoryType;
- void *dstHost;
- CUdeviceptr dstDevice;
- CUarray dstArray;
- unsigned int dstPitch;
- unsigned int WidthInBytes;
- unsigned int Height;
- } CUDA_MEMCPY2D;
- * \endcode
- * where:
- * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the
- * source and destination, respectively; ::CUmemorytype_enum is defined as:
- *
- * \code
- typedef enum CUmemorytype_enum {
- CU_MEMORYTYPE_HOST = 0x01,
- CU_MEMORYTYPE_DEVICE = 0x02,
- CU_MEMORYTYPE_ARRAY = 0x03,
- CU_MEMORYTYPE_UNIFIED = 0x04
- } CUmemorytype;
- * \endcode
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost and ::srcPitch
- * specify the (host) base address of the source data and the bytes per row to
- * apply. ::srcArray is ignored.
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch
- * specify the (unified virtual address space) base address of the source data
- * and the bytes per row to apply. ::srcArray is ignored.
- * This value may be used only if unified addressing is supported in the calling
- * context.
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice and ::srcPitch
- * specify the (device) base address of the source data and the bytes per row
- * to apply. ::srcArray is ignored.
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the
- * handle of the source data. ::srcHost, ::srcDevice and ::srcPitch are
- * ignored.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch
- * specify the (unified virtual address space) base address of the source data
- * and the bytes per row to apply. ::dstArray is ignored.
- * This value may be used only if unified addressing is supported in the calling
- * context.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch
- * specify the (host) base address of the destination data and the bytes per
- * row to apply. ::dstArray is ignored.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch
- * specify the (device) base address of the destination data and the bytes per
- * row to apply. ::dstArray is ignored.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the
- * handle of the destination data. ::dstHost, ::dstDevice and ::dstPitch are
- * ignored.
- *
- * - ::srcXInBytes and ::srcY specify the base address of the source data for
- * the copy.
- *
- * \par
- * For host pointers, the starting address is
- * \code
- void* Start = (void*)((char*)srcHost+srcY*srcPitch + srcXInBytes);
- * \endcode
- *
- * \par
- * For device pointers, the starting address is
- * \code
- CUdeviceptr Start = srcDevice+srcY*srcPitch+srcXInBytes;
- * \endcode
- *
- * \par
- * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array
- * element size.
- *
- * - ::dstXInBytes and ::dstY specify the base address of the destination data
- * for the copy.
- *
- * \par
- * For host pointers, the base address is
- * \code
- void* dstStart = (void*)((char*)dstHost+dstY*dstPitch + dstXInBytes);
- * \endcode
- *
- * \par
- * For device pointers, the starting address is
- * \code
- CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes;
- * \endcode
- *
- * \par
- * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array
- * element size.
- *
- * - ::WidthInBytes and ::Height specify the width (in bytes) and height of
- * the 2D copy being performed.
- * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes +
- *   ::srcXInBytes, and ::dstPitch must be greater than or equal to
- *   ::WidthInBytes + ::dstXInBytes.
- *
- * \par
- * ::cuMemcpy2DAsync() returns an error if any pitch is greater than the maximum
- * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). ::cuMemAllocPitch() passes back
- * pitches that always work with ::cuMemcpy2D(). On intra-device memory copies
- * (device to device, CUDA array to device, CUDA array to CUDA array),
- * ::cuMemcpy2DAsync() may fail for pitches not computed by ::cuMemAllocPitch().
- *
- * \param pCopy - Parameters for the memory copy
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- * \note_async
- * \note_null_stream
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemcpy2DAsync,
- * ::cudaMemcpy2DToArrayAsync,
- * ::cudaMemcpy2DFromArrayAsync
- */
-CUresult CUDAAPI cuMemcpy2DAsync(const CUDA_MEMCPY2D *pCopy, CUstream hStream);
-
-/**
- * \brief Copies memory for 3D arrays
- *
- * Perform a 3D memory copy according to the parameters specified in
- * \p pCopy. The ::CUDA_MEMCPY3D structure is defined as:
- *
- * \code
- typedef struct CUDA_MEMCPY3D_st {
-
- unsigned int srcXInBytes, srcY, srcZ;
- unsigned int srcLOD;
- CUmemorytype srcMemoryType;
- const void *srcHost;
- CUdeviceptr srcDevice;
- CUarray srcArray;
- unsigned int srcPitch; // ignored when src is array
- unsigned int srcHeight; // ignored when src is array; may be 0 if Depth==1
-
- unsigned int dstXInBytes, dstY, dstZ;
- unsigned int dstLOD;
- CUmemorytype dstMemoryType;
- void *dstHost;
- CUdeviceptr dstDevice;
- CUarray dstArray;
- unsigned int dstPitch; // ignored when dst is array
- unsigned int dstHeight; // ignored when dst is array; may be 0 if Depth==1
-
- unsigned int WidthInBytes;
- unsigned int Height;
- unsigned int Depth;
- } CUDA_MEMCPY3D;
- * \endcode
- * where:
- * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the
- * source and destination, respectively; ::CUmemorytype_enum is defined as:
- *
- * \code
- typedef enum CUmemorytype_enum {
- CU_MEMORYTYPE_HOST = 0x01,
- CU_MEMORYTYPE_DEVICE = 0x02,
- CU_MEMORYTYPE_ARRAY = 0x03,
- CU_MEMORYTYPE_UNIFIED = 0x04
- } CUmemorytype;
- * \endcode
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch
- * specify the (unified virtual address space) base address of the source data
- * and the bytes per row to apply. ::srcArray is ignored.
- * This value may be used only if unified addressing is supported in the calling
- * context.
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost, ::srcPitch and
- * ::srcHeight specify the (host) base address of the source data, the bytes
- * per row, and the height of each 2D slice of the 3D array. ::srcArray is
- * ignored.
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice, ::srcPitch and
- * ::srcHeight specify the (device) base address of the source data, the bytes
- * per row, and the height of each 2D slice of the 3D array. ::srcArray is
- * ignored.
- *
- * \par
- * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the
- * handle of the source data. ::srcHost, ::srcDevice, ::srcPitch and
- * ::srcHeight are ignored.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch
- * specify the (unified virtual address space) base address of the source data
- * and the bytes per row to apply. ::dstArray is ignored.
- * This value may be used only if unified addressing is supported in the calling
- * context.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost, ::dstPitch and
- * ::dstHeight specify the (host) base address of the destination data, the
- * bytes per row, and the height of each 2D slice of the 3D array. ::dstArray
- * is ignored.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice, ::dstPitch and
- * ::dstHeight specify the (device) base address of the destination data, the
- * bytes per row, and the height of each 2D slice of the 3D array. ::dstArray
- * is ignored.
- *
- * \par
- * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the
- * handle of the destination data. ::dstHost, ::dstDevice, ::dstPitch and
- * ::dstHeight are ignored.
- *
- * - ::srcXInBytes, ::srcY and ::srcZ specify the base address of the source
- * data for the copy.
- *
- * \par
- * For host pointers, the starting address is
- * \code
- void* Start = (void*)((char*)srcHost+(srcZ*srcHeight+srcY)*srcPitch + srcXInBytes);
- * \endcode
- *
- * \par
- * For device pointers, the starting address is
- * \code
- CUdeviceptr Start = srcDevice+(srcZ*srcHeight+srcY)*srcPitch+srcXInBytes;
- * \endcode
- *
- * \par
- * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array
- * element size.
- *
- * - ::dstXInBytes, ::dstY and ::dstZ specify the base address of the
- * destination data for the copy.
- *
- * \par
- * For host pointers, the base address is
- * \code
- void* dstStart = (void*)((char*)dstHost+(dstZ*dstHeight+dstY)*dstPitch + dstXInBytes);
- * \endcode
- *
- * \par
- * For device pointers, the starting address is
- * \code
- CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes;
- * \endcode
- *
- * \par
- * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array
- * element size.
- *
- * - ::WidthInBytes, ::Height and ::Depth specify the width (in bytes), height
- * and depth of the 3D copy being performed.
- * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes +
- * ::srcXInBytes, and ::dstPitch must be greater than or equal to
- *   ::WidthInBytes + ::dstXInBytes.
- * - If specified, ::srcHeight must be greater than or equal to ::Height +
- * ::srcY, and ::dstHeight must be greater than or equal to ::Height + ::dstY.
- *
- * \par
- * ::cuMemcpy3DAsync() returns an error if any pitch is greater than the maximum
- * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH).
- *
- * The ::srcLOD and ::dstLOD members of the ::CUDA_MEMCPY3D structure must be
- * set to 0.
- *
- * \param pCopy - Parameters for the memory copy
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- * \note_async
- * \note_null_stream
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemcpy3DAsync
- */
-CUresult CUDAAPI cuMemcpy3DAsync(const CUDA_MEMCPY3D *pCopy, CUstream hStream);
-
-/**
- * \brief Copies memory between contexts asynchronously.
- *
- * Perform a 3D memory copy according to the parameters specified in
- * \p pCopy. See the definition of the ::CUDA_MEMCPY3D_PEER structure
- * for documentation of its parameters.
- *
- * \param pCopy - Parameters for the memory copy
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_async
- * \note_null_stream
- *
- * \sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync,
- * ::cuMemcpy3DPeerAsync,
- * ::cudaMemcpy3DPeerAsync
- */
-CUresult CUDAAPI cuMemcpy3DPeerAsync(const CUDA_MEMCPY3D_PEER *pCopy, CUstream hStream);
-
-/**
- * \brief Initializes device memory
- *
- * Sets the memory range of \p N 8-bit values to the specified value
- * \p uc.
- *
- * \param dstDevice - Destination device pointer
- * \param uc - Value to set
- * \param N - Number of elements
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_memset
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemset
- */
-CUresult CUDAAPI cuMemsetD8(CUdeviceptr dstDevice, unsigned char uc, size_t N);
-
-/**
- * \brief Initializes device memory
- *
- * Sets the memory range of \p N 16-bit values to the specified value
- * \p us. The \p dstDevice pointer must be two byte aligned.
- *
- * \param dstDevice - Destination device pointer
- * \param us - Value to set
- * \param N - Number of elements
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_memset
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemset
- */
-CUresult CUDAAPI cuMemsetD16(CUdeviceptr dstDevice, unsigned short us, size_t N);
-
-/**
- * \brief Initializes device memory
- *
- * Sets the memory range of \p N 32-bit values to the specified value
- * \p ui. The \p dstDevice pointer must be four byte aligned.
- *
- * \param dstDevice - Destination device pointer
- * \param ui - Value to set
- * \param N - Number of elements
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_memset
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32Async,
- * ::cudaMemset
- */
-CUresult CUDAAPI cuMemsetD32(CUdeviceptr dstDevice, unsigned int ui, size_t N);
-
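/*
 * Usage sketch for the 1D memset entry points above: clear a byte buffer with
 * cuMemsetD8 and fill a float buffer with cuMemsetD32 by reinterpreting the
 * 32-bit pattern. The helper name is illustrative. Assumes a current context;
 * error checking is omitted.
 */
#include <string.h>
#include <cuda.h>

static void fill_buffers(void) {
  CUdeviceptr bytes, floats;
  cuMemAlloc(&bytes, 1024);                   /* 1024 x 8-bit elements */
  cuMemAlloc(&floats, 1024 * sizeof(float));  /* 1024 x 32-bit elements */

  cuMemsetD8(bytes, 0x00, 1024);              /* N counts elements, not bytes */

  float one = 1.0f;
  unsigned int pattern;
  memcpy(&pattern, &one, sizeof(pattern));    /* bit pattern of 1.0f */
  cuMemsetD32(floats, pattern, 1024);         /* dstDevice must be 4-byte aligned */

  cuMemFree(bytes);
  cuMemFree(floats);
}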
-/**
- * \brief Initializes device memory
- *
- * Sets the 2D memory range of \p Width 8-bit values to the specified value
- * \p uc. \p Height specifies the number of rows to set, and \p dstPitch
- * specifies the number of bytes between each row. This function performs
- * fastest when the pitch is one that has been passed back by
- * ::cuMemAllocPitch().
- *
- * \param dstDevice - Destination device pointer
- * \param dstPitch - Pitch of destination device pointer
- * \param uc - Value to set
- * \param Width - Width of row
- * \param Height - Number of rows
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_memset
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemset2D
- */
-CUresult CUDAAPI cuMemsetD2D8(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height);
-
-/**
- * \brief Initializes device memory
- *
- * Sets the 2D memory range of \p Width 16-bit values to the specified value
- * \p us. \p Height specifies the number of rows to set, and \p dstPitch
- * specifies the number of bytes between each row. The \p dstDevice pointer
- * and \p dstPitch offset must be two byte aligned. This function performs
- * fastest when the pitch is one that has been passed back by
- * ::cuMemAllocPitch().
- *
- * \param dstDevice - Destination device pointer
- * \param dstPitch - Pitch of destination device pointer
- * \param us - Value to set
- * \param Width - Width of row
- * \param Height - Number of rows
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_memset
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemset2D
- */
-CUresult CUDAAPI cuMemsetD2D16(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height);
-
-/**
- * \brief Initializes device memory
- *
- * Sets the 2D memory range of \p Width 32-bit values to the specified value
- * \p ui. \p Height specifies the number of rows to set, and \p dstPitch
- * specifies the number of bytes between each row. The \p dstDevice pointer
- * and \p dstPitch offset must be four byte aligned. This function performs
- * fastest when the pitch is one that has been passed back by
- * ::cuMemAllocPitch().
- *
- * \param dstDevice - Destination device pointer
- * \param dstPitch - Pitch of destination device pointer
- * \param ui - Value to set
- * \param Width - Width of row
- * \param Height - Number of rows
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_memset
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemset2D
- */
-CUresult CUDAAPI cuMemsetD2D32(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height);
-
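/*
 * Usage sketch for cuMemsetD2D32: clear a pitched 2D allocation row by row.
 * The pitch returned by cuMemAllocPitch is passed straight through, which is
 * the fast path mentioned above. The helper name is illustrative; assumes a
 * current context and ignores errors.
 */
#include <cuda.h>

static void clear_image(size_t width, size_t height) {
  CUdeviceptr image;
  size_t pitch = 0;
  /* `width` is in 32-bit elements; the row width in bytes is width * 4. */
  cuMemAllocPitch(&image, &pitch, width * sizeof(unsigned int), height,
                  sizeof(unsigned int));
  /* Width is in 32-bit elements; dstPitch is the byte stride between rows. */
  cuMemsetD2D32(image, pitch, 0u, width, height);
  cuMemFree(image);
}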
-/**
- * \brief Sets device memory
- *
- * Sets the memory range of \p N 8-bit values to the specified value
- * \p uc.
- *
- * \param dstDevice - Destination device pointer
- * \param uc - Value to set
- * \param N - Number of elements
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_memset
- * \note_null_stream
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemsetAsync
- */
-CUresult CUDAAPI cuMemsetD8Async(CUdeviceptr dstDevice, unsigned char uc, size_t N, CUstream hStream);
-
-/**
- * \brief Sets device memory
- *
- * Sets the memory range of \p N 16-bit values to the specified value
- * \p us. The \p dstDevice pointer must be two byte aligned.
- *
- * \param dstDevice - Destination device pointer
- * \param us - Value to set
- * \param N - Number of elements
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_memset
- * \note_null_stream
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemsetAsync
- */
-CUresult CUDAAPI cuMemsetD16Async(CUdeviceptr dstDevice, unsigned short us, size_t N, CUstream hStream);
-
-/**
- * \brief Sets device memory
- *
- * Sets the memory range of \p N 32-bit values to the specified value
- * \p ui. The \p dstDevice pointer must be four byte aligned.
- *
- * \param dstDevice - Destination device pointer
- * \param ui - Value to set
- * \param N - Number of elements
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_memset
- * \note_null_stream
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, ::cuMemsetD32,
- * ::cudaMemsetAsync
- */
-CUresult CUDAAPI cuMemsetD32Async(CUdeviceptr dstDevice, unsigned int ui, size_t N, CUstream hStream);
-
-/**
- * \brief Sets device memory
- *
- * Sets the 2D memory range of \p Width 8-bit values to the specified value
- * \p uc. \p Height specifies the number of rows to set, and \p dstPitch
- * specifies the number of bytes between each row. This function performs
- * fastest when the pitch is one that has been passed back by
- * ::cuMemAllocPitch().
- *
- * \param dstDevice - Destination device pointer
- * \param dstPitch - Pitch of destination device pointer
- * \param uc - Value to set
- * \param Width - Width of row
- * \param Height - Number of rows
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_memset
- * \note_null_stream
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemset2DAsync
- */
-CUresult CUDAAPI cuMemsetD2D8Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, CUstream hStream);
-
-/**
- * \brief Sets device memory
- *
- * Sets the 2D memory range of \p Width 16-bit values to the specified value
- * \p us. \p Height specifies the number of rows to set, and \p dstPitch
- * specifies the number of bytes between each row. The \p dstDevice pointer
- * and \p dstPitch offset must be two byte aligned. This function performs
- * fastest when the pitch is one that has been passed back by
- * ::cuMemAllocPitch().
- *
- * \param dstDevice - Destination device pointer
- * \param dstPitch - Pitch of destination device pointer
- * \param us - Value to set
- * \param Width - Width of row
- * \param Height - Number of rows
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_memset
- * \note_null_stream
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D32, ::cuMemsetD2D32Async,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemset2DAsync
- */
-CUresult CUDAAPI cuMemsetD2D16Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, CUstream hStream);
-
-/**
- * \brief Sets device memory
- *
- * Sets the 2D memory range of \p Width 32-bit values to the specified value
- * \p ui. \p Height specifies the number of rows to set, and \p dstPitch
- * specifies the number of bytes between each row. The \p dstDevice pointer
- * and \p dstPitch offset must be four byte aligned. This function performs
- * fastest when the pitch is one that has been passed back by
- * ::cuMemAllocPitch().
- *
- * \param dstDevice - Destination device pointer
- * \param dstPitch - Pitch of destination device pointer
- * \param ui - Value to set
- * \param Width - Width of row
- * \param Height - Number of rows
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- * \note_memset
- * \note_null_stream
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,
- * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32,
- * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,
- * ::cuMemsetD32, ::cuMemsetD32Async,
- * ::cudaMemset2DAsync
- */
-CUresult CUDAAPI cuMemsetD2D32Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, CUstream hStream);
-
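/*
 * Usage sketch for the stream-ordered memsets: enqueue cuMemsetD32Async on a
 * stream so the fill is ordered with other work on that stream instead of
 * blocking the host. The helper name is illustrative; assumes a current
 * context and an existing device allocation; error checking omitted.
 */
#include <cuda.h>

static void fill_async(CUdeviceptr dst, size_t count) {
  CUstream stream;
  cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING);
  cuMemsetD32Async(dst, 0xDEADBEEF, count, stream);  /* ordered on `stream` */
  /* ... enqueue kernels or copies that consume `dst` on the same stream ... */
  cuStreamSynchronize(stream);
  cuStreamDestroy(stream);
}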
-/**
- * \brief Creates a 1D or 2D CUDA array
- *
- * Creates a CUDA array according to the ::CUDA_ARRAY_DESCRIPTOR structure
- * \p pAllocateArray and returns a handle to the new CUDA array in \p *pHandle.
- * The ::CUDA_ARRAY_DESCRIPTOR is defined as:
- *
- * \code
- typedef struct {
- unsigned int Width;
- unsigned int Height;
- CUarray_format Format;
- unsigned int NumChannels;
- } CUDA_ARRAY_DESCRIPTOR;
- * \endcode
- * where:
- *
 - * - \p Width and \p Height are the width and height of the CUDA array (in
- * elements); the CUDA array is one-dimensional if height is 0, two-dimensional
- * otherwise;
- * - ::Format specifies the format of the elements; ::CUarray_format is
- * defined as:
- * \code
- typedef enum CUarray_format_enum {
- CU_AD_FORMAT_UNSIGNED_INT8 = 0x01,
- CU_AD_FORMAT_UNSIGNED_INT16 = 0x02,
- CU_AD_FORMAT_UNSIGNED_INT32 = 0x03,
- CU_AD_FORMAT_SIGNED_INT8 = 0x08,
- CU_AD_FORMAT_SIGNED_INT16 = 0x09,
- CU_AD_FORMAT_SIGNED_INT32 = 0x0a,
- CU_AD_FORMAT_HALF = 0x10,
- CU_AD_FORMAT_FLOAT = 0x20
- } CUarray_format;
- * \endcode
- * - \p NumChannels specifies the number of packed components per CUDA array
- * element; it may be 1, 2, or 4;
- *
- * Here are examples of CUDA array descriptions:
- *
- * Description for a CUDA array of 2048 floats:
- * \code
- CUDA_ARRAY_DESCRIPTOR desc;
- desc.Format = CU_AD_FORMAT_FLOAT;
- desc.NumChannels = 1;
- desc.Width = 2048;
- desc.Height = 1;
- * \endcode
- *
- * Description for a 64 x 64 CUDA array of floats:
- * \code
- CUDA_ARRAY_DESCRIPTOR desc;
- desc.Format = CU_AD_FORMAT_FLOAT;
- desc.NumChannels = 1;
- desc.Width = 64;
- desc.Height = 64;
- * \endcode
- *
- * Description for a \p width x \p height CUDA array of 64-bit, 4x16-bit
- * float16's:
- * \code
- CUDA_ARRAY_DESCRIPTOR desc;
- desc.Format = CU_AD_FORMAT_HALF;
- desc.NumChannels = 4;
- desc.Width = width;
- desc.Height = height;
- * \endcode
- *
- * Description for a \p width x \p height CUDA array of 16-bit elements, each
- * of which is two 8-bit unsigned chars:
- * \code
- CUDA_ARRAY_DESCRIPTOR desc;
- desc.Format = CU_AD_FORMAT_UNSIGNED_INT8;
- desc.NumChannels = 2;
- desc.Width = width;
- desc.Height = height;
- * \endcode
- *
- * \param pHandle - Returned array
- * \param pAllocateArray - Array descriptor
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_UNKNOWN
- * \notefnerr
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMallocArray
- */
-CUresult CUDAAPI cuArrayCreate(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR *pAllocateArray);
-
-/**
- * \brief Get a 1D or 2D CUDA array descriptor
- *
- * Returns in \p *pArrayDescriptor a descriptor containing information on the
- * format and dimensions of the CUDA array \p hArray. It is useful for
- * subroutines that have been passed a CUDA array, but need to know the CUDA
- * array parameters for validation or other purposes.
- *
- * \param pArrayDescriptor - Returned array descriptor
- * \param hArray - Array to get descriptor of
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaArrayGetInfo
- */
-CUresult CUDAAPI cuArrayGetDescriptor(CUDA_ARRAY_DESCRIPTOR *pArrayDescriptor, CUarray hArray);
-
-/**
- * \brief Destroys a CUDA array
- *
- * Destroys the CUDA array \p hArray.
- *
- * \param hArray - Array to destroy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_ARRAY_IS_MAPPED,
- * ::CUDA_ERROR_CONTEXT_IS_DESTROYED
- * \notefnerr
- *
- * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaFreeArray
- */
-CUresult CUDAAPI cuArrayDestroy(CUarray hArray);
-
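/*
 * Usage sketch tying cuArrayCreate / cuArrayGetDescriptor / cuArrayDestroy
 * together: create a 64x64 single-channel float array, upload host data with
 * cuMemcpy2D, and query the descriptor back. The helper name is illustrative.
 * Assumes a current context and a tightly packed 64x64 float host buffer;
 * error checking is omitted.
 */
#include <string.h>
#include <cuda.h>

static void roundtrip_array(const float host[64 * 64]) {
  CUDA_ARRAY_DESCRIPTOR desc;
  desc.Format = CU_AD_FORMAT_FLOAT;
  desc.NumChannels = 1;
  desc.Width = 64;
  desc.Height = 64;

  CUarray array;
  cuArrayCreate(&array, &desc);

  CUDA_MEMCPY2D copy;
  memset(&copy, 0, sizeof(copy));
  copy.srcMemoryType = CU_MEMORYTYPE_HOST;
  copy.srcHost = host;
  copy.srcPitch = 64 * sizeof(float);
  copy.dstMemoryType = CU_MEMORYTYPE_ARRAY;
  copy.dstArray = array;
  copy.WidthInBytes = 64 * sizeof(float);
  copy.Height = 64;
  cuMemcpy2D(&copy);

  CUDA_ARRAY_DESCRIPTOR queried;
  cuArrayGetDescriptor(&queried, array);  /* queried.Width == 64, etc. */

  cuArrayDestroy(array);
}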
-/**
- * \brief Creates a 3D CUDA array
- *
- * Creates a CUDA array according to the ::CUDA_ARRAY3D_DESCRIPTOR structure
- * \p pAllocateArray and returns a handle to the new CUDA array in \p *pHandle.
- * The ::CUDA_ARRAY3D_DESCRIPTOR is defined as:
- *
- * \code
- typedef struct {
- unsigned int Width;
- unsigned int Height;
- unsigned int Depth;
- CUarray_format Format;
- unsigned int NumChannels;
- unsigned int Flags;
- } CUDA_ARRAY3D_DESCRIPTOR;
- * \endcode
- * where:
- *
- * - \p Width, \p Height, and \p Depth are the width, height, and depth of the
- * CUDA array (in elements); the following types of CUDA arrays can be allocated:
- * - A 1D array is allocated if \p Height and \p Depth extents are both zero.
- * - A 2D array is allocated if only \p Depth extent is zero.
- * - A 3D array is allocated if all three extents are non-zero.
- * - A 1D layered CUDA array is allocated if only \p Height is zero and the
- * ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 1D array. The number
- * of layers is determined by the depth extent.
- * - A 2D layered CUDA array is allocated if all three extents are non-zero and
- * the ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 2D array. The number
- * of layers is determined by the depth extent.
- * - A cubemap CUDA array is allocated if all three extents are non-zero and the
- * ::CUDA_ARRAY3D_CUBEMAP flag is set. \p Width must be equal to \p Height, and
- * \p Depth must be six. A cubemap is a special type of 2D layered CUDA array,
- * where the six layers represent the six faces of a cube. The order of the six
- * layers in memory is the same as that listed in ::CUarray_cubemap_face.
- * - A cubemap layered CUDA array is allocated if all three extents are non-zero,
- * and both, ::CUDA_ARRAY3D_CUBEMAP and ::CUDA_ARRAY3D_LAYERED flags are set.
- * \p Width must be equal to \p Height, and \p Depth must be a multiple of six.
- * A cubemap layered CUDA array is a special type of 2D layered CUDA array that
- * consists of a collection of cubemaps. The first six layers represent the first
- * cubemap, the next six layers form the second cubemap, and so on.
- *
- * - ::Format specifies the format of the elements; ::CUarray_format is
- * defined as:
- * \code
- typedef enum CUarray_format_enum {
- CU_AD_FORMAT_UNSIGNED_INT8 = 0x01,
- CU_AD_FORMAT_UNSIGNED_INT16 = 0x02,
- CU_AD_FORMAT_UNSIGNED_INT32 = 0x03,
- CU_AD_FORMAT_SIGNED_INT8 = 0x08,
- CU_AD_FORMAT_SIGNED_INT16 = 0x09,
- CU_AD_FORMAT_SIGNED_INT32 = 0x0a,
- CU_AD_FORMAT_HALF = 0x10,
- CU_AD_FORMAT_FLOAT = 0x20
- } CUarray_format;
- * \endcode
- *
- * - \p NumChannels specifies the number of packed components per CUDA array
- * element; it may be 1, 2, or 4;
- *
- * - ::Flags may be set to
- * - ::CUDA_ARRAY3D_LAYERED to enable creation of layered CUDA arrays. If this flag is set,
- * \p Depth specifies the number of layers, not the depth of a 3D array.
- * - ::CUDA_ARRAY3D_SURFACE_LDST to enable surface references to be bound to the CUDA array.
- * If this flag is not set, ::cuSurfRefSetArray will fail when attempting to bind the CUDA array
- * to a surface reference.
- * - ::CUDA_ARRAY3D_CUBEMAP to enable creation of cubemaps. If this flag is set, \p Width must be
- * equal to \p Height, and \p Depth must be six. If the ::CUDA_ARRAY3D_LAYERED flag is also set,
- * then \p Depth must be a multiple of six.
- * - ::CUDA_ARRAY3D_TEXTURE_GATHER to indicate that the CUDA array will be used for texture gather.
- * Texture gather can only be performed on 2D CUDA arrays.
- *
- * \p Width, \p Height and \p Depth must meet certain size requirements as listed in the following table.
- * All values are specified in elements. Note that for brevity's sake, the full name of the device attribute
 - * is not specified. For example, TEXTURE1D_WIDTH refers to the device attribute
- * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH.
- *
- * Note that 2D CUDA arrays have different size requirements if the ::CUDA_ARRAY3D_TEXTURE_GATHER flag
- * is set. \p Width and \p Height must not be greater than ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH
- * and ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT respectively, in that case.
- *
- * <table>
- * <tr><td><b>CUDA array type</b></td>
- * <td><b>Valid extents that must always be met<br>{(width range in elements), (height range),
- * (depth range)}</b></td>
- * <td><b>Valid extents with CUDA_ARRAY3D_SURFACE_LDST set<br>
- * {(width range in elements), (height range), (depth range)}</b></td></tr>
- * <tr><td>1D</td>
- * <td><small>{ (1,TEXTURE1D_WIDTH), 0, 0 }</small></td>
- * <td><small>{ (1,SURFACE1D_WIDTH), 0, 0 }</small></td></tr>
- * <tr><td>2D</td>
- * <td><small>{ (1,TEXTURE2D_WIDTH), (1,TEXTURE2D_HEIGHT), 0 }</small></td>
- * <td><small>{ (1,SURFACE2D_WIDTH), (1,SURFACE2D_HEIGHT), 0 }</small></td></tr>
- * <tr><td>3D</td>
- * <td><small>{ (1,TEXTURE3D_WIDTH), (1,TEXTURE3D_HEIGHT), (1,TEXTURE3D_DEPTH) }
- * <br>OR<br>{ (1,TEXTURE3D_WIDTH_ALTERNATE), (1,TEXTURE3D_HEIGHT_ALTERNATE),
- * (1,TEXTURE3D_DEPTH_ALTERNATE) }</small></td>
- * <td><small>{ (1,SURFACE3D_WIDTH), (1,SURFACE3D_HEIGHT),
- * (1,SURFACE3D_DEPTH) }</small></td></tr>
- * <tr><td>1D Layered</td>
- * <td><small>{ (1,TEXTURE1D_LAYERED_WIDTH), 0,
- * (1,TEXTURE1D_LAYERED_LAYERS) }</small></td>
- * <td><small>{ (1,SURFACE1D_LAYERED_WIDTH), 0,
- * (1,SURFACE1D_LAYERED_LAYERS) }</small></td></tr>
- * <tr><td>2D Layered</td>
- * <td><small>{ (1,TEXTURE2D_LAYERED_WIDTH), (1,TEXTURE2D_LAYERED_HEIGHT),
- * (1,TEXTURE2D_LAYERED_LAYERS) }</small></td>
- * <td><small>{ (1,SURFACE2D_LAYERED_WIDTH), (1,SURFACE2D_LAYERED_HEIGHT),
- * (1,SURFACE2D_LAYERED_LAYERS) }</small></td></tr>
- * <tr><td>Cubemap</td>
- * <td><small>{ (1,TEXTURECUBEMAP_WIDTH), (1,TEXTURECUBEMAP_WIDTH), 6 }</small></td>
- * <td><small>{ (1,SURFACECUBEMAP_WIDTH),
- * (1,SURFACECUBEMAP_WIDTH), 6 }</small></td></tr>
- * <tr><td>Cubemap Layered</td>
- * <td><small>{ (1,TEXTURECUBEMAP_LAYERED_WIDTH), (1,TEXTURECUBEMAP_LAYERED_WIDTH),
- * (1,TEXTURECUBEMAP_LAYERED_LAYERS) }</small></td>
- * <td><small>{ (1,SURFACECUBEMAP_LAYERED_WIDTH), (1,SURFACECUBEMAP_LAYERED_WIDTH),
- * (1,SURFACECUBEMAP_LAYERED_LAYERS) }</small></td></tr>
- * </table>
- *
- * Here are examples of CUDA array descriptions:
- *
- * Description for a CUDA array of 2048 floats:
- * \code
- CUDA_ARRAY3D_DESCRIPTOR desc;
- desc.Format = CU_AD_FORMAT_FLOAT;
- desc.NumChannels = 1;
- desc.Width = 2048;
- desc.Height = 0;
- desc.Depth = 0;
- * \endcode
- *
- * Description for a 64 x 64 CUDA array of floats:
- * \code
- CUDA_ARRAY3D_DESCRIPTOR desc;
- desc.Format = CU_AD_FORMAT_FLOAT;
- desc.NumChannels = 1;
- desc.Width = 64;
- desc.Height = 64;
- desc.Depth = 0;
- * \endcode
- *
- * Description for a \p width x \p height x \p depth CUDA array of 64-bit,
- * 4x16-bit float16's:
- * \code
- CUDA_ARRAY3D_DESCRIPTOR desc;
- desc.Format = CU_AD_FORMAT_HALF;
- desc.NumChannels = 4;
- desc.Width = width;
- desc.Height = height;
- desc.Depth = depth;
- * \endcode
- *
- * \param pHandle - Returned array
- * \param pAllocateArray - 3D array descriptor
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_UNKNOWN
- * \notefnerr
- *
- * \sa ::cuArray3DGetDescriptor, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaMalloc3DArray
- */
-CUresult CUDAAPI cuArray3DCreate(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR *pAllocateArray);
-
-/**
- * \brief Get a 3D CUDA array descriptor
- *
- * Returns in \p *pArrayDescriptor a descriptor containing information on the
- * format and dimensions of the CUDA array \p hArray. It is useful for
- * subroutines that have been passed a CUDA array, but need to know the CUDA
- * array parameters for validation or other purposes.
- *
- * This function may be called on 1D and 2D arrays, in which case the \p Height
- * and/or \p Depth members of the descriptor struct will be set to 0.
- *
- * \param pArrayDescriptor - Returned 3D array descriptor
- * \param hArray - 3D array to get descriptor of
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_CONTEXT_IS_DESTROYED
- * \notefnerr
- *
- * \sa ::cuArray3DCreate, ::cuArrayCreate,
- * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
- * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
- * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
- * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
- * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
- * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
- * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
- * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
- * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
- * ::cudaArrayGetInfo
- */
-CUresult CUDAAPI cuArray3DGetDescriptor(CUDA_ARRAY3D_DESCRIPTOR *pArrayDescriptor, CUarray hArray);
-
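/*
 * Usage sketch for cuArray3DCreate / cuArray3DGetDescriptor: allocate a
 * width x height x depth float volume that surfaces may be bound to, then read
 * the descriptor back. The helper name is illustrative; assumes a current
 * context and omits error checking.
 */
#include <string.h>
#include <cuda.h>

static CUarray create_volume(unsigned int width, unsigned int height,
                             unsigned int depth) {
  CUDA_ARRAY3D_DESCRIPTOR desc;
  memset(&desc, 0, sizeof(desc));
  desc.Format = CU_AD_FORMAT_FLOAT;
  desc.NumChannels = 1;
  desc.Width = width;
  desc.Height = height;
  desc.Depth = depth;                       /* all three non-zero => 3D array */
  desc.Flags = CUDA_ARRAY3D_SURFACE_LDST;   /* allow surface load/store */

  CUarray volume;
  cuArray3DCreate(&volume, &desc);

  CUDA_ARRAY3D_DESCRIPTOR queried;
  cuArray3DGetDescriptor(&queried, volume); /* reports the extents above */
  return volume;
}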
-/**
- * \brief Creates a CUDA mipmapped array
- *
- * Creates a CUDA mipmapped array according to the ::CUDA_ARRAY3D_DESCRIPTOR structure
- * \p pMipmappedArrayDesc and returns a handle to the new CUDA mipmapped array in \p *pHandle.
- * \p numMipmapLevels specifies the number of mipmap levels to be allocated. This value is
- * clamped to the range [1, 1 + floor(log2(max(width, height, depth)))].
- *
- * The ::CUDA_ARRAY3D_DESCRIPTOR is defined as:
- *
- * \code
- typedef struct {
- unsigned int Width;
- unsigned int Height;
- unsigned int Depth;
- CUarray_format Format;
- unsigned int NumChannels;
- unsigned int Flags;
- } CUDA_ARRAY3D_DESCRIPTOR;
- * \endcode
- * where:
- *
- * - \p Width, \p Height, and \p Depth are the width, height, and depth of the
- * CUDA array (in elements); the following types of CUDA arrays can be allocated:
- * - A 1D mipmapped array is allocated if \p Height and \p Depth extents are both zero.
- * - A 2D mipmapped array is allocated if only \p Depth extent is zero.
- * - A 3D mipmapped array is allocated if all three extents are non-zero.
- * - A 1D layered CUDA mipmapped array is allocated if only \p Height is zero and the
- * ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 1D array. The number
- * of layers is determined by the depth extent.
- * - A 2D layered CUDA mipmapped array is allocated if all three extents are non-zero and
- * the ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 2D array. The number
- * of layers is determined by the depth extent.
- * - A cubemap CUDA mipmapped array is allocated if all three extents are non-zero and the
- * ::CUDA_ARRAY3D_CUBEMAP flag is set. \p Width must be equal to \p Height, and
- * \p Depth must be six. A cubemap is a special type of 2D layered CUDA array,
- * where the six layers represent the six faces of a cube. The order of the six
- * layers in memory is the same as that listed in ::CUarray_cubemap_face.
- * - A cubemap layered CUDA mipmapped array is allocated if all three extents are non-zero,
- * and both, ::CUDA_ARRAY3D_CUBEMAP and ::CUDA_ARRAY3D_LAYERED flags are set.
- * \p Width must be equal to \p Height, and \p Depth must be a multiple of six.
- * A cubemap layered CUDA array is a special type of 2D layered CUDA array that
- * consists of a collection of cubemaps. The first six layers represent the first
- * cubemap, the next six layers form the second cubemap, and so on.
- *
- * - ::Format specifies the format of the elements; ::CUarray_format is
- * defined as:
- * \code
- typedef enum CUarray_format_enum {
- CU_AD_FORMAT_UNSIGNED_INT8 = 0x01,
- CU_AD_FORMAT_UNSIGNED_INT16 = 0x02,
- CU_AD_FORMAT_UNSIGNED_INT32 = 0x03,
- CU_AD_FORMAT_SIGNED_INT8 = 0x08,
- CU_AD_FORMAT_SIGNED_INT16 = 0x09,
- CU_AD_FORMAT_SIGNED_INT32 = 0x0a,
- CU_AD_FORMAT_HALF = 0x10,
- CU_AD_FORMAT_FLOAT = 0x20
- } CUarray_format;
- * \endcode
- *
- * - \p NumChannels specifies the number of packed components per CUDA array
- * element; it may be 1, 2, or 4;
- *
- * - ::Flags may be set to
- * - ::CUDA_ARRAY3D_LAYERED to enable creation of layered CUDA mipmapped arrays. If this flag is set,
- * \p Depth specifies the number of layers, not the depth of a 3D array.
- * - ::CUDA_ARRAY3D_SURFACE_LDST to enable surface references to be bound to individual mipmap levels of
- * the CUDA mipmapped array. If this flag is not set, ::cuSurfRefSetArray will fail when attempting to
- * bind a mipmap level of the CUDA mipmapped array to a surface reference.
- * - ::CUDA_ARRAY3D_CUBEMAP to enable creation of mipmapped cubemaps. If this flag is set, \p Width must be
- * equal to \p Height, and \p Depth must be six. If the ::CUDA_ARRAY3D_LAYERED flag is also set,
- * then \p Depth must be a multiple of six.
- * - ::CUDA_ARRAY3D_TEXTURE_GATHER to indicate that the CUDA mipmapped array will be used for texture gather.
- * Texture gather can only be performed on 2D CUDA mipmapped arrays.
- *
- * \p Width, \p Height and \p Depth must meet certain size requirements as listed in the following table.
- * All values are specified in elements. Note that for brevity's sake, the full name of the device attribute
 - * is not specified. For example, TEXTURE1D_MIPMAPPED_WIDTH refers to the device attribute
- * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH.
- *
- * <table>
- * <tr><td><b>CUDA array type</b></td>
- * <td><b>Valid extents that must always be met<br>{(width range in elements), (height range),
- * (depth range)}</b></td>
- * <td><b>Valid extents with CUDA_ARRAY3D_SURFACE_LDST set<br>
- * {(width range in elements), (height range), (depth range)}</b></td></tr>
- * <tr><td>1D</td>
- * <td><small>{ (1,TEXTURE1D_MIPMAPPED_WIDTH), 0, 0 }</small></td>
- * <td><small>{ (1,SURFACE1D_WIDTH), 0, 0 }</small></td></tr>
- * <tr><td>2D</td>
- * <td><small>{ (1,TEXTURE2D_MIPMAPPED_WIDTH), (1,TEXTURE2D_MIPMAPPED_HEIGHT), 0 }</small></td>
- * <td><small>{ (1,SURFACE2D_WIDTH), (1,SURFACE2D_HEIGHT), 0 }</small></td></tr>
- * <tr><td>3D</td>
- * <td><small>{ (1,TEXTURE3D_WIDTH), (1,TEXTURE3D_HEIGHT), (1,TEXTURE3D_DEPTH) }
- * <br>OR<br>{ (1,TEXTURE3D_WIDTH_ALTERNATE), (1,TEXTURE3D_HEIGHT_ALTERNATE),
- * (1,TEXTURE3D_DEPTH_ALTERNATE) }</small></td>
- * <td><small>{ (1,SURFACE3D_WIDTH), (1,SURFACE3D_HEIGHT),
- * (1,SURFACE3D_DEPTH) }</small></td></tr>
- * <tr><td>1D Layered</td>
- * <td><small>{ (1,TEXTURE1D_LAYERED_WIDTH), 0,
- * (1,TEXTURE1D_LAYERED_LAYERS) }</small></td>
- * <td><small>{ (1,SURFACE1D_LAYERED_WIDTH), 0,
- * (1,SURFACE1D_LAYERED_LAYERS) }</small></td></tr>
- * <tr><td>2D Layered</td>
- * <td><small>{ (1,TEXTURE2D_LAYERED_WIDTH), (1,TEXTURE2D_LAYERED_HEIGHT),
- * (1,TEXTURE2D_LAYERED_LAYERS) }</small></td>
- * <td><small>{ (1,SURFACE2D_LAYERED_WIDTH), (1,SURFACE2D_LAYERED_HEIGHT),
- * (1,SURFACE2D_LAYERED_LAYERS) }</small></td></tr>
- * <tr><td>Cubemap</td>
- * <td><small>{ (1,TEXTURECUBEMAP_WIDTH), (1,TEXTURECUBEMAP_WIDTH), 6 }</small></td>
- * <td><small>{ (1,SURFACECUBEMAP_WIDTH),
- * (1,SURFACECUBEMAP_WIDTH), 6 }</small></td></tr>
- * <tr><td>Cubemap Layered</td>
- * <td><small>{ (1,TEXTURECUBEMAP_LAYERED_WIDTH), (1,TEXTURECUBEMAP_LAYERED_WIDTH),
- * (1,TEXTURECUBEMAP_LAYERED_LAYERS) }</small></td>
- * <td><small>{ (1,SURFACECUBEMAP_LAYERED_WIDTH), (1,SURFACECUBEMAP_LAYERED_WIDTH),
- * (1,SURFACECUBEMAP_LAYERED_LAYERS) }</small></td></tr>
- * </table>
- *
- *
- * \param pHandle - Returned mipmapped array
- * \param pMipmappedArrayDesc - mipmapped array descriptor
- * \param numMipmapLevels - Number of mipmap levels
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY,
- * ::CUDA_ERROR_UNKNOWN
- * \notefnerr
- *
- * \sa
- * ::cuMipmappedArrayDestroy,
- * ::cuMipmappedArrayGetLevel,
- * ::cuArrayCreate,
- * ::cudaMallocMipmappedArray
- */
-CUresult CUDAAPI cuMipmappedArrayCreate(CUmipmappedArray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR *pMipmappedArrayDesc, unsigned int numMipmapLevels);
-
-/**
- * \brief Gets a mipmap level of a CUDA mipmapped array
- *
- * Returns in \p *pLevelArray a CUDA array that represents a single mipmap level
- * of the CUDA mipmapped array \p hMipmappedArray.
- *
- * If \p level is greater than the maximum number of levels in this mipmapped array,
- * ::CUDA_ERROR_INVALID_VALUE is returned.
- *
- * \param pLevelArray - Returned mipmap level CUDA array
- * \param hMipmappedArray - CUDA mipmapped array
- * \param level - Mipmap level
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- *
- * \sa
- * ::cuMipmappedArrayCreate,
- * ::cuMipmappedArrayDestroy,
- * ::cuArrayCreate,
- * ::cudaGetMipmappedArrayLevel
- */
-CUresult CUDAAPI cuMipmappedArrayGetLevel(CUarray *pLevelArray, CUmipmappedArray hMipmappedArray, unsigned int level);
-
-/**
- * \brief Destroys a CUDA mipmapped array
- *
- * Destroys the CUDA mipmapped array \p hMipmappedArray.
- *
- * \param hMipmappedArray - Mipmapped array to destroy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_ARRAY_IS_MAPPED,
- * ::CUDA_ERROR_CONTEXT_IS_DESTROYED
- * \notefnerr
- *
- * \sa
- * ::cuMipmappedArrayCreate,
- * ::cuMipmappedArrayGetLevel,
- * ::cuArrayCreate,
- * ::cudaFreeMipmappedArray
- */
-CUresult CUDAAPI cuMipmappedArrayDestroy(CUmipmappedArray hMipmappedArray);
-
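/*
 * Usage sketch for the mipmapped-array entry points: create a mipmap chain for
 * a 2D float texture, fetch level 0 as a plain CUarray (e.g. to fill it with a
 * copy), and destroy the chain, which owns the per-level arrays. The level
 * count is clamped as described above. The helper name is illustrative;
 * assumes a current context and omits error checking.
 */
#include <string.h>
#include <cuda.h>

static void mipmap_example(unsigned int width, unsigned int height) {
  CUDA_ARRAY3D_DESCRIPTOR desc;
  memset(&desc, 0, sizeof(desc));
  desc.Format = CU_AD_FORMAT_FLOAT;
  desc.NumChannels = 1;
  desc.Width = width;
  desc.Height = height;
  desc.Depth = 0;   /* Depth of zero with non-zero Height => 2D mipmapped array */

  CUmipmappedArray mipmaps;
  cuMipmappedArrayCreate(&mipmaps, &desc, 8 /* requested levels, clamped */);

  CUarray level0;
  cuMipmappedArrayGetLevel(&level0, mipmaps, 0);
  /* level0 can be used like any CUarray (copies, texture binding, ...). */

  cuMipmappedArrayDestroy(mipmaps);
}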
-/** @} */ /* END CUDA_MEM */
-
-/**
- * \defgroup CUDA_VA Virtual Memory Management
- *
- * ___MANBRIEF___ virtual memory management functions of the low-level CUDA driver API
- * (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the virtual memory management functions of the low-level CUDA
- * driver application programming interface.
- *
- * @{
- */
-
-/**
-* \brief Allocate an address range reservation.
-*
-* Reserves a virtual address range based on the given parameters, giving
-* the starting address of the range in \p ptr. This API requires a system that
-* supports UVA. The size and address parameters must be a multiple of the
-* host page size and the alignment must be a power of two or zero for default
-* alignment.
-*
-* \param[out] ptr - Resulting pointer to start of virtual address range allocated
-* \param[in] size - Size of the reserved virtual address range requested
-* \param[in] alignment - Alignment of the reserved virtual address range requested
-* \param[in] addr - Fixed starting address range requested
-* \param[in] flags - Currently unused, must be zero
-* \return
-* ::CUDA_SUCCESS,
-* ::CUDA_ERROR_INVALID_VALUE,
-* ::CUDA_ERROR_OUT_OF_MEMORY,
-* ::CUDA_ERROR_NOT_INITIALIZED,
-* ::CUDA_ERROR_DEINITIALIZED,
-* ::CUDA_ERROR_NOT_PERMITTED,
-* ::CUDA_ERROR_NOT_SUPPORTED
-*
-* \sa ::cuMemAddressFree
-*/
-CUresult CUDAAPI cuMemAddressReserve(CUdeviceptr *ptr, size_t size, size_t alignment, CUdeviceptr addr, unsigned long long flags);
-
-/**
-* \brief Free an address range reservation.
-*
-* Frees a virtual address range reserved by cuMemAddressReserve. The size
-* must match what was given to ::cuMemAddressReserve and the ptr given must
-* match what was returned from ::cuMemAddressReserve.
-*
-* \param[in] ptr - Starting address of the virtual address range to free
-* \param[in] size - Size of the virtual address region to free
-* \return
-* ::CUDA_SUCCESS,
-* ::CUDA_ERROR_INVALID_VALUE,
-* ::CUDA_ERROR_NOT_INITIALIZED,
-* ::CUDA_ERROR_DEINITIALIZED,
-* ::CUDA_ERROR_NOT_PERMITTED,
-* ::CUDA_ERROR_NOT_SUPPORTED
-*
-* \sa ::cuMemAddressReserve
-*/
-CUresult CUDAAPI cuMemAddressFree(CUdeviceptr ptr, size_t size);
-
-/**
-* \brief Create a shareable memory handle representing a memory allocation of a given size described by the given properties
-*
-* This creates a memory allocation on the target device specified through the
-* \p prop structure. The created allocation will not have any device or host
-* mappings. The generic memory \p handle for the allocation can be
-* mapped to the address space of calling process via ::cuMemMap. This handle
-* cannot be transmitted directly to other processes (see
-* ::cuMemExportToShareableHandle). On Windows, the caller must also pass
-* an LPSECURITYATTRIBUTE in \p prop to be associated with this handle which
-* limits or allows access to this handle for a recipient process (see
-* ::CUmemAllocationProp::win32HandleMetaData for more). The \p size of this
-* allocation must be a multiple of the value given via
-* ::cuMemGetAllocationGranularity with the ::CU_MEM_ALLOC_GRANULARITY_MINIMUM
-* flag.
-*
-* \param[out] handle - Value of handle returned. All operations on this allocation are to be performed using this handle.
-* \param[in] size - Size of the allocation requested
-* \param[in] prop - Properties of the allocation to create.
-* \param[in] flags - flags for future use, must be zero now.
-* \return
-* ::CUDA_SUCCESS,
-* ::CUDA_ERROR_INVALID_VALUE,
-* ::CUDA_ERROR_OUT_OF_MEMORY,
-* ::CUDA_ERROR_INVALID_DEVICE,
-* ::CUDA_ERROR_NOT_INITIALIZED,
-* ::CUDA_ERROR_DEINITIALIZED,
-* ::CUDA_ERROR_NOT_PERMITTED,
-* ::CUDA_ERROR_NOT_SUPPORTED
-* \notefnerr
-*
-* \sa ::cuMemRelease, ::cuMemExportToShareableHandle, ::cuMemImportFromShareableHandle
-*/
-CUresult CUDAAPI cuMemCreate(CUmemGenericAllocationHandle *handle, size_t size, const CUmemAllocationProp *prop, unsigned long long flags);
-
-/**
-* \brief Release a memory handle representing a memory allocation which was previously allocated through cuMemCreate.
-*
-* Frees the memory that was allocated on a device through cuMemCreate.
-*
-* The memory allocation will be freed when all outstanding mappings to the memory
-* are unmapped and when all outstanding references to the handle (including its
-* shareable counterparts) are also released. The generic memory handle can be
-* freed when there are still outstanding mappings made with this handle. Each
-* time a recipient process imports a shareable handle, it needs to pair it with
-* ::cuMemRelease for the handle to be freed. If \p handle is not a valid handle
-* the behavior is undefined.
-*
-* \param[in] handle Value of handle which was returned previously by cuMemCreate.
-* \return
-* ::CUDA_SUCCESS,
-* ::CUDA_ERROR_INVALID_VALUE,
-* ::CUDA_ERROR_NOT_INITIALIZED,
-* ::CUDA_ERROR_DEINITIALIZED,
-* ::CUDA_ERROR_NOT_PERMITTED,
-* ::CUDA_ERROR_NOT_SUPPORTED
-* \notefnerr
-*
-* \sa ::cuMemCreate
-*/
-CUresult CUDAAPI cuMemRelease(CUmemGenericAllocationHandle handle);
-
-/**
-* \brief Maps an allocation handle to a reserved virtual address range.
-*
-* Maps bytes of memory represented by \p handle starting from byte \p offset to
-* \p size to address range [\p addr, \p addr + \p size]. This range must be an
-* address reservation previously reserved with ::cuMemAddressReserve, and
-* \p offset + \p size must be less than the size of the memory allocation.
-* Each of \p ptr, \p size, and \p offset must be a multiple of the value given via
-* ::cuMemGetAllocationGranularity with the ::CU_MEM_ALLOC_GRANULARITY_MINIMUM flag.
-*
-* Please note that calling ::cuMemMap does not make the address accessible;
-* the caller needs to update accessibility of a contiguous mapped VA
-* range by calling ::cuMemSetAccess.
-*
-* Once a recipient process obtains a shareable memory handle
-* from ::cuMemImportFromShareableHandle, the process must
-* use ::cuMemMap to map the memory into its address ranges before
-* setting accessibility with ::cuMemSetAccess.
-*
-* ::cuMemMap can only create mappings on VA range reservations
-* that are not currently mapped.
-*
-* \param[in] ptr - Address where memory will be mapped.
-* \param[in] size - Size of the memory mapping.
-* \param[in] offset - Offset into the memory represented by
-* - \p handle from which to start mapping
-* - Note: currently must be zero.
-* \param[in] handle - Handle to a shareable memory
-* \param[in] flags - flags for future use, must be zero now.
-* \return
-* ::CUDA_SUCCESS,
-* ::CUDA_ERROR_INVALID_VALUE,
-* ::CUDA_ERROR_INVALID_DEVICE,
-* ::CUDA_ERROR_OUT_OF_MEMORY,
-* ::CUDA_ERROR_NOT_INITIALIZED,
-* ::CUDA_ERROR_DEINITIALIZED,
-* ::CUDA_ERROR_NOT_PERMITTED,
-* ::CUDA_ERROR_NOT_SUPPORTED
-* \notefnerr
-*
-* \sa ::cuMemUnmap, ::cuMemSetAccess, ::cuMemCreate, ::cuMemAddressReserve, ::cuMemImportFromShareableHandle
-*/
-CUresult CUDAAPI cuMemMap(CUdeviceptr ptr, size_t size, size_t offset, CUmemGenericAllocationHandle handle, unsigned long long flags);
-
-/**
-* \brief Unmap the backing memory of a given address range.
-*
-* The range must be the entire contiguous address range that was mapped to. In
-* other words, ::cuMemUnmap cannot unmap a sub-range of an address range mapped
-* by ::cuMemCreate / ::cuMemMap. Any backing memory allocations will be freed
-* if there are no existing mappings and there are no unreleased memory handles.
-*
-* When ::cuMemUnmap returns successfully the address range is converted to an
-* address reservation and can be used for a future calls to ::cuMemMap. Any new
-* mapping to this virtual address will need to have access granted through
-* ::cuMemSetAccess, as all mappings start with no accessibility setup.
-*
-* \param[in] ptr - Starting address for the virtual address range to unmap
-* \param[in] size - Size of the virtual address range to unmap
-* \returns
-* ::CUDA_SUCCESS,
-* ::CUDA_ERROR_INVALID_VALUE,
-* ::CUDA_ERROR_NOT_INITIALIZED,
-* ::CUDA_ERROR_DEINITIALIZED,
-* ::CUDA_ERROR_NOT_PERMITTED,
-* ::CUDA_ERROR_NOT_SUPPORTED
-* \notefnerr
-* \note_sync
-*
-* \sa ::cuMemCreate, ::cuMemAddressReserve
-*/
-CUresult CUDAAPI cuMemUnmap(CUdeviceptr ptr, size_t size);
-
-/**
-* \brief Set the access flags for each location specified in \p desc for the given virtual address range
-*
-* Given the virtual address range via \p ptr and \p size, and the locations
-* in the array given by \p desc and \p count, set the access flags for the
-* target locations. The range must be a fully mapped address range
-* containing all allocations created by ::cuMemMap / ::cuMemCreate.
-*
-* \param[in] ptr - Starting address for the virtual address range
-* \param[in] size - Length of the virtual address range
-* \param[in] desc - Array of ::CUmemAccessDesc that describe how to change the
-* - mapping for each location specified
-* \param[in] count - Number of ::CUmemAccessDesc in \p desc
-* \returns
-* ::CUDA_SUCCESS,
-* ::CUDA_ERROR_INVALID_VALUE,
-* ::CUDA_ERROR_INVALID_DEVICE,
-* ::CUDA_ERROR_NOT_SUPPORTED
-* \notefnerr
-* \note_sync
-*
-* \sa ::cuMemSetAccess, ::cuMemCreate, ::cuMemMap
-*/
-CUresult CUDAAPI cuMemSetAccess(CUdeviceptr ptr, size_t size, const CUmemAccessDesc *desc, size_t count);
-
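/*
 * End-to-end sketch of the virtual memory management flow documented in this
 * section: pick a size that satisfies the minimum granularity (see
 * cuMemGetAllocationGranularity below), create a physical allocation, reserve a
 * VA range, map the allocation, grant access, use it, and tear everything down
 * in reverse order. The helper name is illustrative; assumes device 0 and a
 * current context; errors are ignored for brevity.
 */
#include <string.h>
#include <cuda.h>

static void vmm_flow(size_t requested) {
  CUmemAllocationProp prop;
  memset(&prop, 0, sizeof(prop));
  prop.type = CU_MEM_ALLOCATION_TYPE_PINNED;
  prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
  prop.location.id = 0;  /* device ordinal */

  size_t granularity = 0;
  cuMemGetAllocationGranularity(&granularity, &prop,
                                CU_MEM_ALLOC_GRANULARITY_MINIMUM);
  size_t size = ((requested + granularity - 1) / granularity) * granularity;

  CUmemGenericAllocationHandle handle;
  cuMemCreate(&handle, size, &prop, 0);      /* physical, unmapped memory */

  CUdeviceptr va = 0;
  cuMemAddressReserve(&va, size, 0, 0, 0);   /* default alignment, any address */
  cuMemMap(va, size, 0, handle, 0);          /* still inaccessible here */

  CUmemAccessDesc access;
  memset(&access, 0, sizeof(access));
  access.location = prop.location;
  access.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
  cuMemSetAccess(va, size, &access, 1);      /* now usable from device 0 */

  cuMemsetD8(va, 0, size);                   /* ordinary use of the mapping */

  cuMemUnmap(va, size);
  cuMemRelease(handle);
  cuMemAddressFree(va, size);
}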
-/**
-* \brief Get the access \p flags set for the given \p location and \p ptr
-*
-* \param[out] flags - Flags set for this location
-* \param[in] location - Location in which to check the flags for
-* \param[in] ptr - Address in which to check the access flags for
-* \returns
-* ::CUDA_SUCCESS,
-* ::CUDA_ERROR_INVALID_VALUE,
-* ::CUDA_ERROR_INVALID_DEVICE,
-* ::CUDA_ERROR_NOT_INITIALIZED,
-* ::CUDA_ERROR_DEINITIALIZED,
-* ::CUDA_ERROR_NOT_PERMITTED,
-* ::CUDA_ERROR_NOT_SUPPORTED
-*
-* \sa ::cuMemSetAccess
-*/
-CUresult CUDAAPI cuMemGetAccess(unsigned long long *flags, const CUmemLocation *location, CUdeviceptr ptr);
-
-/**
-* \brief Exports an allocation to a requested shareable handle type
-*
-* Given a CUDA memory handle, create a shareable memory
-* allocation handle that can be used to share the memory with other
-* processes. The recipient process can convert the shareable handle back into a
-* CUDA memory handle using ::cuMemImportFromShareableHandle and map
-* it with ::cuMemMap. The implementation of what this handle is and how it
-* can be transferred is defined by the requested handle type in \p handleType
-*
-* Once all shareable handles are closed and the allocation is released, the allocated
-* memory referenced will be released back to the OS and uses of the CUDA handle afterward
-* will lead to undefined behavior.
-*
-* This API can also be used in conjunction with other APIs (e.g. Vulkan, OpenGL)
-* that support importing memory from the shareable type
-*
-* \param[out] shareableHandle - Pointer to the location in which to store the requested handle type
-* \param[in] handle - CUDA handle for the memory allocation
-* \param[in] handleType - Type of shareable handle requested (defines type and size of the \p shareableHandle output parameter)
-* \param[in] flags - Reserved, must be zero
-* \returns
-* ::CUDA_SUCCESS,
-* ::CUDA_ERROR_INVALID_VALUE,
-* ::CUDA_ERROR_NOT_INITIALIZED,
-* ::CUDA_ERROR_DEINITIALIZED,
-* ::CUDA_ERROR_NOT_PERMITTED,
-* ::CUDA_ERROR_NOT_SUPPORTED
-*
-* \sa ::cuMemImportFromShareableHandle
-*/
-CUresult CUDAAPI cuMemExportToShareableHandle(void *shareableHandle, CUmemGenericAllocationHandle handle, CUmemAllocationHandleType handleType, unsigned long long flags);
-
-/**
-* \brief Imports an allocation from a requested shareable handle type.
-*
-* If the current process cannot support the memory described by this shareable
-* handle, this API will error as CUDA_ERROR_NOT_SUPPORTED.
-*
-* \note Importing shareable handles exported from some graphics APIs (Vulkan, OpenGL, etc.)
-* created on devices under an SLI group may not be supported, and thus this API will
-* return CUDA_ERROR_NOT_SUPPORTED.
-* There is no guarantee that the contents of \p handle will be the same CUDA memory handle
-* for the same given OS shareable handle, or the same underlying allocation.
-*
-* \param[out] handle - CUDA Memory handle for the memory allocation.
-* \param[in] osHandle - Shareable Handle representing the memory allocation that is to be imported.
-* \param[in] shHandleType - handle type of the exported handle ::CUmemAllocationHandleType.
-* \returns
-* ::CUDA_SUCCESS,
-* ::CUDA_ERROR_INVALID_VALUE,
-* ::CUDA_ERROR_NOT_INITIALIZED,
-* ::CUDA_ERROR_DEINITIALIZED,
-* ::CUDA_ERROR_NOT_PERMITTED,
-* ::CUDA_ERROR_NOT_SUPPORTED
-*
-* \sa ::cuMemExportToShareableHandle, ::cuMemMap, ::cuMemRelease
-*/
-CUresult CUDAAPI cuMemImportFromShareableHandle(CUmemGenericAllocationHandle *handle, void *osHandle, CUmemAllocationHandleType shHandleType);
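A minimal usage sketch of the export/import pair above (added for illustration; not part of the original header). It assumes <cuda.h> and <stdint.h>, a Linux POSIX file-descriptor handle type, and an allocation created with CUmemAllocationProp::requestedHandleTypes including CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR; error handling is omitted.

static void example_share_allocation(CUmemGenericAllocationHandle handle) {
  /* Export: the resulting fd can be handed to another process, e.g. over a
   * UNIX domain socket. */
  int fd = -1;
  cuMemExportToShareableHandle(&fd, handle,
                               CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR, 0);

  /* Import (normally done in the receiving process): recover a CUDA handle
   * that can be mapped with cuMemMap and must later be freed with
   * cuMemRelease. */
  CUmemGenericAllocationHandle imported = 0;
  cuMemImportFromShareableHandle(&imported, (void*)(uintptr_t)fd,
                                 CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR);
}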
-
-/**
-* \brief Calculates either the minimal or recommended granularity
-*
-* Calculates either the minimal or recommended granularity
-* for a given allocation specification and returns it in granularity. This
-* granularity can be used as a multiple for alignment, size, or address mapping.
-*
-* \param[out] granularity Returned granularity.
-* \param[in] prop Property for which to determine the granularity
-* \param[in] option Determines which granularity to return
-* \returns
-* ::CUDA_SUCCESS,
-* ::CUDA_ERROR_INVALID_VALUE,
-* ::CUDA_ERROR_NOT_INITIALIZED,
-* ::CUDA_ERROR_DEINITIALIZED,
-* ::CUDA_ERROR_NOT_PERMITTED,
-* ::CUDA_ERROR_NOT_SUPPORTED
-*
-* \sa ::cuMemCreate, ::cuMemMap
-*/
-CUresult CUDAAPI cuMemGetAllocationGranularity(size_t *granularity, const CUmemAllocationProp *prop, CUmemAllocationGranularity_flags option);
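A sketch of how the granularity query typically feeds the rest of the virtual-memory-management flow (illustrative only, not from the original header): sizes passed to cuMemCreate/cuMemAddressReserve/cuMemMap must be multiples of the returned granularity. Assumes a current context on device 0; cleanup (cuMemUnmap/cuMemAddressFree/cuMemRelease) and error handling are omitted.

static CUdeviceptr example_vmm_map(size_t requested_size) {
  CUmemAllocationProp prop = {0};
  prop.type = CU_MEM_ALLOCATION_TYPE_PINNED;
  prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
  prop.location.id = 0;  /* device ordinal */

  /* Round the requested size up to the minimal granularity. */
  size_t granularity = 0;
  cuMemGetAllocationGranularity(&granularity, &prop,
                                CU_MEM_ALLOC_GRANULARITY_MINIMUM);
  size_t size = ((requested_size + granularity - 1) / granularity) * granularity;

  /* Create physical memory, reserve a VA range, and map one onto the other. */
  CUmemGenericAllocationHandle handle;
  cuMemCreate(&handle, size, &prop, 0);
  CUdeviceptr ptr = 0;
  cuMemAddressReserve(&ptr, size, /*alignment=*/0, /*addr=*/0, 0);
  cuMemMap(ptr, size, /*offset=*/0, handle, 0);

  /* Mappings start with no access; enable read/write from device 0. */
  CUmemAccessDesc access = {0};
  access.location = prop.location;
  access.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
  cuMemSetAccess(ptr, size, &access, 1);
  return ptr;
}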
-
-/**
-* \brief Retrieve the contents of the property structure defining properties for this handle
-*
-* \param[out] prop - Pointer to a properties structure which will hold the information about this handle
-* \param[in] handle - Handle which to perform the query on
-* \returns
-* ::CUDA_SUCCESS,
-* ::CUDA_ERROR_INVALID_VALUE,
-* ::CUDA_ERROR_NOT_INITIALIZED,
-* ::CUDA_ERROR_DEINITIALIZED,
-* ::CUDA_ERROR_NOT_PERMITTED,
-* ::CUDA_ERROR_NOT_SUPPORTED
-*
-* \sa ::cuMemCreate, ::cuMemImportFromShareableHandle
-*/
-CUresult CUDAAPI cuMemGetAllocationPropertiesFromHandle(CUmemAllocationProp *prop, CUmemGenericAllocationHandle handle);
-
-/**
-* \brief Given an address \p addr, returns the allocation handle of the backing memory allocation.
-*
-* The handle is guaranteed to be the same handle value used to map the memory. If the address
-* requested is not mapped, the function will fail. The returned handle must be released with
-* corresponding number of calls to ::cuMemRelease.
-*
-* \note The address \p addr can be any address in a range previously mapped
-* by ::cuMemMap, and not necessarily the start address.
-*
-* \param[out] handle CUDA Memory handle for the backing memory allocation.
-* \param[in] addr Memory address to query, that has been mapped previously.
-* \returns
-* ::CUDA_SUCCESS,
-* ::CUDA_ERROR_INVALID_VALUE,
-* ::CUDA_ERROR_NOT_INITIALIZED,
-* ::CUDA_ERROR_DEINITIALIZED,
-* ::CUDA_ERROR_NOT_PERMITTED,
-* ::CUDA_ERROR_NOT_SUPPORTED
-*
-* \sa ::cuMemCreate, ::cuMemRelease, ::cuMemMap
-*/
-CUresult CUDAAPI cuMemRetainAllocationHandle(CUmemGenericAllocationHandle *handle, void *addr);
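A small sketch (not part of the original header) pairing cuMemRetainAllocationHandle with cuMemGetAllocationPropertiesFromHandle to recover the backing allocation and its creation properties from a mapped address. Assumes <stdint.h> and that the address was previously mapped via cuMemMap; error handling is omitted.

static int example_backing_device(CUdeviceptr mapped_ptr) {
  CUmemGenericAllocationHandle handle = 0;
  /* Takes a reference on the backing allocation; balanced by cuMemRelease. */
  cuMemRetainAllocationHandle(&handle, (void*)(uintptr_t)mapped_ptr);

  CUmemAllocationProp prop;
  cuMemGetAllocationPropertiesFromHandle(&prop, handle);

  cuMemRelease(handle);     /* drop the reference taken above */
  return prop.location.id;  /* device ordinal the memory was created on */
}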
-
-/** @} */ /* END CUDA_VA */
-
-/**
- * \defgroup CUDA_UNIFIED Unified Addressing
- *
- * ___MANBRIEF___ unified addressing functions of the low-level CUDA driver
- * API (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the unified addressing functions of the
- * low-level CUDA driver application programming interface.
- *
- * @{
- *
- * \section CUDA_UNIFIED_overview Overview
- *
- * CUDA devices can share a unified address space with the host.
- * For these devices there is no distinction between a device
- * pointer and a host pointer -- the same pointer value may be
- * used to access memory from the host program and from a kernel
- * running on the device (with exceptions enumerated below).
- *
- * \section CUDA_UNIFIED_support Supported Platforms
- *
- * Whether or not a device supports unified addressing may be
- * queried by calling ::cuDeviceGetAttribute() with the device
- * attribute ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING.
- *
 - * Unified addressing is automatically enabled in 64-bit processes.
- *
- * \section CUDA_UNIFIED_lookup Looking Up Information from Pointer Values
- *
- * It is possible to look up information about the memory which backs a
- * pointer value. For instance, one may want to know if a pointer points
- * to host or device memory. As another example, in the case of device
- * memory, one may want to know on which CUDA device the memory
- * resides. These properties may be queried using the function
- * ::cuPointerGetAttribute()
- *
- * Since pointers are unique, it is not necessary to specify information
- * about the pointers specified to the various copy functions in the
- * CUDA API. The function ::cuMemcpy() may be used to perform a copy
- * between two pointers, ignoring whether they point to host or device
- * memory (making ::cuMemcpyHtoD(), ::cuMemcpyDtoD(), and ::cuMemcpyDtoH()
- * unnecessary for devices supporting unified addressing). For
- * multidimensional copies, the memory type ::CU_MEMORYTYPE_UNIFIED may be
- * used to specify that the CUDA driver should infer the location of the
- * pointer from its value.
- *
- * \section CUDA_UNIFIED_automaphost Automatic Mapping of Host Allocated Host Memory
- *
- * All host memory allocated in all contexts using ::cuMemAllocHost() and
- * ::cuMemHostAlloc() is always directly accessible from all contexts on
- * all devices that support unified addressing. This is the case regardless
- * of whether or not the flags ::CU_MEMHOSTALLOC_PORTABLE and
- * ::CU_MEMHOSTALLOC_DEVICEMAP are specified.
- *
- * The pointer value through which allocated host memory may be accessed
- * in kernels on all devices that support unified addressing is the same
- * as the pointer value through which that memory is accessed on the host,
- * so it is not necessary to call ::cuMemHostGetDevicePointer() to get the device
- * pointer for these allocations.
- *
- * Note that this is not the case for memory allocated using the flag
- * ::CU_MEMHOSTALLOC_WRITECOMBINED, as discussed below.
- *
- * \section CUDA_UNIFIED_autopeerregister Automatic Registration of Peer Memory
- *
- * Upon enabling direct access from a context that supports unified addressing
- * to another peer context that supports unified addressing using
- * ::cuCtxEnablePeerAccess() all memory allocated in the peer context using
- * ::cuMemAlloc() and ::cuMemAllocPitch() will immediately be accessible
- * by the current context. The device pointer value through
- * which any peer memory may be accessed in the current context
- * is the same pointer value through which that memory may be
- * accessed in the peer context.
- *
- * \section CUDA_UNIFIED_exceptions Exceptions, Disjoint Addressing
- *
- * Not all memory may be accessed on devices through the same pointer
- * value through which they are accessed on the host. These exceptions
- * are host memory registered using ::cuMemHostRegister() and host memory
- * allocated using the flag ::CU_MEMHOSTALLOC_WRITECOMBINED. For these
- * exceptions, there exists a distinct host and device address for the
- * memory. The device address is guaranteed to not overlap any valid host
- * pointer range and is guaranteed to have the same value across all
- * contexts that support unified addressing.
- *
- * This device address may be queried using ::cuMemHostGetDevicePointer()
- * when a context using unified addressing is current. Either the host
- * or the unified device pointer value may be used to refer to this memory
- * through ::cuMemcpy() and similar functions using the
- * ::CU_MEMORYTYPE_UNIFIED memory type.
- *
- */
-
-/**
- * \brief Returns information about a pointer
- *
- * The supported attributes are:
- *
- * - ::CU_POINTER_ATTRIBUTE_CONTEXT:
- *
- * Returns in \p *data the ::CUcontext in which \p ptr was allocated or
- * registered.
- * The type of \p data must be ::CUcontext *.
- *
- * If \p ptr was not allocated by, mapped by, or registered with
- * a ::CUcontext which uses unified virtual addressing then
- * ::CUDA_ERROR_INVALID_VALUE is returned.
- *
- * - ::CU_POINTER_ATTRIBUTE_MEMORY_TYPE:
- *
- * Returns in \p *data the physical memory type of the memory that
- * \p ptr addresses as a ::CUmemorytype enumerated value.
- * The type of \p data must be unsigned int.
- *
- * If \p ptr addresses device memory then \p *data is set to
- * ::CU_MEMORYTYPE_DEVICE. The particular ::CUdevice on which the
- * memory resides is the ::CUdevice of the ::CUcontext returned by the
- * ::CU_POINTER_ATTRIBUTE_CONTEXT attribute of \p ptr.
- *
- * If \p ptr addresses host memory then \p *data is set to
- * ::CU_MEMORYTYPE_HOST.
- *
- * If \p ptr was not allocated by, mapped by, or registered with
- * a ::CUcontext which uses unified virtual addressing then
- * ::CUDA_ERROR_INVALID_VALUE is returned.
- *
- * If the current ::CUcontext does not support unified virtual
- * addressing then ::CUDA_ERROR_INVALID_CONTEXT is returned.
- *
- * - ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER:
- *
- * Returns in \p *data the device pointer value through which
- * \p ptr may be accessed by kernels running in the current
- * ::CUcontext.
- * The type of \p data must be CUdeviceptr *.
- *
- * If there exists no device pointer value through which
- * kernels running in the current ::CUcontext may access
- * \p ptr then ::CUDA_ERROR_INVALID_VALUE is returned.
- *
- * If there is no current ::CUcontext then
- * ::CUDA_ERROR_INVALID_CONTEXT is returned.
- *
- * Except in the exceptional disjoint addressing cases discussed
- * below, the value returned in \p *data will equal the input
- * value \p ptr.
- *
- * - ::CU_POINTER_ATTRIBUTE_HOST_POINTER:
- *
- * Returns in \p *data the host pointer value through which
 - * \p ptr may be accessed by the host program.
- * The type of \p data must be void **.
- * If there exists no host pointer value through which
- * the host program may directly access \p ptr then
- * ::CUDA_ERROR_INVALID_VALUE is returned.
- *
- * Except in the exceptional disjoint addressing cases discussed
- * below, the value returned in \p *data will equal the input
- * value \p ptr.
- *
- * - ::CU_POINTER_ATTRIBUTE_P2P_TOKENS:
- *
- * Returns in \p *data two tokens for use with the nv-p2p.h Linux
- * kernel interface. \p data must be a struct of type
- * CUDA_POINTER_ATTRIBUTE_P2P_TOKENS.
- *
 - * \p ptr must be a pointer to memory obtained from ::cuMemAlloc().
- * Note that p2pToken and vaSpaceToken are only valid for the
- * lifetime of the source allocation. A subsequent allocation at
- * the same address may return completely different tokens.
- * Querying this attribute has a side effect of setting the attribute
- * ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS for the region of memory that
- * \p ptr points to.
- *
- * - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS:
- *
- * A boolean attribute which when set, ensures that synchronous memory operations
- * initiated on the region of memory that \p ptr points to will always synchronize.
- * See further documentation in the section titled "API synchronization behavior"
- * to learn more about cases when synchronous memory operations can
- * exhibit asynchronous behavior.
- *
- * - ::CU_POINTER_ATTRIBUTE_BUFFER_ID:
- *
- * Returns in \p *data a buffer ID which is guaranteed to be unique within the process.
- * \p data must point to an unsigned long long.
- *
- * \p ptr must be a pointer to memory obtained from a CUDA memory allocation API.
- * Every memory allocation from any of the CUDA memory allocation APIs will
- * have a unique ID over a process lifetime. Subsequent allocations do not reuse IDs
- * from previous freed allocations. IDs are only unique within a single process.
- *
- *
- * - ::CU_POINTER_ATTRIBUTE_IS_MANAGED:
- *
- * Returns in \p *data a boolean that indicates whether the pointer points to
- * managed memory or not.
- *
- * If \p ptr is not a valid CUDA pointer then ::CUDA_ERROR_INVALID_VALUE is returned.
- *
- * - ::CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL:
- *
- * Returns in \p *data an integer representing a device ordinal of a device against
- * which the memory was allocated or registered.
- *
- * - ::CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE:
- *
- * Returns in \p *data a boolean that indicates if this pointer maps to
- * an allocation that is suitable for ::cudaIpcGetMemHandle.
- *
- * - ::CU_POINTER_ATTRIBUTE_RANGE_START_ADDR:
- *
- * Returns in \p *data the starting address for the allocation referenced
- * by the device pointer \p ptr. Note that this is not necessarily the
- * address of the mapped region, but the address of the mappable address
- * range \p ptr references (e.g. from ::cuMemAddressReserve).
- *
- * - ::CU_POINTER_ATTRIBUTE_RANGE_SIZE:
- *
- * Returns in \p *data the size for the allocation referenced by the device
- * pointer \p ptr. Note that this is not necessarily the size of the mapped
- * region, but the size of the mappable address range \p ptr references
- * (e.g. from ::cuMemAddressReserve). To retrieve the size of the mapped
- * region, see ::cuMemGetAllocationPropertyForAddress.
- *
- * - ::CU_POINTER_ATTRIBUTE_MAPPED:
- *
- * Returns in \p *data a boolean that indicates if this pointer is in a
- * valid address range that is mapped to a backing allocation.
- *
- * - ::CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES:
- *
- * Returns a bitmask of the allowed handle types for an allocation that may
- * be passed to ::cuMemExportToShareableHandle.
- *
- * \par
- *
- * Note that for most allocations in the unified virtual address space
- * the host and device pointer for accessing the allocation will be the
- * same. The exceptions to this are
- * - user memory registered using ::cuMemHostRegister
- * - host memory allocated using ::cuMemHostAlloc with the
- * ::CU_MEMHOSTALLOC_WRITECOMBINED flag
- * For these types of allocation there will exist separate, disjoint host
- * and device addresses for accessing the allocation. In particular
- * - The host address will correspond to an invalid unmapped device address
- * (which will result in an exception if accessed from the device)
- * - The device address will correspond to an invalid unmapped host address
- * (which will result in an exception if accessed from the host).
- * For these types of allocations, querying ::CU_POINTER_ATTRIBUTE_HOST_POINTER
- * and ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER may be used to retrieve the host
- * and device addresses from either address.
- *
- * \param data - Returned pointer attribute value
- * \param attribute - Pointer attribute to query
- * \param ptr - Pointer
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- *
- * \sa
- * ::cuPointerSetAttribute,
- * ::cuMemAlloc,
- * ::cuMemFree,
- * ::cuMemAllocHost,
- * ::cuMemFreeHost,
- * ::cuMemHostAlloc,
- * ::cuMemHostRegister,
- * ::cuMemHostUnregister,
- * ::cudaPointerGetAttributes
- */
-CUresult CUDAAPI cuPointerGetAttribute(void *data, CUpointer_attribute attribute, CUdeviceptr ptr);
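A minimal sketch (not part of the original header) of the single-attribute query: classifying a pointer as device memory via CU_POINTER_ATTRIBUTE_MEMORY_TYPE. Non-UVA pointers make the call fail with CUDA_ERROR_INVALID_VALUE, which this sketch treats as "not device memory".

static int example_is_device_pointer(CUdeviceptr ptr) {
  unsigned int mem_type = 0;  /* receives a CUmemorytype value */
  CUresult result =
      cuPointerGetAttribute(&mem_type, CU_POINTER_ATTRIBUTE_MEMORY_TYPE, ptr);
  if (result != CUDA_SUCCESS) return 0;  /* e.g. not a UVA pointer */
  return mem_type == CU_MEMORYTYPE_DEVICE;
}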
-
-/**
- * \brief Prefetches memory to the specified destination device
- *
- * Prefetches memory to the specified destination device. \p devPtr is the
- * base device pointer of the memory to be prefetched and \p dstDevice is the
- * destination device. \p count specifies the number of bytes to copy. \p hStream
- * is the stream in which the operation is enqueued. The memory range must refer
- * to managed memory allocated via ::cuMemAllocManaged or declared via __managed__ variables.
- *
- * Passing in CU_DEVICE_CPU for \p dstDevice will prefetch the data to host memory. If
- * \p dstDevice is a GPU, then the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS
- * must be non-zero. Additionally, \p hStream must be associated with a device that has a
- * non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS.
- *
- * The start address and end address of the memory range will be rounded down and rounded up
- * respectively to be aligned to CPU page size before the prefetch operation is enqueued
- * in the stream.
- *
- * If no physical memory has been allocated for this region, then this memory region
- * will be populated and mapped on the destination device. If there's insufficient
- * memory to prefetch the desired region, the Unified Memory driver may evict pages from other
- * ::cuMemAllocManaged allocations to host memory in order to make room. Device memory
- * allocated using ::cuMemAlloc or ::cuArrayCreate will not be evicted.
- *
- * By default, any mappings to the previous location of the migrated pages are removed and
- * mappings for the new location are only setup on \p dstDevice. The exact behavior however
- * also depends on the settings applied to this memory range via ::cuMemAdvise as described
- * below:
- *
- * If ::CU_MEM_ADVISE_SET_READ_MOSTLY was set on any subset of this memory range,
- * then that subset will create a read-only copy of the pages on \p dstDevice.
- *
- * If ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION was called on any subset of this memory
- * range, then the pages will be migrated to \p dstDevice even if \p dstDevice is not the
- * preferred location of any pages in the memory range.
- *
- * If ::CU_MEM_ADVISE_SET_ACCESSED_BY was called on any subset of this memory range,
- * then mappings to those pages from all the appropriate processors are updated to
- * refer to the new location if establishing such a mapping is possible. Otherwise,
- * those mappings are cleared.
- *
- * Note that this API is not required for functionality and only serves to improve performance
- * by allowing the application to migrate data to a suitable location before it is accessed.
- * Memory accesses to this range are always coherent and are allowed even when the data is
- * actively being migrated.
- *
- * Note that this function is asynchronous with respect to the host and all work
- * on other devices.
- *
- * \param devPtr - Pointer to be prefetched
- * \param count - Size in bytes
- * \param dstDevice - Destination device to prefetch to
- * \param hStream - Stream to enqueue prefetch operation
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- * \note_async
- * \note_null_stream
- *
- * \sa ::cuMemcpy, ::cuMemcpyPeer, ::cuMemcpyAsync,
- * ::cuMemcpy3DPeerAsync, ::cuMemAdvise,
- * ::cudaMemPrefetchAsync
- */
-CUresult CUDAAPI cuMemPrefetchAsync(CUdeviceptr devPtr, size_t count, CUdevice dstDevice, CUstream hStream);
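An illustrative sketch (not part of the original header) that prefetches a managed range to device 0 before kernels consume it and back to the CPU afterwards. It assumes device 0 reports a non-zero CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS and that the pointer came from cuMemAllocManaged; error handling is omitted.

static void example_prefetch(CUdeviceptr managed, size_t bytes, CUstream stream) {
  /* Migrate pages to device 0 ahead of the kernels that will read them. */
  cuMemPrefetchAsync(managed, bytes, /*dstDevice=*/0, stream);
  /* ...launch kernels reading `managed` in `stream`... */

  /* Bring the results back to host memory once the kernels are done. */
  cuMemPrefetchAsync(managed, bytes, CU_DEVICE_CPU, stream);
  cuStreamSynchronize(stream);  /* data is now resident on the host */
}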
-
-/**
- * \brief Advise about the usage of a given memory range
- *
- * Advise the Unified Memory subsystem about the usage pattern for the memory range
- * starting at \p devPtr with a size of \p count bytes. The start address and end address of the memory
- * range will be rounded down and rounded up respectively to be aligned to CPU page size before the
- * advice is applied. The memory range must refer to managed memory allocated via ::cuMemAllocManaged
- * or declared via __managed__ variables. The memory range could also refer to system-allocated pageable
- * memory provided it represents a valid, host-accessible region of memory and all additional constraints
- * imposed by \p advice as outlined below are also satisfied. Specifying an invalid system-allocated pageable
- * memory range results in an error being returned.
- *
- * The \p advice parameter can take the following values:
- * - ::CU_MEM_ADVISE_SET_READ_MOSTLY: This implies that the data is mostly going to be read
- * from and only occasionally written to. Any read accesses from any processor to this region will create a
- * read-only copy of at least the accessed pages in that processor's memory. Additionally, if ::cuMemPrefetchAsync
- * is called on this region, it will create a read-only copy of the data on the destination processor.
- * If any processor writes to this region, all copies of the corresponding page will be invalidated
- * except for the one where the write occurred. The \p device argument is ignored for this advice.
- * Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU
- * that has a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS.
- * Also, if a context is created on a device that does not have the device attribute
- * ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS set, then read-duplication will not occur until
- * all such contexts are destroyed.
- * If the memory region refers to valid system-allocated pageable memory, then the accessing device must
- * have a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS for a read-only
- * copy to be created on that device. Note however that if the accessing device also has a non-zero value for the
- * device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, then setting this advice
- * will not create a read-only copy when that device accesses this memory region.
- *
- * - ::CU_MEM_ADVISE_UNSET_READ_MOSTLY: Undoes the effect of ::CU_MEM_ADVISE_SET_READ_MOSTLY and also prevents the
- * Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated
- * copies of the data will be collapsed into a single copy. The location for the collapsed
- * copy will be the preferred location if the page has a preferred location and one of the read-duplicated
- * copies was resident at that location. Otherwise, the location chosen is arbitrary.
- *
- * - ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION: This advice sets the preferred location for the
- * data to be the memory belonging to \p device. Passing in CU_DEVICE_CPU for \p device sets the
- * preferred location as host memory. If \p device is a GPU, then it must have a non-zero value for the
- * device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Setting the preferred location
- * does not cause data to migrate to that location immediately. Instead, it guides the migration policy
- * when a fault occurs on that memory region. If the data is already in its preferred location and the
- * faulting processor can establish a mapping without requiring the data to be migrated, then
- * data migration will be avoided. On the other hand, if the data is not in its preferred location
- * or if a direct mapping cannot be established, then it will be migrated to the processor accessing
- * it. It is important to note that setting the preferred location does not prevent data prefetching
- * done using ::cuMemPrefetchAsync.
- * Having a preferred location can override the page thrash detection and resolution logic in the Unified
- * Memory driver. Normally, if a page is detected to be constantly thrashing between for example host and device
- * memory, the page may eventually be pinned to host memory by the Unified Memory driver. But
- * if the preferred location is set as device memory, then the page will continue to thrash indefinitely.
- * If ::CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the
- * policies associated with that advice will override the policies of this advice, unless read accesses from
- * \p device will not result in a read-only copy being created on that device as outlined in description for
- * the advice ::CU_MEM_ADVISE_SET_READ_MOSTLY.
- * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero
- * value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if \p device has
- * a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES,
- * then this call has no effect. Note however that this behavior may change in the future.
- *
- * - ::CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: Undoes the effect of ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION
- * and changes the preferred location to none.
- *
- * - ::CU_MEM_ADVISE_SET_ACCESSED_BY: This advice implies that the data will be accessed by \p device.
- * Passing in ::CU_DEVICE_CPU for \p device will set the advice for the CPU. If \p device is a GPU, then
- * the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS must be non-zero.
- * This advice does not cause data migration and has no impact on the location of the data per se. Instead,
- * it causes the data to always be mapped in the specified processor's page tables, as long as the
- * location of the data permits a mapping to be established. If the data gets migrated for any reason,
- * the mappings are updated accordingly.
- * This advice is recommended in scenarios where data locality is not important, but avoiding faults is.
- * Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the
- * data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data
- * over to the other GPUs is not as important because the accesses are infrequent and the overhead of
- * migration may be too high. But preventing faults can still help improve performance, and so having
- * a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated
- * to host memory because the CPU typically cannot access device memory directly. Any GPU that had the
- * ::CU_MEM_ADVISE_SET_ACCESSED_BY flag set for this data will now have its mapping updated to point to the
- * page in host memory.
- * If ::CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the
- * policies associated with that advice will override the policies of this advice. Additionally, if the
- * preferred location of this memory region or any subset of it is also \p device, then the policies
- * associated with ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION will override the policies of this advice.
- * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero
- * value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if \p device has
- * a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES,
- * then this call has no effect.
- *
- * - ::CU_MEM_ADVISE_UNSET_ACCESSED_BY: Undoes the effect of ::CU_MEM_ADVISE_SET_ACCESSED_BY. Any mappings to
- * the data from \p device may be removed at any time causing accesses to result in non-fatal page faults.
- * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero
- * value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if \p device has
- * a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES,
- * then this call has no effect.
- *
- * \param devPtr - Pointer to memory to set the advice for
- * \param count - Size in bytes of the memory range
- * \param advice - Advice to be applied for the specified memory range
- * \param device - Device to apply the advice for
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- * \note_async
- * \note_null_stream
- *
- * \sa ::cuMemcpy, ::cuMemcpyPeer, ::cuMemcpyAsync,
- * ::cuMemcpy3DPeerAsync, ::cuMemPrefetchAsync,
- * ::cudaMemAdvise
- */
-CUresult CUDAAPI cuMemAdvise(CUdeviceptr devPtr, size_t count, CUmem_advise advice, CUdevice device);
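A short sketch (not from the original header) combining the advice flags described above for a read-mostly lookup table that should live in host memory but stay mapped on a GPU. Assumes managed memory and a device with concurrent managed access; error handling is omitted.

static void example_advise(CUdeviceptr managed, size_t bytes, CUdevice dev) {
  /* Read-duplicate pages on every processor that reads them ("dev" is
   * ignored for this advice). */
  cuMemAdvise(managed, bytes, CU_MEM_ADVISE_SET_READ_MOSTLY, dev);
  /* Prefer host memory as the home location for the pages. */
  cuMemAdvise(managed, bytes, CU_MEM_ADVISE_SET_PREFERRED_LOCATION, CU_DEVICE_CPU);
  /* Keep a mapping established on the GPU to avoid faults on first access. */
  cuMemAdvise(managed, bytes, CU_MEM_ADVISE_SET_ACCESSED_BY, dev);
}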
-
-/**
- * \brief Query an attribute of a given memory range
- *
- * Query an attribute about the memory range starting at \p devPtr with a size of \p count bytes. The
- * memory range must refer to managed memory allocated via ::cuMemAllocManaged or declared via
- * __managed__ variables.
- *
- * The \p attribute parameter can take the following values:
- * - ::CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY: If this attribute is specified, \p data will be interpreted
- * as a 32-bit integer, and \p dataSize must be 4. The result returned will be 1 if all pages in the given
- * memory range have read-duplication enabled, or 0 otherwise.
- * - ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION: If this attribute is specified, \p data will be
- * interpreted as a 32-bit integer, and \p dataSize must be 4. The result returned will be a GPU device
- * id if all pages in the memory range have that GPU as their preferred location, or it will be CU_DEVICE_CPU
- * if all pages in the memory range have the CPU as their preferred location, or it will be CU_DEVICE_INVALID
- * if either all the pages don't have the same preferred location or some of the pages don't have a
- * preferred location at all. Note that the actual location of the pages in the memory range at the time of
- * the query may be different from the preferred location.
- * - ::CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY: If this attribute is specified, \p data will be interpreted
- * as an array of 32-bit integers, and \p dataSize must be a non-zero multiple of 4. The result returned
- * will be a list of device ids that had ::CU_MEM_ADVISE_SET_ACCESSED_BY set for that entire memory range.
- * If any device does not have that advice set for the entire memory range, that device will not be included.
- * If \p data is larger than the number of devices that have that advice set for that memory range,
 - * CU_DEVICE_INVALID will be returned in all the extra space provided. For example, if \p dataSize is 12
- * (i.e. \p data has 3 elements) and only device 0 has the advice set, then the result returned will be
- * { 0, CU_DEVICE_INVALID, CU_DEVICE_INVALID }. If \p data is smaller than the number of devices that have
- * that advice set, then only as many devices will be returned as can fit in the array. There is no
- * guarantee on which specific devices will be returned, however.
- * - ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION: If this attribute is specified, \p data will be
- * interpreted as a 32-bit integer, and \p dataSize must be 4. The result returned will be the last location
- * to which all pages in the memory range were prefetched explicitly via ::cuMemPrefetchAsync. This will either be
- * a GPU id or CU_DEVICE_CPU depending on whether the last location for prefetch was a GPU or the CPU
- * respectively. If any page in the memory range was never explicitly prefetched or if all pages were not
- * prefetched to the same location, CU_DEVICE_INVALID will be returned. Note that this simply returns the
 - * last location that the application requested to prefetch the memory range to. It gives no indication as to
- * whether the prefetch operation to that location has completed or even begun.
- *
 - * \param data - A pointer to a memory location where the result
 - * of the attribute query will be written to.
 - * \param dataSize - Size in bytes of the memory pointed to by \p data
- * \param attribute - The attribute to query
- * \param devPtr - Start of the range to query
- * \param count - Size of the range to query
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- * \note_async
- * \note_null_stream
- *
- * \sa ::cuMemRangeGetAttributes, ::cuMemPrefetchAsync,
- * ::cuMemAdvise,
- * ::cudaMemRangeGetAttribute
- */
-CUresult CUDAAPI cuMemRangeGetAttribute(void *data, size_t dataSize, CUmem_range_attribute attribute, CUdeviceptr devPtr, size_t count);
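A minimal sketch (not part of the original header) checking whether read-duplication is enabled for an entire managed range; per the description above the result is a 32-bit integer, so dataSize is passed as 4.

static int example_range_is_read_mostly(CUdeviceptr managed, size_t bytes) {
  int read_mostly = 0;  /* 32-bit result; dataSize must therefore be 4 */
  cuMemRangeGetAttribute(&read_mostly, /*dataSize=*/4,
                         CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY, managed, bytes);
  return read_mostly;  /* 1 if all pages have read-duplication enabled */
}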
-
-/**
- * \brief Query attributes of a given memory range.
- *
- * Query attributes of the memory range starting at \p devPtr with a size of \p count bytes. The
- * memory range must refer to managed memory allocated via ::cuMemAllocManaged or declared via
- * __managed__ variables. The \p attributes array will be interpreted to have \p numAttributes
- * entries. The \p dataSizes array will also be interpreted to have \p numAttributes entries.
- * The results of the query will be stored in \p data.
- *
- * The list of supported attributes are given below. Please refer to ::cuMemRangeGetAttribute for
- * attribute descriptions and restrictions.
- *
- * - ::CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY
- * - ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION
- * - ::CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY
- * - ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION
- *
- * \param data - A two-dimensional array containing pointers to memory
- * locations where the result of each attribute query will be written to.
- * \param dataSizes - Array containing the sizes of each result
- * \param attributes - An array of attributes to query
- * (numAttributes and the number of attributes in this array should match)
- * \param numAttributes - Number of attributes to query
- * \param devPtr - Start of the range to query
- * \param count - Size of the range to query
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- *
 - * \sa ::cuMemRangeGetAttribute, ::cuMemAdvise,
- * ::cuMemPrefetchAsync,
- * ::cudaMemRangeGetAttributes
- */
-CUresult CUDAAPI cuMemRangeGetAttributes(void **data, size_t *dataSizes, CUmem_range_attribute *attributes, size_t numAttributes, CUdeviceptr devPtr, size_t count);
-
-/**
- * \brief Set attributes on a previously allocated memory region
- *
- * The supported attributes are:
- *
- * - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS:
- *
- * A boolean attribute that can either be set (1) or unset (0). When set,
- * the region of memory that \p ptr points to is guaranteed to always synchronize
- * memory operations that are synchronous. If there are some previously initiated
- * synchronous memory operations that are pending when this attribute is set, the
- * function does not return until those memory operations are complete.
- * See further documentation in the section titled "API synchronization behavior"
- * to learn more about cases when synchronous memory operations can
- * exhibit asynchronous behavior.
- * \p value will be considered as a pointer to an unsigned integer to which this attribute is to be set.
- *
- * \param value - Pointer to memory containing the value to be set
- * \param attribute - Pointer attribute to set
- * \param ptr - Pointer to a memory region allocated using CUDA memory allocation APIs
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- *
- * \sa ::cuPointerGetAttribute,
- * ::cuPointerGetAttributes,
- * ::cuMemAlloc,
- * ::cuMemFree,
- * ::cuMemAllocHost,
- * ::cuMemFreeHost,
- * ::cuMemHostAlloc,
- * ::cuMemHostRegister,
- * ::cuMemHostUnregister
- */
-CUresult CUDAAPI cuPointerSetAttribute(const void *value, CUpointer_attribute attribute, CUdeviceptr ptr);
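A minimal sketch (not part of the original header) of the one settable attribute documented above: forcing synchronous memory operations on an allocation via CU_POINTER_ATTRIBUTE_SYNC_MEMOPS.

static void example_enable_sync_memops(CUdeviceptr ptr) {
  unsigned int enable = 1;  /* value is read through an unsigned int pointer */
  cuPointerSetAttribute(&enable, CU_POINTER_ATTRIBUTE_SYNC_MEMOPS, ptr);
}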
-
-/**
- * \brief Returns information about a pointer.
- *
- * The supported attributes are (refer to ::cuPointerGetAttribute for attribute descriptions and restrictions):
- *
- * - ::CU_POINTER_ATTRIBUTE_CONTEXT
- * - ::CU_POINTER_ATTRIBUTE_MEMORY_TYPE
- * - ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER
- * - ::CU_POINTER_ATTRIBUTE_HOST_POINTER
- * - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS
- * - ::CU_POINTER_ATTRIBUTE_BUFFER_ID
- * - ::CU_POINTER_ATTRIBUTE_IS_MANAGED
- * - ::CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL
- * - ::CU_POINTER_ATTRIBUTE_RANGE_START_ADDR
- * - ::CU_POINTER_ATTRIBUTE_RANGE_SIZE
- * - ::CU_POINTER_ATTRIBUTE_MAPPED
- * - ::CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE
- * - ::CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES
- *
- * \param numAttributes - Number of attributes to query
- * \param attributes - An array of attributes to query
- * (numAttributes and the number of attributes in this array should match)
- * \param data - A two-dimensional array containing pointers to memory
- * locations where the result of each attribute query will be written to.
- * \param ptr - Pointer to query
- *
- * Unlike ::cuPointerGetAttribute, this function will not return an error when the \p ptr
- * encountered is not a valid CUDA pointer. Instead, the attributes are assigned default NULL values
- * and CUDA_SUCCESS is returned.
- *
- * If \p ptr was not allocated by, mapped by, or registered with a ::CUcontext which uses UVA
- * (Unified Virtual Addressing), ::CUDA_ERROR_INVALID_CONTEXT is returned.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- *
- * \sa
- * ::cuPointerGetAttribute,
- * ::cuPointerSetAttribute,
- * ::cudaPointerGetAttributes
- */
-CUresult CUDAAPI cuPointerGetAttributes(unsigned int numAttributes, CUpointer_attribute *attributes, void **data, CUdeviceptr ptr);
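A sketch (not part of the original header) of the batched query, using attributes whose result types are documented above; unlike cuPointerGetAttribute it does not fail for non-CUDA pointers, it simply leaves unsupported results at their default/NULL values.

static void example_query_pointer(CUdeviceptr ptr) {
  CUpointer_attribute attrs[3] = {
      CU_POINTER_ATTRIBUTE_MEMORY_TYPE,
      CU_POINTER_ATTRIBUTE_DEVICE_POINTER,
      CU_POINTER_ATTRIBUTE_HOST_POINTER,
  };
  unsigned int mem_type = 0;   /* CUmemorytype value */
  CUdeviceptr device_ptr = 0;  /* device-side view of the allocation */
  void* host_ptr = NULL;       /* host-side view, if any */
  void* results[3] = {&mem_type, &device_ptr, &host_ptr};
  cuPointerGetAttributes(3, attrs, results, ptr);
}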
-
-/** @} */ /* END CUDA_UNIFIED */
-
-/**
- * \defgroup CUDA_STREAM Stream Management
- *
- * ___MANBRIEF___ stream management functions of the low-level CUDA driver API
- * (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the stream management functions of the low-level CUDA
- * driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Create a stream
- *
- * Creates a stream and returns a handle in \p phStream. The \p Flags argument
- * determines behaviors of the stream.
- *
- * Valid values for \p Flags are:
- * - ::CU_STREAM_DEFAULT: Default stream creation flag.
- * - ::CU_STREAM_NON_BLOCKING: Specifies that work running in the created
- * stream may run concurrently with work in stream 0 (the NULL stream), and that
- * the created stream should perform no implicit synchronization with stream 0.
- *
- * \param phStream - Returned newly created stream
- * \param Flags - Parameters for stream creation
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY
- * \notefnerr
- *
- * \sa ::cuStreamDestroy,
- * ::cuStreamCreateWithPriority,
- * ::cuStreamGetPriority,
- * ::cuStreamGetFlags,
- * ::cuStreamWaitEvent,
- * ::cuStreamQuery,
- * ::cuStreamSynchronize,
- * ::cuStreamAddCallback,
- * ::cudaStreamCreate,
- * ::cudaStreamCreateWithFlags
- */
-CUresult CUDAAPI cuStreamCreate(CUstream *phStream, unsigned int Flags);
-
-/**
- * \brief Create a stream with the given priority
- *
- * Creates a stream with the specified priority and returns a handle in \p phStream.
- * This API alters the scheduler priority of work in the stream. Work in a higher
- * priority stream may preempt work already executing in a low priority stream.
- *
- * \p priority follows a convention where lower numbers represent higher priorities.
- * '0' represents default priority. The range of meaningful numerical priorities can
- * be queried using ::cuCtxGetStreamPriorityRange. If the specified priority is
- * outside the numerical range returned by ::cuCtxGetStreamPriorityRange,
- * it will automatically be clamped to the lowest or the highest number in the range.
- *
- * \param phStream - Returned newly created stream
- * \param flags - Flags for stream creation. See ::cuStreamCreate for a list of
- * valid flags
- * \param priority - Stream priority. Lower numbers represent higher priorities.
- * See ::cuCtxGetStreamPriorityRange for more information about
- * meaningful stream priorities that can be passed.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY
- * \notefnerr
- *
- * \note Stream priorities are supported only on GPUs
- * with compute capability 3.5 or higher.
- *
- * \note In the current implementation, only compute kernels launched in
- * priority streams are affected by the stream's priority. Stream priorities have
- * no effect on host-to-device and device-to-host memory operations.
- *
- * \sa ::cuStreamDestroy,
- * ::cuStreamCreate,
- * ::cuStreamGetPriority,
- * ::cuCtxGetStreamPriorityRange,
- * ::cuStreamGetFlags,
- * ::cuStreamWaitEvent,
- * ::cuStreamQuery,
- * ::cuStreamSynchronize,
- * ::cuStreamAddCallback,
- * ::cudaStreamCreateWithPriority
- */
-CUresult CUDAAPI cuStreamCreateWithPriority(CUstream *phStream, unsigned int flags, int priority);
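A sketch (not part of the original header) covering both stream-creation entry points above: it queries the context's priority range and creates a non-blocking stream at the highest priority. Remember that lower numbers mean higher priority and out-of-range values are clamped; the stream should later be destroyed with cuStreamDestroy.

static CUstream example_create_high_priority_stream(void) {
  int least = 0, greatest = 0;
  cuCtxGetStreamPriorityRange(&least, &greatest);
  CUstream stream = NULL;
  /* CU_STREAM_NON_BLOCKING avoids implicit synchronization with the NULL
   * stream; `greatest` is the numerically lowest (i.e. highest) priority. */
  cuStreamCreateWithPriority(&stream, CU_STREAM_NON_BLOCKING, greatest);
  return stream;
}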
-
-
-/**
- * \brief Query the priority of a given stream
- *
- * Query the priority of a stream created using ::cuStreamCreate or ::cuStreamCreateWithPriority
- * and return the priority in \p priority. Note that if the stream was created with a
- * priority outside the numerical range returned by ::cuCtxGetStreamPriorityRange,
- * this function returns the clamped priority.
- * See ::cuStreamCreateWithPriority for details about priority clamping.
- *
- * \param hStream - Handle to the stream to be queried
- * \param priority - Pointer to a signed integer in which the stream's priority is returned
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_OUT_OF_MEMORY
- * \notefnerr
- *
- * \sa ::cuStreamDestroy,
- * ::cuStreamCreate,
- * ::cuStreamCreateWithPriority,
- * ::cuCtxGetStreamPriorityRange,
- * ::cuStreamGetFlags,
- * ::cudaStreamGetPriority
- */
-CUresult CUDAAPI cuStreamGetPriority(CUstream hStream, int *priority);
-
-/**
- * \brief Query the flags of a given stream
- *
- * Query the flags of a stream created using ::cuStreamCreate or ::cuStreamCreateWithPriority
- * and return the flags in \p flags.
- *
- * \param hStream - Handle to the stream to be queried
- * \param flags - Pointer to an unsigned integer in which the stream's flags are returned
- * The value returned in \p flags is a logical 'OR' of all flags that
- * were used while creating this stream. See ::cuStreamCreate for the list
- * of valid flags
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_OUT_OF_MEMORY
- * \notefnerr
- *
- * \sa ::cuStreamDestroy,
- * ::cuStreamCreate,
- * ::cuStreamGetPriority,
- * ::cudaStreamGetFlags
- */
-CUresult CUDAAPI cuStreamGetFlags(CUstream hStream, unsigned int *flags);
-
-/**
- * \brief Query the context associated with a stream
- *
- * Returns the CUDA context that the stream is associated with.
- *
- * The stream handle \p hStream can refer to any of the following:
- * <ul>
- * <li>a stream created via any of the CUDA driver APIs such as ::cuStreamCreate
- * and ::cuStreamCreateWithPriority, or their runtime API equivalents such as
- * ::cudaStreamCreate, ::cudaStreamCreateWithFlags and ::cudaStreamCreateWithPriority.
- * The returned context is the context that was active in the calling thread when the
- * stream was created. Passing an invalid handle will result in undefined behavior.</li>
- * <li>any of the special streams such as the NULL stream, ::CU_STREAM_LEGACY and
- * ::CU_STREAM_PER_THREAD. The runtime API equivalents of these are also accepted,
- * which are NULL, ::cudaStreamLegacy and ::cudaStreamPerThread respectively.
- * Specifying any of the special handles will return the context current to the
- * calling thread. If no context is current to the calling thread,
- * ::CUDA_ERROR_INVALID_CONTEXT is returned.</li>
- * </ul>
- *
- * \param hStream - Handle to the stream to be queried
- * \param pctx - Returned context associated with the stream
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * \notefnerr
- *
- * \sa ::cuStreamDestroy,
- * ::cuStreamCreateWithPriority,
- * ::cuStreamGetPriority,
- * ::cuStreamGetFlags,
- * ::cuStreamWaitEvent,
- * ::cuStreamQuery,
- * ::cuStreamSynchronize,
- * ::cuStreamAddCallback,
- * ::cudaStreamCreate,
- * ::cudaStreamCreateWithFlags
- */
-CUresult CUDAAPI cuStreamGetCtx(CUstream hStream, CUcontext *pctx);
-
-/**
- * \brief Make a compute stream wait on an event
- *
- * Makes all future work submitted to \p hStream wait for all work captured in
- * \p hEvent. See ::cuEventRecord() for details on what is captured by an event.
- * The synchronization will be performed efficiently on the device when applicable.
- * \p hEvent may be from a different context or device than \p hStream.
- *
- * \param hStream - Stream to wait
- * \param hEvent - Event to wait on (may not be NULL)
- * \param Flags - Parameters for the operation (must be 0)
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * \note_null_stream
- * \notefnerr
- *
- * \sa ::cuStreamCreate,
- * ::cuEventRecord,
- * ::cuStreamQuery,
- * ::cuStreamSynchronize,
- * ::cuStreamAddCallback,
- * ::cuStreamDestroy,
- * ::cudaStreamWaitEvent
- */
-CUresult CUDAAPI cuStreamWaitEvent(CUstream hStream, CUevent hEvent, unsigned int Flags);
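An illustrative sketch (not part of the original header) of the usual event-based cross-stream dependency: work enqueued in a consumer stream waits for previously enqueued producer work without blocking the host. Error handling is omitted.

static void example_cross_stream_dependency(CUstream producer, CUstream consumer) {
  CUevent done = NULL;
  cuEventCreate(&done, CU_EVENT_DISABLE_TIMING);
  /* ...enqueue producer work into `producer` here... */
  cuEventRecord(done, producer);         /* capture the producer's progress   */
  cuStreamWaitEvent(consumer, done, 0);  /* consumer work waits for the event */
  /* ...enqueue consumer work into `consumer` here... */
  cuEventDestroy(done);  /* safe even while pending; destruction is deferred */
}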
-
-/**
- * \brief Add a callback to a compute stream
- *
- * \note This function is slated for eventual deprecation and removal. If
- * you do not require the callback to execute in case of a device error,
- * consider using ::cuLaunchHostFunc. Additionally, this function is not
- * supported with ::cuStreamBeginCapture and ::cuStreamEndCapture, unlike
- * ::cuLaunchHostFunc.
- *
- * Adds a callback to be called on the host after all currently enqueued
- * items in the stream have completed. For each
- * cuStreamAddCallback call, the callback will be executed exactly once.
- * The callback will block later work in the stream until it is finished.
- *
- * The callback may be passed ::CUDA_SUCCESS or an error code. In the event
- * of a device error, all subsequently executed callbacks will receive an
- * appropriate ::CUresult.
- *
- * Callbacks must not make any CUDA API calls. Attempting to use a CUDA API
- * will result in ::CUDA_ERROR_NOT_PERMITTED. Callbacks must not perform any
- * synchronization that may depend on outstanding device work or other callbacks
- * that are not mandated to run earlier. Callbacks without a mandated order
- * (in independent streams) execute in undefined order and may be serialized.
- *
- * For the purposes of Unified Memory, callback execution makes a number of
- * guarantees:
- * <ul>
- * <li>The callback stream is considered idle for the duration of the
- * callback. Thus, for example, a callback may always use memory attached
- * to the callback stream.</li>
- * <li>The start of execution of a callback has the same effect as
- * synchronizing an event recorded in the same stream immediately prior to
- * the callback. It thus synchronizes streams which have been "joined"
- * prior to the callback.</li>
- * <li>Adding device work to any stream does not have the effect of making
- * the stream active until all preceding host functions and stream callbacks
- * have executed. Thus, for
- * example, a callback might use global attached memory even if work has
- * been added to another stream, if the work has been ordered behind the
- * callback with an event.</li>
- * <li>Completion of a callback does not cause a stream to become
- * active except as described above. The callback stream will remain idle
- * if no device work follows the callback, and will remain idle across
- * consecutive callbacks without device work in between. Thus, for example,
- * stream synchronization can be done by signaling from a callback at the
- * end of the stream.</li>
- * </ul>
- *
- * \param hStream - Stream to add callback to
- * \param callback - The function to call once preceding stream operations are complete
- * \param userData - User specified data to be passed to the callback function
- * \param flags - Reserved for future use, must be 0
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_NOT_SUPPORTED
- * \note_null_stream
- * \notefnerr
- *
- * \sa ::cuStreamCreate,
- * ::cuStreamQuery,
- * ::cuStreamSynchronize,
- * ::cuStreamWaitEvent,
- * ::cuStreamDestroy,
- * ::cuMemAllocManaged,
- * ::cuStreamAttachMemAsync,
 - * ::cuLaunchHostFunc,
- * ::cudaStreamAddCallback
- */
-CUresult CUDAAPI cuStreamAddCallback(CUstream hStream, CUstreamCallback callback, void *userData, unsigned int flags);
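A sketch (not part of the original header) of a host callback that fires once all prior work in the stream has completed; note that callbacks must not call back into the CUDA API. The completion flag is a hypothetical signaling mechanism chosen for illustration.

static void CUDA_CB example_on_stream_done(CUstream stream, CUresult status,
                                           void* user_data) {
  (void)stream;
  if (status != CUDA_SUCCESS) {
    /* a preceding operation in the stream failed */
  }
  *(volatile int*)user_data = 1;  /* signal completion to the host */
}

static void example_add_callback(CUstream stream, volatile int* completed_flag) {
  cuStreamAddCallback(stream, example_on_stream_done, (void*)completed_flag, 0);
}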
-
-/**
- * \brief Begins graph capture on a stream
- *
- * Begin graph capture on \p hStream. When a stream is in capture mode, all operations
- * pushed into the stream will not be executed, but will instead be captured into
- * a graph, which will be returned via ::cuStreamEndCapture. Capture may not be initiated
- * if \p stream is CU_STREAM_LEGACY. Capture must be ended on the same stream in which
- * it was initiated, and it may only be initiated if the stream is not already in capture
- * mode. The capture mode may be queried via ::cuStreamIsCapturing. A unique id
- * representing the capture sequence may be queried via ::cuStreamGetCaptureInfo.
- *
- * If \p mode is not ::CU_STREAM_CAPTURE_MODE_RELAXED, ::cuStreamEndCapture must be
- * called on this stream from the same thread.
- *
- * \param hStream - Stream in which to initiate capture
- * \param mode - Controls the interaction of this capture sequence with other API
- * calls that are potentially unsafe. For more details see
- * ::cuThreadExchangeStreamCaptureMode.
- *
- * \note Kernels captured using this API must not use texture and surface references.
- * Reading or writing through any texture or surface reference is undefined
- * behavior. This restriction does not apply to texture and surface objects.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa
- * ::cuStreamCreate,
- * ::cuStreamIsCapturing,
- * ::cuStreamEndCapture,
- * ::cuThreadExchangeStreamCaptureMode
- */
-CUresult CUDAAPI cuStreamBeginCapture(CUstream hStream, CUstreamCaptureMode mode);
-
-/**
- * \brief Swaps the stream capture interaction mode for a thread
- *
- * Sets the calling thread's stream capture interaction mode to the value contained
- * in \p *mode, and overwrites \p *mode with the previous mode for the thread. To
- * facilitate deterministic behavior across function or module boundaries, callers
- * are encouraged to use this API in a push-pop fashion: \code
- CUstreamCaptureMode mode = desiredMode;
- cuThreadExchangeStreamCaptureMode(&mode);
- ...
- cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode
- * \endcode
- *
- * During stream capture (see ::cuStreamBeginCapture), some actions, such as a call
- * to ::cudaMalloc, may be unsafe. In the case of ::cudaMalloc, the operation is
- * not enqueued asynchronously to a stream, and is not observed by stream capture.
- * Therefore, if the sequence of operations captured via ::cuStreamBeginCapture
- * depended on the allocation being replayed whenever the graph is launched, the
- * captured graph would be invalid.
- *
- * Therefore, stream capture places restrictions on API calls that can be made within
- * or concurrently to a ::cuStreamBeginCapture-::cuStreamEndCapture sequence. This
- * behavior can be controlled via this API and flags to ::cuStreamBeginCapture.
- *
- * A thread's mode is one of the following:
- * - \p CU_STREAM_CAPTURE_MODE_GLOBAL: This is the default mode. If the local thread has
- * an ongoing capture sequence that was not initiated with
- * \p CU_STREAM_CAPTURE_MODE_RELAXED at \p cuStreamBeginCapture, or if any other thread
- * has a concurrent capture sequence initiated with \p CU_STREAM_CAPTURE_MODE_GLOBAL,
- * this thread is prohibited from potentially unsafe API calls.
- * - \p CU_STREAM_CAPTURE_MODE_THREAD_LOCAL: If the local thread has an ongoing capture
- * sequence not initiated with \p CU_STREAM_CAPTURE_MODE_RELAXED, it is prohibited
- * from potentially unsafe API calls. Concurrent capture sequences in other threads
- * are ignored.
- * - \p CU_STREAM_CAPTURE_MODE_RELAXED: The local thread is not prohibited from potentially
- * unsafe API calls. Note that the thread is still prohibited from API calls which
- * necessarily conflict with stream capture, for example, attempting ::cuEventQuery
- * on an event that was last recorded inside a capture sequence.
- *
- * \param mode - Pointer to mode value to swap with the current mode
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa
- * ::cuStreamBeginCapture
- */
-CUresult CUDAAPI cuThreadExchangeStreamCaptureMode(CUstreamCaptureMode *mode);
-
-/**
- * \brief Ends capture on a stream, returning the captured graph
- *
- * End capture on \p hStream, returning the captured graph via \p phGraph.
- * Capture must have been initiated on \p hStream via a call to ::cuStreamBeginCapture.
- * If capture was invalidated, due to a violation of the rules of stream capture, then
- * a NULL graph will be returned.
- *
- * If the \p mode argument to ::cuStreamBeginCapture was not
- * ::CU_STREAM_CAPTURE_MODE_RELAXED, this call must be from the same thread as
- * ::cuStreamBeginCapture.
- *
- * \param hStream - Stream to query
- * \param phGraph - The captured graph
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD
- * \notefnerr
- *
- * \sa
- * ::cuStreamCreate,
- * ::cuStreamBeginCapture,
- * ::cuStreamIsCapturing
- */
-CUresult CUDAAPI cuStreamEndCapture(CUstream hStream, CUgraph *phGraph);
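A sketch (not part of the original header) of the full capture-and-replay cycle: begin capture, enqueue work, end capture, instantiate and launch the graph. The cuGraphInstantiate signature shown is the CUDA 10/11-era one; the stream is assumed to be non-blocking and the enqueued work capture-safe. Error handling is omitted.

static void example_capture_and_replay(CUstream stream) {
  cuStreamBeginCapture(stream, CU_STREAM_CAPTURE_MODE_GLOBAL);
  /* ...enqueue kernels / async copies into `stream`; nothing executes yet... */
  CUgraph graph = NULL;
  cuStreamEndCapture(stream, &graph);

  CUgraphExec graph_exec = NULL;
  cuGraphInstantiate(&graph_exec, graph, NULL, NULL, 0);
  cuGraphLaunch(graph_exec, stream);  /* replay the captured work */
  cuStreamSynchronize(stream);

  cuGraphExecDestroy(graph_exec);
  cuGraphDestroy(graph);
}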
-
-/**
- * \brief Returns a stream's capture status
- *
- * Return the capture status of \p hStream via \p captureStatus. After a successful
- * call, \p *captureStatus will contain one of the following:
- * - ::CU_STREAM_CAPTURE_STATUS_NONE: The stream is not capturing.
- * - ::CU_STREAM_CAPTURE_STATUS_ACTIVE: The stream is capturing.
- * - ::CU_STREAM_CAPTURE_STATUS_INVALIDATED: The stream was capturing but an error
- * has invalidated the capture sequence. The capture sequence must be terminated
- * with ::cuStreamEndCapture on the stream where it was initiated in order to
- * continue using \p hStream.
- *
- * Note that, if this is called on ::CU_STREAM_LEGACY (the "null stream") while
- * a blocking stream in the same context is capturing, it will return
- * ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT and \p *captureStatus is unspecified
- * after the call. The blocking stream capture is not invalidated.
- *
- * When a blocking stream is capturing, the legacy stream is in an
- * unusable state until the blocking stream capture is terminated. The legacy
- * stream is not supported for stream capture, but attempted use would have an
- * implicit dependency on the capturing stream(s).
- *
- * \param hStream - Stream to query
- * \param captureStatus - Returns the stream's capture status
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT
- * \notefnerr
- *
- * \sa
- * ::cuStreamCreate,
- * ::cuStreamBeginCapture,
- * ::cuStreamEndCapture
- */
-CUresult CUDAAPI cuStreamIsCapturing(CUstream hStream, CUstreamCaptureStatus *captureStatus);
-
-/**
- * \brief Query capture status of a stream
- *
 - * Query the capture status of a stream and get an id for
- * the capture sequence, which is unique over the lifetime of the process.
- *
- * If called on ::CU_STREAM_LEGACY (the "null stream") while a stream not created
- * with ::CU_STREAM_NON_BLOCKING is capturing, returns ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT.
- *
- * A valid id is returned only if both of the following are true:
- * - the call returns CUDA_SUCCESS
- * - captureStatus is set to ::CU_STREAM_CAPTURE_STATUS_ACTIVE
- *
 - * \param hStream - Stream to query
 - * \param captureStatus - Returns the stream's capture status
 - * \param id - Returns the unique id of the capture sequence
 - *
 - * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT
- * \notefnerr
- *
- * \sa
- * ::cuStreamBeginCapture,
- * ::cuStreamIsCapturing
- */
-CUresult CUDAAPI cuStreamGetCaptureInfo(CUstream hStream, CUstreamCaptureStatus *captureStatus, cuuint64_t *id);
-
-/**
- * \brief Attach memory to a stream asynchronously
- *
- * Enqueues an operation in \p hStream to specify stream association of
- * \p length bytes of memory starting from \p dptr. This function is a
- * stream-ordered operation, meaning that it is dependent on, and will
- * only take effect when, previous work in stream has completed. Any
- * previous association is automatically replaced.
- *
- * \p dptr must point to one of the following types of memories:
- * - managed memory declared using the __managed__ keyword or allocated with
- * ::cuMemAllocManaged.
- * - a valid host-accessible region of system-allocated pageable memory. This
- * type of memory may only be specified if the device associated with the
- * stream reports a non-zero value for the device attribute
- * ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS.
- *
- * For managed allocations, \p length must be either zero or the entire
- * allocation's size. Both indicate that the entire allocation's stream
- * association is being changed. Currently, it is not possible to change stream
- * association for a portion of a managed allocation.
- *
- * For pageable host allocations, \p length must be non-zero.
- *
- * The stream association is specified using \p flags which must be
- * one of ::CUmemAttach_flags.
- * If the ::CU_MEM_ATTACH_GLOBAL flag is specified, the memory can be accessed
- * by any stream on any device.
- * If the ::CU_MEM_ATTACH_HOST flag is specified, the program makes a guarantee
- * that it won't access the memory on the device from any stream on a device that
- * has a zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS.
- * If the ::CU_MEM_ATTACH_SINGLE flag is specified and \p hStream is associated with
- * a device that has a zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS,
- * the program makes a guarantee that it will only access the memory on the device
- * from \p hStream. It is illegal to attach singly to the NULL stream, because the
- * NULL stream is a virtual global stream and not a specific stream. An error will
- * be returned in this case.
- *
- * When memory is associated with a single stream, the Unified Memory system will
- * allow CPU access to this memory region so long as all operations in \p hStream
- * have completed, regardless of whether other streams are active. In effect,
- * this constrains exclusive ownership of the managed memory region by
- * an active GPU to per-stream activity instead of whole-GPU activity.
- *
- * Accessing memory on the device from streams that are not associated with
- * it will produce undefined results. No error checking is performed by the
- * Unified Memory system to ensure that kernels launched into other streams
- * do not access this region.
- *
- * It is a program's responsibility to order calls to ::cuStreamAttachMemAsync
- * via events, synchronization or other means to ensure legal access to memory
- * at all times. Data visibility and coherency will be changed appropriately
- * for all kernels which follow a stream-association change.
- *
- * If \p hStream is destroyed while data is associated with it, the association is
- * removed and the association reverts to the default visibility of the allocation
- * as specified at ::cuMemAllocManaged. For __managed__ variables, the default
- * association is always ::CU_MEM_ATTACH_GLOBAL. Note that destroying a stream is an
- * asynchronous operation, and as a result, the change to default association won't
- * happen until all work in the stream has completed.
- *
- * \param hStream - Stream in which to enqueue the attach operation
- * \param dptr - Pointer to memory (must be a pointer to managed memory or
- * to a valid host-accessible region of system-allocated
- * pageable memory)
- * \param length - Length of memory
- * \param flags - Must be one of ::CUmemAttach_flags
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_NOT_SUPPORTED
- * \note_null_stream
- * \notefnerr
- *
- * \sa ::cuStreamCreate,
- * ::cuStreamQuery,
- * ::cuStreamSynchronize,
- * ::cuStreamWaitEvent,
- * ::cuStreamDestroy,
- * ::cuMemAllocManaged,
- * ::cudaStreamAttachMemAsync
- */
-CUresult CUDAAPI cuStreamAttachMemAsync(CUstream hStream, CUdeviceptr dptr, size_t length, unsigned int flags);
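-
-/* [Editorial sketch -- not part of the upstream header] A minimal use of
- * ::cuStreamAttachMemAsync, assuming a valid context, a non-default CUstream
- * |stream|, and a byte size |size|; error codes are ignored for brevity.
- *
- *   CUdeviceptr managed;
- *   cuMemAllocManaged(&managed, size, CU_MEM_ATTACH_GLOBAL);
- *   // Restrict the allocation to |stream| so the CPU may touch it while
- *   // other streams keep the GPU busy.
- *   cuStreamAttachMemAsync(stream, managed, 0, CU_MEM_ATTACH_SINGLE);
- *   cuStreamSynchronize(stream);  // attach takes effect once prior work drains
- *   // ... CPU and |stream| may now use the memory ...
- *   cuMemFree(managed);
- */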
-
-/**
- * \brief Determine status of a compute stream
- *
- * Returns ::CUDA_SUCCESS if all operations in the stream specified by
- * \p hStream have completed, or ::CUDA_ERROR_NOT_READY if not.
- *
- * For the purposes of Unified Memory, a return value of ::CUDA_SUCCESS
- * is equivalent to having called ::cuStreamSynchronize().
- *
- * \param hStream - Stream to query status of
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_NOT_READY
- * \note_null_stream
- * \notefnerr
- *
- * \sa ::cuStreamCreate,
- * ::cuStreamWaitEvent,
- * ::cuStreamDestroy,
- * ::cuStreamSynchronize,
- * ::cuStreamAddCallback,
- * ::cudaStreamQuery
- */
-CUresult CUDAAPI cuStreamQuery(CUstream hStream);
-
-/**
- * \brief Wait until a stream's tasks are completed
- *
- * Waits until the device has completed all operations in the stream specified
- * by \p hStream. If the context was created with the
- * ::CU_CTX_SCHED_BLOCKING_SYNC flag, the CPU thread will block until the
- * stream is finished with all of its tasks.
- *
- * \param hStream - Stream to wait for
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE
- *
- * \note_null_stream
- * \notefnerr
- *
- * \sa ::cuStreamCreate,
- * ::cuStreamDestroy,
- * ::cuStreamWaitEvent,
- * ::cuStreamQuery,
- * ::cuStreamAddCallback,
- * ::cudaStreamSynchronize
- */
-CUresult CUDAAPI cuStreamSynchronize(CUstream hStream);
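-
-/* [Editorial sketch -- not part of the upstream header] Typical polling vs.
- * blocking use of the two queries above, assuming a valid CUstream |stream|.
- *
- *   if (cuStreamQuery(stream) == CUDA_ERROR_NOT_READY) {
- *     // ... overlap useful host-side work here ...
- *     cuStreamSynchronize(stream);  // then block until the stream drains
- *   }
- */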
-
-/**
- * \brief Destroys a stream
- *
- * Destroys the stream specified by \p hStream.
- *
- * In case the device is still doing work in the stream \p hStream
- * when ::cuStreamDestroy() is called, the function will return immediately
- * and the resources associated with \p hStream will be released automatically
- * once the device has completed all work in \p hStream.
- *
- * \param hStream - Stream to destroy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- *
- * \sa ::cuStreamCreate,
- * ::cuStreamWaitEvent,
- * ::cuStreamQuery,
- * ::cuStreamSynchronize,
- * ::cuStreamAddCallback,
- * ::cudaStreamDestroy
- */
-CUresult CUDAAPI cuStreamDestroy(CUstream hStream);
-
-/**
- * \brief Copies attributes from source stream to destination stream
- *
- * Copies attributes from source stream \p src to destination stream \p dst.
- * Both streams must have the same context.
- *
- * \param[out] dst Destination stream
- * \param[in] src Source stream
- * For list of attributes see ::CUstreamAttrID
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa
- * ::CUaccessPolicyWindow
- */
-CUresult CUDAAPI cuStreamCopyAttributes(CUstream dst, CUstream src);
-
-/**
- * \brief Queries stream attribute.
- *
- * Queries attribute \p attr from \p hStream and stores it in corresponding
- * member of \p value_out.
- *
- * \param[in] hStream
- * \param[in] attr
- * \param[out] value_out
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- *
- * \sa
- * ::CUaccessPolicyWindow
- */
-CUresult CUDAAPI cuStreamGetAttribute(CUstream hStream, CUstreamAttrID attr,
- CUstreamAttrValue *value_out);
-
-/**
- * \brief Sets stream attribute.
- *
- * Sets attribute \p attr on \p hStream from corresponding attribute of
- * \p value. The updated attribute will be applied to subsequent work
- * submitted to the stream. It will not affect previously submitted work.
- *
- * \param[out] hStream
- * \param[in] attr
- * \param[in] value
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- *
- * \sa
- * ::CUaccessPolicyWindow
- */
-CUresult CUDAAPI cuStreamSetAttribute(CUstream hStream, CUstreamAttrID attr,
- const CUstreamAttrValue *value);
-
-/** @} */ /* END CUDA_STREAM */
-
-
-/**
- * \defgroup CUDA_EVENT Event Management
- *
- * ___MANBRIEF___ event management functions of the low-level CUDA driver API
- * (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the event management functions of the low-level CUDA
- * driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Creates an event
- *
- * Creates an event *phEvent for the current context with the flags specified via
- * \p Flags. Valid flags include:
- * - ::CU_EVENT_DEFAULT: Default event creation flag.
- * - ::CU_EVENT_BLOCKING_SYNC: Specifies that the created event should use blocking
- * synchronization. A CPU thread that uses ::cuEventSynchronize() to wait on
- * an event created with this flag will block until the event has actually
- * been recorded.
- * - ::CU_EVENT_DISABLE_TIMING: Specifies that the created event does not need
- * to record timing data. Events created with this flag specified and
- * the ::CU_EVENT_BLOCKING_SYNC flag not specified will provide the best
- * performance when used with ::cuStreamWaitEvent() and ::cuEventQuery().
- * - ::CU_EVENT_INTERPROCESS: Specifies that the created event may be used as an
- * interprocess event by ::cuIpcGetEventHandle(). ::CU_EVENT_INTERPROCESS must
- * be specified along with ::CU_EVENT_DISABLE_TIMING.
- *
- * \param phEvent - Returns newly created event
- * \param Flags - Event creation flags
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY
- * \notefnerr
- *
- * \sa
- * ::cuEventRecord,
- * ::cuEventQuery,
- * ::cuEventSynchronize,
- * ::cuEventDestroy,
- * ::cuEventElapsedTime,
- * ::cudaEventCreate,
- * ::cudaEventCreateWithFlags
- */
-CUresult CUDAAPI cuEventCreate(CUevent *phEvent, unsigned int Flags);
-
-/**
- * \brief Records an event
- *
- * Captures in \p hEvent the contents of \p hStream at the time of this call.
- * \p hEvent and \p hStream must be from the same context.
- * Calls such as ::cuEventQuery() or ::cuStreamWaitEvent() will then
- * examine or wait for completion of the work that was captured. Uses of
- * \p hStream after this call do not modify \p hEvent. See note on default
- * stream behavior for what is captured in the default case.
- *
- * ::cuEventRecord() can be called multiple times on the same event and
- * will overwrite the previously captured state. Other APIs such as
- * ::cuStreamWaitEvent() use the most recently captured state at the time
- * of the API call, and are not affected by later calls to
- * ::cuEventRecord(). Before the first call to ::cuEventRecord(), an
- * event represents an empty set of work, so for example ::cuEventQuery()
- * would return ::CUDA_SUCCESS.
- *
- * \param hEvent - Event to record
- * \param hStream - Stream to record event for
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_null_stream
- * \notefnerr
- *
- * \sa ::cuEventCreate,
- * ::cuEventQuery,
- * ::cuEventSynchronize,
- * ::cuStreamWaitEvent,
- * ::cuEventDestroy,
- * ::cuEventElapsedTime,
- * ::cudaEventRecord
- */
-CUresult CUDAAPI cuEventRecord(CUevent hEvent, CUstream hStream);
-
-/**
- * \brief Queries an event's status
- *
- * Queries the status of all work currently captured by \p hEvent. See
- * ::cuEventRecord() for details on what is captured by an event.
- *
- * Returns ::CUDA_SUCCESS if all captured work has been completed, or
- * ::CUDA_ERROR_NOT_READY if any captured work is incomplete.
- *
- * For the purposes of Unified Memory, a return value of ::CUDA_SUCCESS
- * is equivalent to having called ::cuEventSynchronize().
- *
- * \param hEvent - Event to query
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_NOT_READY
- * \notefnerr
- *
- * \sa ::cuEventCreate,
- * ::cuEventRecord,
- * ::cuEventSynchronize,
- * ::cuEventDestroy,
- * ::cuEventElapsedTime,
- * ::cudaEventQuery
- */
-CUresult CUDAAPI cuEventQuery(CUevent hEvent);
-
-/**
- * \brief Waits for an event to complete
- *
- * Waits until the completion of all work currently captured in \p hEvent.
- * See ::cuEventRecord() for details on what is captured by an event.
- *
- * Waiting for an event that was created with the ::CU_EVENT_BLOCKING_SYNC
- * flag will cause the calling CPU thread to block until the event has
- * been completed by the device. If the ::CU_EVENT_BLOCKING_SYNC flag has
- * not been set, then the CPU thread will busy-wait until the event has
- * been completed by the device.
- *
- * \param hEvent - Event to wait for
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- *
- * \sa ::cuEventCreate,
- * ::cuEventRecord,
- * ::cuEventQuery,
- * ::cuEventDestroy,
- * ::cuEventElapsedTime,
- * ::cudaEventSynchronize
- */
-CUresult CUDAAPI cuEventSynchronize(CUevent hEvent);
-
-/**
- * \brief Destroys an event
- *
- * Destroys the event specified by \p hEvent.
- *
- * An event may be destroyed before it is complete (i.e., while
- * ::cuEventQuery() would return ::CUDA_ERROR_NOT_READY). In this case, the
- * call does not block on completion of the event, and any associated
- * resources will automatically be released asynchronously at completion.
- *
- * \param hEvent - Event to destroy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- *
- * \sa ::cuEventCreate,
- * ::cuEventRecord,
- * ::cuEventQuery,
- * ::cuEventSynchronize,
- * ::cuEventElapsedTime,
- * ::cudaEventDestroy
- */
-CUresult CUDAAPI cuEventDestroy(CUevent hEvent);
-
-/**
- * \brief Computes the elapsed time between two events
- *
- * Computes the elapsed time between two events (in milliseconds with a
- * resolution of around 0.5 microseconds).
- *
- * If either event was last recorded in a non-NULL stream, the resulting time
- * may be greater than expected (even if both used the same stream handle). This
- * happens because the ::cuEventRecord() operation takes place asynchronously
- * and there is no guarantee that the measured latency is actually just between
- * the two events. Any number of other different stream operations could execute
- * in between the two measured events, thus altering the timing in a significant
- * way.
- *
- * If ::cuEventRecord() has not been called on either event then
- * ::CUDA_ERROR_INVALID_HANDLE is returned. If ::cuEventRecord() has been called
- * on both events but one or both of them has not yet been completed (that is,
- * ::cuEventQuery() would return ::CUDA_ERROR_NOT_READY on at least one of the
- * events), ::CUDA_ERROR_NOT_READY is returned. If either event was created with
- * the ::CU_EVENT_DISABLE_TIMING flag, then this function will return
- * ::CUDA_ERROR_INVALID_HANDLE.
- *
- * \param pMilliseconds - Time between \p hStart and \p hEnd in ms
- * \param hStart - Starting event
- * \param hEnd - Ending event
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_NOT_READY
- * \notefnerr
- *
- * \sa ::cuEventCreate,
- * ::cuEventRecord,
- * ::cuEventQuery,
- * ::cuEventSynchronize,
- * ::cuEventDestroy,
- * ::cudaEventElapsedTime
- */
-CUresult CUDAAPI cuEventElapsedTime(float *pMilliseconds, CUevent hStart, CUevent hEnd);
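-
-/* [Editorial sketch -- not part of the upstream header] Event-based timing of
- * a span of work on an existing CUstream |stream|; error codes are ignored
- * for brevity and the enqueued work is assumed to exist elsewhere.
- *
- *   CUevent start, stop;
- *   float ms = 0.0f;
- *   cuEventCreate(&start, CU_EVENT_DEFAULT);
- *   cuEventCreate(&stop, CU_EVENT_DEFAULT);
- *   cuEventRecord(start, stream);
- *   // ... enqueue kernels or async copies on |stream| ...
- *   cuEventRecord(stop, stream);
- *   cuEventSynchronize(stop);              // wait for the captured work
- *   cuEventElapsedTime(&ms, start, stop);  // milliseconds between the events
- *   cuEventDestroy(start);
- *   cuEventDestroy(stop);
- */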
-
-/** @} */ /* END CUDA_EVENT */
-
-/**
- * \defgroup CUDA_EXTRES_INTEROP External Resource Interoperability
- *
- * ___MANBRIEF___ External resource interoperability functions of the low-level CUDA driver API
- * (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the external resource interoperability functions of the low-level CUDA
- * driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Imports an external memory object
- *
- * Imports an externally allocated memory object and returns
- * a handle to that in \p extMem_out.
- *
- * The properties of the handle being imported must be described in
- * \p memHandleDesc. The ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC structure
- * is defined as follows:
- *
- * \code
- typedef struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st {
- CUexternalMemoryHandleType type;
- union {
- int fd;
- struct {
- void *handle;
- const void *name;
- } win32;
- const void *nvSciBufObject;
- } handle;
- unsigned long long size;
- unsigned int flags;
- } CUDA_EXTERNAL_MEMORY_HANDLE_DESC;
- * \endcode
- *
- * where ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type specifies the type
- * of handle being imported. ::CUexternalMemoryHandleType is
- * defined as:
- *
- * \code
- typedef enum CUexternalMemoryHandleType_enum {
- CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD = 1,
- CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 = 2,
- CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3,
- CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP = 4,
- CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE = 5,
- CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE = 6,
- CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT = 7,
- CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF = 8
- } CUexternalMemoryHandleType;
- * \endcode
- *
- * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is
- * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD, then
- * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::fd must be a valid
- * file descriptor referencing a memory object. Ownership of
- * the file descriptor is transferred to the CUDA driver when the
- * handle is imported successfully. Performing any operations on the
- * file descriptor after it is imported results in undefined behavior.
- *
- * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is
- * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32, then exactly one
- * of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and
- * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be
- * NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle
- * is not NULL, then it must represent a valid shared NT handle that
- * references a memory object. Ownership of this handle is
- * not transferred to CUDA after the import operation, so the
- * application must release the handle using the appropriate system
- * call. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name
- * is not NULL, then it must point to a NULL-terminated array of
- * UTF-16 characters that refers to a memory object.
- *
- * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is
- * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT, then
- * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must
- * be non-NULL and
- * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name
- * must be NULL. The handle specified must be a globally shared KMT
- * handle. This handle does not hold a reference to the underlying
- * object, and thus will be invalid when all references to the
- * memory object are destroyed.
- *
- * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is
- * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP, then exactly one
- * of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and
- * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be
- * NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle
- * is not NULL, then it must represent a valid shared NT handle that
- * is returned by ID3D12Device::CreateSharedHandle when referring to a
- * ID3D12Heap object. This handle holds a reference to the underlying
- * object. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name
- * is not NULL, then it must point to a NULL-terminated array of
- * UTF-16 characters that refers to a ID3D12Heap object.
- *
- * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is
- * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE, then exactly one
- * of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and
- * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be
- * NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle
- * is not NULL, then it must represent a valid shared NT handle that
- * is returned by ID3D12Device::CreateSharedHandle when referring to a
- * ID3D12Resource object. This handle holds a reference to the
- * underlying object. If
- * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name
- * is not NULL, then it must point to a NULL-terminated array of
- * UTF-16 characters that refers to a ID3D12Resource object.
- *
- * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is
- * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE, then
- * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must
- * represent a valid shared NT handle that is returned by
- * IDXGIResource1::CreateSharedHandle when referring to a
- * ID3D11Resource object. If
- * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name
- * is not NULL, then it must point to a NULL-terminated array of
- * UTF-16 characters that refers to a ID3D11Resource object.
- *
- * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is
- * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT, then
- * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must
- * represent a valid shared KMT handle that is returned by
- * IDXGIResource::GetSharedHandle when referring to a
- * ID3D11Resource object and
- * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name
- * must be NULL.
- *
- * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is
- * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, then
- * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::nvSciBufObject must be non-NULL
- * and reference a valid NvSciBuf object.
- * If the NvSciBuf object imported into CUDA is also mapped by other drivers, then the
- * application must use ::cuWaitExternalSemaphoresAsync or ::cuSignalExternalSemaphoresAsync
- * as appropriate barriers to maintain coherence between CUDA and the other drivers.
- *
- * The size of the memory object must be specified in
- * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::size.
- *
- * Specifying the flag ::CUDA_EXTERNAL_MEMORY_DEDICATED in
- * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::flags indicates that the
- * resource is a dedicated resource. The definition of what constitutes a
- * dedicated resource is outside the scope of this extension.
- * This flag must be set if ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type
- * is one of the following:
- * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE
- * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE
- * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT
- *
- * \param extMem_out - Returned handle to an external memory object
- * \param memHandleDesc - Memory import handle descriptor
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- *
- * \note If the Vulkan memory imported into CUDA is mapped on the CPU then the
- * application must use vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges
- * as well as appropriate Vulkan pipeline barriers to maintain coherence between
- * CPU and GPU. For more information on these APIs, please refer to "Synchronization
- * and Cache Control" chapter from Vulkan specification.
- *
- * \sa ::cuDestroyExternalMemory,
- * ::cuExternalMemoryGetMappedBuffer,
- * ::cuExternalMemoryGetMappedMipmappedArray
- */
-CUresult CUDAAPI cuImportExternalMemory(CUexternalMemory *extMem_out, const CUDA_EXTERNAL_MEMORY_HANDLE_DESC *memHandleDesc);
-
-/**
- * \brief Maps a buffer onto an imported memory object
- *
- * Maps a buffer onto an imported memory object and returns a device
- * pointer in \p devPtr.
- *
- * The properties of the buffer being mapped must be described in
- * \p bufferDesc. The ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC structure is
- * defined as follows:
- *
- * \code
- typedef struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st {
- unsigned long long offset;
- unsigned long long size;
- unsigned int flags;
- } CUDA_EXTERNAL_MEMORY_BUFFER_DESC;
- * \endcode
- *
- * where ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::offset is the offset in
- * the memory object where the buffer's base address is.
- * ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::size is the size of the buffer.
- * ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::flags must be zero.
- *
- * The offset and size have to be suitably aligned to match the
- * requirements of the external API. Mapping two buffers whose ranges
- * overlap may or may not result in the same virtual address being
- * returned for the overlapped portion. In such cases, the application
- * must ensure that all accesses to that region from the GPU are
- * volatile. Otherwise writes made via one address are not guaranteed
- * to be visible via the other address, even if they're issued by the
- * same thread. It is recommended that applications map the combined
- * range instead of mapping separate buffers and then apply the
- * appropriate offsets to the returned pointer to derive the
- * individual buffers.
- *
- * The returned pointer \p devPtr must be freed using ::cuMemFree.
- *
- * \param devPtr - Returned device pointer to buffer
- * \param extMem - Handle to external memory object
- * \param bufferDesc - Buffer descriptor
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- *
- * \sa ::cuImportExternalMemory,
- * ::cuDestroyExternalMemory,
- * ::cuExternalMemoryGetMappedMipmappedArray
- */
-CUresult CUDAAPI cuExternalMemoryGetMappedBuffer(CUdeviceptr *devPtr, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_BUFFER_DESC *bufferDesc);
-
-/**
- * \brief Maps a CUDA mipmapped array onto an external memory object
- *
- * Maps a CUDA mipmapped array onto an external memory object and returns a
- * handle to it in \p mipmap.
- *
- * The properties of the CUDA mipmapped array being mapped must be
- * described in \p mipmapDesc. The structure
- * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC is defined as follows:
- *
- * \code
- typedef struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st {
- unsigned long long offset;
- CUDA_ARRAY3D_DESCRIPTOR arrayDesc;
- unsigned int numLevels;
- } CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC;
- * \endcode
- *
- * where ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::offset is the
- * offset in the memory object where the base level of the mipmap
- * chain is.
- * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::arrayDesc describes
- * the format, dimensions and type of the base level of the mipmap
- * chain. For further details on these parameters, please refer to the
- * documentation for ::cuMipmappedArrayCreate. Note that if the mipmapped
- * array is bound as a color target in the graphics API, then the flag
- * ::CUDA_ARRAY3D_COLOR_ATTACHMENT must be specified in
- * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::arrayDesc::Flags.
- * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::numLevels specifies
- * the total number of levels in the mipmap chain.
- *
- * If \p extMem was imported from a handle of type ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, then
- * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::numLevels must be equal to 1.
- *
- * The returned CUDA mipmapped array must be freed using ::cuMipmappedArrayDestroy.
- *
- * \param mipmap - Returned CUDA mipmapped array
- * \param extMem - Handle to external memory object
- * \param mipmapDesc - CUDA array descriptor
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- *
- * \sa ::cuImportExternalMemory,
- * ::cuDestroyExternalMemory,
- * ::cuExternalMemoryGetMappedBuffer
- */
-CUresult CUDAAPI cuExternalMemoryGetMappedMipmappedArray(CUmipmappedArray *mipmap, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC *mipmapDesc);
-
-/**
- * \brief Destroys an external memory object.
- *
- * Destroys the specified external memory object. Any existing buffers
- * and CUDA mipmapped arrays mapped onto this object must no longer be
- * used and must be explicitly freed using ::cuMemFree and
- * ::cuMipmappedArrayDestroy respectively.
- *
- * \param extMem - External memory object to be destroyed
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- *
- * \sa ::cuImportExternalMemory,
- * ::cuExternalMemoryGetMappedBuffer,
- * ::cuExternalMemoryGetMappedMipmappedArray
- */
-CUresult CUDAAPI cuDestroyExternalMemory(CUexternalMemory extMem);
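-
-/* [Editorial sketch -- not part of the upstream header] Importing an opaque
- * POSIX file descriptor |fd| of |size| bytes (e.g. exported by Vulkan) and
- * mapping it as a buffer, per the structures documented above; error codes
- * are ignored for brevity.
- *
- *   CUDA_EXTERNAL_MEMORY_HANDLE_DESC memDesc = {0};
- *   memDesc.type = CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD;
- *   memDesc.handle.fd = fd;      // ownership transfers to the CUDA driver
- *   memDesc.size = size;
- *   CUexternalMemory extMem;
- *   cuImportExternalMemory(&extMem, &memDesc);
- *
- *   CUDA_EXTERNAL_MEMORY_BUFFER_DESC bufDesc = {0};
- *   bufDesc.size = size;         // offset 0, flags must be zero
- *   CUdeviceptr dptr;
- *   cuExternalMemoryGetMappedBuffer(&dptr, extMem, &bufDesc);
- *   // ... use |dptr| in kernels or copies ...
- *   cuMemFree(dptr);             // unmap the buffer
- *   cuDestroyExternalMemory(extMem);
- */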
-
-/**
- * \brief Imports an external semaphore
- *
- * Imports an externally allocated synchronization object and returns
- * a handle to that in \p extSem_out.
- *
- * The properties of the handle being imported must be described in
- * \p semHandleDesc. The ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC is
- * defined as follows:
- *
- * \code
- typedef struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st {
- CUexternalSemaphoreHandleType type;
- union {
- int fd;
- struct {
- void *handle;
- const void *name;
- } win32;
- const void* NvSciSyncObj;
- } handle;
- unsigned int flags;
- } CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC;
- * \endcode
- *
- * where ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type specifies the type of
- * handle being imported. ::CUexternalSemaphoreHandleType is defined
- * as:
- *
- * \code
- typedef enum CUexternalSemaphoreHandleType_enum {
- CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD = 1,
- CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 = 2,
- CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3,
- CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE = 4,
- CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE = 5,
- CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC = 6,
- CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX = 7,
- CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT = 8
- } CUexternalSemaphoreHandleType;
- * \endcode
- *
- * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, then
- * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::fd must be a valid
- * file descriptor referencing a synchronization object. Ownership of
- * the file descriptor is transferred to the CUDA driver when the
- * handle is imported successfully. Performing any operations on the
- * file descriptor after it is imported results in undefined behavior.
- *
- * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, then exactly one
- * of ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and
- * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be
- * NULL. If
- * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle
- * is not NULL, then it must represent a valid shared NT handle that
- * references a synchronization object. Ownership of this handle is
- * not transferred to CUDA after the import operation, so the
- * application must release the handle using the appropriate system
- * call. If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name
- * is not NULL, then it must name a valid synchronization object.
- *
- * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT, then
- * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle must
- * be non-NULL and
- * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name
- * must be NULL. The handle specified must be a globally shared KMT
- * handle. This handle does not hold a reference to the underlying
- * object, and thus will be invalid when all references to the
- * synchronization object are destroyed.
- *
- * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, then exactly one
- * of ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and
- * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be
- * NULL. If
- * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle
- * is not NULL, then it must represent a valid shared NT handle that
- * is returned by ID3D12Device::CreateSharedHandle when referring to a
- * ID3D12Fence object. This handle holds a reference to the underlying
- * object. If
- * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name
- * is not NULL, then it must name a valid synchronization object that
- * refers to a valid ID3D12Fence object.
- *
- * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, then
- * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle
- * represents a valid shared NT handle that is returned by
- * ID3D11Fence::CreateSharedHandle. If
- * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name
- * is not NULL, then it must name a valid synchronization object that
- * refers to a valid ID3D11Fence object.
- *
- * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, then
- * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::nvSciSyncObj
- * represents a valid NvSciSyncObj.
- *
- * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, then
- * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle
- * represents a valid shared NT handle that
- * is returned by IDXGIResource1::CreateSharedHandle when referring to
- * a IDXGIKeyedMutex object. If
- * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name
- * is not NULL, then it must name a valid synchronization object that
- * refers to a valid IDXGIKeyedMutex object.
- *
- * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT, then
- * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle
- * represents a valid shared KMT handle that
- * is returned by IDXGIResource::GetSharedHandle when referring to
- * a IDXGIKeyedMutex object and
- * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must be NULL.
- *
- * \param extSem_out - Returned handle to an external semaphore
- * \param semHandleDesc - Semaphore import handle descriptor
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_NOT_SUPPORTED,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- *
- * \sa ::cuDestroyExternalSemaphore,
- * ::cuSignalExternalSemaphoresAsync,
- * ::cuWaitExternalSemaphoresAsync
- */
-CUresult CUDAAPI cuImportExternalSemaphore(CUexternalSemaphore *extSem_out, const CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC *semHandleDesc);
-
-/**
- * \brief Signals a set of external semaphore objects
- *
- * Enqueues a signal operation on a set of externally allocated
- * semaphore objects in the specified stream. The operations will be
- * executed when all prior operations in the stream complete.
- *
- * The exact semantics of signaling a semaphore depends on the type of
- * the object.
- *
- * If the semaphore object is any one of the following types:
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD,
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32,
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT
- * then signaling the semaphore will set it to the signaled state.
- *
- * If the semaphore object is any one of the following types:
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE,
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE
- * then the semaphore will be set to the value specified in
- * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::fence::value.
- *
- * If the semaphore object is of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC
- * this API sets ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence
- * to a value that can be used by subsequent waiters of the same NvSciSync object
- * to order operations with those currently submitted in \p stream. Such an update
- * will overwrite previous contents of
- * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence. By default,
- * signaling such an external semaphore object causes appropriate memory synchronization
- * operations to be performed over all external memory objects that are imported as
- * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. This ensures that any subsequent accesses
- * made by other importers of the same set of NvSciBuf memory object(s) are coherent.
- * These operations can be skipped by specifying the flag
- * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC, which can be used as a
- * performance optimization when data coherency is not required. But specifying this
- * flag in scenarios where data coherency is required results in undefined behavior.
- * Also, for semaphore object of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC,
- * if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in
- * ::cuDeviceGetNvSciSyncAttributes to CUDA_NVSCISYNC_ATTR_SIGNAL, this API will return
- * CUDA_ERROR_NOT_SUPPORTED.
- *
- * If the semaphore object is any one of the following types:
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX,
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT
- * then the keyed mutex will be released with the key specified in
- * ::CUDA_EXTERNAL_SEMAPHORE_PARAMS::params::keyedmutex::key.
- *
- * \param extSemArray - Set of external semaphores to be signaled
- * \param paramsArray - Array of semaphore parameters
- * \param numExtSems - Number of semaphores to signal
- * \param stream - Stream to enqueue the signal operations in
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_NOT_SUPPORTED
- * \notefnerr
- *
- * \sa ::cuImportExternalSemaphore,
- * ::cuDestroyExternalSemaphore,
- * ::cuWaitExternalSemaphoresAsync
- */
-CUresult CUDAAPI cuSignalExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream);
-
-/**
- * \brief Waits on a set of external semaphore objects
- *
- * Enqueues a wait operation on a set of externally allocated
- * semaphore objects in the specified stream. The operations will be
- * executed when all prior operations in the stream complete.
- *
- * The exact semantics of waiting on a semaphore depends on the type
- * of the object.
- *
- * If the semaphore object is any one of the following types:
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD,
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32,
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT
- * then waiting on the semaphore will wait until the semaphore reaches
- * the signaled state. The semaphore will then be reset to the
- * unsignaled state. Therefore, for every signal operation, there can
- * only be one wait operation.
- *
- * If the semaphore object is any one of the following types:
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE,
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE
- * then waiting on the semaphore will wait until the value of the
- * semaphore is greater than or equal to
- * ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::fence::value.
- *
- * If the semaphore object is of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC
- * then waiting on the semaphore will wait until the
- * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence is signaled by the
- * signaler of the NvSciSyncObj that was associated with this semaphore object.
- * By default, waiting on such an external semaphore object causes appropriate
- * memory synchronization operations to be performed over all external memory objects
- * that are imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. This ensures that
- * any subsequent accesses made by other importers of the same set of NvSciBuf memory
- * object(s) are coherent. These operations can be skipped by specifying the flag
- * ::CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC, which can be used as a
- * performance optimization when data coherency is not required. But specifying this
- * flag in scenarios where data coherency is required results in undefined behavior.
- * Also, for semaphore object of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC,
- * if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in
- * ::cuDeviceGetNvSciSyncAttributes to CUDA_NVSCISYNC_ATTR_WAIT, this API will return
- * CUDA_ERROR_NOT_SUPPORTED.
- *
- * If the semaphore object is any one of the following types:
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX,
- * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT
- * then the keyed mutex will be acquired when it is released with the key
- * specified in ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::keyedmutex::key
- * or until the timeout specified by
- * ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::keyedmutex::timeoutMs
- * has lapsed. The timeout interval can either be a finite value
- * specified in milliseconds or an infinite value. In case an infinite
- * value is specified, the timeout never elapses. The Windows INFINITE
- * macro must be used to specify an infinite timeout.
- *
- * \param extSemArray - External semaphores to be waited on
- * \param paramsArray - Array of semaphore parameters
- * \param numExtSems - Number of semaphores to wait on
- * \param stream - Stream to enqueue the wait operations in
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_NOT_SUPPORTED,
- * ::CUDA_ERROR_TIMEOUT
- * \notefnerr
- *
- * \sa ::cuImportExternalSemaphore,
- * ::cuDestroyExternalSemaphore,
- * ::cuSignalExternalSemaphoresAsync
- */
-CUresult CUDAAPI cuWaitExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream);
-
-/**
- * \brief Destroys an external semaphore
- *
- * Destroys an external semaphore object and releases any references
- * to the underlying resource. Any outstanding signals or waits must
- * have completed before the semaphore is destroyed.
- *
- * \param extSem - External semaphore to be destroyed
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- *
- * \sa ::cuImportExternalSemaphore,
- * ::cuSignalExternalSemaphoresAsync,
- * ::cuWaitExternalSemaphoresAsync
- */
-CUresult CUDAAPI cuDestroyExternalSemaphore(CUexternalSemaphore extSem);
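-
-/* [Editorial sketch -- not part of the upstream header] Importing an opaque
- * binary semaphore from a POSIX file descriptor |fd| and enqueuing a signal
- * on stream |producer| followed by a wait on stream |consumer|; error codes
- * are ignored for brevity.
- *
- *   CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC semDesc = {0};
- *   semDesc.type = CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD;
- *   semDesc.handle.fd = fd;
- *   CUexternalSemaphore extSem;
- *   cuImportExternalSemaphore(&extSem, &semDesc);
- *
- *   CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS signalParams = {0};
- *   cuSignalExternalSemaphoresAsync(&extSem, &signalParams, 1, producer);
- *   CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS waitParams = {0};
- *   cuWaitExternalSemaphoresAsync(&extSem, &waitParams, 1, consumer);
- *
- *   cuDestroyExternalSemaphore(extSem);  // after all signals/waits complete
- */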
-
-/** @} */ /* END CUDA_EXTRES_INTEROP */
-
-/**
- * \defgroup CUDA_MEMOP Stream memory operations
- *
- * ___MANBRIEF___ Stream memory operations of the low-level CUDA driver API
- * (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the stream memory operations of the low-level CUDA
- * driver application programming interface.
- *
- * The whole set of operations is disabled by default. Users are required
- * to explicitly enable them, e.g. on Linux by passing the kernel module
- * parameter shown below:
- * modprobe nvidia NVreg_EnableStreamMemOPs=1
- * There is currently no way to enable these operations on other operating
- * systems.
- *
- * Users can programmatically query whether the device supports these
- * operations with ::cuDeviceGetAttribute() and
- * ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS.
- *
- * Support for the ::CU_STREAM_WAIT_VALUE_NOR flag can be queried with
- * ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR.
- *
- * Support for the ::cuStreamWriteValue64() and ::cuStreamWaitValue64()
- * functions, as well as for the ::CU_STREAM_MEM_OP_WAIT_VALUE_64 and
- * ::CU_STREAM_MEM_OP_WRITE_VALUE_64 flags, can be queried with
- * ::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS.
- *
- * Support for both ::CU_STREAM_WAIT_VALUE_FLUSH and
- * ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES requires dedicated platform
- * hardware features and can be queried with ::cuDeviceGetAttribute() and
- * ::CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES.
- *
- * Note that all memory pointers passed as parameters to these operations
- * are device pointers. Where necessary a device pointer should be
- * obtained, for example with ::cuMemHostGetDevicePointer().
- *
- * None of the operations accepts pointers to managed memory buffers
- * (::cuMemAllocManaged).
- *
- * @{
- */
-
-/**
- * \brief Wait on a memory location
- *
- * Enqueues a synchronization of the stream on the given memory location. Work
- * ordered after the operation will block until the given condition on the
- * memory is satisfied. By default, the condition is to wait for
- * (int32_t)(*addr - value) >= 0, a cyclic greater-or-equal.
- * Other condition types can be specified via \p flags.
- *
- * If the memory was registered via ::cuMemHostRegister(), the device pointer
- * should be obtained with ::cuMemHostGetDevicePointer(). This function cannot
- * be used with managed memory (::cuMemAllocManaged).
- *
- * Support for this can be queried with ::cuDeviceGetAttribute() and
- * ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS.
- *
- * Support for CU_STREAM_WAIT_VALUE_NOR can be queried with ::cuDeviceGetAttribute() and
- * ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR.
- *
- * \param stream The stream to synchronize on the memory location.
- * \param addr The memory location to wait on.
- * \param value The value to compare with the memory location.
- * \param flags See ::CUstreamWaitValue_flags.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_NOT_SUPPORTED
- * \notefnerr
- *
- * \sa ::cuStreamWaitValue64,
- * ::cuStreamWriteValue32,
- * ::cuStreamWriteValue64,
- * ::cuStreamBatchMemOp,
- * ::cuMemHostRegister,
- * ::cuStreamWaitEvent
- */
-CUresult CUDAAPI cuStreamWaitValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags);
-
-/**
- * \brief Wait on a memory location
- *
- * Enqueues a synchronization of the stream on the given memory location. Work
- * ordered after the operation will block until the given condition on the
- * memory is satisfied. By default, the condition is to wait for
- * (int64_t)(*addr - value) >= 0, a cyclic greater-or-equal.
- * Other condition types can be specified via \p flags.
- *
- * If the memory was registered via ::cuMemHostRegister(), the device pointer
- * should be obtained with ::cuMemHostGetDevicePointer().
- *
- * Support for this can be queried with ::cuDeviceGetAttribute() and
- * ::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS.
- *
- * \param stream The stream to synchronize on the memory location.
- * \param addr The memory location to wait on.
- * \param value The value to compare with the memory location.
- * \param flags See ::CUstreamWaitValue_flags.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_NOT_SUPPORTED
- * \notefnerr
- *
- * \sa ::cuStreamWaitValue32,
- * ::cuStreamWriteValue32,
- * ::cuStreamWriteValue64,
- * ::cuStreamBatchMemOp,
- * ::cuMemHostRegister,
- * ::cuStreamWaitEvent
- */
-CUresult CUDAAPI cuStreamWaitValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags);
-
-/**
- * \brief Write a value to memory
- *
- * Write a value to memory. Unless the ::CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER
- * flag is passed, the write is preceded by a system-wide memory fence,
- * equivalent to a __threadfence_system() but scoped to the stream
- * rather than a CUDA thread.
- *
- * If the memory was registered via ::cuMemHostRegister(), the device pointer
- * should be obtained with ::cuMemHostGetDevicePointer(). This function cannot
- * be used with managed memory (::cuMemAllocManaged).
- *
- * Support for this can be queried with ::cuDeviceGetAttribute() and
- * ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS.
- *
- * \param stream The stream to do the write in.
- * \param addr The device address to write to.
- * \param value The value to write.
- * \param flags See ::CUstreamWriteValue_flags.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_NOT_SUPPORTED
- * \notefnerr
- *
- * \sa ::cuStreamWriteValue64,
- * ::cuStreamWaitValue32,
- * ::cuStreamWaitValue64,
- * ::cuStreamBatchMemOp,
- * ::cuMemHostRegister,
- * ::cuEventRecord
- */
-CUresult CUDAAPI cuStreamWriteValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags);
-
-/**
- * \brief Write a value to memory
- *
- * Write a value to memory. Unless the ::CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER
- * flag is passed, the write is preceded by a system-wide memory fence,
- * equivalent to a __threadfence_system() but scoped to the stream
- * rather than a CUDA thread.
- *
- * If the memory was registered via ::cuMemHostRegister(), the device pointer
- * should be obtained with ::cuMemHostGetDevicePointer().
- *
- * Support for this can be queried with ::cuDeviceGetAttribute() and
- * ::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS.
- *
- * \param stream The stream to do the write in.
- * \param addr The device address to write to.
- * \param value The value to write.
- * \param flags See ::CUstreamWriteValue_flags.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_NOT_SUPPORTED
- * \notefnerr
- *
- * \sa ::cuStreamWriteValue32,
- * ::cuStreamWaitValue32,
- * ::cuStreamWaitValue64,
- * ::cuStreamBatchMemOp,
- * ::cuMemHostRegister,
- * ::cuEventRecord
- */
-CUresult CUDAAPI cuStreamWriteValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags);
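-
-/* [Editorial sketch -- not part of the upstream header] Cross-stream ordering
- * with the 32-bit write/wait operations above, assuming |hostFlag| was
- * registered with ::cuMemHostRegister(), streams |producer| and |consumer|
- * exist, and the device reports ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS.
- *
- *   CUdeviceptr flagDev;
- *   cuMemHostGetDevicePointer(&flagDev, hostFlag, 0);
- *   // |producer| publishes the flag once its preceding work has finished.
- *   cuStreamWriteValue32(producer, flagDev, 1, CU_STREAM_WRITE_VALUE_DEFAULT);
- *   // |consumer| blocks further work until *flag >= 1.
- *   cuStreamWaitValue32(consumer, flagDev, 1, CU_STREAM_WAIT_VALUE_GEQ);
- */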
-
-/**
- * \brief Batch operations to synchronize the stream via memory operations
- *
- * This is a batch version of ::cuStreamWaitValue32() and ::cuStreamWriteValue32().
- * Batching operations may avoid some performance overhead in both the API call
- * and the device execution versus adding them to the stream in separate API
- * calls. The operations are enqueued in the order they appear in the array.
- *
- * See ::CUstreamBatchMemOpType for the full set of supported operations, and
- * ::cuStreamWaitValue32(), ::cuStreamWaitValue64(), ::cuStreamWriteValue32(),
- * and ::cuStreamWriteValue64() for details of specific operations.
- *
- * Basic support for this can be queried with ::cuDeviceGetAttribute() and
- * ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS. See related APIs for details
- * on querying support for specific operations.
- *
- * \param stream The stream to enqueue the operations in.
- * \param count The number of operations in the array. Must be less than 256.
- * \param paramArray The types and parameters of the individual operations.
- * \param flags Reserved for future expansion; must be 0.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_NOT_SUPPORTED
- * \notefnerr
- *
- * \sa ::cuStreamWaitValue32,
- * ::cuStreamWaitValue64,
- * ::cuStreamWriteValue32,
- * ::cuStreamWriteValue64,
- * ::cuMemHostRegister
- */
-CUresult CUDAAPI cuStreamBatchMemOp(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags);
-
-/** @} */ /* END CUDA_MEMOP */
-
-/**
- * \defgroup CUDA_EXEC Execution Control
- *
- * ___MANBRIEF___ execution control functions of the low-level CUDA driver API
- * (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the execution control functions of the low-level CUDA
- * driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Returns information about a function
- *
- * Returns in \p *pi the integer value of the attribute \p attrib on the kernel
- * given by \p hfunc. The supported attributes are:
- * - ::CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: The maximum number of threads
- * per block, beyond which a launch of the function would fail. This number
- * depends on both the function and the device on which the function is
- * currently loaded.
- * - ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: The size in bytes of
- * statically-allocated shared memory per block required by this function.
- * This does not include dynamically-allocated shared memory requested by
- * the user at runtime.
- * - ::CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: The size in bytes of user-allocated
- * constant memory required by this function.
- * - ::CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: The size in bytes of local memory
- * used by each thread of this function.
- * - ::CU_FUNC_ATTRIBUTE_NUM_REGS: The number of registers used by each thread
- * of this function.
- * - ::CU_FUNC_ATTRIBUTE_PTX_VERSION: The PTX virtual architecture version for
- * which the function was compiled. This value is the major PTX version * 10
- * + the minor PTX version, so a PTX version 1.3 function would return the
- * value 13. Note that this may return the undefined value of 0 for cubins
- * compiled prior to CUDA 3.0.
- * - ::CU_FUNC_ATTRIBUTE_BINARY_VERSION: The binary architecture version for
- * which the function was compiled. This value is the major binary
- * version * 10 + the minor binary version, so a binary version 1.3 function
- * would return the value 13. Note that this will return a value of 10 for
- * legacy cubins that do not have a properly-encoded binary architecture
- * version.
- * - ::CU_FUNC_ATTRIBUTE_CACHE_MODE_CA: The attribute to indicate whether the function has
- * been compiled with the user-specified option "-Xptxas --dlcm=ca" set.
- * - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: The maximum size in bytes of
- * dynamically-allocated shared memory.
- * - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: Preferred shared memory-L1
- * cache split ratio in percent of total shared memory.
- *
- * \param pi - Returned attribute value
- * \param attrib - Attribute requested
- * \param hfunc - Function to query attribute of
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuCtxGetCacheConfig,
- * ::cuCtxSetCacheConfig,
- * ::cuFuncSetCacheConfig,
- * ::cuLaunchKernel,
- * ::cudaFuncGetAttributes,
- * ::cudaFuncSetAttribute
- */
-CUresult CUDAAPI cuFuncGetAttribute(int *pi, CUfunction_attribute attrib, CUfunction hfunc);
-
-/**
- * \brief Sets information about a function
- *
- * This call sets the value of a specified attribute \p attrib on the kernel given
- * by \p hfunc to an integer value specified by \p value.
- * This function returns CUDA_SUCCESS if the new value of the attribute could be
- * successfully set. If the set fails, this call will return an error.
- * Not all attributes can have values set. Attempting to set a value on a read-only
- * attribute will result in an error (CUDA_ERROR_INVALID_VALUE).
- *
- * Supported attributes for the cuFuncSetAttribute call are:
- * - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: The maximum size in bytes of
- * dynamically-allocated shared memory. The value should contain the requested
- * maximum size of dynamically-allocated shared memory. The sum of this value and
- * the function attribute ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES cannot exceed the
- * device attribute ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN.
- * The maximal size of requestable dynamic shared memory may differ by GPU
- * architecture.
- * - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: On devices where the L1
- * cache and shared memory use the same hardware resources, this sets the shared memory
- * carveout preference, in percent of the total shared memory.
- * See ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR.
- * This is only a hint, and the driver can choose a different ratio if required to execute the function.
- *
- * \param hfunc - Function to query attribute of
- * \param attrib - Attribute requested
- * \param value - The value to set
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuCtxGetCacheConfig,
- * ::cuCtxSetCacheConfig,
- * ::cuFuncSetCacheConfig,
- * ::cuLaunchKernel,
- * ::cudaFuncGetAttributes,
- * ::cudaFuncSetAttribute
- */
-CUresult CUDAAPI cuFuncSetAttribute(CUfunction hfunc, CUfunction_attribute attrib, int value);
-
-/**
- * \brief Sets the preferred cache configuration for a device function
- *
- * On devices where the L1 cache and shared memory use the same hardware
- * resources, this sets through \p config the preferred cache configuration for
- * the device function \p hfunc. This is only a preference. The driver will use
- * the requested configuration if possible, but it is free to choose a different
- * configuration if required to execute \p hfunc. Any context-wide preference
- * set via ::cuCtxSetCacheConfig() will be overridden by this per-function
- * setting unless the per-function setting is ::CU_FUNC_CACHE_PREFER_NONE. In
- * that case, the current context-wide setting will be used.
- *
- * This setting does nothing on devices where the size of the L1 cache and
- * shared memory are fixed.
- *
- * Launching a kernel with a different preference than the most recent
- * preference setting may insert a device-side synchronization point.
- *
- *
- * The supported cache configurations are:
- * - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default)
- * - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache
- * - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory
- * - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory
- *
- * \param hfunc - Kernel to configure cache for
- * \param config - Requested cache configuration
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT
- * \notefnerr
- *
- * \sa ::cuCtxGetCacheConfig,
- * ::cuCtxSetCacheConfig,
- * ::cuFuncGetAttribute,
- * ::cuLaunchKernel,
- * ::cudaFuncSetCacheConfig
- */
-CUresult CUDAAPI cuFuncSetCacheConfig(CUfunction hfunc, CUfunc_cache config);
-
-/**
- * \brief Sets the shared memory configuration for a device function.
- *
- * On devices with configurable shared memory banks, this function will
- * force all subsequent launches of the specified device function to have
- * the given shared memory bank size configuration. On any given launch of the
- * function, the shared memory configuration of the device will be temporarily
- * changed if needed to suit the function's preferred configuration. Changes in
- * shared memory configuration between subsequent launches of functions
- * may introduce a device-side synchronization point.
- *
- * Any per-function setting of shared memory bank size set via
- * ::cuFuncSetSharedMemConfig will override the context wide setting set with
- * ::cuCtxSetSharedMemConfig.
- *
- * Changing the shared memory bank size will not increase shared memory usage
- * or affect occupancy of kernels, but may have major effects on performance.
- * Larger bank sizes will allow for greater potential bandwidth to shared memory,
- * but will change what kinds of accesses to shared memory will result in bank
- * conflicts.
- *
- * This function will do nothing on devices with fixed shared memory bank size.
- *
- * The supported bank configurations are:
- * - ::CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: use the context's shared memory
- * configuration when launching this function.
- * - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: set shared memory bank width to
- * be natively four bytes when launching this function.
- * - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: set shared memory bank width to
- * be natively eight bytes when launching this function.
- *
- * \param hfunc - kernel to be given a shared memory config
- * \param config - requested shared memory configuration
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT
- * \notefnerr
- *
- * \sa ::cuCtxGetCacheConfig,
- * ::cuCtxSetCacheConfig,
- * ::cuCtxGetSharedMemConfig,
- * ::cuCtxSetSharedMemConfig,
- * ::cuFuncGetAttribute,
- * ::cuLaunchKernel,
- * ::cudaFuncSetSharedMemConfig
- */
-CUresult CUDAAPI cuFuncSetSharedMemConfig(CUfunction hfunc, CUsharedconfig config);
-
-/**
- * \brief Launches a CUDA function
- *
- * Invokes the kernel \p f on a \p gridDimX x \p gridDimY x \p gridDimZ
- * grid of blocks. Each block contains \p blockDimX x \p blockDimY x
- * \p blockDimZ threads.
- *
- * \p sharedMemBytes sets the amount of dynamic shared memory that will be
- * available to each thread block.
- *
- * Kernel parameters to \p f can be specified in one of two ways:
- *
- * 1) Kernel parameters can be specified via \p kernelParams. If \p f
- * has N parameters, then \p kernelParams needs to be an array of N
- * pointers. Each of \p kernelParams[0] through \p kernelParams[N-1]
- * must point to a region of memory from which the actual kernel
- * parameter will be copied. The number of kernel parameters and their
- * offsets and sizes do not need to be specified as that information is
- * retrieved directly from the kernel's image.
- *
- * 2) Kernel parameters can also be packaged by the application into
- * a single buffer that is passed in via the \p extra parameter.
- * This places the burden on the application of knowing each kernel
- * parameter's size and alignment/padding within the buffer. Here is
- * an example of using the \p extra parameter in this manner:
- * \code
- size_t argBufferSize;
- char argBuffer[256];
-
- // populate argBuffer and argBufferSize
-
- void *config[] = {
- CU_LAUNCH_PARAM_BUFFER_POINTER, argBuffer,
- CU_LAUNCH_PARAM_BUFFER_SIZE, &argBufferSize,
- CU_LAUNCH_PARAM_END
- };
- status = cuLaunchKernel(f, gx, gy, gz, bx, by, bz, sh, s, NULL, config);
- * \endcode
- *
- * The \p extra parameter exists to allow ::cuLaunchKernel to take
- * additional less commonly used arguments. \p extra specifies a list of
- * names of extra settings and their corresponding values. Each extra
- * setting name is immediately followed by the corresponding value. The
- * list must be terminated with either NULL or ::CU_LAUNCH_PARAM_END.
- *
- * - ::CU_LAUNCH_PARAM_END, which indicates the end of the \p extra
- * array;
- * - ::CU_LAUNCH_PARAM_BUFFER_POINTER, which specifies that the next
- * value in \p extra will be a pointer to a buffer containing all
- * the kernel parameters for launching kernel \p f;
- * - ::CU_LAUNCH_PARAM_BUFFER_SIZE, which specifies that the next
- * value in \p extra will be a pointer to a size_t containing the
- * size of the buffer specified with ::CU_LAUNCH_PARAM_BUFFER_POINTER;
- *
- * The error ::CUDA_ERROR_INVALID_VALUE will be returned if kernel
- * parameters are specified with both \p kernelParams and \p extra
- * (i.e. both \p kernelParams and \p extra are non-NULL).
- *
- * Calling ::cuLaunchKernel() invalidates the persistent function state
- * set through the following deprecated APIs:
- * ::cuFuncSetBlockShape(),
- * ::cuFuncSetSharedSize(),
- * ::cuParamSetSize(),
- * ::cuParamSeti(),
- * ::cuParamSetf(),
- * ::cuParamSetv().
- *
- * Note that to use ::cuLaunchKernel(), the kernel \p f must either have
- * been compiled with toolchain version 3.2 or later so that it will
- * contain kernel parameter information, or have no kernel parameters.
- * If either of these conditions is not met, then ::cuLaunchKernel() will
- * return ::CUDA_ERROR_INVALID_IMAGE.
- *
- * \param f - Kernel to launch
- * \param gridDimX - Width of grid in blocks
- * \param gridDimY - Height of grid in blocks
- * \param gridDimZ - Depth of grid in blocks
- * \param blockDimX - X dimension of each thread block
- * \param blockDimY - Y dimension of each thread block
- * \param blockDimZ - Z dimension of each thread block
- * \param sharedMemBytes - Dynamic shared-memory size per thread block in bytes
- * \param hStream - Stream identifier
- * \param kernelParams - Array of pointers to kernel parameters
- * \param extra - Extra options
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_INVALID_IMAGE,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_LAUNCH_FAILED,
- * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES,
- * ::CUDA_ERROR_LAUNCH_TIMEOUT,
- * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING,
- * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED
- * \note_null_stream
- * \notefnerr
- *
- * \sa ::cuCtxGetCacheConfig,
- * ::cuCtxSetCacheConfig,
- * ::cuFuncSetCacheConfig,
- * ::cuFuncGetAttribute,
- * ::cudaLaunchKernel
- */
-CUresult CUDAAPI cuLaunchKernel(CUfunction f,
- unsigned int gridDimX,
- unsigned int gridDimY,
- unsigned int gridDimZ,
- unsigned int blockDimX,
- unsigned int blockDimY,
- unsigned int blockDimZ,
- unsigned int sharedMemBytes,
- CUstream hStream,
- void **kernelParams,
- void **extra);
-
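To complement the extra-buffer example above, a sketch of the more common kernelParams form, assuming a kernel f whose two parameters are a device pointer and an int, with f, stream, and a device allocation d_buf (CUdeviceptr) set up elsewhere; illustrative only, not part of the removed header:

    int n = 1024;
    void *params[] = { &d_buf, &n };  /* one pointer per kernel parameter */
    CUresult err = cuLaunchKernel(f,
                                  (n + 255) / 256, 1, 1,  /* grid dims    */
                                  256, 1, 1,              /* block dims   */
                                  0,                      /* dynamic smem */
                                  stream, params,
                                  NULL);                  /* no extra     */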
-/**
- * \brief Launches a CUDA function where thread blocks can cooperate and synchronize as they execute
- *
- * Invokes the kernel \p f on a \p gridDimX x \p gridDimY x \p gridDimZ
- * grid of blocks. Each block contains \p blockDimX x \p blockDimY x
- * \p blockDimZ threads.
- *
- * \p sharedMemBytes sets the amount of dynamic shared memory that will be
- * available to each thread block.
- *
- * The device on which this kernel is invoked must have a non-zero value for
- * the device attribute ::CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH.
- *
- * The total number of blocks launched cannot exceed the maximum number of blocks per
- * multiprocessor as returned by ::cuOccupancyMaxActiveBlocksPerMultiprocessor (or
- * ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors
- * as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT.
- *
- * The kernel cannot make use of CUDA dynamic parallelism.
- *
- * Kernel parameters must be specified via \p kernelParams. If \p f
- * has N parameters, then \p kernelParams needs to be an array of N
- * pointers. Each of \p kernelParams[0] through \p kernelParams[N-1]
- * must point to a region of memory from which the actual kernel
- * parameter will be copied. The number of kernel parameters and their
- * offsets and sizes do not need to be specified as that information is
- * retrieved directly from the kernel's image.
- *
- * Calling ::cuLaunchCooperativeKernel() sets persistent function state that is
- * the same as function state set through the ::cuLaunchKernel API.
- *
- * When the kernel \p f is launched via ::cuLaunchCooperativeKernel(), the previous
- * block shape, shared size and parameter info associated with \p f
- * is overwritten.
- *
- * Note that to use ::cuLaunchCooperativeKernel(), the kernel \p f must either have
- * been compiled with toolchain version 3.2 or later so that it will
- * contain kernel parameter information, or have no kernel parameters.
- * If either of these conditions is not met, then ::cuLaunchCooperativeKernel() will
- * return ::CUDA_ERROR_INVALID_IMAGE.
- *
- * \param f - Kernel to launch
- * \param gridDimX - Width of grid in blocks
- * \param gridDimY - Height of grid in blocks
- * \param gridDimZ - Depth of grid in blocks
- * \param blockDimX - X dimension of each thread block
- * \param blockDimY - Y dimension of each thread block
- * \param blockDimZ - Z dimension of each thread block
- * \param sharedMemBytes - Dynamic shared-memory size per thread block in bytes
- * \param hStream - Stream identifier
- * \param kernelParams - Array of pointers to kernel parameters
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_INVALID_IMAGE,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_LAUNCH_FAILED,
- * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES,
- * ::CUDA_ERROR_LAUNCH_TIMEOUT,
- * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING,
- * ::CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE,
- * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED
- * \note_null_stream
- * \notefnerr
- *
- * \sa ::cuCtxGetCacheConfig,
- * ::cuCtxSetCacheConfig,
- * ::cuFuncSetCacheConfig,
- * ::cuFuncGetAttribute,
- * ::cuLaunchCooperativeKernelMultiDevice,
- * ::cudaLaunchCooperativeKernel
- */
-CUresult CUDAAPI cuLaunchCooperativeKernel(CUfunction f,
- unsigned int gridDimX,
- unsigned int gridDimY,
- unsigned int gridDimZ,
- unsigned int blockDimX,
- unsigned int blockDimY,
- unsigned int blockDimZ,
- unsigned int sharedMemBytes,
- CUstream hStream,
- void **kernelParams);
-
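A sketch of sizing a cooperative launch within the occupancy limit described above, assuming f, dev, stream, and a params array of pointers to the kernel arguments already exist; illustrative only:

    int sm_count = 0, max_blocks_per_sm = 0;
    cuDeviceGetAttribute(&sm_count, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, dev);
    cuOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks_per_sm, f,
                                                /*blockSize=*/256,
                                                /*dynamicSMemSize=*/0);
    /* Total blocks must not exceed max_blocks_per_sm * sm_count. */
    unsigned int grid = (unsigned int)(max_blocks_per_sm * sm_count);
    CUresult err = cuLaunchCooperativeKernel(f, grid, 1, 1, 256, 1, 1,
                                             /*sharedMemBytes=*/0, stream, params);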
-/**
- * \brief Launches CUDA functions on multiple devices where thread blocks can cooperate and synchronize as they execute
- *
- * Invokes kernels as specified in the \p launchParamsList array where each element
- * of the array specifies all the parameters required to perform a single kernel launch.
- * These kernels can cooperate and synchronize as they execute. The size of the array is
- * specified by \p numDevices.
- *
- * No two kernels can be launched on the same device. All the devices targeted by this
- * multi-device launch must be identical. All devices must have a non-zero value for the
- * device attribute ::CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH.
- *
- * All kernels launched must be identical with respect to the compiled code. Note that
- * any __device__, __constant__ or __managed__ variables present in the module that owns
- * the kernel launched on each device are independently instantiated on every device.
- * It is the application's responsibility to ensure these variables are initialized and
- * used appropriately.
- *
- * The size of the grids as specified in blocks, the size of the blocks themselves
- * and the amount of shared memory used by each thread block must also match across
- * all launched kernels.
- *
- * The streams used to launch these kernels must have been created via either ::cuStreamCreate
- * or ::cuStreamCreateWithPriority. The NULL stream or ::CU_STREAM_LEGACY or ::CU_STREAM_PER_THREAD
- * cannot be used.
- *
- * The total number of blocks launched per kernel cannot exceed the maximum number of blocks
- * per multiprocessor as returned by ::cuOccupancyMaxActiveBlocksPerMultiprocessor (or
- * ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors
- * as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. Since the
- * total number of blocks launched per device has to match across all devices, the maximum
- * number of blocks that can be launched per device will be limited by the device with the
- * least number of multiprocessors.
- *
- * The kernels cannot make use of CUDA dynamic parallelism.
- *
- * The ::CUDA_LAUNCH_PARAMS structure is defined as:
- * \code
- typedef struct CUDA_LAUNCH_PARAMS_st
- {
- CUfunction function;
- unsigned int gridDimX;
- unsigned int gridDimY;
- unsigned int gridDimZ;
- unsigned int blockDimX;
- unsigned int blockDimY;
- unsigned int blockDimZ;
- unsigned int sharedMemBytes;
- CUstream hStream;
- void **kernelParams;
- } CUDA_LAUNCH_PARAMS;
- * \endcode
- * where:
- * - ::CUDA_LAUNCH_PARAMS::function specifies the kernel to be launched. All functions must
- * be identical with respect to the compiled code.
- * - ::CUDA_LAUNCH_PARAMS::gridDimX is the width of the grid in blocks. This must match across
- * all kernels launched.
- * - ::CUDA_LAUNCH_PARAMS::gridDimY is the height of the grid in blocks. This must match across
- * all kernels launched.
- * - ::CUDA_LAUNCH_PARAMS::gridDimZ is the depth of the grid in blocks. This must match across
- * all kernels launched.
- * - ::CUDA_LAUNCH_PARAMS::blockDimX is the X dimension of each thread block. This must match across
- * all kernels launched.
- * - ::CUDA_LAUNCH_PARAMS::blockDimY is the Y dimension of each thread block. This must match across
- * all kernels launched.
- * - ::CUDA_LAUNCH_PARAMS::blockDimZ is the Z dimension of each thread block. This must match across
- * all kernels launched.
- * - ::CUDA_LAUNCH_PARAMS::sharedMemBytes is the dynamic shared-memory size per thread block in bytes.
- * This must match across all kernels launched.
- * - ::CUDA_LAUNCH_PARAMS::hStream is the handle to the stream to perform the launch in. This cannot
- * be the NULL stream or ::CU_STREAM_LEGACY or ::CU_STREAM_PER_THREAD. The CUDA context associated
- * with this stream must match that associated with ::CUDA_LAUNCH_PARAMS::function.
- * - ::CUDA_LAUNCH_PARAMS::kernelParams is an array of pointers to kernel parameters. If
- * ::CUDA_LAUNCH_PARAMS::function has N parameters, then ::CUDA_LAUNCH_PARAMS::kernelParams
- * needs to be an array of N pointers. Each of ::CUDA_LAUNCH_PARAMS::kernelParams[0] through
- * ::CUDA_LAUNCH_PARAMS::kernelParams[N-1] must point to a region of memory from which the actual
- * kernel parameter will be copied. The number of kernel parameters and their offsets and sizes
- * do not need to be specified as that information is retrieved directly from the kernel's image.
- *
- * By default, the kernel won't begin execution on any GPU until all prior work in all the specified
- * streams has completed. This behavior can be overridden by specifying the flag
- * ::CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC. When this flag is specified, each kernel
- * will only wait for prior work in the stream corresponding to that GPU to complete before it begins
- * execution.
- *
- * Similarly, by default, any subsequent work pushed in any of the specified streams will not begin
- * execution until the kernels on all GPUs have completed. This behavior can be overridden by specifying
- * the flag ::CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC. When this flag is specified,
- * any subsequent work pushed in any of the specified streams will only wait for the kernel launched
- * on the GPU corresponding to that stream to complete before it begins execution.
- *
- * Calling ::cuLaunchCooperativeKernelMultiDevice() sets persistent function state that is
- * the same as function state set through the ::cuLaunchKernel API when called individually for each
- * element in \p launchParamsList.
- *
- * When kernels are launched via ::cuLaunchCooperativeKernelMultiDevice(), the previous
- * block shape, shared size and parameter info associated with each ::CUDA_LAUNCH_PARAMS::function
- * in \p launchParamsList is overwritten.
- *
- * Note that to use ::cuLaunchCooperativeKernelMultiDevice(), the kernels must either have
- * been compiled with toolchain version 3.2 or later so that they will
- * contain kernel parameter information, or have no kernel parameters.
- * If either of these conditions is not met, then ::cuLaunchCooperativeKernelMultiDevice() will
- * return ::CUDA_ERROR_INVALID_IMAGE.
- *
- * \param launchParamsList - List of launch parameters, one per device
- * \param numDevices - Size of the \p launchParamsList array
- * \param flags - Flags to control launch behavior
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_INVALID_IMAGE,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_LAUNCH_FAILED,
- * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES,
- * ::CUDA_ERROR_LAUNCH_TIMEOUT,
- * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING,
- * ::CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE,
- * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED
- * \note_null_stream
- * \notefnerr
- *
- * \sa ::cuCtxGetCacheConfig,
- * ::cuCtxSetCacheConfig,
- * ::cuFuncSetCacheConfig,
- * ::cuFuncGetAttribute,
- * ::cuLaunchCooperativeKernel,
- * ::cudaLaunchCooperativeKernelMultiDevice
- */
-CUresult CUDAAPI cuLaunchCooperativeKernelMultiDevice(CUDA_LAUNCH_PARAMS *launchParamsList, unsigned int numDevices, unsigned int flags);
-
-/**
- * \brief Enqueues a host function call in a stream
- *
- * Enqueues a host function to run in a stream. The function will be called
- * after currently enqueued work and will block work added after it.
- *
- * The host function must not make any CUDA API calls. Attempting to use a
- * CUDA API may result in ::CUDA_ERROR_NOT_PERMITTED, but this is not required.
- * The host function must not perform any synchronization that may depend on
- * outstanding CUDA work not mandated to run earlier. Host functions without a
- * mandated order (such as in independent streams) execute in undefined order
- * and may be serialized.
- *
- * For the purposes of Unified Memory, execution makes a number of guarantees:
- * <ul>
- * <li>The stream is considered idle for the duration of the function's
- * execution. Thus, for example, the function may always use memory attached
- * to the stream it was enqueued in.</li>
- * <li>The start of execution of the function has the same effect as
- * synchronizing an event recorded in the same stream immediately prior to
- * the function. It thus synchronizes streams which have been "joined"
- * prior to the function.</li>
- * <li>Adding device work to any stream does not have the effect of making
- * the stream active until all preceding host functions and stream callbacks
- * have executed. Thus, for
- * example, a function might use global attached memory even if work has
- * been added to another stream, if the work has been ordered behind the
- * function call with an event.</li>
- * <li>Completion of the function does not cause a stream to become
- * active except as described above. The stream will remain idle
- * if no device work follows the function, and will remain idle across
- * consecutive host functions or stream callbacks without device work in
- * between. Thus, for example,
- * stream synchronization can be done by signaling from a host function at the
- * end of the stream.</li>
- * </ul>
- *
- * Note that, in contrast to ::cuStreamAddCallback, the function will not be
- * called in the event of an error in the CUDA context.
- *
- * \param hStream - Stream to enqueue function call in
- * \param fn - The function to call once preceding stream operations are complete
- * \param userData - User-specified data to be passed to the function
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_NOT_SUPPORTED
- * \note_null_stream
- * \notefnerr
- *
- * \sa ::cuStreamCreate,
- * ::cuStreamQuery,
- * ::cuStreamSynchronize,
- * ::cuStreamWaitEvent,
- * ::cuStreamDestroy,
- * ::cuMemAllocManaged,
- * ::cuStreamAttachMemAsync,
- * ::cuStreamAddCallback
- */
-CUresult CUDAAPI cuLaunchHostFunc(CUstream hStream, CUhostFn fn, void *userData);
-
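A sketch of the host-function mechanism above, assuming an existing stream; per the restrictions listed, the callback makes no CUDA API calls and only sets a flag; illustrative only, not part of the removed header:

    static void on_stream_idle(void *user_data) {
      *(volatile int *)user_data = 1;   /* no CUDA calls allowed in here */
    }

    /* on the enqueueing thread */
    static int reached = 0;
    CUresult err = cuLaunchHostFunc(stream, on_stream_idle, &reached);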
-/** @} */ /* END CUDA_EXEC */
-
-/**
- * \defgroup CUDA_EXEC_DEPRECATED Execution Control [DEPRECATED]
- *
- * ___MANBRIEF___ deprecated execution control functions of the low-level CUDA
- * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the deprecated execution control functions of the
- * low-level CUDA driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Sets the block-dimensions for the function
- *
- * \deprecated
- *
- * Specifies the \p x, \p y, and \p z dimensions of the thread blocks that are
- * created when the kernel given by \p hfunc is launched.
- *
- * \param hfunc - Kernel to specify dimensions of
- * \param x - X dimension
- * \param y - Y dimension
- * \param z - Z dimension
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuFuncSetSharedSize,
- * ::cuFuncSetCacheConfig,
- * ::cuFuncGetAttribute,
- * ::cuParamSetSize,
- * ::cuParamSeti,
- * ::cuParamSetf,
- * ::cuParamSetv,
- * ::cuLaunch,
- * ::cuLaunchGrid,
- * ::cuLaunchGridAsync,
- * ::cuLaunchKernel
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuFuncSetBlockShape(CUfunction hfunc, int x, int y, int z);
-
-/**
- * \brief Sets the dynamic shared-memory size for the function
- *
- * \deprecated
- *
- * Sets through \p bytes the amount of dynamic shared memory that will be
- * available to each thread block when the kernel given by \p hfunc is launched.
- *
- * \param hfunc - Kernel to specify dynamic shared-memory size for
- * \param bytes - Dynamic shared-memory size per thread in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuFuncSetBlockShape,
- * ::cuFuncSetCacheConfig,
- * ::cuFuncGetAttribute,
- * ::cuParamSetSize,
- * ::cuParamSeti,
- * ::cuParamSetf,
- * ::cuParamSetv,
- * ::cuLaunch,
- * ::cuLaunchGrid,
- * ::cuLaunchGridAsync,
- * ::cuLaunchKernel
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuFuncSetSharedSize(CUfunction hfunc, unsigned int bytes);
-
-/**
- * \brief Sets the parameter size for the function
- *
- * \deprecated
- *
- * Sets through \p numbytes the total size in bytes needed by the function
- * parameters of the kernel corresponding to \p hfunc.
- *
- * \param hfunc - Kernel to set parameter size for
- * \param numbytes - Size of parameter list in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuFuncSetBlockShape,
- * ::cuFuncSetSharedSize,
- * ::cuFuncGetAttribute,
- * ::cuParamSetf,
- * ::cuParamSeti,
- * ::cuParamSetv,
- * ::cuLaunch,
- * ::cuLaunchGrid,
- * ::cuLaunchGridAsync,
- * ::cuLaunchKernel
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuParamSetSize(CUfunction hfunc, unsigned int numbytes);
-
-/**
- * \brief Adds an integer parameter to the function's argument list
- *
- * \deprecated
- *
- * Sets an integer parameter that will be specified the next time the
- * kernel corresponding to \p hfunc will be invoked. \p offset is a byte offset.
- *
- * \param hfunc - Kernel to add parameter to
- * \param offset - Offset to add parameter to argument list
- * \param value - Value of parameter
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuFuncSetBlockShape,
- * ::cuFuncSetSharedSize,
- * ::cuFuncGetAttribute,
- * ::cuParamSetSize,
- * ::cuParamSetf,
- * ::cuParamSetv,
- * ::cuLaunch,
- * ::cuLaunchGrid,
- * ::cuLaunchGridAsync,
- * ::cuLaunchKernel
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuParamSeti(CUfunction hfunc, int offset, unsigned int value);
-
-/**
- * \brief Adds a floating-point parameter to the function's argument list
- *
- * \deprecated
- *
- * Sets a floating-point parameter that will be specified the next time the
- * kernel corresponding to \p hfunc will be invoked. \p offset is a byte offset.
- *
- * \param hfunc - Kernel to add parameter to
- * \param offset - Offset to add parameter to argument list
- * \param value - Value of parameter
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuFuncSetBlockShape,
- * ::cuFuncSetSharedSize,
- * ::cuFuncGetAttribute,
- * ::cuParamSetSize,
- * ::cuParamSeti,
- * ::cuParamSetv,
- * ::cuLaunch,
- * ::cuLaunchGrid,
- * ::cuLaunchGridAsync,
- * ::cuLaunchKernel
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuParamSetf(CUfunction hfunc, int offset, float value);
-
-/**
- * \brief Adds arbitrary data to the function's argument list
- *
- * \deprecated
- *
- * Copies an arbitrary amount of data (specified in \p numbytes) from \p ptr
- * into the parameter space of the kernel corresponding to \p hfunc. \p offset
- * is a byte offset.
- *
- * \param hfunc - Kernel to add data to
- * \param offset - Offset to add data to argument list
- * \param ptr - Pointer to arbitrary data
- * \param numbytes - Size of data to copy in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa ::cuFuncSetBlockShape,
- * ::cuFuncSetSharedSize,
- * ::cuFuncGetAttribute,
- * ::cuParamSetSize,
- * ::cuParamSetf,
- * ::cuParamSeti,
- * ::cuLaunch,
- * ::cuLaunchGrid,
- * ::cuLaunchGridAsync,
- * ::cuLaunchKernel
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuParamSetv(CUfunction hfunc, int offset, void *ptr, unsigned int numbytes);
-
-/**
- * \brief Launches a CUDA function
- *
- * \deprecated
- *
- * Invokes the kernel \p f on a 1 x 1 x 1 grid of blocks. The block
- * contains the number of threads specified by a previous call to
- * ::cuFuncSetBlockShape().
- *
- * The block shape, dynamic shared memory size, and parameter information
- * must be set using
- * ::cuFuncSetBlockShape(),
- * ::cuFuncSetSharedSize(),
- * ::cuParamSetSize(),
- * ::cuParamSeti(),
- * ::cuParamSetf(), and
- * ::cuParamSetv()
- * prior to calling this function.
- *
- * Launching a function via ::cuLaunchKernel() invalidates the function's
- * block shape, dynamic shared memory size, and parameter information. After
- * launching via cuLaunchKernel, this state must be re-initialized prior to
- * calling this function. Failure to do so results in undefined behavior.
- *
- * \param f - Kernel to launch
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_LAUNCH_FAILED,
- * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES,
- * ::CUDA_ERROR_LAUNCH_TIMEOUT,
- * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING,
- * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED
- * \notefnerr
- *
- * \sa ::cuFuncSetBlockShape,
- * ::cuFuncSetSharedSize,
- * ::cuFuncGetAttribute,
- * ::cuParamSetSize,
- * ::cuParamSetf,
- * ::cuParamSeti,
- * ::cuParamSetv,
- * ::cuLaunchGrid,
- * ::cuLaunchGridAsync,
- * ::cuLaunchKernel
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuLaunch(CUfunction f);
-
-/**
- * \brief Launches a CUDA function
- *
- * \deprecated
- *
- * Invokes the kernel \p f on a \p grid_width x \p grid_height grid of
- * blocks. Each block contains the number of threads specified by a previous
- * call to ::cuFuncSetBlockShape().
- *
- * The block shape, dynamic shared memory size, and parameter information
- * must be set using
- * ::cuFuncSetBlockShape(),
- * ::cuFuncSetSharedSize(),
- * ::cuParamSetSize(),
- * ::cuParamSeti(),
- * ::cuParamSetf(), and
- * ::cuParamSetv()
- * prior to calling this function.
- *
- * Launching a function via ::cuLaunchKernel() invalidates the function's
- * block shape, dynamic shared memory size, and parameter information. After
- * launching via cuLaunchKernel, this state must be re-initialized prior to
- * calling this function. Failure to do so results in undefined behavior.
- *
- * \param f - Kernel to launch
- * \param grid_width - Width of grid in blocks
- * \param grid_height - Height of grid in blocks
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_LAUNCH_FAILED,
- * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES,
- * ::CUDA_ERROR_LAUNCH_TIMEOUT,
- * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING,
- * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED
- * \notefnerr
- *
- * \sa ::cuFuncSetBlockShape,
- * ::cuFuncSetSharedSize,
- * ::cuFuncGetAttribute,
- * ::cuParamSetSize,
- * ::cuParamSetf,
- * ::cuParamSeti,
- * ::cuParamSetv,
- * ::cuLaunch,
- * ::cuLaunchGridAsync,
- * ::cuLaunchKernel
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuLaunchGrid(CUfunction f, int grid_width, int grid_height);
-
-/**
- * \brief Launches a CUDA function
- *
- * \deprecated
- *
- * Invokes the kernel \p f on a \p grid_width x \p grid_height grid of
- * blocks. Each block contains the number of threads specified by a previous
- * call to ::cuFuncSetBlockShape().
- *
- * The block shape, dynamic shared memory size, and parameter information
- * must be set using
- * ::cuFuncSetBlockShape(),
- * ::cuFuncSetSharedSize(),
- * ::cuParamSetSize(),
- * ::cuParamSeti(),
- * ::cuParamSetf(), and
- * ::cuParamSetv()
- * prior to calling this function.
- *
- * Launching a function via ::cuLaunchKernel() invalidates the function's
- * block shape, dynamic shared memory size, and parameter information. After
- * launching via cuLaunchKernel, this state must be re-initialized prior to
- * calling this function. Failure to do so results in undefined behavior.
- *
- * \param f - Kernel to launch
- * \param grid_width - Width of grid in blocks
- * \param grid_height - Height of grid in blocks
- * \param hStream - Stream identifier
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_LAUNCH_FAILED,
- * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES,
- * ::CUDA_ERROR_LAUNCH_TIMEOUT,
- * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING,
- * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED
- *
- * \note In certain cases where cubins are created with no ABI (i.e., using \p ptxas \p --abi-compile \p no),
- * this function may serialize kernel launches. The CUDA driver retains asynchronous behavior by
- * growing the per-thread stack as needed per launch and not shrinking it afterwards.
- *
- * \note_null_stream
- * \notefnerr
- *
- * \sa ::cuFuncSetBlockShape,
- * ::cuFuncSetSharedSize,
- * ::cuFuncGetAttribute,
- * ::cuParamSetSize,
- * ::cuParamSetf,
- * ::cuParamSeti,
- * ::cuParamSetv,
- * ::cuLaunch,
- * ::cuLaunchGrid,
- * ::cuLaunchKernel
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuLaunchGridAsync(CUfunction f, int grid_width, int grid_height, CUstream hStream);
-
-
-/**
- * \brief Adds a texture-reference to the function's argument list
- *
- * \deprecated
- *
- * Makes the CUDA array or linear memory bound to the texture reference
- * \p hTexRef available to a device program as a texture. In this version of
- * CUDA, the texture-reference must be obtained via ::cuModuleGetTexRef() and
- * the \p texunit parameter must be set to ::CU_PARAM_TR_DEFAULT.
- *
- * \param hfunc - Kernel to add texture-reference to
- * \param texunit - Texture unit (must be ::CU_PARAM_TR_DEFAULT)
- * \param hTexRef - Texture-reference to add to argument list
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuParamSetTexRef(CUfunction hfunc, int texunit, CUtexref hTexRef);
-/** @} */ /* END CUDA_EXEC_DEPRECATED */
-
-/**
- * \defgroup CUDA_GRAPH Graph Management
- *
- * ___MANBRIEF___ graph management functions of the low-level CUDA driver API
- * (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the graph management functions of the low-level CUDA
- * driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Creates a graph
- *
- * Creates an empty graph, which is returned via \p phGraph.
- *
- * \param phGraph - Returns newly created graph
- * \param flags - Graph creation flags, must be 0
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphAddChildGraphNode,
- * ::cuGraphAddEmptyNode,
- * ::cuGraphAddKernelNode,
- * ::cuGraphAddHostNode,
- * ::cuGraphAddMemcpyNode,
- * ::cuGraphAddMemsetNode,
- * ::cuGraphInstantiate,
- * ::cuGraphDestroy,
- * ::cuGraphGetNodes,
- * ::cuGraphGetRootNodes,
- * ::cuGraphGetEdges,
- * ::cuGraphClone
- */
-CUresult CUDAAPI cuGraphCreate(CUgraph *phGraph, unsigned int flags);
-
-/**
- * \brief Creates a kernel execution node and adds it to a graph
- *
- * Creates a new kernel execution node and adds it to \p hGraph with \p numDependencies
- * dependencies specified via \p dependencies and arguments specified in \p nodeParams.
- * It is possible for \p numDependencies to be 0, in which case the node will be placed
- * at the root of the graph. \p dependencies may not have any duplicate entries.
- * A handle to the new node will be returned in \p phGraphNode.
- *
- * The CUDA_KERNEL_NODE_PARAMS structure is defined as:
- *
- * \code
- * typedef struct CUDA_KERNEL_NODE_PARAMS_st {
- * CUfunction func;
- * unsigned int gridDimX;
- * unsigned int gridDimY;
- * unsigned int gridDimZ;
- * unsigned int blockDimX;
- * unsigned int blockDimY;
- * unsigned int blockDimZ;
- * unsigned int sharedMemBytes;
- * void **kernelParams;
- * void **extra;
- * } CUDA_KERNEL_NODE_PARAMS;
- * \endcode
- *
- * When the graph is launched, the node will invoke kernel \p func on a (\p gridDimX x
- * \p gridDimY x \p gridDimZ) grid of blocks. Each block contains
- * (\p blockDimX x \p blockDimY x \p blockDimZ) threads.
- *
- * \p sharedMemBytes sets the amount of dynamic shared memory that will be
- * available to each thread block.
- *
- * Kernel parameters to \p func can be specified in one of two ways:
- *
- * 1) Kernel parameters can be specified via \p kernelParams. If the kernel has N
- * parameters, then \p kernelParams needs to be an array of N pointers. Each pointer,
- * from \p kernelParams[0] to \p kernelParams[N-1], points to the region of memory from which the actual
- * parameter will be copied. The number of kernel parameters and their offsets and sizes do not need
- * to be specified as that information is retrieved directly from the kernel's image.
- *
- * 2) Kernel parameters for non-cooperative kernels can also be packaged by the application into a single
- * buffer that is passed in via \p extra. This places the burden on the application of knowing each
- * kernel parameter's size and alignment/padding within the buffer. The \p extra parameter exists
- * to allow this function to take additional less commonly used arguments. \p extra specifies
- * a list of names of extra settings and their corresponding values. Each extra setting name is
- * immediately followed by the corresponding value. The list must be terminated with either NULL or
- * CU_LAUNCH_PARAM_END.
- *
- * - ::CU_LAUNCH_PARAM_END, which indicates the end of the \p extra
- * array;
- * - ::CU_LAUNCH_PARAM_BUFFER_POINTER, which specifies that the next
- * value in \p extra will be a pointer to a buffer
- * containing all the kernel parameters for launching kernel
- * \p func;
- * - ::CU_LAUNCH_PARAM_BUFFER_SIZE, which specifies that the next
- * value in \p extra will be a pointer to a size_t
- * containing the size of the buffer specified with
- * ::CU_LAUNCH_PARAM_BUFFER_POINTER;
- *
- * The error ::CUDA_ERROR_INVALID_VALUE will be returned if kernel parameters are specified with both
- * \p kernelParams and \p extra (i.e. both \p kernelParams and \p extra are non-NULL).
- * ::CUDA_ERROR_INVALID_VALUE will be returned if \p extra is used for a cooperative kernel.
- *
- * The \p kernelParams or \p extra array, as well as the argument values it points to,
- * are copied during this call.
- *
- * \note Kernels launched using graphs must not use texture and surface references. Reading or
- * writing through any texture or surface reference is undefined behavior.
- * This restriction does not apply to texture and surface objects.
- *
- * \param phGraphNode - Returns newly created node
- * \param hGraph - Graph to which to add the node
- * \param dependencies - Dependencies of the node
- * \param numDependencies - Number of dependencies
- * \param nodeParams - Parameters for the GPU execution node
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuLaunchKernel,
- * ::cuLaunchCooperativeKernel,
- * ::cuGraphKernelNodeGetParams,
- * ::cuGraphKernelNodeSetParams,
- * ::cuGraphCreate,
- * ::cuGraphDestroyNode,
- * ::cuGraphAddChildGraphNode,
- * ::cuGraphAddEmptyNode,
- * ::cuGraphAddHostNode,
- * ::cuGraphAddMemcpyNode,
- * ::cuGraphAddMemsetNode
- */
-CUresult CUDAAPI cuGraphAddKernelNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_KERNEL_NODE_PARAMS *nodeParams);
-
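A sketch of building a one-kernel graph with the node APIs above, assuming a kernel f and a params array of pointers to its arguments; illustrative only, not part of the removed header:

    CUgraph graph = NULL;
    CUgraphNode knode = NULL;
    CUDA_KERNEL_NODE_PARAMS np = {0};
    np.func = f;
    np.gridDimX = 64;   np.gridDimY = 1;  np.gridDimZ = 1;
    np.blockDimX = 256; np.blockDimY = 1; np.blockDimZ = 1;
    np.sharedMemBytes = 0;
    np.kernelParams = params;
    np.extra = NULL;
    cuGraphCreate(&graph, /*flags=*/0);
    cuGraphAddKernelNode(&knode, graph, /*dependencies=*/NULL, 0, &np);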
-/**
- * \brief Returns a kernel node's parameters
- *
- * Returns the parameters of kernel node \p hNode in \p nodeParams.
- * The \p kernelParams or \p extra array returned in \p nodeParams,
- * as well as the argument values it points to, are owned by the node.
- * This memory remains valid until the node is destroyed or its
- * parameters are modified, and should not be modified
- * directly. Use ::cuGraphKernelNodeSetParams to update the
- * parameters of this node.
- *
- * The params will contain either \p kernelParams or \p extra,
- * according to which of these was most recently set on the node.
- *
- * \param hNode - Node to get the parameters for
- * \param nodeParams - Pointer to return the parameters
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuLaunchKernel,
- * ::cuGraphAddKernelNode,
- * ::cuGraphKernelNodeSetParams
- */
-CUresult CUDAAPI cuGraphKernelNodeGetParams(CUgraphNode hNode, CUDA_KERNEL_NODE_PARAMS *nodeParams);
-
-/**
- * \brief Sets a kernel node's parameters
- *
- * Sets the parameters of kernel node \p hNode to \p nodeParams.
- *
- * \param hNode - Node to set the parameters for
- * \param nodeParams - Parameters to copy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_OUT_OF_MEMORY
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuLaunchKernel,
- * ::cuGraphAddKernelNode,
- * ::cuGraphKernelNodeGetParams
- */
-CUresult CUDAAPI cuGraphKernelNodeSetParams(CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS *nodeParams);
-
-/**
- * \brief Creates a memcpy node and adds it to a graph
- *
- * Creates a new memcpy node and adds it to \p hGraph with \p numDependencies
- * dependencies specified via \p dependencies.
- * It is possible for \p numDependencies to be 0, in which case the node will be placed
- * at the root of the graph. \p dependencies may not have any duplicate entries.
- * A handle to the new node will be returned in \p phGraphNode.
- *
- * When the graph is launched, the node will perform the memcpy described by \p copyParams.
- * See ::cuMemcpy3D() for a description of the structure and its restrictions.
- *
- * Memcpy nodes have some additional restrictions with regards to managed memory, if the
- * system contains at least one device which has a zero value for the device attribute
- * ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. If one or more of the operands refer
- * to managed memory, then using the memory type ::CU_MEMORYTYPE_UNIFIED is disallowed
- * for those operand(s). The managed memory will be treated as residing on either the
- * host or the device, depending on which memory type is specified.
- *
- * \param phGraphNode - Returns newly created node
- * \param hGraph - Graph to which to add the node
- * \param dependencies - Dependencies of the node
- * \param numDependencies - Number of dependencies
- * \param copyParams - Parameters for the memory copy
- * \param ctx - Context on which to run the node
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuMemcpy3D,
- * ::cuGraphMemcpyNodeGetParams,
- * ::cuGraphMemcpyNodeSetParams,
- * ::cuGraphCreate,
- * ::cuGraphDestroyNode,
- * ::cuGraphAddChildGraphNode,
- * ::cuGraphAddEmptyNode,
- * ::cuGraphAddKernelNode,
- * ::cuGraphAddHostNode,
- * ::cuGraphAddMemsetNode
- */
-CUresult CUDAAPI cuGraphAddMemcpyNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_MEMCPY3D *copyParams, CUcontext ctx);
-
-/**
- * \brief Returns a memcpy node's parameters
- *
- * Returns the parameters of memcpy node \p hNode in \p nodeParams.
- *
- * \param hNode - Node to get the parameters for
- * \param nodeParams - Pointer to return the parameters
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuMemcpy3D,
- * ::cuGraphAddMemcpyNode,
- * ::cuGraphMemcpyNodeSetParams
- */
-CUresult CUDAAPI cuGraphMemcpyNodeGetParams(CUgraphNode hNode, CUDA_MEMCPY3D *nodeParams);
-
-/**
- * \brief Sets a memcpy node's parameters
- *
- * Sets the parameters of memcpy node \p hNode to \p nodeParams.
- *
- * \param hNode - Node to set the parameters for
- * \param nodeParams - Parameters to copy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE,
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuMemcpy3D,
- * ::cuGraphAddMemcpyNode,
- * ::cuGraphMemcpyNodeGetParams
- */
-CUresult CUDAAPI cuGraphMemcpyNodeSetParams(CUgraphNode hNode, const CUDA_MEMCPY3D *nodeParams);
-
-/**
- * \brief Creates a memset node and adds it to a graph
- *
- * Creates a new memset node and adds it to \p hGraph with \p numDependencies
- * dependencies specified via \p dependencies.
- * It is possible for \p numDependencies to be 0, in which case the node will be placed
- * at the root of the graph. \p dependencies may not have any duplicate entries.
- * A handle to the new node will be returned in \p phGraphNode.
- *
- * The element size must be 1, 2, or 4 bytes.
- * When the graph is launched, the node will perform the memset described by \p memsetParams.
- *
- * \param phGraphNode - Returns newly created node
- * \param hGraph - Graph to which to add the node
- * \param dependencies - Dependencies of the node
- * \param numDependencies - Number of dependencies
- * \param memsetParams - Parameters for the memory set
- * \param ctx - Context on which to run the node
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_CONTEXT
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuMemsetD2D32,
- * ::cuGraphMemsetNodeGetParams,
- * ::cuGraphMemsetNodeSetParams,
- * ::cuGraphCreate,
- * ::cuGraphDestroyNode,
- * ::cuGraphAddChildGraphNode,
- * ::cuGraphAddEmptyNode,
- * ::cuGraphAddKernelNode,
- * ::cuGraphAddHostNode,
- * ::cuGraphAddMemcpyNode
- */
-CUresult CUDAAPI cuGraphAddMemsetNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_MEMSET_NODE_PARAMS *memsetParams, CUcontext ctx);
-
-/**
- * \brief Returns a memset node's parameters
- *
- * Returns the parameters of memset node \p hNode in \p nodeParams.
- *
- * \param hNode - Node to get the parameters for
- * \param nodeParams - Pointer to return the parameters
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuMemsetD2D32,
- * ::cuGraphAddMemsetNode,
- * ::cuGraphMemsetNodeSetParams
- */
-CUresult CUDAAPI cuGraphMemsetNodeGetParams(CUgraphNode hNode, CUDA_MEMSET_NODE_PARAMS *nodeParams);
-
-/**
- * \brief Sets a memset node's parameters
- *
- * Sets the parameters of memset node \p hNode to \p nodeParams.
- *
- * \param hNode - Node to set the parameters for
- * \param nodeParams - Parameters to copy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuMemsetD2D32,
- * ::cuGraphAddMemsetNode,
- * ::cuGraphMemsetNodeGetParams
- */
-CUresult CUDAAPI cuGraphMemsetNodeSetParams(CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS *nodeParams);
-
-/**
- * \brief Creates a host execution node and adds it to a graph
- *
- * Creates a new CPU execution node and adds it to \p hGraph with \p numDependencies
- * dependencies specified via \p dependencies and arguments specified in \p nodeParams.
- * It is possible for \p numDependencies to be 0, in which case the node will be placed
- * at the root of the graph. \p dependencies may not have any duplicate entries.
- * A handle to the new node will be returned in \p phGraphNode.
- *
- * When the graph is launched, the node will invoke the specified CPU function.
- * Host nodes are not supported under MPS with pre-Volta GPUs.
- *
- * \param phGraphNode - Returns newly created node
- * \param hGraph - Graph to which to add the node
- * \param dependencies - Dependencies of the node
- * \param numDependencies - Number of dependencies
- * \param nodeParams - Parameters for the host node
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_NOT_SUPPORTED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuLaunchHostFunc,
- * ::cuGraphHostNodeGetParams,
- * ::cuGraphHostNodeSetParams,
- * ::cuGraphCreate,
- * ::cuGraphDestroyNode,
- * ::cuGraphAddChildGraphNode,
- * ::cuGraphAddEmptyNode,
- * ::cuGraphAddKernelNode,
- * ::cuGraphAddMemcpyNode,
- * ::cuGraphAddMemsetNode
- */
-CUresult CUDAAPI cuGraphAddHostNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_HOST_NODE_PARAMS *nodeParams);
-
-/**
- * \brief Returns a host node's parameters
- *
- * Returns the parameters of host node \p hNode in \p nodeParams.
- *
- * \param hNode - Node to get the parameters for
- * \param nodeParams - Pointer to return the parameters
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuLaunchHostFunc,
- * ::cuGraphAddHostNode,
- * ::cuGraphHostNodeSetParams
- */
-CUresult CUDAAPI cuGraphHostNodeGetParams(CUgraphNode hNode, CUDA_HOST_NODE_PARAMS *nodeParams);
-
-/**
- * \brief Sets a host node's parameters
- *
- * Sets the parameters of host node \p hNode to \p nodeParams.
- *
- * \param hNode - Node to set the parameters for
- * \param nodeParams - Parameters to copy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuLaunchHostFunc,
- * ::cuGraphAddHostNode,
- * ::cuGraphHostNodeGetParams
- */
-CUresult CUDAAPI cuGraphHostNodeSetParams(CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS *nodeParams);
-
-/**
- * \brief Creates a child graph node and adds it to a graph
- *
- * Creates a new node which executes an embedded graph, and adds it to \p hGraph with
- * \p numDependencies dependencies specified via \p dependencies.
- * It is possible for \p numDependencies to be 0, in which case the node will be placed
- * at the root of the graph. \p dependencies may not have any duplicate entries.
- * A handle to the new node will be returned in \p phGraphNode.
- *
- * The node executes an embedded child graph. The child graph is cloned in this call.
- *
- * \param phGraphNode - Returns newly created node
- * \param hGraph - Graph to which to add the node
- * \param dependencies - Dependencies of the node
- * \param numDependencies - Number of dependencies
- * \param childGraph - The graph to clone into this node
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE,
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphChildGraphNodeGetGraph,
- * ::cuGraphCreate,
- * ::cuGraphDestroyNode,
- * ::cuGraphAddEmptyNode,
- * ::cuGraphAddKernelNode,
- * ::cuGraphAddHostNode,
- * ::cuGraphAddMemcpyNode,
- * ::cuGraphAddMemsetNode,
- * ::cuGraphClone
- */
-CUresult CUDAAPI cuGraphAddChildGraphNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUgraph childGraph);
-
-/**
- * \brief Gets a handle to the embedded graph of a child graph node
- *
- * Gets a handle to the embedded graph in a child graph node. This call
- * does not clone the graph. Changes to the graph will be reflected in
- * the node, and the node retains ownership of the graph.
- *
- * \param hNode - Node to get the embedded graph for
- * \param phGraph - Location to store a handle to the graph
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE,
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphAddChildGraphNode,
- * ::cuGraphNodeFindInClone
- */
-CUresult CUDAAPI cuGraphChildGraphNodeGetGraph(CUgraphNode hNode, CUgraph *phGraph);
-
-/**
- * \brief Creates an empty node and adds it to a graph
- *
- * Creates a new node which performs no operation, and adds it to \p hGraph with
- * \p numDependencies dependencies specified via \p dependencies.
- * It is possible for \p numDependencies to be 0, in which case the node will be placed
- * at the root of the graph. \p dependencies may not have any duplicate entries.
- * A handle to the new node will be returned in \p phGraphNode.
- *
- * An empty node performs no operation during execution, but can be used for
- * transitive ordering. For example, a phased execution graph with 2 groups of n
- * nodes with a barrier between them can be represented using an empty node and
- * 2*n dependency edges, rather than no empty node and n^2 dependency edges.
- *
- * \param phGraphNode - Returns newly created node
- * \param hGraph - Graph to which to add the node
- * \param dependencies - Dependencies of the node
- * \param numDependencies - Number of dependencies
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE,
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphCreate,
- * ::cuGraphDestroyNode,
- * ::cuGraphAddChildGraphNode,
- * ::cuGraphAddKernelNode,
- * ::cuGraphAddHostNode,
- * ::cuGraphAddMemcpyNode,
- * ::cuGraphAddMemsetNode
- */
-CUresult CUDAAPI cuGraphAddEmptyNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies);
-
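A sketch of the barrier pattern described above, joining n existing nodes (groupA) to a later kernel node through a single empty node, assuming graph and a kernel-node parameter struct np_b already exist; illustrative only:

    CUgraphNode barrier = NULL;
    cuGraphAddEmptyNode(&barrier, graph, groupA, n);        /* n edges into barrier  */
    CUgraphNode b0 = NULL;
    cuGraphAddKernelNode(&b0, graph, &barrier, 1, &np_b);   /* 1 edge out per B node */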
-/**
- * \brief Clones a graph
- *
- * This function creates a copy of \p originalGraph and returns it in \p phGraphClone.
- * All parameters are copied into the cloned graph. The original graph may be modified
- * after this call without affecting the clone.
- *
- * Child graph nodes in the original graph are recursively copied into the clone.
- *
- * \param phGraphClone - Returns newly created cloned graph
- * \param originalGraph - Graph to clone
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_OUT_OF_MEMORY
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphCreate,
- * ::cuGraphNodeFindInClone
- */
-CUresult CUDAAPI cuGraphClone(CUgraph *phGraphClone, CUgraph originalGraph);
-
-/**
- * \brief Finds a cloned version of a node
- *
- * This function returns the node in \p hClonedGraph corresponding to \p hOriginalNode
- * in the original graph.
- *
- * \p hClonedGraph must have been cloned from \p hOriginalGraph via ::cuGraphClone.
- * \p hOriginalNode must have been in \p hOriginalGraph at the time of the call to
- * ::cuGraphClone, and the corresponding cloned node in \p hClonedGraph must not have
- * been removed. The cloned node is then returned via \p phNode.
- *
- * \param phNode - Returns handle to the cloned node
- * \param hOriginalNode - Handle to the original node
- * \param hClonedGraph - Cloned graph to query
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphClone
- */
-CUresult CUDAAPI cuGraphNodeFindInClone(CUgraphNode *phNode, CUgraphNode hOriginalNode, CUgraph hClonedGraph);
-
-/**
- * \brief Returns a node's type
- *
- * Returns the node type of \p hNode in \p type.
- *
- * \param hNode - Node to query
- * \param type - Pointer to return the node type
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphGetNodes,
- * ::cuGraphGetRootNodes,
- * ::cuGraphChildGraphNodeGetGraph,
- * ::cuGraphKernelNodeGetParams,
- * ::cuGraphKernelNodeSetParams,
- * ::cuGraphHostNodeGetParams,
- * ::cuGraphHostNodeSetParams,
- * ::cuGraphMemcpyNodeGetParams,
- * ::cuGraphMemcpyNodeSetParams,
- * ::cuGraphMemsetNodeGetParams,
- * ::cuGraphMemsetNodeSetParams
- */
-CUresult CUDAAPI cuGraphNodeGetType(CUgraphNode hNode, CUgraphNodeType *type);
-
-/**
- * \brief Returns a graph's nodes
- *
- * Returns a list of \p hGraph's nodes. \p nodes may be NULL, in which case this
- * function will return the number of nodes in \p numNodes. Otherwise,
- * \p numNodes entries will be filled in. If \p numNodes is higher than the actual
- * number of nodes, the remaining entries in \p nodes will be set to NULL, and the
- * number of nodes actually obtained will be returned in \p numNodes.
- *
- * \param hGraph - Graph to query
- * \param nodes - Pointer to return the nodes
- * \param numNodes - See description
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphCreate,
- * ::cuGraphGetRootNodes,
- * ::cuGraphGetEdges,
- * ::cuGraphNodeGetType,
- * ::cuGraphNodeGetDependencies,
- * ::cuGraphNodeGetDependentNodes
- */
-CUresult CUDAAPI cuGraphGetNodes(CUgraph hGraph, CUgraphNode *nodes, size_t *numNodes);
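/* Editorial sketch (not part of the original header): the usual two-call pattern
 * for size-query APIs such as cuGraphGetNodes -- pass NULL first to obtain the
 * count, then allocate and fetch. Requires <stdlib.h> for malloc/free. */
size_t numNodes = 0;
if (cuGraphGetNodes(hGraph, NULL, &numNodes) == CUDA_SUCCESS && numNodes > 0) {
  CUgraphNode *nodes = (CUgraphNode *)malloc(numNodes * sizeof(CUgraphNode));
  cuGraphGetNodes(hGraph, nodes, &numNodes);  /* fills nodes[0..numNodes) */
  /* ... inspect each node, e.g. with cuGraphNodeGetType ... */
  free(nodes);
}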
-
-/**
- * \brief Returns a graph's root nodes
- *
- * Returns a list of \p hGraph's root nodes. \p rootNodes may be NULL, in which case this
- * function will return the number of root nodes in \p numRootNodes. Otherwise,
- * \p numRootNodes entries will be filled in. If \p numRootNodes is higher than the actual
- * number of root nodes, the remaining entries in \p rootNodes will be set to NULL, and the
- * number of nodes actually obtained will be returned in \p numRootNodes.
- *
- * \param hGraph - Graph to query
- * \param rootNodes - Pointer to return the root nodes
- * \param numRootNodes - See description
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphCreate,
- * ::cuGraphGetNodes,
- * ::cuGraphGetEdges,
- * ::cuGraphNodeGetType,
- * ::cuGraphNodeGetDependencies,
- * ::cuGraphNodeGetDependentNodes
- */
-CUresult CUDAAPI cuGraphGetRootNodes(CUgraph hGraph, CUgraphNode *rootNodes, size_t *numRootNodes);
-
-/**
- * \brief Returns a graph's dependency edges
- *
- * Returns a list of \p hGraph's dependency edges. Edges are returned via corresponding
- * indices in \p from and \p to; that is, the node in \p to[i] has a dependency on the
- * node in \p from[i]. \p from and \p to may both be NULL, in which
- * case this function only returns the number of edges in \p numEdges. Otherwise,
- * \p numEdges entries will be filled in. If \p numEdges is higher than the actual
- * number of edges, the remaining entries in \p from and \p to will be set to NULL, and
- * the number of edges actually returned will be written to \p numEdges.
- *
- * \param hGraph - Graph to get the edges from
- * \param from - Location to return edge endpoints
- * \param to - Location to return edge endpoints
- * \param numEdges - See description
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphGetNodes,
- * ::cuGraphGetRootNodes,
- * ::cuGraphAddDependencies,
- * ::cuGraphRemoveDependencies,
- * ::cuGraphNodeGetDependencies,
- * ::cuGraphNodeGetDependentNodes
- */
-CUresult CUDAAPI cuGraphGetEdges(CUgraph hGraph, CUgraphNode *from, CUgraphNode *to, size_t *numEdges);
-
-/**
- * \brief Returns a node's dependencies
- *
- * Returns a list of \p node's dependencies. \p dependencies may be NULL, in which case this
- * function will return the number of dependencies in \p numDependencies. Otherwise,
- * \p numDependencies entries will be filled in. If \p numDependencies is higher than the actual
- * number of dependencies, the remaining entries in \p dependencies will be set to NULL, and the
- * number of nodes actually obtained will be returned in \p numDependencies.
- *
- * \param hNode - Node to query
- * \param dependencies - Pointer to return the dependencies
- * \param numDependencies - See description
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphNodeGetDependentNodes,
- * ::cuGraphGetNodes,
- * ::cuGraphGetRootNodes,
- * ::cuGraphGetEdges,
- * ::cuGraphAddDependencies,
- * ::cuGraphRemoveDependencies
- */
-CUresult CUDAAPI cuGraphNodeGetDependencies(CUgraphNode hNode, CUgraphNode *dependencies, size_t *numDependencies);
-
-/**
- * \brief Returns a node's dependent nodes
- *
- * Returns a list of \p node's dependent nodes. \p dependentNodes may be NULL, in which
- * case this function will return the number of dependent nodes in \p numDependentNodes.
- * Otherwise, \p numDependentNodes entries will be filled in. If \p numDependentNodes is
- * higher than the actual number of dependent nodes, the remaining entries in
- * \p dependentNodes will be set to NULL, and the number of nodes actually obtained will
- * be returned in \p numDependentNodes.
- *
- * \param hNode - Node to query
- * \param dependentNodes - Pointer to return the dependent nodes
- * \param numDependentNodes - See description
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphNodeGetDependencies,
- * ::cuGraphGetNodes,
- * ::cuGraphGetRootNodes,
- * ::cuGraphGetEdges,
- * ::cuGraphAddDependencies,
- * ::cuGraphRemoveDependencies
- */
-CUresult CUDAAPI cuGraphNodeGetDependentNodes(CUgraphNode hNode, CUgraphNode *dependentNodes, size_t *numDependentNodes);
-
-/**
- * \brief Adds dependency edges to a graph
- *
- * The number of dependencies to be added is defined by \p numDependencies.
- * Elements in \p from and \p to at corresponding indices define a dependency.
- * Each node in \p from and \p to must belong to \p hGraph.
- *
- * If \p numDependencies is 0, elements in \p from and \p to will be ignored.
- * Specifying an existing dependency will return an error.
- *
- * \param hGraph - Graph to which dependencies are added
- * \param from - Array of nodes that provide the dependencies
- * \param to - Array of dependent nodes
- * \param numDependencies - Number of dependencies to be added
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphRemoveDependencies,
- * ::cuGraphGetEdges,
- * ::cuGraphNodeGetDependencies,
- * ::cuGraphNodeGetDependentNodes
- */
-CUresult CUDAAPI cuGraphAddDependencies(CUgraph hGraph, const CUgraphNode *from, const CUgraphNode *to, size_t numDependencies);
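/* Editorial sketch (not part of the original header): making `nodeB` depend on
 * `nodeA` by adding a single edge; to[i] depends on from[i]. */
CUgraphNode from[1] = { nodeA };
CUgraphNode to[1]   = { nodeB };
CUresult err = cuGraphAddDependencies(hGraph, from, to, 1);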
-
-/**
- * \brief Removes dependency edges from a graph
- *
- * The number of \p dependencies to be removed is defined by \p numDependencies.
- * Elements in \p from and \p to at corresponding indices define a dependency.
- * Each node in \p from and \p to must belong to \p hGraph.
- *
- * If \p numDependencies is 0, elements in \p from and \p to will be ignored.
- * Specifying a non-existing dependency will return an error.
- *
- * \param hGraph - Graph from which to remove dependencies
- * \param from - Array of nodes that provide the dependencies
- * \param to - Array of dependent nodes
- * \param numDependencies - Number of dependencies to be removed
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphAddDependencies,
- * ::cuGraphGetEdges,
- * ::cuGraphNodeGetDependencies,
- * ::cuGraphNodeGetDependentNodes
- */
-CUresult CUDAAPI cuGraphRemoveDependencies(CUgraph hGraph, const CUgraphNode *from, const CUgraphNode *to, size_t numDependencies);
-
-/**
- * \brief Remove a node from the graph
- *
- * Removes \p hNode from its graph. This operation also severs any dependencies of other nodes
- * on \p hNode and vice versa.
- *
- * \param hNode - Node to remove
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphAddChildGraphNode,
- * ::cuGraphAddEmptyNode,
- * ::cuGraphAddKernelNode,
- * ::cuGraphAddHostNode,
- * ::cuGraphAddMemcpyNode,
- * ::cuGraphAddMemsetNode
- */
-CUresult CUDAAPI cuGraphDestroyNode(CUgraphNode hNode);
-
-/**
- * \brief Creates an executable graph from a graph
- *
- * Instantiates \p hGraph as an executable graph. The graph is validated for any
- * structural constraints or intra-node constraints which were not previously
- * validated. If instantiation is successful, a handle to the instantiated graph
- * is returned in \p graphExec.
- *
- * If there are any errors, diagnostic information may be returned in \p phErrorNode and
- * \p logBuffer. This is the primary way to inspect instantiation errors. The output
- * will be null terminated unless the diagnostics overflow
- * the buffer. In this case, they will be truncated, and the last byte can be
- * inspected to determine if truncation occurred.
- *
- * \param phGraphExec - Returns instantiated graph
- * \param hGraph - Graph to instantiate
- * \param phErrorNode - In case of an instantiation error, this may be modified to
- * indicate a node contributing to the error
- * \param logBuffer - A character buffer to store diagnostic messages
- * \param bufferSize - Size of the log buffer in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphCreate,
- * ::cuGraphLaunch,
- * ::cuGraphExecDestroy
- */
-CUresult CUDAAPI cuGraphInstantiate(CUgraphExec *phGraphExec, CUgraph hGraph, CUgraphNode *phErrorNode, char *logBuffer, size_t bufferSize);
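/* Editorial sketch (not part of the original header): instantiating a graph and
 * surfacing the diagnostic log on failure. Requires <stdio.h> for fprintf. */
CUgraphExec hGraphExec;
CUgraphNode hErrorNode = NULL;
char logBuffer[256] = { 0 };
CUresult err = cuGraphInstantiate(&hGraphExec, hGraph, &hErrorNode, logBuffer, sizeof(logBuffer));
if (err != CUDA_SUCCESS) {
  fprintf(stderr, "graph instantiation failed: %s\n", logBuffer);
}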
-
-/**
- * \brief Sets the parameters for a kernel node in the given graphExec
- *
- * Sets the parameters of a kernel node in an executable graph \p hGraphExec.
- * The node is identified by the corresponding node \p hNode in the
- * non-executable graph, from which the executable graph was instantiated.
- *
- * \p hNode must not have been removed from the original graph. The \p func field
- * of \p nodeParams cannot be modified and must match the original value.
- * All other values can be modified.
- *
- * The modifications only affect future launches of \p hGraphExec. Already
- * enqueued or running launches of \p hGraphExec are not affected by this call.
- * \p hNode is also not modified by this call.
- *
- * \param hGraphExec - The executable graph in which to set the specified node
- * \param hNode - kernel node from the graph from which graphExec was instantiated
- * \param nodeParams - Updated Parameters to set
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphAddKernelNode,
- * ::cuGraphKernelNodeSetParams,
- * ::cuGraphInstantiate
- */
-CUresult CUDAAPI cuGraphExecKernelNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS *nodeParams);
-
-/**
- * \brief Sets the parameters for a memcpy node in the given graphExec.
- *
- * Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had
- * contained \p copyParams at instantiation. hNode must remain in the graph which was
- * used to instantiate \p hGraphExec. Changed edges to and from hNode are ignored.
- *
- * The source and destination memory in \p copyParams must be allocated from the same
- * contexts as the original source and destination memory. Both the instantiation-time
- * memory operands and the memory operands in \p copyParams must be 1-dimensional.
- * Zero-length operations are not supported.
- *
- * The modifications only affect future launches of \p hGraphExec. Already enqueued
- * or running launches of \p hGraphExec are not affected by this call. hNode is also
- * not modified by this call.
- *
- * Returns CUDA_ERROR_INVALID_VALUE if the memory operands' mappings changed or
- * either the original or new memory operands are multidimensional.
- *
- * \param hGraphExec - The executable graph in which to set the specified node
- * \param hNode - Memcpy node from the graph which was used to instantiate graphExec
- * \param copyParams - The updated parameters to set
- * \param ctx - Context on which to run the node
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphInstantiate,
- * ::cuGraphExecKernelNodeSetParams,
- * ::cuGraphExecMemsetNodeSetParams,
- * ::cuGraphExecHostNodeSetParams
- */
-CUresult CUDAAPI cuGraphExecMemcpyNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_MEMCPY3D *copyParams, CUcontext ctx);
-
-/**
- * \brief Sets the parameters for a memset node in the given graphExec.
- *
- * Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had
- * contained \p memsetParams at instantiation. hNode must remain in the graph which was
- * used to instantiate \p hGraphExec. Changed edges to and from hNode are ignored.
- *
- * The destination memory in \p memsetParams must be allocated from the same
- * contexts as the original destination memory. Both the instantiation-time
- * memory operand and the memory operand in \p memsetParams must be 1-dimensional.
- * Zero-length operations are not supported.
- *
- * The modifications only affect future launches of \p hGraphExec. Already enqueued
- * or running launches of \p hGraphExec are not affected by this call. hNode is also
- * not modified by this call.
- *
- * Returns CUDA_ERROR_INVALID_VALUE if the memory operand's mappings changed or
- * either the original or new memory operand is multidimensional.
- *
- * \param hGraphExec - The executable graph in which to set the specified node
- * \param hNode - Memset node from the graph which was used to instantiate graphExec
- * \param memsetParams - The updated parameters to set
- * \param ctx - Context on which to run the node
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphInstantiate,
- * ::cuGraphExecKernelNodeSetParams,
- * ::cuGraphExecMemcpyNodeSetParams,
- * ::cuGraphExecHostNodeSetParams
- */
-CUresult CUDAAPI cuGraphExecMemsetNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS *memsetParams, CUcontext ctx);
-
-/**
- * \brief Sets the parameters for a host node in the given graphExec.
- *
- * Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had
- * contained \p nodeParams at instantiation. hNode must remain in the graph which was
- * used to instantiate \p hGraphExec. Changed edges to and from hNode are ignored.
- *
- * The modifications only affect future launches of \p hGraphExec. Already enqueued
- * or running launches of \p hGraphExec are not affected by this call. hNode is also
- * not modified by this call.
- *
- * \param hGraphExec - The executable graph in which to set the specified node
- * \param hNode - Host node from the graph which was used to instantiate graphExec
- * \param nodeParams - The updated parameters to set
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphInstantiate,
- * ::cuGraphExecKernelNodeSetParams,
- * ::cuGraphExecMemcpyNodeSetParams,
- * ::cuGraphExecMemsetNodeSetParams
- */
-CUresult CUDAAPI cuGraphExecHostNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS *nodeParams);
-
-/**
- * \brief Launches an executable graph in a stream
- *
- * Executes \p hGraphExec in \p hStream. Only one instance of \p hGraphExec may be executing
- * at a time. Each launch is ordered behind both any previous work in \p hStream
- * and any previous launches of \p hGraphExec. To execute a graph concurrently, it must be
- * instantiated multiple times into multiple executable graphs.
- *
- * \param hGraphExec - Executable graph to launch
- * \param hStream - Stream in which to launch the graph
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphInstantiate,
- * ::cuGraphExecDestroy
- */
-CUresult CUDAAPI cuGraphLaunch(CUgraphExec hGraphExec, CUstream hStream);
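/* Editorial sketch (not part of the original header): launching an instantiated
 * graph in a dedicated stream and waiting for it to finish. */
CUstream hStream;
cuStreamCreate(&hStream, CU_STREAM_NON_BLOCKING);
cuGraphLaunch(hGraphExec, hStream);   /* ordered behind prior work in hStream */
cuStreamSynchronize(hStream);         /* block until this launch completes */
cuStreamDestroy(hStream);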
-
-/**
- * \brief Destroys an executable graph
- *
- * Destroys the executable graph specified by \p hGraphExec, as well
- * as all of its executable nodes. If the executable graph is
- * in-flight, it will not be terminated, but rather freed
- * asynchronously on completion.
- *
- * \param hGraphExec - Executable graph to destroy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphInstantiate,
- * ::cuGraphLaunch
- */
-CUresult CUDAAPI cuGraphExecDestroy(CUgraphExec hGraphExec);
-
-/**
- * \brief Destroys a graph
- *
- * Destroys the graph specified by \p hGraph, as well as all of its nodes.
- *
- * \param hGraph - Graph to destroy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphCreate
- */
-CUresult CUDAAPI cuGraphDestroy(CUgraph hGraph);
-
-/**
- * \brief Check whether an executable graph can be updated with a graph and perform the update if possible
- *
- * Updates the node parameters in the instantiated graph specified by \p hGraphExec with the
- * node parameters in a topologically identical graph specified by \p hGraph.
- *
- * Limitations:
- *
- * - Kernel nodes:
- * - The function must not change (same restriction as cuGraphExecKernelNodeSetParams())
- * - Memset and memcpy nodes:
- * - The CUDA device(s) to which the operand(s) was allocated/mapped cannot change.
- * - The source/destination memory must be allocated from the same contexts as the original
- * source/destination memory.
- * - Only 1D memsets can be changed.
- * - Additional memcpy node restrictions:
- *     - Changing either the source or destination memory type (i.e. CU_MEMORYTYPE_DEVICE,
- * CU_MEMORYTYPE_ARRAY, etc.) is not supported.
- *
- * Note: The API may add further restrictions in future releases. The return code should always be checked.
- *
- * Some node types are not currently supported:
- * - Empty graph nodes (CU_GRAPH_NODE_TYPE_EMPTY)
- * - Child graphs (CU_GRAPH_NODE_TYPE_GRAPH).
- *
- * cuGraphExecUpdate sets \p updateResult_out to CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED under
- * the following conditions:
- *
- * - The count of nodes directly in \p hGraphExec and \p hGraph differ, in which case \p hErrorNode_out
- * is NULL.
- * - A node is deleted in \p hGraph but not its pair from \p hGraphExec, in which case \p hErrorNode_out
- * is NULL.
- * - A node is deleted in \p hGraphExec but not its pair from \p hGraph, in which case \p hErrorNode_out is
- * the pairless node from \p hGraph.
- * - The dependent nodes of a pair differ, in which case \p hErrorNode_out is the node from \p hGraph.
- *
- * cuGraphExecUpdate sets \p updateResult_out to:
- * - CU_GRAPH_EXEC_UPDATE_ERROR if passed an invalid value.
- * - CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED if the graph topology changed
- * - CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED if the type of a node changed, in which case
- * \p hErrorNode_out is set to the node from \p hGraph.
- * - CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED if the func field of a kernel changed, in which
- * case \p hErrorNode_out is set to the node from \p hGraph
- * - CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED if any parameters to a node changed in a way
- * that is not supported, in which case \p hErrorNode_out is set to the node from \p hGraph.
- * - CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED if something about a node is unsupported, like
- * the node's type or configuration, in which case \p hErrorNode_out is set to the node from \p hGraph
- *
- * If \p updateResult_out isn't set in one of the situations described above, the update check passes
- * and cuGraphExecUpdate updates \p hGraphExec to match the contents of \p hGraph. If an error happens
- * during the update, \p updateResult_out will be set to CU_GRAPH_EXEC_UPDATE_ERROR; otherwise,
- * \p updateResult_out is set to CU_GRAPH_EXEC_UPDATE_SUCCESS.
- *
- * cuGraphExecUpdate returns CUDA_SUCCESS when the update was performed successfully. It returns
- * CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE if the graph update was not performed because it included
- * changes which violated constraints specific to instantiated graph update.
- *
- * \param hGraphExec The instantiated graph to be updated
- * \param hGraph The graph containing the updated parameters
- * \param hErrorNode_out The node which caused the permissibility check to forbid the update, if any
- * \param updateResult_out Whether the graph update was permitted. If it was forbidden, the reason why
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE,
- * \note_graph_thread_safety
- * \notefnerr
- *
- * \sa
- * ::cuGraphInstantiate
- */
-CUresult CUDAAPI cuGraphExecUpdate(CUgraphExec hGraphExec, CUgraph hGraph, CUgraphNode *hErrorNode_out, CUgraphExecUpdateResult *updateResult_out);
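/* Editorial sketch (not part of the original header): try an in-place exec
 * update and fall back to re-instantiation when the update is not permitted. */
CUgraphExecUpdateResult updateResult;
CUgraphNode hErrorNode = NULL;
CUresult err = cuGraphExecUpdate(hGraphExec, hGraph, &hErrorNode, &updateResult);
if (err == CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE) {
  cuGraphExecDestroy(hGraphExec);
  err = cuGraphInstantiate(&hGraphExec, hGraph, NULL, NULL, 0);
}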
-
-/**
- * \brief Copies attributes from source node to destination node.
- *
- * Copies attributes from source node \p src to destination node \p dst.
- * Both nodes must have the same context.
- *
- * \param[out] dst Destination node
- * \param[in] src Source node
- * For a list of attributes, see ::CUkernelNodeAttrID.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa
- * ::CUaccessPolicyWindow
- */
-CUresult CUDAAPI cuGraphKernelNodeCopyAttributes(CUgraphNode dst, CUgraphNode src);
-
-/**
- * \brief Queries node attribute.
- *
- * Queries attribute \p attr from node \p hNode and stores it in corresponding
- * member of \p value_out.
- *
- * \param[in] hNode - Node to query
- * \param[in] attr - Attribute to query
- * \param[out] value_out - Location to return the attribute value
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- *
- * \sa
- * ::CUaccessPolicyWindow
- */
-CUresult CUDAAPI cuGraphKernelNodeGetAttribute(CUgraphNode hNode, CUkernelNodeAttrID attr,
- CUkernelNodeAttrValue *value_out);
-
-/**
- * \brief Sets node attribute.
- *
- * Sets attribute \p attr on node \p hNode from corresponding attribute of
- * \p value.
- *
- * \param[in] hNode - Node on which to set the attribute
- * \param[in] attr - Attribute to set
- * \param[in] value - Attribute value to apply
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE
- * \notefnerr
- *
- * \sa
- * ::CUaccessPolicyWindow
- */
-CUresult CUDAAPI cuGraphKernelNodeSetAttribute(CUgraphNode hNode, CUkernelNodeAttrID attr,
- const CUkernelNodeAttrValue *value);
-
-/** @} */ /* END CUDA_GRAPH */
-
-/**
- * \defgroup CUDA_OCCUPANCY Occupancy
- *
- * ___MANBRIEF___ occupancy calculation functions of the low-level CUDA driver
- * API (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the occupancy calculation functions of the low-level CUDA
- * driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Returns occupancy of a function
- *
- * Returns in \p *numBlocks the maximum number of active blocks per
- * streaming multiprocessor.
- *
- * \param numBlocks - Returned occupancy
- * \param func - Kernel for which occupancy is calculated
- * \param blockSize - Block size the kernel is intended to be launched with
- * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_UNKNOWN
- * \notefnerr
- *
- * \sa
- * ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
- */
-CUresult CUDAAPI cuOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, CUfunction func, int blockSize, size_t dynamicSMemSize);
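/* Editorial sketch (not part of the original header): how many 256-thread blocks
 * with no dynamic shared memory can be resident per SM for kernel `func`. */
int numBlocks = 0;
CUresult err = cuOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, func, /*blockSize=*/256, /*dynamicSMemSize=*/0);
/* numBlocks * 256 is the per-SM active thread count at this configuration. */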
-
-/**
- * \brief Returns occupancy of a function
- *
- * Returns in \p *numBlocks the maximum number of active blocks per
- * streaming multiprocessor.
- *
- * The \p Flags parameter controls how special cases are handled. The
- * valid flags are:
- *
- * - ::CU_OCCUPANCY_DEFAULT, which maintains the default behavior as
- * ::cuOccupancyMaxActiveBlocksPerMultiprocessor;
- *
- * - ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE, which suppresses the
- *   default behavior on platforms where global caching affects
- *   occupancy. On such platforms, if caching is enabled, but
- *   per-block SM resource usage would result in zero occupancy, the
- *   occupancy calculator will calculate the occupancy as if caching
- *   is disabled. Setting ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE makes
- *   the occupancy calculator return 0 in such cases. More information
- *   about this feature can be found in the "Unified L1/Texture Cache"
- * section of the Maxwell tuning guide.
- *
- * \param numBlocks - Returned occupancy
- * \param func - Kernel for which occupancy is calculated
- * \param blockSize - Block size the kernel is intended to be launched with
- * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
- * \param flags - Requested behavior for the occupancy calculator
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_UNKNOWN
- * \notefnerr
- *
- * \sa
- * ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
- */
-CUresult CUDAAPI cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, CUfunction func, int blockSize, size_t dynamicSMemSize, unsigned int flags);
-
-/**
- * \brief Suggest a launch configuration with reasonable occupancy
- *
- * Returns in \p *blockSize a reasonable block size that can achieve
- * the maximum occupancy (or, the maximum number of active warps with
- * the fewest blocks per multiprocessor), and in \p *minGridSize the
- * minimum grid size to achieve the maximum occupancy.
- *
- * If \p blockSizeLimit is 0, the configurator will use the maximum
- * block size permitted by the device / function instead.
- *
- * If per-block dynamic shared memory allocation is not needed, the
- * user should leave both \p blockSizeToDynamicSMemSize and \p
- * dynamicSMemSize as 0.
- *
- * If per-block dynamic shared memory allocation is needed, then if
- * the dynamic shared memory size is constant regardless of block
- * size, the size should be passed through \p dynamicSMemSize, and \p
- * blockSizeToDynamicSMemSize should be NULL.
- *
- * Otherwise, if the per-block dynamic shared memory size varies with
- * different block sizes, the user needs to provide a unary function
- * through \p blockSizeToDynamicSMemSize that computes the dynamic
- * shared memory needed by \p func for any given block size. \p
- * dynamicSMemSize is ignored. An example signature is:
- *
- * \code
- * // Take block size, returns dynamic shared memory needed
- * size_t blockToSmem(int blockSize);
- * \endcode
- *
- * \param minGridSize - Returned minimum grid size needed to achieve the maximum occupancy
- * \param blockSize - Returned maximum block size that can achieve the maximum occupancy
- * \param func - Kernel for which launch configuration is calculated
- * \param blockSizeToDynamicSMemSize - A function that calculates how much per-block dynamic shared memory \p func uses based on the block size
- * \param dynamicSMemSize - Dynamic shared memory usage intended, in bytes
- * \param blockSizeLimit - The maximum block size \p func is designed to handle
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_UNKNOWN
- * \notefnerr
- *
- * \sa
- * ::cudaOccupancyMaxPotentialBlockSize
- */
-CUresult CUDAAPI cuOccupancyMaxPotentialBlockSize(int *minGridSize, int *blockSize, CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit);
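/* Editorial sketch (not part of the original header): letting the occupancy
 * calculator pick a block size, then deriving a grid size that covers `n`
 * elements. No dynamic shared memory, so the B2D callback and size are NULL/0. */
int minGridSize = 0, blockSize = 0;
cuOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, func, NULL, 0, 0);
size_t gridSize = (n + (size_t)blockSize - 1) / (size_t)blockSize;  /* round up */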
-
-/**
- * \brief Suggest a launch configuration with reasonable occupancy
- *
- * An extended version of ::cuOccupancyMaxPotentialBlockSize. In
- * addition to arguments passed to ::cuOccupancyMaxPotentialBlockSize,
- * ::cuOccupancyMaxPotentialBlockSizeWithFlags also takes a \p Flags
- * parameter.
- *
- * The \p Flags parameter controls how special cases are handled. The
- * valid flags are:
- *
- * - ::CU_OCCUPANCY_DEFAULT, which maintains the default behavior as
- * ::cuOccupancyMaxPotentialBlockSize;
- *
- * - ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE, which suppresses the
- *   default behavior on platforms where global caching affects
- *   occupancy. On such platforms, the launch configuration that
- *   produces maximal occupancy might not support global
- *   caching. Setting ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE
- *   guarantees that the produced launch configuration is global
- *   caching compatible at a potential cost of occupancy. More information
- *   about this feature can be found in the "Unified L1/Texture Cache"
- * section of the Maxwell tuning guide.
- *
- * \param minGridSize - Returned minimum grid size needed to achieve the maximum occupancy
- * \param blockSize - Returned maximum block size that can achieve the maximum occupancy
- * \param func - Kernel for which launch configuration is calculated
- * \param blockSizeToDynamicSMemSize - A function that calculates how much per-block dynamic shared memory \p func uses based on the block size
- * \param dynamicSMemSize - Dynamic shared memory usage intended, in bytes
- * \param blockSizeLimit - The maximum block size \p func is designed to handle
- * \param flags - Options
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_UNKNOWN
- * \notefnerr
- *
- * \sa
- * ::cudaOccupancyMaxPotentialBlockSizeWithFlags
- */
-CUresult CUDAAPI cuOccupancyMaxPotentialBlockSizeWithFlags(int *minGridSize, int *blockSize, CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit, unsigned int flags);
-
-/**
- * \brief Returns dynamic shared memory available per block when launching \p numBlocks blocks on SM
- *
- * Returns in \p *dynamicSmemSize the maximum size of dynamic shared memory to allow \p numBlocks blocks per SM.
- *
- * \param dynamicSmemSize - Returned maximum dynamic shared memory
- * \param func - Kernel function for which occupancy is calculated
- * \param numBlocks - Number of blocks to fit on SM
- * \param blockSize - Size of the blocks
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_UNKNOWN
- * \notefnerr
- *
- */
-CUresult CUDAAPI cuOccupancyAvailableDynamicSMemPerBlock(size_t *dynamicSmemSize, CUfunction func, int numBlocks, int blockSize);
-
-/** @} */ /* END CUDA_OCCUPANCY */
-
-/**
- * \defgroup CUDA_TEXREF_DEPRECATED Texture Reference Management [DEPRECATED]
- *
- * ___MANBRIEF___ deprecated texture reference management functions of the
- * low-level CUDA driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the deprecated texture reference management
- * functions of the low-level CUDA driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Binds an array as a texture reference
- *
- * \deprecated
- *
- * Binds the CUDA array \p hArray to the texture reference \p hTexRef. Any
- * previous address or CUDA array state associated with the texture reference
- * is superseded by this function. \p Flags must be set to
- * ::CU_TRSA_OVERRIDE_FORMAT. Any CUDA array previously bound to \p hTexRef is
- * unbound.
- *
- * \param hTexRef - Texture reference to bind
- * \param hArray - Array to bind
- * \param Flags - Options (must be ::CU_TRSA_OVERRIDE_FORMAT)
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode,
- * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,
- * ::cudaBindTextureToArray
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetArray(CUtexref hTexRef, CUarray hArray, unsigned int Flags);
-
-/**
- * \brief Binds a mipmapped array to a texture reference
- *
- * \deprecated
- *
- * Binds the CUDA mipmapped array \p hMipmappedArray to the texture reference \p hTexRef.
- * Any previous address or CUDA array state associated with the texture reference
- * is superseded by this function. \p Flags must be set to ::CU_TRSA_OVERRIDE_FORMAT.
- * Any CUDA array previously bound to \p hTexRef is unbound.
- *
- * \param hTexRef - Texture reference to bind
- * \param hMipmappedArray - Mipmapped array to bind
- * \param Flags - Options (must be ::CU_TRSA_OVERRIDE_FORMAT)
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode,
- * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,
- * ::cudaBindTextureToMipmappedArray
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMipmappedArray(CUtexref hTexRef, CUmipmappedArray hMipmappedArray, unsigned int Flags);
-
-/**
- * \brief Binds an address as a texture reference
- *
- * \deprecated
- *
- * Binds a linear address range to the texture reference \p hTexRef. Any
- * previous address or CUDA array state associated with the texture reference
- * is superseded by this function. Any memory previously bound to \p hTexRef
- * is unbound.
- *
- * Since the hardware enforces an alignment requirement on texture base
- * addresses, ::cuTexRefSetAddress() passes back a byte offset in
- * \p *ByteOffset that must be applied to texture fetches in order to read from
- * the desired memory. This offset must be divided by the texel size and
- * passed to kernels that read from the texture so they can be applied to the
- * ::tex1Dfetch() function.
- *
- * If the device memory pointer was returned from ::cuMemAlloc(), the offset
- * is guaranteed to be 0 and NULL may be passed as the \p ByteOffset parameter.
- *
- * The total number of elements (or texels) in the linear address range
- * cannot exceed ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH.
- * The number of elements is computed as (\p bytes / bytesPerElement),
- * where bytesPerElement is determined from the data format and number of
- * components set using ::cuTexRefSetFormat().
- *
- * \param ByteOffset - Returned byte offset
- * \param hTexRef - Texture reference to bind
- * \param dptr - Device pointer to bind
- * \param bytes - Size of memory to bind in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,
- * ::cudaBindTexture
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetAddress(size_t *ByteOffset, CUtexref hTexRef, CUdeviceptr dptr, size_t bytes);
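/* Editorial sketch (not part of the original header): binding linear memory to a
 * texture reference and converting the returned byte offset to a texel offset
 * that a kernel would add to its tex1Dfetch index. `dptr` and `numElems` are
 * assumed to exist. */
size_t byteOffset = 0;
cuTexRefSetFormat(hTexRef, CU_AD_FORMAT_FLOAT, 1);
cuTexRefSetAddress(&byteOffset, hTexRef, dptr, numElems * sizeof(float));
size_t texelOffset = byteOffset / sizeof(float);  /* 0 for cuMemAlloc'd pointers */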
-
-/**
- * \brief Binds an address as a 2D texture reference
- *
- * \deprecated
- *
- * Binds a linear address range to the texture reference \p hTexRef. Any
- * previous address or CUDA array state associated with the texture reference
- * is superseded by this function. Any memory previously bound to \p hTexRef
- * is unbound.
- *
- * Using a ::tex2D() function inside a kernel requires a call to either
- * ::cuTexRefSetArray() to bind the corresponding texture reference to an
- * array, or ::cuTexRefSetAddress2D() to bind the texture reference to linear
- * memory.
- *
- * Function calls to ::cuTexRefSetFormat() cannot follow calls to
- * ::cuTexRefSetAddress2D() for the same texture reference.
- *
- * It is required that \p dptr be aligned to the appropriate hardware-specific
- * texture alignment. You can query this value using the device attribute
- * ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. If an unaligned \p dptr is
- * supplied, ::CUDA_ERROR_INVALID_VALUE is returned.
- *
- * \p Pitch has to be aligned to the hardware-specific texture pitch alignment.
- * This value can be queried using the device attribute
- * ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT. If an unaligned \p Pitch is
- * supplied, ::CUDA_ERROR_INVALID_VALUE is returned.
- *
- * Width and Height, which are specified in elements (or texels), cannot exceed
- * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH and
- * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT respectively.
- * \p Pitch, which is specified in bytes, cannot exceed
- * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH.
- *
- * \param hTexRef - Texture reference to bind
- * \param desc - Descriptor of CUDA array
- * \param dptr - Device pointer to bind
- * \param Pitch - Line pitch in bytes
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,
- * ::cudaBindTexture2D
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetAddress2D(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR *desc, CUdeviceptr dptr, size_t Pitch);
-
-/**
- * \brief Sets the format for a texture reference
- *
- * \deprecated
- *
- * Specifies the format of the data to be read by the texture reference
- * \p hTexRef. \p fmt and \p NumPackedComponents are exactly analogous to the
- * ::Format and ::NumChannels members of the ::CUDA_ARRAY_DESCRIPTOR structure:
- * They specify the format of each component and the number of components per
- * array element.
- *
- * \param hTexRef - Texture reference
- * \param fmt - Format to set
- * \param NumPackedComponents - Number of components per array element
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,
- * ::cudaCreateChannelDesc,
- * ::cudaBindTexture,
- * ::cudaBindTexture2D,
- * ::cudaBindTextureToArray,
- * ::cudaBindTextureToMipmappedArray
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetFormat(CUtexref hTexRef, CUarray_format fmt, int NumPackedComponents);
-
-/**
- * \brief Sets the addressing mode for a texture reference
- *
- * \deprecated
- *
- * Specifies the addressing mode \p am for the given dimension \p dim of the
- * texture reference \p hTexRef. If \p dim is zero, the addressing mode is
- * applied to the first parameter of the functions used to fetch from the
- * texture; if \p dim is 1, the second, and so on. ::CUaddress_mode is defined
- * as:
- * \code
- typedef enum CUaddress_mode_enum {
- CU_TR_ADDRESS_MODE_WRAP = 0,
- CU_TR_ADDRESS_MODE_CLAMP = 1,
- CU_TR_ADDRESS_MODE_MIRROR = 2,
- CU_TR_ADDRESS_MODE_BORDER = 3
- } CUaddress_mode;
- * \endcode
- *
- * Note that this call has no effect if \p hTexRef is bound to linear memory.
- * Also, if the flag, ::CU_TRSF_NORMALIZED_COORDINATES, is not set, the only
- * supported address mode is ::CU_TR_ADDRESS_MODE_CLAMP.
- *
- * \param hTexRef - Texture reference
- * \param dim - Dimension
- * \param am - Addressing mode to set
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetArray,
- * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,
- * ::cudaBindTexture,
- * ::cudaBindTexture2D,
- * ::cudaBindTextureToArray,
- * ::cudaBindTextureToMipmappedArray
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetAddressMode(CUtexref hTexRef, int dim, CUaddress_mode am);
-
-/**
- * \brief Sets the filtering mode for a texture reference
- *
- * \deprecated
- *
- * Specifies the filtering mode \p fm to be used when reading memory through
- * the texture reference \p hTexRef. ::CUfilter_mode_enum is defined as:
- *
- * \code
- typedef enum CUfilter_mode_enum {
- CU_TR_FILTER_MODE_POINT = 0,
- CU_TR_FILTER_MODE_LINEAR = 1
- } CUfilter_mode;
- * \endcode
- *
- * Note that this call has no effect if \p hTexRef is bound to linear memory.
- *
- * \param hTexRef - Texture reference
- * \param fm - Filtering mode to set
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,
- * ::cudaBindTextureToArray
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetFilterMode(CUtexref hTexRef, CUfilter_mode fm);
-
-/**
- * \brief Sets the mipmap filtering mode for a texture reference
- *
- * \deprecated
- *
- * Specifies the mipmap filtering mode \p fm to be used when reading memory through
- * the texture reference \p hTexRef. ::CUfilter_mode_enum is defined as:
- *
- * \code
- typedef enum CUfilter_mode_enum {
- CU_TR_FILTER_MODE_POINT = 0,
- CU_TR_FILTER_MODE_LINEAR = 1
- } CUfilter_mode;
- * \endcode
- *
- * Note that this call has no effect if \p hTexRef is not bound to a mipmapped array.
- *
- * \param hTexRef - Texture reference
- * \param fm - Filtering mode to set
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,
- * ::cudaBindTextureToMipmappedArray
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMipmapFilterMode(CUtexref hTexRef, CUfilter_mode fm);
-
-/**
- * \brief Sets the mipmap level bias for a texture reference
- *
- * \deprecated
- *
- * Specifies the mipmap level bias \p bias to be added to the specified mipmap level when
- * reading memory through the texture reference \p hTexRef.
- *
- * Note that this call has no effect if \p hTexRef is not bound to a mipmapped array.
- *
- * \param hTexRef - Texture reference
- * \param bias - Mipmap level bias
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,
- * ::cudaBindTextureToMipmappedArray
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMipmapLevelBias(CUtexref hTexRef, float bias);
-
-/**
- * \brief Sets the mipmap min/max mipmap level clamps for a texture reference
- *
- * \deprecated
- *
- * Specifies the min/max mipmap level clamps, \p minMipmapLevelClamp and \p maxMipmapLevelClamp
- * respectively, to be used when reading memory through the texture reference
- * \p hTexRef.
- *
- * Note that this call has no effect if \p hTexRef is not bound to a mipmapped array.
- *
- * \param hTexRef - Texture reference
- * \param minMipmapLevelClamp - Mipmap min level clamp
- * \param maxMipmapLevelClamp - Mipmap max level clamp
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,
- * ::cudaBindTextureToMipmappedArray
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMipmapLevelClamp(CUtexref hTexRef, float minMipmapLevelClamp, float maxMipmapLevelClamp);
-
-/**
- * \brief Sets the maximum anisotropy for a texture reference
- *
- * \deprecated
- *
- * Specifies the maximum anisotropy \p maxAniso to be used when reading memory through
- * the texture reference \p hTexRef.
- *
- * Note that this call has no effect if \p hTexRef is bound to linear memory.
- *
- * \param hTexRef - Texture reference
- * \param maxAniso - Maximum anisotropy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,
- * ::cudaBindTextureToArray,
- * ::cudaBindTextureToMipmappedArray
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMaxAnisotropy(CUtexref hTexRef, unsigned int maxAniso);
-
-/**
- * \brief Sets the border color for a texture reference
- *
- * \deprecated
- *
- * Specifies the RGBA border color via \p pBorderColor for the texture reference
- * \p hTexRef. The color value is of float type only and holds color components in
- * the following sequence:
- * pBorderColor[0] holds 'R' component
- * pBorderColor[1] holds 'G' component
- * pBorderColor[2] holds 'B' component
- * pBorderColor[3] holds 'A' component
- *
- * Note that the color values can be set only when the Address mode is set to
- * CU_TR_ADDRESS_MODE_BORDER using ::cuTexRefSetAddressMode.
- * Applications using integer border color values have to "reinterpret_cast" their values to float.
- *
- * \param hTexRef - Texture reference
- * \param pBorderColor - RGBA color
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddressMode,
- * ::cuTexRefGetAddressMode, ::cuTexRefGetBorderColor,
- * ::cudaBindTexture,
- * ::cudaBindTexture2D,
- * ::cudaBindTextureToArray,
- * ::cudaBindTextureToMipmappedArray
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetBorderColor(CUtexref hTexRef, float *pBorderColor);
-
-/**
- * \brief Sets the flags for a texture reference
- *
- * \deprecated
- *
- * Specifies optional flags via \p Flags to specify the behavior of data
- * returned through the texture reference \p hTexRef. The valid flags are:
- *
- * - ::CU_TRSF_READ_AS_INTEGER, which suppresses the default behavior of
- * having the texture promote integer data to floating point data in the
- *   range [0, 1]. Note that textures with 32-bit integer format
- *   are not promoted, regardless of whether or not this
- * flag is specified;
- * - ::CU_TRSF_NORMALIZED_COORDINATES, which suppresses the
- * default behavior of having the texture coordinates range
- * from [0, Dim) where Dim is the width or height of the CUDA
- * array. Instead, the texture coordinates [0, 1.0) reference
- * the entire breadth of the array dimension;
- * - ::CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION, which disables any trilinear
- * filtering optimizations. Trilinear optimizations improve texture filtering
- * performance by allowing bilinear filtering on textures in scenarios where
- * it can closely approximate the expected results.
- *
- * \param hTexRef - Texture reference
- * \param Flags - Optional flags to set
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFilterMode, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,
- * ::cudaBindTexture,
- * ::cudaBindTexture2D,
- * ::cudaBindTextureToArray,
- * ::cudaBindTextureToMipmappedArray
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetFlags(CUtexref hTexRef, unsigned int Flags);
-
-/**
- * \brief Gets the address associated with a texture reference
- *
- * \deprecated
- *
- * Returns in \p *pdptr the base address bound to the texture reference
- * \p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference
- * is not bound to any device memory range.
- *
- * \param pdptr - Returned device address
- * \param hTexRef - Texture reference
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetAddress(CUdeviceptr *pdptr, CUtexref hTexRef);
-
-/**
- * \brief Gets the array bound to a texture reference
- *
- * \deprecated
- *
- * Returns in \p *phArray the CUDA array bound to the texture reference
- * \p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference
- * is not bound to any CUDA array.
- *
- * \param phArray - Returned array
- * \param hTexRef - Texture reference
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetArray(CUarray *phArray, CUtexref hTexRef);
-
-/**
- * \brief Gets the mipmapped array bound to a texture reference
- *
- * \deprecated
- *
- * Returns in \p *phMipmappedArray the CUDA mipmapped array bound to the texture
- * reference \p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference
- * is not bound to any CUDA mipmapped array.
- *
- * \param phMipmappedArray - Returned mipmapped array
- * \param hTexRef - Texture reference
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMipmappedArray(CUmipmappedArray *phMipmappedArray, CUtexref hTexRef);
-
-/**
- * \brief Gets the addressing mode used by a texture reference
- *
- * \deprecated
- *
- * Returns in \p *pam the addressing mode corresponding to the
- * dimension \p dim of the texture reference \p hTexRef. Currently, the only
- * valid values for \p dim are 0 and 1.
- *
- * \param pam - Returned addressing mode
- * \param hTexRef - Texture reference
- * \param dim - Dimension
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetAddressMode(CUaddress_mode *pam, CUtexref hTexRef, int dim);
-
-/**
- * \brief Gets the filter-mode used by a texture reference
- *
- * \deprecated
- *
- * Returns in \p *pfm the filtering mode of the texture reference
- * \p hTexRef.
- *
- * \param pfm - Returned filtering mode
- * \param hTexRef - Texture reference
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFlags, ::cuTexRefGetFormat
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetFilterMode(CUfilter_mode *pfm, CUtexref hTexRef);
-
-/**
- * \brief Gets the format used by a texture reference
- *
- * \deprecated
- *
- * Returns in \p *pFormat and \p *pNumChannels the format and number
- * of components of the CUDA array bound to the texture reference \p hTexRef.
- * If \p pFormat or \p pNumChannels is NULL, it will be ignored.
- *
- * \param pFormat - Returned format
- * \param pNumChannels - Returned number of components
- * \param hTexRef - Texture reference
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetFormat(CUarray_format *pFormat, int *pNumChannels, CUtexref hTexRef);
-
-/**
- * \brief Gets the mipmap filtering mode for a texture reference
- *
- * \deprecated
- *
- * Returns the mipmap filtering mode in \p pfm that's used when reading memory through
- * the texture reference \p hTexRef.
- *
- * \param pfm - Returned mipmap filtering mode
- * \param hTexRef - Texture reference
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMipmapFilterMode(CUfilter_mode *pfm, CUtexref hTexRef);
-
-/**
- * \brief Gets the mipmap level bias for a texture reference
- *
- * \deprecated
- *
- * Returns the mipmap level bias in \p pbias that's added to the specified mipmap
- * level when reading memory through the texture reference \p hTexRef.
- *
- * \param pbias - Returned mipmap level bias
- * \param hTexRef - Texture reference
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMipmapLevelBias(float *pbias, CUtexref hTexRef);
-
-/**
- * \brief Gets the min/max mipmap level clamps for a texture reference
- *
- * \deprecated
- *
- * Returns the min/max mipmap level clamps in \p pminMipmapLevelClamp and \p pmaxMipmapLevelClamp
- * that's used when reading memory through the texture reference \p hTexRef.
- *
- * \param pminMipmapLevelClamp - Returned mipmap min level clamp
- * \param pmaxMipmapLevelClamp - Returned mipmap max level clamp
- * \param hTexRef - Texture reference
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMipmapLevelClamp(float *pminMipmapLevelClamp, float *pmaxMipmapLevelClamp, CUtexref hTexRef);
-
-/**
- * \brief Gets the maximum anisotropy for a texture reference
- *
- * \deprecated
- *
- * Returns the maximum anisotropy in \p pmaxAniso that's used when reading memory through
- * the texture reference \p hTexRef.
- *
- * \param pmaxAniso - Returned maximum anisotropy
- * \param hTexRef - Texture reference
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMaxAnisotropy(int *pmaxAniso, CUtexref hTexRef);
-
-/**
- * \brief Gets the border color used by a texture reference
- *
- * \deprecated
- *
- * Returns in \p pBorderColor the RGBA color values used by
- * the texture reference \p hTexRef.
- * The color value is of type float and holds color components in
- * the following sequence:
- * pBorderColor[0] holds 'R' component
- * pBorderColor[1] holds 'G' component
- * pBorderColor[2] holds 'B' component
- * pBorderColor[3] holds 'A' component
- *
- * \param hTexRef - Texture reference
- * \param pBorderColor - Returned RGBA color values
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddressMode, ::cuTexRefSetBorderColor
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetBorderColor(float *pBorderColor, CUtexref hTexRef);
-
-/**
- * \brief Gets the flags used by a texture reference
- *
- * \deprecated
- *
- * Returns in \p *pFlags the flags of the texture reference \p hTexRef.
- *
- * \param pFlags - Returned flags
- * \param hTexRef - Texture reference
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefSetAddress,
- * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,
- * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,
- * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,
- * ::cuTexRefGetFilterMode, ::cuTexRefGetFormat
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetFlags(unsigned int *pFlags, CUtexref hTexRef);
-
-/**
- * \brief Creates a texture reference
- *
- * \deprecated
- *
- * Creates a texture reference and returns its handle in \p *pTexRef. Once
- * created, the application must call ::cuTexRefSetArray() or
- * ::cuTexRefSetAddress() to associate the reference with allocated memory.
- * Other texture reference functions are used to specify the format and
- * interpretation (addressing, filtering, etc.) to be used when the memory is
- * read through this texture reference.
- *
- * \param pTexRef - Returned texture reference
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefDestroy
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefCreate(CUtexref *pTexRef);
-
-/**
- * \brief Destroys a texture reference
- *
- * \deprecated
- *
- * Destroys the texture reference specified by \p hTexRef.
- *
- * \param hTexRef - Texture reference to destroy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuTexRefCreate
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefDestroy(CUtexref hTexRef);
-
-/** @} */ /* END CUDA_TEXREF_DEPRECATED */
-
-
-/**
- * \defgroup CUDA_SURFREF_DEPRECATED Surface Reference Management [DEPRECATED]
- *
- * ___MANBRIEF___ surface reference management functions of the low-level CUDA
- * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the surface reference management functions of the
- * low-level CUDA driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Sets the CUDA array for a surface reference.
- *
- * \deprecated
- *
- * Sets the CUDA array \p hArray to be read and written by the surface reference
- * \p hSurfRef. Any previous CUDA array state associated with the surface
- * reference is superseded by this function. \p Flags must be set to 0.
- * The ::CUDA_ARRAY3D_SURFACE_LDST flag must have been set for the CUDA array.
- * Any CUDA array previously bound to \p hSurfRef is unbound.
- *
- * \param hSurfRef - Surface reference handle
- * \param hArray - CUDA array handle
- * \param Flags - set to 0
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa
- * ::cuModuleGetSurfRef,
- * ::cuSurfRefGetArray,
- * ::cudaBindSurfaceToArray
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuSurfRefSetArray(CUsurfref hSurfRef, CUarray hArray, unsigned int Flags);
-
-/**
- * \brief Passes back the CUDA array bound to a surface reference.
- *
- * \deprecated
- *
- * Returns in \p *phArray the CUDA array bound to the surface reference
- * \p hSurfRef, or returns ::CUDA_ERROR_INVALID_VALUE if the surface reference
- * is not bound to any CUDA array.
- *
- * \param phArray - Returned CUDA array handle
- * \param hSurfRef - Surface reference handle
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa ::cuModuleGetSurfRef, ::cuSurfRefSetArray
- */
-__CUDA_DEPRECATED CUresult CUDAAPI cuSurfRefGetArray(CUarray *phArray, CUsurfref hSurfRef);
-
-/** @} */ /* END CUDA_SURFREF_DEPRECATED */
-
-/**
- * \defgroup CUDA_TEXOBJECT Texture Object Management
- *
- * ___MANBRIEF___ texture object management functions of the low-level CUDA
- * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the texture object management functions of the
- * low-level CUDA driver application programming interface. The texture
- * object API is only supported on devices of compute capability 3.0 or higher.
- *
- * @{
- */
-
-/**
- * \brief Creates a texture object
- *
- * Creates a texture object and returns it in \p pTexObject. \p pResDesc describes
- * the data to texture from. \p pTexDesc describes how the data should be sampled.
- * \p pResViewDesc is an optional argument that specifies an alternate format for
- * the data described by \p pResDesc, and also describes the subresource region
- * to restrict access to when texturing. \p pResViewDesc can only be specified if
- * the type of resource is a CUDA array or a CUDA mipmapped array.
- *
- * Texture objects are only supported on devices of compute capability 3.0 or higher.
- * Additionally, a texture object is an opaque value, and, as such, should only be
- * accessed through CUDA API calls.
- *
- * The ::CUDA_RESOURCE_DESC structure is defined as:
- * \code
- typedef struct CUDA_RESOURCE_DESC_st
- {
- CUresourcetype resType;
-
- union {
- struct {
- CUarray hArray;
- } array;
- struct {
- CUmipmappedArray hMipmappedArray;
- } mipmap;
- struct {
- CUdeviceptr devPtr;
- CUarray_format format;
- unsigned int numChannels;
- size_t sizeInBytes;
- } linear;
- struct {
- CUdeviceptr devPtr;
- CUarray_format format;
- unsigned int numChannels;
- size_t width;
- size_t height;
- size_t pitchInBytes;
- } pitch2D;
- } res;
-
- unsigned int flags;
- } CUDA_RESOURCE_DESC;
-
- * \endcode
- * where:
- * - ::CUDA_RESOURCE_DESC::resType specifies the type of resource to texture from.
- * CUresourceType is defined as:
- * \code
- typedef enum CUresourcetype_enum {
- CU_RESOURCE_TYPE_ARRAY = 0x00,
- CU_RESOURCE_TYPE_MIPMAPPED_ARRAY = 0x01,
- CU_RESOURCE_TYPE_LINEAR = 0x02,
- CU_RESOURCE_TYPE_PITCH2D = 0x03
- } CUresourcetype;
- * \endcode
- *
- * \par
- * If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_ARRAY, ::CUDA_RESOURCE_DESC::res::array::hArray
- * must be set to a valid CUDA array handle.
- *
- * \par
- * If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY, ::CUDA_RESOURCE_DESC::res::mipmap::hMipmappedArray
- * must be set to a valid CUDA mipmapped array handle.
- *
- * \par
- * If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_LINEAR, ::CUDA_RESOURCE_DESC::res::linear::devPtr
- * must be set to a valid device pointer, that is aligned to ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT.
- * ::CUDA_RESOURCE_DESC::res::linear::format and ::CUDA_RESOURCE_DESC::res::linear::numChannels
- * describe the format of each component and the number of components per array element. ::CUDA_RESOURCE_DESC::res::linear::sizeInBytes
- * specifies the size of the array in bytes. The total number of elements in the linear address range cannot exceed
- * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH. The number of elements is computed as (sizeInBytes / (sizeof(format) * numChannels)).
- *
- * \par
- * If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_PITCH2D, ::CUDA_RESOURCE_DESC::res::pitch2D::devPtr
- * must be set to a valid device pointer, that is aligned to ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT.
- * ::CUDA_RESOURCE_DESC::res::pitch2D::format and ::CUDA_RESOURCE_DESC::res::pitch2D::numChannels
- * describe the format of each component and the number of components per array element. ::CUDA_RESOURCE_DESC::res::pitch2D::width
- * and ::CUDA_RESOURCE_DESC::res::pitch2D::height specify the width and height of the array in elements, and cannot exceed
- * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH and ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT respectively.
- * ::CUDA_RESOURCE_DESC::res::pitch2D::pitchInBytes specifies the pitch between two rows in bytes and has to be aligned to
- * ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT. Pitch cannot exceed ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH.
- *
- * - ::flags must be set to zero.
- *
- *
- * The ::CUDA_TEXTURE_DESC struct is defined as
- * \code
- typedef struct CUDA_TEXTURE_DESC_st {
- CUaddress_mode addressMode[3];
- CUfilter_mode filterMode;
- unsigned int flags;
- unsigned int maxAnisotropy;
- CUfilter_mode mipmapFilterMode;
- float mipmapLevelBias;
- float minMipmapLevelClamp;
- float maxMipmapLevelClamp;
- } CUDA_TEXTURE_DESC;
- * \endcode
- * where
- * - ::CUDA_TEXTURE_DESC::addressMode specifies the addressing mode for each dimension of the texture data. ::CUaddress_mode is defined as:
- * \code
- typedef enum CUaddress_mode_enum {
- CU_TR_ADDRESS_MODE_WRAP = 0,
- CU_TR_ADDRESS_MODE_CLAMP = 1,
- CU_TR_ADDRESS_MODE_MIRROR = 2,
- CU_TR_ADDRESS_MODE_BORDER = 3
- } CUaddress_mode;
- * \endcode
- * This is ignored if ::CUDA_RESOURCE_DESC::resType is ::CU_RESOURCE_TYPE_LINEAR. Also, if the ::CU_TRSF_NORMALIZED_COORDINATES flag
- * is not set, the only supported address mode is ::CU_TR_ADDRESS_MODE_CLAMP.
- *
- * - ::CUDA_TEXTURE_DESC::filterMode specifies the filtering mode to be used when fetching from the texture. CUfilter_mode is defined as:
- * \code
- typedef enum CUfilter_mode_enum {
- CU_TR_FILTER_MODE_POINT = 0,
- CU_TR_FILTER_MODE_LINEAR = 1
- } CUfilter_mode;
- * \endcode
- * This is ignored if ::CUDA_RESOURCE_DESC::resType is ::CU_RESOURCE_TYPE_LINEAR.
- *
- * - ::CUDA_TEXTURE_DESC::flags can be any combination of the following:
- * - ::CU_TRSF_READ_AS_INTEGER, which suppresses the default behavior of
- * having the texture promote integer data to floating point data in the
- * range [0, 1]. Note that textures with a 32-bit integer format are not
- * promoted, regardless of whether or not this flag is specified.
- * - ::CU_TRSF_NORMALIZED_COORDINATES, which suppresses the default behavior
- * of having the texture coordinates range from [0, Dim) where Dim is the
- * width or height of the CUDA array. Instead, the texture coordinates
- * [0, 1.0) reference the entire breadth of the array dimension. Note that
- * for CUDA mipmapped arrays, this flag has to be set.
- * - ::CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION, which disables any trilinear
- * filtering optimizations. Trilinear optimizations improve texture filtering
- * performance by allowing bilinear filtering on textures in scenarios where
- * it can closely approximate the expected results.
- *
- * - ::CUDA_TEXTURE_DESC::maxAnisotropy specifies the maximum anisotropy ratio to be used when doing anisotropic filtering. This value will be
- * clamped to the range [1,16].
- *
- * - ::CUDA_TEXTURE_DESC::mipmapFilterMode specifies the filter mode when the calculated mipmap level lies between two defined mipmap levels.
- *
- * - ::CUDA_TEXTURE_DESC::mipmapLevelBias specifies the offset to be applied to the calculated mipmap level.
- *
- * - ::CUDA_TEXTURE_DESC::minMipmapLevelClamp specifies the lower end of the mipmap level range to clamp access to.
- *
- * - ::CUDA_TEXTURE_DESC::maxMipmapLevelClamp specifies the upper end of the mipmap level range to clamp access to.
- *
- *
- * The ::CUDA_RESOURCE_VIEW_DESC struct is defined as
- * \code
- typedef struct CUDA_RESOURCE_VIEW_DESC_st
- {
- CUresourceViewFormat format;
- size_t width;
- size_t height;
- size_t depth;
- unsigned int firstMipmapLevel;
- unsigned int lastMipmapLevel;
- unsigned int firstLayer;
- unsigned int lastLayer;
- } CUDA_RESOURCE_VIEW_DESC;
- * \endcode
- * where:
- * - ::CUDA_RESOURCE_VIEW_DESC::format specifies how the data contained in the CUDA array or CUDA mipmapped array should
- * be interpreted. Note that this can incur a change in size of the texture data. If the resource view format is a block
- * compressed format, then the underlying CUDA array or CUDA mipmapped array has to have a base format of ::CU_AD_FORMAT_UNSIGNED_INT32
- * with 2 or 4 channels, depending on the block compressed format. For example, BC1 and BC4 require the underlying CUDA array to have
- * a format of ::CU_AD_FORMAT_UNSIGNED_INT32 with 2 channels. The other BC formats require the underlying resource to have the same base
- * format but with 4 channels.
- *
- * - ::CUDA_RESOURCE_VIEW_DESC::width specifies the new width of the texture data. If the resource view format is a block
- * compressed format, this value has to be 4 times the original width of the resource. For non block compressed formats,
- * this value has to be equal to that of the original resource.
- *
- * - ::CUDA_RESOURCE_VIEW_DESC::height specifies the new height of the texture data. If the resource view format is a block
- * compressed format, this value has to be 4 times the original height of the resource. For non block compressed formats,
- * this value has to be equal to that of the original resource.
- *
- * - ::CUDA_RESOURCE_VIEW_DESC::depth specifies the new depth of the texture data. This value has to be equal to that of the
- * original resource.
- *
- * - ::CUDA_RESOURCE_VIEW_DESC::firstMipmapLevel specifies the most detailed mipmap level. This will be the new mipmap level zero.
- * For non-mipmapped resources, this value has to be zero. ::CUDA_TEXTURE_DESC::minMipmapLevelClamp and ::CUDA_TEXTURE_DESC::maxMipmapLevelClamp
- * will be relative to this value. For example, if the firstMipmapLevel is set to 2, and a minMipmapLevelClamp of 1.2 is specified,
- * then the actual minimum mipmap level clamp will be 3.2.
- *
- * - ::CUDA_RESOURCE_VIEW_DESC::lastMipmapLevel specifies the least detailed mipmap level. For non-mipmapped resources, this value
- * has to be zero.
- *
- * - ::CUDA_RESOURCE_VIEW_DESC::firstLayer specifies the first layer index for layered textures. This will be the new layer zero.
- * For non-layered resources, this value has to be zero.
- *
- * - ::CUDA_RESOURCE_VIEW_DESC::lastLayer specifies the last layer index for layered textures. For non-layered resources,
- * this value has to be zero.
- *
- *
- * \param pTexObject - Texture object to create
- * \param pResDesc - Resource descriptor
- * \param pTexDesc - Texture descriptor
- * \param pResViewDesc - Resource view descriptor
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa
- * ::cuTexObjectDestroy,
- * ::cudaCreateTextureObject
- */
-CUresult CUDAAPI cuTexObjectCreate(CUtexObject *pTexObject, const CUDA_RESOURCE_DESC *pResDesc, const CUDA_TEXTURE_DESC *pTexDesc, const CUDA_RESOURCE_VIEW_DESC *pResViewDesc);
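For orientation only (this sketch is not part of the deleted header): a minimal example of creating a texture object over a linear device buffer with cuTexObjectCreate. The names d_buf and num_elems are hypothetical, a current context and a texture-aligned allocation are assumed, and error handling is omitted.

#include <string.h>
#include <cuda.h>

/* Illustrative sketch: wrap a linear float buffer in a texture object. */
static CUtexObject make_linear_tex(CUdeviceptr d_buf, size_t num_elems) {
  CUDA_RESOURCE_DESC res_desc;
  memset(&res_desc, 0, sizeof(res_desc));
  res_desc.resType = CU_RESOURCE_TYPE_LINEAR;
  res_desc.res.linear.devPtr = d_buf;                       /* assumed texture-aligned */
  res_desc.res.linear.format = CU_AD_FORMAT_FLOAT;
  res_desc.res.linear.numChannels = 1;
  res_desc.res.linear.sizeInBytes = num_elems * sizeof(float);

  CUDA_TEXTURE_DESC tex_desc;
  memset(&tex_desc, 0, sizeof(tex_desc));
  tex_desc.filterMode = CU_TR_FILTER_MODE_POINT;            /* filtering is ignored for linear resources */

  CUtexObject tex = 0;
  cuTexObjectCreate(&tex, &res_desc, &tex_desc, /*pResViewDesc=*/NULL);
  return tex;  /* pass to kernels by value; release with cuTexObjectDestroy() */
}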
-
-/**
- * \brief Destroys a texture object
- *
- * Destroys the texture object specified by \p texObject.
- *
- * \param texObject - Texture object to destroy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa
- * ::cuTexObjectCreate,
- * ::cudaDestroyTextureObject
- */
-CUresult CUDAAPI cuTexObjectDestroy(CUtexObject texObject);
-
-/**
- * \brief Returns a texture object's resource descriptor
- *
- * Returns the resource descriptor for the texture object specified by \p texObject.
- *
- * \param pResDesc - Resource descriptor
- * \param texObject - Texture object
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa
- * ::cuTexObjectCreate,
- * ::cudaGetTextureObjectResourceDesc,
- */
-CUresult CUDAAPI cuTexObjectGetResourceDesc(CUDA_RESOURCE_DESC *pResDesc, CUtexObject texObject);
-
-/**
- * \brief Returns a texture object's texture descriptor
- *
- * Returns the texture descriptor for the texture object specified by \p texObject.
- *
- * \param pTexDesc - Texture descriptor
- * \param texObject - Texture object
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa
- * ::cuTexObjectCreate,
- * ::cudaGetTextureObjectTextureDesc
- */
-CUresult CUDAAPI cuTexObjectGetTextureDesc(CUDA_TEXTURE_DESC *pTexDesc, CUtexObject texObject);
-
-/**
- * \brief Returns a texture object's resource view descriptor
- *
- * Returns the resource view descriptor for the texture object specified by \p texObject.
- * If no resource view was set for \p texObject, the ::CUDA_ERROR_INVALID_VALUE is returned.
- *
- * \param pResViewDesc - Resource view descriptor
- * \param texObject - Texture object
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa
- * ::cuTexObjectCreate,
- * ::cudaGetTextureObjectResourceViewDesc
- */
-CUresult CUDAAPI cuTexObjectGetResourceViewDesc(CUDA_RESOURCE_VIEW_DESC *pResViewDesc, CUtexObject texObject);
-
-/** @} */ /* END CUDA_TEXOBJECT */
-
-/**
- * \defgroup CUDA_SURFOBJECT Surface Object Management
- *
- * ___MANBRIEF___ surface object management functions of the low-level CUDA
- * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the surface object management functions of the
- * low-level CUDA driver application programming interface. The surface
- * object API is only supported on devices of compute capability 3.0 or higher.
- *
- * @{
- */
-
-/**
- * \brief Creates a surface object
- *
- * Creates a surface object and returns it in \p pSurfObject. \p pResDesc describes
- * the data to perform surface load/stores on. ::CUDA_RESOURCE_DESC::resType must be
- * ::CU_RESOURCE_TYPE_ARRAY and ::CUDA_RESOURCE_DESC::res::array::hArray
- * must be set to a valid CUDA array handle. ::CUDA_RESOURCE_DESC::flags must be set to zero.
- *
- * Surface objects are only supported on devices of compute capability 3.0 or higher.
- * Additionally, a surface object is an opaque value, and, as such, should only be
- * accessed through CUDA API calls.
- *
- * \param pSurfObject - Surface object to create
- * \param pResDesc - Resource descriptor
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa
- * ::cuSurfObjectDestroy,
- * ::cudaCreateSurfaceObject
- */
-CUresult CUDAAPI cuSurfObjectCreate(CUsurfObject *pSurfObject, const CUDA_RESOURCE_DESC *pResDesc);
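Again purely illustrative and not taken from the deleted header: a sketch of creating a surface object for a CUDA array that is assumed to have been allocated with the ::CUDA_ARRAY3D_SURFACE_LDST flag; error handling is omitted.

#include <string.h>
#include <cuda.h>

/* Illustrative sketch: create a surface object for load/store access to hArray. */
static CUsurfObject make_surface(CUarray hArray) {
  CUDA_RESOURCE_DESC res_desc;
  memset(&res_desc, 0, sizeof(res_desc));
  res_desc.resType = CU_RESOURCE_TYPE_ARRAY;  /* only array-backed surfaces are allowed */
  res_desc.res.array.hArray = hArray;         /* array assumed created with CUDA_ARRAY3D_SURFACE_LDST */
  res_desc.flags = 0;                         /* must be zero */

  CUsurfObject surf = 0;
  cuSurfObjectCreate(&surf, &res_desc);
  return surf;  /* release with cuSurfObjectDestroy() when no longer needed */
}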
-
-/**
- * \brief Destroys a surface object
- *
- * Destroys the surface object specified by \p surfObject.
- *
- * \param surfObject - Surface object to destroy
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa
- * ::cuSurfObjectCreate,
- * ::cudaDestroySurfaceObject
- */
-CUresult CUDAAPI cuSurfObjectDestroy(CUsurfObject surfObject);
-
-/**
- * \brief Returns a surface object's resource descriptor
- *
- * Returns the resource descriptor for the surface object specified by \p surfObject.
- *
- * \param pResDesc - Resource descriptor
- * \param surfObject - Surface object
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE
- *
- * \sa
- * ::cuSurfObjectCreate,
- * ::cudaGetSurfaceObjectResourceDesc
- */
-CUresult CUDAAPI cuSurfObjectGetResourceDesc(CUDA_RESOURCE_DESC *pResDesc, CUsurfObject surfObject);
-
-/** @} */ /* END CUDA_SURFOBJECT */
-
-/**
- * \defgroup CUDA_PEER_ACCESS Peer Context Memory Access
- *
- * ___MANBRIEF___ direct peer context memory access functions of the low-level
- * CUDA driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the direct peer context memory access functions
- * of the low-level CUDA driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Queries if a device may directly access a peer device's memory.
- *
- * Returns in \p *canAccessPeer a value of 1 if contexts on \p dev are capable of
- * directly accessing memory from contexts on \p peerDev and 0 otherwise.
- * If direct access of \p peerDev from \p dev is possible, then access may be
- * enabled on two specific contexts by calling ::cuCtxEnablePeerAccess().
- *
- * \param canAccessPeer - Returned access capability
- * \param dev - Device from which allocations on \p peerDev are to
- * be directly accessed.
- * \param peerDev - Device on which the allocations to be directly accessed
- * by \p dev reside.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_DEVICE
- * \notefnerr
- *
- * \sa
- * ::cuCtxEnablePeerAccess,
- * ::cuCtxDisablePeerAccess,
- * ::cudaDeviceCanAccessPeer
- */
-CUresult CUDAAPI cuDeviceCanAccessPeer(int *canAccessPeer, CUdevice dev, CUdevice peerDev);
-
-/**
- * \brief Enables direct access to memory allocations in a peer context.
- *
- * If both the current context and \p peerContext are on devices which support unified
- * addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING) and same
- * major compute capability, then on success all allocations from \p peerContext will
- * immediately be accessible by the current context. See \ref CUDA_UNIFIED for additional
- * details.
- *
- * Note that access granted by this call is unidirectional and that in order to access
- * memory from the current context in \p peerContext, a separate symmetric call
- * to ::cuCtxEnablePeerAccess() is required.
- *
- * Note that there are both device-wide and system-wide limitations per system
- * configuration, as noted in the CUDA Programming Guide under the section
- * "Peer-to-Peer Memory Access".
- *
- * Returns ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED if ::cuDeviceCanAccessPeer() indicates
- * that the ::CUdevice of the current context cannot directly access memory
- * from the ::CUdevice of \p peerContext.
- *
- * Returns ::CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED if direct access of
- * \p peerContext from the current context has already been enabled.
- *
- * Returns ::CUDA_ERROR_TOO_MANY_PEERS if direct peer access is not possible
- * because hardware resources required for peer access have been exhausted.
- *
- * Returns ::CUDA_ERROR_INVALID_CONTEXT if there is no current context, \p peerContext
- * is not a valid context, or if the current context is \p peerContext.
- *
- * Returns ::CUDA_ERROR_INVALID_VALUE if \p Flags is not 0.
- *
- * \param peerContext - Peer context to enable direct access to from the current context
- * \param Flags - Reserved for future use and must be set to 0
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED,
- * ::CUDA_ERROR_TOO_MANY_PEERS,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa
- * ::cuDeviceCanAccessPeer,
- * ::cuCtxDisablePeerAccess,
- * ::cudaDeviceEnablePeerAccess
- */
-CUresult CUDAAPI cuCtxEnablePeerAccess(CUcontext peerContext, unsigned int Flags);
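An illustrative sketch (not part of the deleted header) of the usual check-then-enable pattern for peer access. The devices devA/devB and contexts ctxA/ctxB are hypothetical, and the call only grants ctxA access to ctxB's allocations; the reverse direction needs its own call.

#include <cuda.h>

/* Illustrative sketch: enable one-directional peer access from ctxA to ctxB. */
static int enable_peer(CUcontext ctxA, CUcontext ctxB, CUdevice devA, CUdevice devB) {
  int can_access = 0;
  cuDeviceCanAccessPeer(&can_access, devA, devB);
  if (!can_access) return 0;

  cuCtxPushCurrent(ctxA);          /* peer access is enabled for the *current* context */
  cuCtxEnablePeerAccess(ctxB, 0);  /* Flags must be 0 */
  cuCtxPopCurrent(NULL);
  /* A second, symmetric call with ctxB current is required for ctxB -> ctxA access. */
  return 1;
}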
-
-/**
- * \brief Disables direct access to memory allocations in a peer context and
- * unregisters any registered allocations.
- *
- * Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has
- * not yet been enabled from \p peerContext to the current context.
- *
- * Returns ::CUDA_ERROR_INVALID_CONTEXT if there is no current context, or if
- * \p peerContext is not a valid context.
- *
- * \param peerContext - Peer context to disable direct access to
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * \notefnerr
- *
- * \sa
- * ::cuDeviceCanAccessPeer,
- * ::cuCtxEnablePeerAccess,
- * ::cudaDeviceDisablePeerAccess
- */
-CUresult CUDAAPI cuCtxDisablePeerAccess(CUcontext peerContext);
-
-/**
- * \brief Queries attributes of the link between two devices.
- *
- * Returns in \p *value the value of the requested attribute \p attrib of the
- * link between \p srcDevice and \p dstDevice. The supported attributes are:
- * - ::CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK: A relative value indicating the
- * performance of the link between two devices.
- * - ::CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED: 1 if P2P access is enabled.
- * - ::CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED: 1 if Atomic operations over
- * the link are supported.
- * - ::CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED: 1 if cudaArray can
- * be accessed over the link.
- *
- * Returns ::CUDA_ERROR_INVALID_DEVICE if \p srcDevice or \p dstDevice are not valid
- * or if they represent the same device.
- *
- * Returns ::CUDA_ERROR_INVALID_VALUE if \p attrib is not valid or if \p value is
- * a null pointer.
- *
- * \param value - Returned value of the requested attribute
- * \param attrib - The requested attribute of the link between \p srcDevice and \p dstDevice.
- * \param srcDevice - The source device of the target link.
- * \param dstDevice - The destination device of the target link.
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_DEVICE,
- * ::CUDA_ERROR_INVALID_VALUE
- * \notefnerr
- *
- * \sa
- * ::cuCtxEnablePeerAccess,
- * ::cuCtxDisablePeerAccess,
- * ::cuDeviceCanAccessPeer,
- * ::cudaDeviceGetP2PAttribute
- */
-CUresult CUDAAPI cuDeviceGetP2PAttribute(int* value, CUdevice_P2PAttribute attrib, CUdevice srcDevice, CUdevice dstDevice);
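A small illustrative sketch (not from the deleted header) of querying one attribute of a device-to-device link; devA and devB are hypothetical device handles.

#include <cuda.h>

/* Illustrative sketch: check whether native atomics work across the devA -> devB link. */
static int link_supports_atomics(CUdevice devA, CUdevice devB) {
  int supported = 0;
  cuDeviceGetP2PAttribute(&supported, CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED,
                          devA, devB);
  return supported;
}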
-
-/** @} */ /* END CUDA_PEER_ACCESS */
-
-/**
- * \defgroup CUDA_GRAPHICS Graphics Interoperability
- *
- * ___MANBRIEF___ graphics interoperability functions of the low-level CUDA
- * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
- *
- * This section describes the graphics interoperability functions of the
- * low-level CUDA driver application programming interface.
- *
- * @{
- */
-
-/**
- * \brief Unregisters a graphics resource for access by CUDA
- *
- * Unregisters the graphics resource \p resource so it is not accessible by
- * CUDA unless registered again.
- *
- * If \p resource is invalid then ::CUDA_ERROR_INVALID_HANDLE is
- * returned.
- *
- * \param resource - Resource to unregister
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_UNKNOWN
- * \notefnerr
- *
- * \sa
- * ::cuGraphicsD3D9RegisterResource,
- * ::cuGraphicsD3D10RegisterResource,
- * ::cuGraphicsD3D11RegisterResource,
- * ::cuGraphicsGLRegisterBuffer,
- * ::cuGraphicsGLRegisterImage,
- * ::cudaGraphicsUnregisterResource
- */
-CUresult CUDAAPI cuGraphicsUnregisterResource(CUgraphicsResource resource);
-
-/**
- * \brief Get an array through which to access a subresource of a mapped graphics resource.
- *
- * Returns in \p *pArray an array through which the subresource of the mapped
- * graphics resource \p resource which corresponds to array index \p arrayIndex
- * and mipmap level \p mipLevel may be accessed. The value set in \p *pArray may
- * change every time that \p resource is mapped.
- *
- * If \p resource is not a texture then it cannot be accessed via an array and
- * ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY is returned.
- * If \p arrayIndex is not a valid array index for \p resource then
- * ::CUDA_ERROR_INVALID_VALUE is returned.
- * If \p mipLevel is not a valid mipmap level for \p resource then
- * ::CUDA_ERROR_INVALID_VALUE is returned.
- * If \p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned.
- *
- * \param pArray - Returned array through which a subresource of \p resource may be accessed
- * \param resource - Mapped resource to access
- * \param arrayIndex - Array index for array textures or cubemap face
- * index as defined by ::CUarray_cubemap_face for
- * cubemap textures for the subresource to access
- * \param mipLevel - Mipmap level for the subresource to access
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_NOT_MAPPED,
- * ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY
- * \notefnerr
- *
- * \sa
- * ::cuGraphicsResourceGetMappedPointer,
- * ::cudaGraphicsSubResourceGetMappedArray
- */
-CUresult CUDAAPI cuGraphicsSubResourceGetMappedArray(CUarray *pArray, CUgraphicsResource resource, unsigned int arrayIndex, unsigned int mipLevel);
-
-/**
- * \brief Get a mipmapped array through which to access a mapped graphics resource.
- *
- * Returns in \p *pMipmappedArray a mipmapped array through which the mapped graphics
- * resource \p resource may be accessed. The value set in \p *pMipmappedArray may change every time
- * that \p resource is mapped.
- *
- * If \p resource is not a texture then it cannot be accessed via a mipmapped array and
- * ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY is returned.
- * If \p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned.
- *
- * \param pMipmappedArray - Returned mipmapped array through which \p resource may be accessed
- * \param resource - Mapped resource to access
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_NOT_MAPPED,
- * ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY
- * \notefnerr
- *
- * \sa
- * ::cuGraphicsResourceGetMappedPointer,
- * ::cudaGraphicsResourceGetMappedMipmappedArray
- */
-CUresult CUDAAPI cuGraphicsResourceGetMappedMipmappedArray(CUmipmappedArray *pMipmappedArray, CUgraphicsResource resource);
-
-/**
- * \brief Get a device pointer through which to access a mapped graphics resource.
- *
- * Returns in \p *pDevPtr a pointer through which the mapped graphics resource
- * \p resource may be accessed.
- * Returns in \p pSize the size of the memory in bytes which may be accessed from that pointer.
- * The value set in \p *pDevPtr may change every time that \p resource is mapped.
- *
- * If \p resource is not a buffer then it cannot be accessed via a pointer and
- * ::CUDA_ERROR_NOT_MAPPED_AS_POINTER is returned.
- * If \p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned.
- *
- * \param pDevPtr - Returned pointer through which \p resource may be accessed
- * \param pSize - Returned size of the buffer accessible starting at \p *pDevPtr
- * \param resource - Mapped resource to access
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_NOT_MAPPED,
- * ::CUDA_ERROR_NOT_MAPPED_AS_POINTER
- * \notefnerr
- *
- * \sa
- * ::cuGraphicsMapResources,
- * ::cuGraphicsSubResourceGetMappedArray,
- * ::cudaGraphicsResourceGetMappedPointer
- */
-CUresult CUDAAPI cuGraphicsResourceGetMappedPointer(CUdeviceptr *pDevPtr, size_t *pSize, CUgraphicsResource resource);
-
-/**
- * \brief Set usage flags for mapping a graphics resource
- *
- * Set \p flags for mapping the graphics resource \p resource.
- *
- * Changes to \p flags will take effect the next time \p resource is mapped.
- * The \p flags argument may be any of the following:
- *
- * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
- * resource will be used. It is therefore assumed that this resource will be
- * read from and written to by CUDA kernels. This is the default value.
- * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READONLY: Specifies that CUDA kernels which
- * access this resource will not write to this resource.
- * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITEDISCARD: Specifies that CUDA kernels
- * which access this resource will not read from this resource and will
- * write over the entire contents of the resource, so none of the data
- * previously stored in the resource will be preserved.
- *
- * If \p resource is presently mapped for access by CUDA then
- * ::CUDA_ERROR_ALREADY_MAPPED is returned.
- * If \p flags is not one of the above values then ::CUDA_ERROR_INVALID_VALUE is returned.
- *
- * \param resource - Registered resource to set flags for
- * \param flags - Parameters for resource mapping
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_ALREADY_MAPPED
- * \notefnerr
- *
- * \sa
- * ::cuGraphicsMapResources,
- * ::cudaGraphicsResourceSetMapFlags
- */
-CUresult CUDAAPI cuGraphicsResourceSetMapFlags(CUgraphicsResource resource, unsigned int flags);
-
-/**
- * \brief Map graphics resources for access by CUDA
- *
- * Maps the \p count graphics resources in \p resources for access by CUDA.
- *
- * The resources in \p resources may be accessed by CUDA until they
- * are unmapped. The graphics API from which \p resources were registered
- * should not access any resources while they are mapped by CUDA. If an
- * application does so, the results are undefined.
- *
- * This function provides the synchronization guarantee that any graphics calls
- * issued before ::cuGraphicsMapResources() will complete before any subsequent CUDA
- * work issued in \p hStream begins.
- *
- * If \p resources includes any duplicate entries then ::CUDA_ERROR_INVALID_HANDLE is returned.
- * If any of \p resources are presently mapped for access by CUDA then ::CUDA_ERROR_ALREADY_MAPPED is returned.
- *
- * \param count - Number of resources to map
- * \param resources - Resources to map for CUDA usage
- * \param hStream - Stream with which to synchronize
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_ALREADY_MAPPED,
- * ::CUDA_ERROR_UNKNOWN
- * \note_null_stream
- * \notefnerr
- *
- * \sa
- * ::cuGraphicsResourceGetMappedPointer,
- * ::cuGraphicsSubResourceGetMappedArray,
- * ::cuGraphicsUnmapResources,
- * ::cudaGraphicsMapResources
- */
-CUresult CUDAAPI cuGraphicsMapResources(unsigned int count, CUgraphicsResource *resources, CUstream hStream);
-
-/**
- * \brief Unmap graphics resources.
- *
- * Unmaps the \p count graphics resources in \p resources.
- *
- * Once unmapped, the resources in \p resources may not be accessed by CUDA
- * until they are mapped again.
- *
- * This function provides the synchronization guarantee that any CUDA work issued
- * in \p hStream before ::cuGraphicsUnmapResources() will complete before any
- * subsequently issued graphics work begins.
- *
- *
- * If \p resources includes any duplicate entries then ::CUDA_ERROR_INVALID_HANDLE is returned.
- * If any of \p resources are not presently mapped for access by CUDA then ::CUDA_ERROR_NOT_MAPPED is returned.
- *
- * \param count - Number of resources to unmap
- * \param resources - Resources to unmap
- * \param hStream - Stream with which to synchronize
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_HANDLE,
- * ::CUDA_ERROR_NOT_MAPPED,
- * ::CUDA_ERROR_UNKNOWN
- * \note_null_stream
- * \notefnerr
- *
- * \sa
- * ::cuGraphicsMapResources,
- * ::cudaGraphicsUnmapResources
- */
-CUresult CUDAAPI cuGraphicsUnmapResources(unsigned int count, CUgraphicsResource *resources, CUstream hStream);
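To tie the interop entry points together, an illustrative sketch (not part of the deleted header) of the map/access/unmap flow. The resource is assumed to have been registered earlier (e.g. via one of the cuGraphicsGLRegister* calls), and the kernel work that consumes the pointer is elided.

#include <cuda.h>

/* Illustrative sketch: map an already-registered graphics buffer, obtain a device
 * pointer for it, and unmap it so the graphics API may use it again. CUDA work
 * that reads or writes [ptr, ptr + size) would be enqueued on `stream` between
 * the map and unmap calls. */
static void with_mapped_buffer(CUgraphicsResource resource, CUstream stream) {
  cuGraphicsMapResources(1, &resource, stream);

  CUdeviceptr ptr = 0;
  size_t size = 0;
  cuGraphicsResourceGetMappedPointer(&ptr, &size, resource);

  /* ... enqueue kernels on `stream` that access the mapped range ... */

  cuGraphicsUnmapResources(1, &resource, stream);
}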
-
-/** @} */ /* END CUDA_GRAPHICS */
-
-CUresult CUDAAPI cuGetExportTable(const void **ppExportTable, const CUuuid *pExportTableId);
-
-
-/**
- * \brief Returns a module handle
- *
- * Returns in \p *hmod the handle of the module that function \p hfunc
- * is located in. The lifetime of the module corresponds to the lifetime of
- * the context it was loaded in or until the module is explicitly unloaded.
- *
- * The CUDA runtime manages its own modules loaded into the primary context.
- * If the handle returned by this API refers to a module loaded by the CUDA runtime,
- * calling ::cuModuleUnload() on that module will result in undefined behavior.
- *
- * \param hmod - Returned module handle
- * \param hfunc - Function to retrieve module for
- *
- * \return
- * ::CUDA_SUCCESS,
- * ::CUDA_ERROR_DEINITIALIZED,
- * ::CUDA_ERROR_NOT_INITIALIZED,
- * ::CUDA_ERROR_INVALID_CONTEXT,
- * ::CUDA_ERROR_INVALID_VALUE,
- * ::CUDA_ERROR_NOT_FOUND
- * \notefnerr
- *
- */
-CUresult CUDAAPI cuFuncGetModule(CUmodule *hmod, CUfunction hfunc);
-
-
-/**
- * CUDA API versioning support
- */
-#if defined(__CUDA_API_VERSION_INTERNAL)
- #undef cuMemHostRegister
- #undef cuGraphicsResourceSetMapFlags
- #undef cuLinkCreate
- #undef cuLinkAddData
- #undef cuLinkAddFile
- #undef cuDeviceTotalMem
- #undef cuCtxCreate
- #undef cuModuleGetGlobal
- #undef cuMemGetInfo
- #undef cuMemAlloc
- #undef cuMemAllocPitch
- #undef cuMemFree
- #undef cuMemGetAddressRange
- #undef cuMemAllocHost
- #undef cuMemHostGetDevicePointer
- #undef cuMemcpyHtoD
- #undef cuMemcpyDtoH
- #undef cuMemcpyDtoD
- #undef cuMemcpyDtoA
- #undef cuMemcpyAtoD
- #undef cuMemcpyHtoA
- #undef cuMemcpyAtoH
- #undef cuMemcpyAtoA
- #undef cuMemcpyHtoAAsync
- #undef cuMemcpyAtoHAsync
- #undef cuMemcpy2D
- #undef cuMemcpy2DUnaligned
- #undef cuMemcpy3D
- #undef cuMemcpyHtoDAsync
- #undef cuMemcpyDtoHAsync
- #undef cuMemcpyDtoDAsync
- #undef cuMemcpy2DAsync
- #undef cuMemcpy3DAsync
- #undef cuMemsetD8
- #undef cuMemsetD16
- #undef cuMemsetD32
- #undef cuMemsetD2D8
- #undef cuMemsetD2D16
- #undef cuMemsetD2D32
- #undef cuArrayCreate
- #undef cuArrayGetDescriptor
- #undef cuArray3DCreate
- #undef cuArray3DGetDescriptor
- #undef cuTexRefSetAddress
- #undef cuTexRefSetAddress2D
- #undef cuTexRefGetAddress
- #undef cuGraphicsResourceGetMappedPointer
- #undef cuCtxDestroy
- #undef cuCtxPopCurrent
- #undef cuCtxPushCurrent
- #undef cuStreamDestroy
- #undef cuEventDestroy
- #undef cuMemcpy
- #undef cuMemcpyAsync
- #undef cuMemcpyPeer
- #undef cuMemcpyPeerAsync
- #undef cuMemcpy3DPeer
- #undef cuMemcpy3DPeerAsync
- #undef cuMemsetD8Async
- #undef cuMemsetD16Async
- #undef cuMemsetD32Async
- #undef cuMemsetD2D8Async
- #undef cuMemsetD2D16Async
- #undef cuMemsetD2D32Async
- #undef cuStreamGetPriority
- #undef cuStreamGetFlags
- #undef cuStreamGetCtx
- #undef cuStreamWaitEvent
- #undef cuStreamAddCallback
- #undef cuStreamAttachMemAsync
- #undef cuStreamQuery
- #undef cuStreamSynchronize
- #undef cuEventRecord
- #undef cuLaunchKernel
- #undef cuLaunchHostFunc
- #undef cuGraphicsMapResources
- #undef cuGraphicsUnmapResources
- #undef cuStreamWriteValue32
- #undef cuStreamWaitValue32
- #undef cuStreamWriteValue64
- #undef cuStreamWaitValue64
- #undef cuStreamBatchMemOp
- #undef cuMemPrefetchAsync
- #undef cuLaunchCooperativeKernel
- #undef cuSignalExternalSemaphoresAsync
- #undef cuWaitExternalSemaphoresAsync
- #undef cuStreamBeginCapture
- #undef cuStreamEndCapture
- #undef cuStreamIsCapturing
- #undef cuStreamGetCaptureInfo
- #undef cuGraphLaunch
- #undef cuDevicePrimaryCtxRelease
- #undef cuDevicePrimaryCtxReset
- #undef cuDevicePrimaryCtxSetFlags
- #undef cuStreamCopyAttributes
- #undef cuStreamSetAttribute
- #undef cuStreamGetAttribute
- #undef cuGraphInstantiate
-
- CUresult CUDAAPI cuMemHostRegister(void *p, size_t bytesize, unsigned int Flags);
- CUresult CUDAAPI cuGraphicsResourceSetMapFlags(CUgraphicsResource resource, unsigned int flags);
- CUresult CUDAAPI cuLinkCreate(unsigned int numOptions, CUjit_option *options, void **optionValues, CUlinkState *stateOut);
- CUresult CUDAAPI cuLinkAddData(CUlinkState state, CUjitInputType type, void *data, size_t size, const char *name,
- unsigned int numOptions, CUjit_option *options, void **optionValues);
- CUresult CUDAAPI cuLinkAddFile(CUlinkState state, CUjitInputType type, const char *path,
- unsigned int numOptions, CUjit_option *options, void **optionValues);
- CUresult CUDAAPI cuTexRefSetAddress2D_v2(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR *desc, CUdeviceptr dptr, size_t Pitch);
-
- typedef unsigned int CUdeviceptr_v1;
-
- typedef struct CUDA_MEMCPY2D_v1_st
- {
- unsigned int srcXInBytes; /**< Source X in bytes */
- unsigned int srcY; /**< Source Y */
- CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */
- const void *srcHost; /**< Source host pointer */
- CUdeviceptr_v1 srcDevice; /**< Source device pointer */
- CUarray srcArray; /**< Source array reference */
- unsigned int srcPitch; /**< Source pitch (ignored when src is array) */
-
- unsigned int dstXInBytes; /**< Destination X in bytes */
- unsigned int dstY; /**< Destination Y */
- CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */
- void *dstHost; /**< Destination host pointer */
- CUdeviceptr_v1 dstDevice; /**< Destination device pointer */
- CUarray dstArray; /**< Destination array reference */
- unsigned int dstPitch; /**< Destination pitch (ignored when dst is array) */
-
- unsigned int WidthInBytes; /**< Width of 2D memory copy in bytes */
- unsigned int Height; /**< Height of 2D memory copy */
- } CUDA_MEMCPY2D_v1;
-
- typedef struct CUDA_MEMCPY3D_v1_st
- {
- unsigned int srcXInBytes; /**< Source X in bytes */
- unsigned int srcY; /**< Source Y */
- unsigned int srcZ; /**< Source Z */
- unsigned int srcLOD; /**< Source LOD */
- CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */
- const void *srcHost; /**< Source host pointer */
- CUdeviceptr_v1 srcDevice; /**< Source device pointer */
- CUarray srcArray; /**< Source array reference */
- void *reserved0; /**< Must be NULL */
- unsigned int srcPitch; /**< Source pitch (ignored when src is array) */
- unsigned int srcHeight; /**< Source height (ignored when src is array; may be 0 if Depth==1) */
-
- unsigned int dstXInBytes; /**< Destination X in bytes */
- unsigned int dstY; /**< Destination Y */
- unsigned int dstZ; /**< Destination Z */
- unsigned int dstLOD; /**< Destination LOD */
- CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */
- void *dstHost; /**< Destination host pointer */
- CUdeviceptr_v1 dstDevice; /**< Destination device pointer */
- CUarray dstArray; /**< Destination array reference */
- void *reserved1; /**< Must be NULL */
- unsigned int dstPitch; /**< Destination pitch (ignored when dst is array) */
- unsigned int dstHeight; /**< Destination height (ignored when dst is array; may be 0 if Depth==1) */
-
- unsigned int WidthInBytes; /**< Width of 3D memory copy in bytes */
- unsigned int Height; /**< Height of 3D memory copy */
- unsigned int Depth; /**< Depth of 3D memory copy */
- } CUDA_MEMCPY3D_v1;
-
- typedef struct CUDA_ARRAY_DESCRIPTOR_v1_st
- {
- unsigned int Width; /**< Width of array */
- unsigned int Height; /**< Height of array */
-
- CUarray_format Format; /**< Array format */
- unsigned int NumChannels; /**< Channels per array element */
- } CUDA_ARRAY_DESCRIPTOR_v1;
-
- typedef struct CUDA_ARRAY3D_DESCRIPTOR_v1_st
- {
- unsigned int Width; /**< Width of 3D array */
- unsigned int Height; /**< Height of 3D array */
- unsigned int Depth; /**< Depth of 3D array */
-
- CUarray_format Format; /**< Array format */
- unsigned int NumChannels; /**< Channels per array element */
- unsigned int Flags; /**< Flags */
- } CUDA_ARRAY3D_DESCRIPTOR_v1;
-
- CUresult CUDAAPI cuDeviceTotalMem(unsigned int *bytes, CUdevice dev);
- CUresult CUDAAPI cuCtxCreate(CUcontext *pctx, unsigned int flags, CUdevice dev);
- CUresult CUDAAPI cuModuleGetGlobal(CUdeviceptr_v1 *dptr, unsigned int *bytes, CUmodule hmod, const char *name);
- CUresult CUDAAPI cuMemGetInfo(unsigned int *free, unsigned int *total);
- CUresult CUDAAPI cuMemAlloc(CUdeviceptr_v1 *dptr, unsigned int bytesize);
- CUresult CUDAAPI cuMemAllocPitch(CUdeviceptr_v1 *dptr, unsigned int *pPitch, unsigned int WidthInBytes, unsigned int Height, unsigned int ElementSizeBytes);
- CUresult CUDAAPI cuMemFree(CUdeviceptr_v1 dptr);
- CUresult CUDAAPI cuMemGetAddressRange(CUdeviceptr_v1 *pbase, unsigned int *psize, CUdeviceptr_v1 dptr);
- CUresult CUDAAPI cuMemAllocHost(void **pp, unsigned int bytesize);
- CUresult CUDAAPI cuMemHostGetDevicePointer(CUdeviceptr_v1 *pdptr, void *p, unsigned int Flags);
- CUresult CUDAAPI cuMemcpyHtoD(CUdeviceptr_v1 dstDevice, const void *srcHost, unsigned int ByteCount);
- CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr_v1 srcDevice, unsigned int ByteCount);
- CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr_v1 dstDevice, CUdeviceptr_v1 srcDevice, unsigned int ByteCount);
- CUresult CUDAAPI cuMemcpyDtoA(CUarray dstArray, unsigned int dstOffset, CUdeviceptr_v1 srcDevice, unsigned int ByteCount);
- CUresult CUDAAPI cuMemcpyAtoD(CUdeviceptr_v1 dstDevice, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount);
- CUresult CUDAAPI cuMemcpyHtoA(CUarray dstArray, unsigned int dstOffset, const void *srcHost, unsigned int ByteCount);
- CUresult CUDAAPI cuMemcpyAtoH(void *dstHost, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount);
- CUresult CUDAAPI cuMemcpyAtoA(CUarray dstArray, unsigned int dstOffset, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount);
- CUresult CUDAAPI cuMemcpyHtoAAsync(CUarray dstArray, unsigned int dstOffset, const void *srcHost, unsigned int ByteCount, CUstream hStream);
- CUresult CUDAAPI cuMemcpyAtoHAsync(void *dstHost, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount, CUstream hStream);
- CUresult CUDAAPI cuMemcpy2D(const CUDA_MEMCPY2D_v1 *pCopy);
- CUresult CUDAAPI cuMemcpy2DUnaligned(const CUDA_MEMCPY2D_v1 *pCopy);
- CUresult CUDAAPI cuMemcpy3D(const CUDA_MEMCPY3D_v1 *pCopy);
- CUresult CUDAAPI cuMemcpyHtoDAsync(CUdeviceptr_v1 dstDevice, const void *srcHost, unsigned int ByteCount, CUstream hStream);
- CUresult CUDAAPI cuMemcpyDtoHAsync(void *dstHost, CUdeviceptr_v1 srcDevice, unsigned int ByteCount, CUstream hStream);
- CUresult CUDAAPI cuMemcpyDtoDAsync(CUdeviceptr_v1 dstDevice, CUdeviceptr_v1 srcDevice, unsigned int ByteCount, CUstream hStream);
- CUresult CUDAAPI cuMemcpy2DAsync(const CUDA_MEMCPY2D_v1 *pCopy, CUstream hStream);
- CUresult CUDAAPI cuMemcpy3DAsync(const CUDA_MEMCPY3D_v1 *pCopy, CUstream hStream);
- CUresult CUDAAPI cuMemsetD8(CUdeviceptr_v1 dstDevice, unsigned char uc, unsigned int N);
- CUresult CUDAAPI cuMemsetD16(CUdeviceptr_v1 dstDevice, unsigned short us, unsigned int N);
- CUresult CUDAAPI cuMemsetD32(CUdeviceptr_v1 dstDevice, unsigned int ui, unsigned int N);
- CUresult CUDAAPI cuMemsetD2D8(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned char uc, unsigned int Width, unsigned int Height);
- CUresult CUDAAPI cuMemsetD2D16(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned short us, unsigned int Width, unsigned int Height);
- CUresult CUDAAPI cuMemsetD2D32(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned int ui, unsigned int Width, unsigned int Height);
- CUresult CUDAAPI cuArrayCreate(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR_v1 *pAllocateArray);
- CUresult CUDAAPI cuArrayGetDescriptor(CUDA_ARRAY_DESCRIPTOR_v1 *pArrayDescriptor, CUarray hArray);
- CUresult CUDAAPI cuArray3DCreate(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR_v1 *pAllocateArray);
- CUresult CUDAAPI cuArray3DGetDescriptor(CUDA_ARRAY3D_DESCRIPTOR_v1 *pArrayDescriptor, CUarray hArray);
- CUresult CUDAAPI cuTexRefSetAddress(unsigned int *ByteOffset, CUtexref hTexRef, CUdeviceptr_v1 dptr, unsigned int bytes);
- CUresult CUDAAPI cuTexRefSetAddress2D(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR_v1 *desc, CUdeviceptr_v1 dptr, unsigned int Pitch);
- CUresult CUDAAPI cuTexRefGetAddress(CUdeviceptr_v1 *pdptr, CUtexref hTexRef);
- CUresult CUDAAPI cuGraphicsResourceGetMappedPointer(CUdeviceptr_v1 *pDevPtr, unsigned int *pSize, CUgraphicsResource resource);
-
- CUresult CUDAAPI cuCtxDestroy(CUcontext ctx);
- CUresult CUDAAPI cuCtxPopCurrent(CUcontext *pctx);
- CUresult CUDAAPI cuCtxPushCurrent(CUcontext ctx);
- CUresult CUDAAPI cuStreamDestroy(CUstream hStream);
- CUresult CUDAAPI cuEventDestroy(CUevent hEvent);
- CUresult CUDAAPI cuDevicePrimaryCtxRelease(CUdevice dev);
- CUresult CUDAAPI cuDevicePrimaryCtxReset(CUdevice dev);
- CUresult CUDAAPI cuDevicePrimaryCtxSetFlags(CUdevice dev, unsigned int flags);
-
- CUresult CUDAAPI cuMemcpyHtoD_v2(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount);
- CUresult CUDAAPI cuMemcpyDtoH_v2(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount);
- CUresult CUDAAPI cuMemcpyDtoD_v2(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount);
- CUresult CUDAAPI cuMemcpyDtoA_v2(CUarray dstArray, size_t dstOffset, CUdeviceptr srcDevice, size_t ByteCount);
- CUresult CUDAAPI cuMemcpyAtoD_v2(CUdeviceptr dstDevice, CUarray srcArray, size_t srcOffset, size_t ByteCount);
- CUresult CUDAAPI cuMemcpyHtoA_v2(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount);
- CUresult CUDAAPI cuMemcpyAtoH_v2(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount);
- CUresult CUDAAPI cuMemcpyAtoA_v2(CUarray dstArray, size_t dstOffset, CUarray srcArray, size_t srcOffset, size_t ByteCount);
- CUresult CUDAAPI cuMemcpyHtoAAsync_v2(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount, CUstream hStream);
- CUresult CUDAAPI cuMemcpyAtoHAsync_v2(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount, CUstream hStream);
- CUresult CUDAAPI cuMemcpy2D_v2(const CUDA_MEMCPY2D *pCopy);
- CUresult CUDAAPI cuMemcpy2DUnaligned_v2(const CUDA_MEMCPY2D *pCopy);
- CUresult CUDAAPI cuMemcpy3D_v2(const CUDA_MEMCPY3D *pCopy);
- CUresult CUDAAPI cuMemcpyHtoDAsync_v2(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream);
- CUresult CUDAAPI cuMemcpyDtoHAsync_v2(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream);
- CUresult CUDAAPI cuMemcpyDtoDAsync_v2(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream);
- CUresult CUDAAPI cuMemcpy2DAsync_v2(const CUDA_MEMCPY2D *pCopy, CUstream hStream);
- CUresult CUDAAPI cuMemcpy3DAsync_v2(const CUDA_MEMCPY3D *pCopy, CUstream hStream);
- CUresult CUDAAPI cuMemsetD8_v2(CUdeviceptr dstDevice, unsigned char uc, size_t N);
- CUresult CUDAAPI cuMemsetD16_v2(CUdeviceptr dstDevice, unsigned short us, size_t N);
- CUresult CUDAAPI cuMemsetD32_v2(CUdeviceptr dstDevice, unsigned int ui, size_t N);
- CUresult CUDAAPI cuMemsetD2D8_v2(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height);
- CUresult CUDAAPI cuMemsetD2D16_v2(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height);
- CUresult CUDAAPI cuMemsetD2D32_v2(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height);
- CUresult CUDAAPI cuMemcpy(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount);
- CUresult CUDAAPI cuMemcpyAsync(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount, CUstream hStream);
- CUresult CUDAAPI cuMemcpyPeer(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount);
- CUresult CUDAAPI cuMemcpyPeerAsync(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount, CUstream hStream);
- CUresult CUDAAPI cuMemcpy3DPeer(const CUDA_MEMCPY3D_PEER *pCopy);
- CUresult CUDAAPI cuMemcpy3DPeerAsync(const CUDA_MEMCPY3D_PEER *pCopy, CUstream hStream);
-
- CUresult CUDAAPI cuMemsetD8Async(CUdeviceptr dstDevice, unsigned char uc, size_t N, CUstream hStream);
- CUresult CUDAAPI cuMemsetD16Async(CUdeviceptr dstDevice, unsigned short us, size_t N, CUstream hStream);
- CUresult CUDAAPI cuMemsetD32Async(CUdeviceptr dstDevice, unsigned int ui, size_t N, CUstream hStream);
- CUresult CUDAAPI cuMemsetD2D8Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, CUstream hStream);
- CUresult CUDAAPI cuMemsetD2D16Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, CUstream hStream);
- CUresult CUDAAPI cuMemsetD2D32Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, CUstream hStream);
-
- CUresult CUDAAPI cuStreamGetPriority(CUstream hStream, int *priority);
- CUresult CUDAAPI cuStreamGetFlags(CUstream hStream, unsigned int *flags);
- CUresult CUDAAPI cuStreamGetCtx(CUstream hStream, CUcontext *pctx);
- CUresult CUDAAPI cuStreamWaitEvent(CUstream hStream, CUevent hEvent, unsigned int Flags);
- CUresult CUDAAPI cuStreamAddCallback(CUstream hStream, CUstreamCallback callback, void *userData, unsigned int flags);
- CUresult CUDAAPI cuStreamAttachMemAsync(CUstream hStream, CUdeviceptr dptr, size_t length, unsigned int flags);
- CUresult CUDAAPI cuStreamQuery(CUstream hStream);
- CUresult CUDAAPI cuStreamSynchronize(CUstream hStream);
- CUresult CUDAAPI cuEventRecord(CUevent hEvent, CUstream hStream);
- CUresult CUDAAPI cuLaunchKernel(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams, void **extra);
- CUresult CUDAAPI cuLaunchHostFunc(CUstream hStream, CUhostFn fn, void *userData);
- CUresult CUDAAPI cuGraphicsMapResources(unsigned int count, CUgraphicsResource *resources, CUstream hStream);
- CUresult CUDAAPI cuGraphicsUnmapResources(unsigned int count, CUgraphicsResource *resources, CUstream hStream);
- CUresult CUDAAPI cuStreamWriteValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags);
- CUresult CUDAAPI cuStreamWaitValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags);
- CUresult CUDAAPI cuStreamWriteValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags);
- CUresult CUDAAPI cuStreamWaitValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags);
- CUresult CUDAAPI cuStreamBatchMemOp(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags);
- CUresult CUDAAPI cuMemPrefetchAsync(CUdeviceptr devPtr, size_t count, CUdevice dstDevice, CUstream hStream);
- CUresult CUDAAPI cuLaunchCooperativeKernel(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams);
- CUresult CUDAAPI cuSignalExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream);
- CUresult CUDAAPI cuWaitExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream);
- CUresult CUDAAPI cuStreamBeginCapture(CUstream hStream);
- CUresult CUDAAPI cuStreamBeginCapture_ptsz(CUstream hStream);
- CUresult CUDAAPI cuStreamBeginCapture_v2(CUstream hStream, CUstreamCaptureMode mode);
- CUresult CUDAAPI cuStreamEndCapture(CUstream hStream, CUgraph *phGraph);
- CUresult CUDAAPI cuStreamIsCapturing(CUstream hStream, CUstreamCaptureStatus *captureStatus);
- CUresult CUDAAPI cuStreamGetCaptureInfo(CUstream hStream, CUstreamCaptureStatus *captureStatus, cuuint64_t *id);
- CUresult CUDAAPI cuGraphLaunch(CUgraphExec hGraph, CUstream hStream);
- CUresult CUDAAPI cuStreamCopyAttributes(CUstream dstStream, CUstream srcStream);
- CUresult CUDAAPI cuStreamGetAttribute(CUstream hStream, CUstreamAttrID attr, CUstreamAttrValue *value);
- CUresult CUDAAPI cuStreamSetAttribute(CUstream hStream, CUstreamAttrID attr, const CUstreamAttrValue *param);
- CUresult CUDAAPI cuGraphInstantiate(CUgraphExec *phGraphExec, CUgraph hGraph, CUgraphNode *phErrorNode, char *logBuffer, size_t bufferSize);
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#if defined(__GNUC__)
- #if defined(__CUDA_API_PUSH_VISIBILITY_DEFAULT)
- #pragma GCC visibility pop
- #endif
-#endif
-
-#undef __CUDA_DEPRECATED
-
-#endif /* __cuda_cuda_h__ */
diff --git a/third_party/cuda/nvvm/libdevice/libdevice.10.bc b/third_party/cuda/nvvm/libdevice/libdevice.10.bc
deleted file mode 100644
index 897c8c9..0000000
--- a/third_party/cuda/nvvm/libdevice/libdevice.10.bc
+++ /dev/null
Binary files differ
diff --git a/third_party/cuda/version.txt b/third_party/cuda/version.txt
deleted file mode 100644
index e6750c8..0000000
--- a/third_party/cuda/version.txt
+++ /dev/null
@@ -1 +0,0 @@
-CUDA Version 11.0.207
diff --git a/third_party/llvm-project b/third_party/llvm-project
index 7cb4c26..07ad054 160000
--- a/third_party/llvm-project
+++ b/third_party/llvm-project
@@ -1 +1 @@
-Subproject commit 7cb4c2617391b80993e7c10f3a34c9e172f7ad41
+Subproject commit 07ad054a724dc1fda57a0e4f90358273586e8201
diff --git a/third_party/mlir-hlo b/third_party/mlir-hlo
index 631843e..496a134 160000
--- a/third_party/mlir-hlo
+++ b/third_party/mlir-hlo
@@ -1 +1 @@
-Subproject commit 631843e39eea2affa61295b3394055c873a36cd0
+Subproject commit 496a134541f8483e1c01ab488bc416b9780df63a