author    Smit Hinsu <hinsu@google.com>    2018-05-21 17:42:15 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>    2018-05-21 17:44:41 -0700
commit    b1139814f91c5216eb5ff229ee7e1982e5f4e888 (patch)
tree      7f85c8229bfd47eeba49890aa75b59c8680e619c /tensorflow/stream_executor/stream_executor_internal.h
parent    d913a243196fa07d4728c8f7c1ce6444ecd086eb (diff)
Introduce an option to allocate CUDA unified memory
PiperOrigin-RevId: 197490523
Diffstat (limited to 'tensorflow/stream_executor/stream_executor_internal.h')
-rw-r--r--  tensorflow/stream_executor/stream_executor_internal.h | 9 +++++++++
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/tensorflow/stream_executor/stream_executor_internal.h b/tensorflow/stream_executor/stream_executor_internal.h
index 2584c92f0c..9c989b971d 100644
--- a/tensorflow/stream_executor/stream_executor_internal.h
+++ b/tensorflow/stream_executor/stream_executor_internal.h
@@ -174,6 +174,15 @@ class StreamExecutorInterface {
virtual void *AllocateSubBuffer(DeviceMemoryBase *parent, uint64 offset,
uint64 size) = 0;
virtual void Deallocate(DeviceMemoryBase *mem) = 0;
+ // Allocates unified memory space of the given size, if supported.
+ // See
+ // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-unified-memory-programming-hd
+ // for more details on unified memory.
+ virtual void *UnifiedMemoryAllocate(uint64 size) { return nullptr; }
+
+ // Deallocates unified memory space previously allocated with
+ // UnifiedMemoryAllocate.
+ virtual void UnifiedMemoryDeallocate(void *mem) {}
virtual void *HostMemoryAllocate(uint64 size) = 0;
virtual void HostMemoryDeallocate(void *mem) = 0;
virtual bool HostMemoryRegister(void *mem, uint64 size) = 0;
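
The new hooks are optional: the base-class defaults return nullptr and do nothing, so only back-ends that actually support unified memory need to override them. For illustration, below is a minimal sketch of how a CUDA-backed executor might implement the pair using the CUDA runtime's cudaMallocManaged and cudaFree; the class name and the stand-alone (non-overriding) method signatures are assumptions made for this sketch, not part of the patch.

// Hypothetical sketch only: the class name and its relationship to
// StreamExecutorInterface are assumed for illustration.
#include <cstdint>
#include <cuda_runtime.h>

class CudaExecutorSketch /* : public StreamExecutorInterface */ {
 public:
  // Allocates CUDA unified (managed) memory, accessible from both host
  // and device. Returns nullptr on failure, matching the default stub.
  void *UnifiedMemoryAllocate(std::uint64_t size) {
    void *ptr = nullptr;
    // cudaMemAttachGlobal makes the allocation visible to any stream.
    if (cudaMallocManaged(&ptr, size, cudaMemAttachGlobal) != cudaSuccess) {
      return nullptr;
    }
    return ptr;
  }

  // Releases memory previously obtained from UnifiedMemoryAllocate.
  void UnifiedMemoryDeallocate(void *mem) {
    if (mem != nullptr) {
      cudaFree(mem);
    }
  }
};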