/*
 * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 */
#ifndef _DRVAPI_ERROR_STRING_H_
#define _DRVAPI_ERROR_STRING_H_

// Error Code string definitions here
/**
 * Pairs a CUDA driver API error name (as a printable string) with its
 * numeric CUresult value.  An entry whose error_id is -1 is the sentinel
 * that terminates the lookup table walked by getCudaDrvErrorString().
 */
typedef struct
{
    char const *error_string;  // printable name of the error code
    int  error_id;             // numeric CUresult value, or -1 for the sentinel
} s_CudaErrorStr;
29 static s_CudaErrorStr sCudaDrvErrorString[] =
32 * The API call returned with no errors. In the case of query calls, this
33 * can also mean that the operation being queried is complete (see
34 * ::cuEventQuery() and ::cuStreamQuery()).
36 { "CUDA_SUCCESS", 0 },
39 * This indicates that one or more of the parameters passed to the API call
40 * is not within an acceptable range of values.
42 { "CUDA_ERROR_INVALID_VALUE", 1 },
45 * The API call failed because it was unable to allocate enough memory to
46 * perform the requested operation.
48 { "CUDA_ERROR_OUT_OF_MEMORY", 2 },
51 * This indicates that the CUDA driver has not been initialized with
52 * ::cuInit() or that initialization has failed.
54 { "CUDA_ERROR_NOT_INITIALIZED", 3 },
57 * This indicates that the CUDA driver is in the process of shutting down.
59 { "CUDA_ERROR_DEINITIALIZED", 4 },
62 * This indicates profiling APIs are called while application is running
63 * in visual profiler mode.
65 { "CUDA_ERROR_PROFILER_DISABLED", 5 },
67 * This indicates profiling has not been initialized for this context.
68 * Call cuProfilerInitialize() to resolve this.
70 { "CUDA_ERROR_PROFILER_NOT_INITIALIZED", 6 },
72 * This indicates profiler has already been started and probably
73 * cuProfilerStart() is incorrectly called.
75 { "CUDA_ERROR_PROFILER_ALREADY_STARTED", 7 },
77 * This indicates profiler has already been stopped and probably
78 * cuProfilerStop() is incorrectly called.
80 { "CUDA_ERROR_PROFILER_ALREADY_STOPPED", 8 },
82 * This indicates that no CUDA-capable devices were detected by the installed
85 { "CUDA_ERROR_NO_DEVICE (no CUDA-capable devices were detected)", 100 },
88 * This indicates that the device ordinal supplied by the user does not
89 * correspond to a valid CUDA device.
91 { "CUDA_ERROR_INVALID_DEVICE (device specified is not a valid CUDA device)", 101 },
95 * This indicates that the device kernel image is invalid. This can also
96 * indicate an invalid CUDA module.
98 { "CUDA_ERROR_INVALID_IMAGE", 200 },
101 * This most frequently indicates that there is no context bound to the
102 * current thread. This can also be returned if the context passed to an
103 * API call is not a valid handle (such as a context that has had
104 * ::cuCtxDestroy() invoked on it). This can also be returned if a user
105 * mixes different API versions (i.e. 3010 context with 3020 API calls).
106 * See ::cuCtxGetApiVersion() for more details.
108 { "CUDA_ERROR_INVALID_CONTEXT", 201 },
111 * This indicated that the context being supplied as a parameter to the
112 * API call was already the active context.
114 * This error return is deprecated as of CUDA 3.2. It is no longer an
115 * error to attempt to push the active context via ::cuCtxPushCurrent().
117 { "CUDA_ERROR_CONTEXT_ALREADY_CURRENT", 202 },
120 * This indicates that a map or register operation has failed.
122 { "CUDA_ERROR_MAP_FAILED", 205 },
125 * This indicates that an unmap or unregister operation has failed.
127 { "CUDA_ERROR_UNMAP_FAILED", 206 },
130 * This indicates that the specified array is currently mapped and thus
131 * cannot be destroyed.
133 { "CUDA_ERROR_ARRAY_IS_MAPPED", 207 },
136 * This indicates that the resource is already mapped.
138 { "CUDA_ERROR_ALREADY_MAPPED", 208 },
141 * This indicates that there is no kernel image available that is suitable
142 * for the device. This can occur when a user specifies code generation
143 * options for a particular CUDA source file that do not include the
144 * corresponding device configuration.
146 { "CUDA_ERROR_NO_BINARY_FOR_GPU", 209 },
149 * This indicates that a resource has already been acquired.
151 { "CUDA_ERROR_ALREADY_ACQUIRED", 210 },
154 * This indicates that a resource is not mapped.
156 { "CUDA_ERROR_NOT_MAPPED", 211 },
159 * This indicates that a mapped resource is not available for access as an
162 { "CUDA_ERROR_NOT_MAPPED_AS_ARRAY", 212 },
165 * This indicates that a mapped resource is not available for access as a
168 { "CUDA_ERROR_NOT_MAPPED_AS_POINTER", 213 },
171 * This indicates that an uncorrectable ECC error was detected during
174 { "CUDA_ERROR_ECC_UNCORRECTABLE", 214 },
177 * This indicates that the ::CUlimit passed to the API call is not
178 * supported by the active device.
180 { "CUDA_ERROR_UNSUPPORTED_LIMIT", 215 },
183 * This indicates that the ::CUcontext passed to the API call can
184 * only be bound to a single CPU thread at a time but is already
185 * bound to a CPU thread.
187 { "CUDA_ERROR_CONTEXT_ALREADY_IN_USE", 216 },
190 * This indicates that peer access is not supported across the given
193 { "CUDA_ERROR_PEER_ACCESS_UNSUPPORTED", 217},
196 * This indicates that the device kernel source is invalid.
198 { "CUDA_ERROR_INVALID_SOURCE", 300 },
201 * This indicates that the file specified was not found.
203 { "CUDA_ERROR_FILE_NOT_FOUND", 301 },
206 * This indicates that a link to a shared object failed to resolve.
208 { "CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND", 302 },
211 * This indicates that initialization of a shared object failed.
213 { "CUDA_ERROR_SHARED_OBJECT_INIT_FAILED", 303 },
216 * This indicates that an OS call failed.
218 { "CUDA_ERROR_OPERATING_SYSTEM", 304 },
222 * This indicates that a resource handle passed to the API call was not
223 * valid. Resource handles are opaque types like ::CUstream and ::CUevent.
225 { "CUDA_ERROR_INVALID_HANDLE", 400 },
229 * This indicates that a named symbol was not found. Examples of symbols
230 * are global/constant variable names, texture names }, and surface names.
232 { "CUDA_ERROR_NOT_FOUND", 500 },
236 * This indicates that asynchronous operations issued previously have not
237 * completed yet. This result is not actually an error, but must be indicated
238 * differently than ::CUDA_SUCCESS (which indicates completion). Calls that
239 * may return this value include ::cuEventQuery() and ::cuStreamQuery().
241 { "CUDA_ERROR_NOT_READY", 600 },
245 * An exception occurred on the device while executing a kernel. Common
246 * causes include dereferencing an invalid device pointer and accessing
247 * out of bounds shared memory. The context cannot be used }, so it must
248 * be destroyed (and a new one should be created). All existing device
249 * memory allocations from this context are invalid and must be
250 * reconstructed if the program is to continue using CUDA.
252 { "CUDA_ERROR_LAUNCH_FAILED", 700 },
255 * This indicates that a launch did not occur because it did not have
256 * appropriate resources. This error usually indicates that the user has
257 * attempted to pass too many arguments to the device kernel, or the
258 * kernel launch specifies too many threads for the kernel's register
259 * count. Passing arguments of the wrong size (i.e. a 64-bit pointer
260 * when a 32-bit int is expected) is equivalent to passing too many
261 * arguments and can also result in this error.
263 { "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES", 701 },
266 * This indicates that the device kernel took too long to execute. This can
267 * only occur if timeouts are enabled - see the device attribute
268 * ::CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT for more information. The
269 * context cannot be used (and must be destroyed similar to
270 * ::CUDA_ERROR_LAUNCH_FAILED). All existing device memory allocations from
271 * this context are invalid and must be reconstructed if the program is to
272 * continue using CUDA.
274 { "CUDA_ERROR_LAUNCH_TIMEOUT", 702 },
277 * This error indicates a kernel launch that uses an incompatible texturing
280 { "CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING", 703 },
283 * This error indicates that a call to ::cuCtxEnablePeerAccess() is
284 * trying to re-enable peer access to a context which has already
285 * had peer access to it enabled.
287 { "CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED", 704 },
290 * This error indicates that ::cuCtxDisablePeerAccess() is
291 * trying to disable peer access which has not been enabled yet
292 * via ::cuCtxEnablePeerAccess().
294 { "CUDA_ERROR_PEER_ACCESS_NOT_ENABLED", 705 },
297 * This error indicates that the primary context for the specified device
298 * has already been initialized.
300 { "CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE", 708 },
303 * This error indicates that the context current to the calling thread
304 * has been destroyed using ::cuCtxDestroy }, or is a primary context which
305 * has not yet been initialized.
307 { "CUDA_ERROR_CONTEXT_IS_DESTROYED", 709 },
310 * A device-side assert triggered during kernel execution. The context
311 * cannot be used anymore, and must be destroyed. All existing device
312 * memory allocations from this context are invalid and must be
313 * reconstructed if the program is to continue using CUDA.
315 { "CUDA_ERROR_ASSERT", 710 },
318 * This error indicates that the hardware resources required to enable
319 * peer access have been exhausted for one or more of the devices
320 * passed to ::cuCtxEnablePeerAccess().
322 { "CUDA_ERROR_TOO_MANY_PEERS", 711 },
325 * This error indicates that the memory range passed to ::cuMemHostRegister()
326 * has already been registered.
328 { "CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED", 712 },
331 * This error indicates that the pointer passed to ::cuMemHostUnregister()
332 * does not correspond to any currently registered memory region.
334 { "CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED", 713 },
337 * This error indicates that the attempted operation is not permitted.
339 { "CUDA_ERROR_NOT_PERMITTED", 800 },
342 * This error indicates that the attempted operation is not supported
343 * on the current system or device.
345 { "CUDA_ERROR_NOT_SUPPORTED", 801 },
348 * This indicates that an unknown internal error has occurred.
350 { "CUDA_ERROR_UNKNOWN", 999 },
354 // This is just a linear search through the array, since the error_id's are not
355 // always ocurring consecutively
356 const char * getCudaDrvErrorString(CUresult error_id)
359 while (sCudaDrvErrorString[index].error_id != error_id &&
360 sCudaDrvErrorString[index].error_id != -1)
364 if (sCudaDrvErrorString[index].error_id == error_id)
365 return (const char *)sCudaDrvErrorString[index].error_string;
367 return (const char *)"CUDA_ERROR not found!";