Sophie

Sophie

distrib > Mageia > 2 > i586 > media > nonfree-release > by-pkgid > f86555c654b1f4a4c7ccf47789979868 > files > 1066

nvidia-cuda-toolkit-devel-4.2.9-2.mga2.nonfree.i586.rpm

<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
<title>NVIDIA CUDA Library: cuCtxCreate</title>
<link href="customdoxygen.css" rel="stylesheet" type="text/css">
<link href="tabs.css" rel="stylesheet" type="text/css">
</head><body>
<!-- Generated by Doxygen 1.5.8 -->
<div class="navigation" id="top">
  <div class="tabs">
    <ul>
      <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
      <li><a href="modules.html"><span>Modules</span></a></li>
      <li><a href="annotated.html"><span>Data&nbsp;Structures</span></a></li>
      <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
    </ul>
  </div>
</div>
<div class="contents">
  <div class="navpath"><a class="el" href="group__CUDA__CTX.html">Context Management</a>
  </div>
<table cellspacing="0" cellpadding="0" border="0">
  <tr>
   <td valign="top">
      <div class="navtab">
        <table>
          <tr><td class="navtab"><a class="qindexHL" href="group__CUDA__CTX_g65dc0012348bc84810e2103a40d8e2cf.html#g65dc0012348bc84810e2103a40d8e2cf">cuCtxCreate</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g27a365aebb0eb548166309f58a1e8b8e.html#g27a365aebb0eb548166309f58a1e8b8e">cuCtxDestroy</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g088a90490dafca5893ef6fbebc8de8fb.html#g088a90490dafca5893ef6fbebc8de8fb">cuCtxGetApiVersion</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g40b6b141698f76744dea6e39b9a25360.html#g40b6b141698f76744dea6e39b9a25360">cuCtxGetCacheConfig</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g8f13165846b73750693640fb3e8380d0.html#g8f13165846b73750693640fb3e8380d0">cuCtxGetCurrent</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g4e84b109eba36cdaaade167f34ae881e.html#g4e84b109eba36cdaaade167f34ae881e">cuCtxGetDevice</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g9f2d47d1745752aa16da7ed0d111b6a8.html#g9f2d47d1745752aa16da7ed0d111b6a8">cuCtxGetLimit</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g2fac188026a062d92e91a8687d0a7902.html#g2fac188026a062d92e91a8687d0a7902">cuCtxPopCurrent</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_gb02d4c850eb16f861fe5a29682cc90ba.html#gb02d4c850eb16f861fe5a29682cc90ba">cuCtxPushCurrent</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g54699acf7e2ef27279d013ca2095f4a3.html#g54699acf7e2ef27279d013ca2095f4a3">cuCtxSetCacheConfig</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_gbe562ee6258b4fcc272ca6478ca2a2f7.html#gbe562ee6258b4fcc272ca6478ca2a2f7">cuCtxSetCurrent</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g0651954dfb9788173e60a9af7201e65a.html#g0651954dfb9788173e60a9af7201e65a">cuCtxSetLimit</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g7a54725f28d34b8c6299f0c6ca579616.html#g7a54725f28d34b8c6299f0c6ca579616">cuCtxSynchronize</a></td></tr>
        </table>
      </div>
   </td>
   <td valign="top">
<a class="anchor" name="g65dc0012348bc84810e2103a40d8e2cf"></a><!-- doxytag: member="cuda.h::cuCtxCreate" ref="g65dc0012348bc84810e2103a40d8e2cf" args="(CUcontext *pctx, unsigned int flags, CUdevice dev)" -->
<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="group__CUDA__TYPES_g09da14df1a751dcbfeccb9cf0073d64c.html#g09da14df1a751dcbfeccb9cf0073d64c">CUresult</a> cuCtxCreate           </td>
          <td>(</td>
          <td class="paramtype"><a class="el" href="group__CUDA__TYPES_gf9f5bd81658f866613785b3a0bb7d7d9.html#gf9f5bd81658f866613785b3a0bb7d7d9">CUcontext</a> *&nbsp;</td>
          <td class="paramname"> <em>pctx</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">unsigned int&nbsp;</td>
          <td class="paramname"> <em>flags</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="group__CUDA__TYPES_gcd81b70eb9968392bb5cdf582af8eab4.html#gcd81b70eb9968392bb5cdf582af8eab4">CUdevice</a>&nbsp;</td>
          <td class="paramname"> <em>dev</em></td><td>&nbsp;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td><td></td>
        </tr>
      </table>
</div>
<div class="memdoc">

<p>
Creates a new CUDA context and associates it with the calling thread. The <code>flags</code> parameter is described below. The context is created with a usage count of 1 and the caller of <a class="el" href="group__CUDA__CTX_g65dc0012348bc84810e2103a40d8e2cf.html#g65dc0012348bc84810e2103a40d8e2cf" title="Create a CUDA context.">cuCtxCreate()</a> must call <a class="el" href="group__CUDA__CTX_g27a365aebb0eb548166309f58a1e8b8e.html#g27a365aebb0eb548166309f58a1e8b8e" title="Destroy a CUDA context.">cuCtxDestroy()</a> when done using the context. If a context is already current to the thread, it is supplanted by the newly created context and may be restored by a subsequent call to <a class="el" href="group__CUDA__CTX_g2fac188026a062d92e91a8687d0a7902.html#g2fac188026a062d92e91a8687d0a7902" title="Pops the current CUDA context from the current CPU thread.">cuCtxPopCurrent()</a>.<p>
The three LSBs of the <code>flags</code> parameter can be used to control how the OS thread, which owns the CUDA context at the time of an API call, interacts with the OS scheduler when waiting for results from the GPU. Only one of the scheduling flags can be set when creating a context.<p>
<ul>
<li><a class="el" href="group__CUDA__TYPES_g12d89ce3fea2678bf187aa2876ed67a6.html#gg12d89ce3fea2678bf187aa2876ed67a6d75f4c24f8c35ef2ee9d0793badfd88c">CU_CTX_SCHED_AUTO</a>: The default value if the <code>flags</code> parameter is zero, uses a heuristic based on the number of active CUDA contexts in the process <em>C</em> and the number of logical processors in the system <em>P</em>. If <em>C</em> &gt; <em>P</em>, then CUDA will yield to other OS threads when waiting for the GPU, otherwise CUDA will not yield while waiting for results and actively spin on the processor.</li></ul>
<p>
<ul>
<li><a class="el" href="group__CUDA__TYPES_g12d89ce3fea2678bf187aa2876ed67a6.html#gg12d89ce3fea2678bf187aa2876ed67a6331d3ed1e0b55597258bd58346603afc">CU_CTX_SCHED_SPIN</a>: Instruct CUDA to actively spin when waiting for results from the GPU. This can decrease latency when waiting for the GPU, but may lower the performance of CPU threads if they are performing work in parallel with the CUDA thread.</li></ul>
<p>
<ul>
<li><a class="el" href="group__CUDA__TYPES_g12d89ce3fea2678bf187aa2876ed67a6.html#gg12d89ce3fea2678bf187aa2876ed67a64bc43364906d8dd5a7d7c8ad46ccc548">CU_CTX_SCHED_YIELD</a>: Instruct CUDA to yield its thread when waiting for results from the GPU. This can increase latency when waiting for the GPU, but can increase the performance of CPU threads performing work in parallel with the GPU.</li></ul>
<p>
<ul>
<li><a class="el" href="group__CUDA__TYPES_g12d89ce3fea2678bf187aa2876ed67a6.html#gg12d89ce3fea2678bf187aa2876ed67a662aebfe6432ade3feb32f1a409027852">CU_CTX_SCHED_BLOCKING_SYNC</a>: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work.</li></ul>
<p>
<ul>
<li><a class="el" href="group__CUDA__TYPES_g12d89ce3fea2678bf187aa2876ed67a6.html#gg12d89ce3fea2678bf187aa2876ed67a6b5bf395cc60a8cbded4c329ae9430b91">CU_CTX_BLOCKING_SYNC</a>: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work. <br>
 <b>Deprecated:</b> This flag was deprecated as of CUDA 4.0 and was replaced with <a class="el" href="group__CUDA__TYPES_g12d89ce3fea2678bf187aa2876ed67a6.html#gg12d89ce3fea2678bf187aa2876ed67a662aebfe6432ade3feb32f1a409027852">CU_CTX_SCHED_BLOCKING_SYNC</a>.</li></ul>
<p>
<ul>
<li><a class="el" href="group__CUDA__TYPES_g12d89ce3fea2678bf187aa2876ed67a6.html#gg12d89ce3fea2678bf187aa2876ed67a608c822db270f4322af6e6bb0a7786514">CU_CTX_MAP_HOST</a>: Instruct CUDA to support mapped pinned allocations. This flag must be set in order to allocate pinned host memory that is accessible to the GPU.</li></ul>
<p>
<ul>
<li><a class="el" href="group__CUDA__TYPES_g12d89ce3fea2678bf187aa2876ed67a6.html#gg12d89ce3fea2678bf187aa2876ed67a6b5a83507c2a7e14d301621c40c343a81">CU_CTX_LMEM_RESIZE_TO_MAX</a>: Instruct CUDA to not reduce local memory after resizing local memory for a kernel. This can prevent thrashing by local memory allocations when launching many kernels with high local memory usage at the cost of potentially increased memory usage.</li></ul>
<p>
Context creation will fail with <a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaac5a6ab0245179d297f1fa56ed0097183">CUDA_ERROR_UNKNOWN</a> if the compute mode of the device is <a class="el" href="group__CUDA__TYPES_g409cfd7e4863c34f8430757482886d75.html#gg409cfd7e4863c34f8430757482886d75db8a226241187db3b1f41999bb70eb47">CU_COMPUTEMODE_PROHIBITED</a>. Similarly, context creation will also fail with <a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaac5a6ab0245179d297f1fa56ed0097183">CUDA_ERROR_UNKNOWN</a> if the compute mode for the device is set to <a class="el" href="group__CUDA__TYPES_g409cfd7e4863c34f8430757482886d75.html#gg409cfd7e4863c34f8430757482886d758b2f4d5e3661d0ecd3c1a4b6fc622e30">CU_COMPUTEMODE_EXCLUSIVE</a> and there is already an active context on the device. The function <a class="el" href="group__CUDA__DEVICE_g9c3e1414f0ad901d3278a4d6645fc266.html#g9c3e1414f0ad901d3278a4d6645fc266" title="Returns information about the device.">cuDeviceGetAttribute()</a> can be used with <a class="el" href="group__CUDA__TYPES_g3b9f561d2a42733dde99b2cedcaa413a.html#gg3b9f561d2a42733dde99b2cedcaa413af6669a29a6d42968047747cbfc501289">CU_DEVICE_ATTRIBUTE_COMPUTE_MODE</a> to determine the compute mode of the device. The <em>nvidia-smi</em> tool can be used to set the compute mode for devices. Documentation for <em>nvidia-smi</em> can be obtained by passing a -h option to it.<p>
<dl compact><dt><b>Parameters:</b></dt><dd>
  <table border="0" cellspacing="2" cellpadding="0">
    <tr><td valign="top"></td><td valign="top"><em>pctx</em>&nbsp;</td><td>- Returned context handle of the new context </td></tr>
    <tr><td valign="top"></td><td valign="top"><em>flags</em>&nbsp;</td><td>- Context creation flags </td></tr>
    <tr><td valign="top"></td><td valign="top"><em>dev</em>&nbsp;</td><td>- Device to create context on</td></tr>
  </table>
</dl>
<dl class="return" compact><dt><b>Returns:</b></dt><dd><a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaaa0eed720f8a87cd1c5fd1c453bc7a03d">CUDA_SUCCESS</a>, <a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaaacf52f132faf29b473cdda6061f0f44a">CUDA_ERROR_DEINITIALIZED</a>, <a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaa8feb999f0af99b4a25ab26b3866f4df8">CUDA_ERROR_NOT_INITIALIZED</a>, <a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaaa484e9af32c1e9893ff21f0e0191a12d">CUDA_ERROR_INVALID_CONTEXT</a>, <a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaa6f047e7215788ca96c81af92a04bfb6c">CUDA_ERROR_INVALID_DEVICE</a>, <a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaa90696c86fcee1f536a1ec7d25867feeb">CUDA_ERROR_INVALID_VALUE</a>, <a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaa264c50688ed110e8476b591befe60c02">CUDA_ERROR_OUT_OF_MEMORY</a>, <a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaac5a6ab0245179d297f1fa56ed0097183">CUDA_ERROR_UNKNOWN</a> </dd></dl>
<dl class="note" compact><dt><b>Note:</b></dt><dd>Note that this function may also return error codes from previous, asynchronous launches.</dd></dl>
<dl class="see" compact><dt><b>See also:</b></dt><dd><a class="el" href="group__CUDA__CTX_g27a365aebb0eb548166309f58a1e8b8e.html#g27a365aebb0eb548166309f58a1e8b8e" title="Destroy a CUDA context.">cuCtxDestroy</a>, <a class="el" href="group__CUDA__CTX_g088a90490dafca5893ef6fbebc8de8fb.html#g088a90490dafca5893ef6fbebc8de8fb" title="Gets the context&#39;s API version.">cuCtxGetApiVersion</a>, <a class="el" href="group__CUDA__CTX_g40b6b141698f76744dea6e39b9a25360.html#g40b6b141698f76744dea6e39b9a25360" title="Returns the preferred cache configuration for the current context.">cuCtxGetCacheConfig</a>, <a class="el" href="group__CUDA__CTX_g4e84b109eba36cdaaade167f34ae881e.html#g4e84b109eba36cdaaade167f34ae881e" title="Returns the device ID for the current context.">cuCtxGetDevice</a>, <a class="el" href="group__CUDA__CTX_g9f2d47d1745752aa16da7ed0d111b6a8.html#g9f2d47d1745752aa16da7ed0d111b6a8" title="Returns resource limits.">cuCtxGetLimit</a>, <a class="el" href="group__CUDA__CTX_g2fac188026a062d92e91a8687d0a7902.html#g2fac188026a062d92e91a8687d0a7902" title="Pops the current CUDA context from the current CPU thread.">cuCtxPopCurrent</a>, <a class="el" href="group__CUDA__CTX_gb02d4c850eb16f861fe5a29682cc90ba.html#gb02d4c850eb16f861fe5a29682cc90ba" title="Pushes a context on the current CPU thread.">cuCtxPushCurrent</a>, <a class="el" href="group__CUDA__CTX_g54699acf7e2ef27279d013ca2095f4a3.html#g54699acf7e2ef27279d013ca2095f4a3" title="Sets the preferred cache configuration for the current context.">cuCtxSetCacheConfig</a>, <a class="el" href="group__CUDA__CTX_g0651954dfb9788173e60a9af7201e65a.html#g0651954dfb9788173e60a9af7201e65a" title="Set resource limits.">cuCtxSetLimit</a>, <a class="el" href="group__CUDA__CTX_g7a54725f28d34b8c6299f0c6ca579616.html#g7a54725f28d34b8c6299f0c6ca579616" title="Block for a context&#39;s tasks to complete.">cuCtxSynchronize</a> </dd></dl>

</div>
</div><p>
    </td>
  </tr>
</table>
</div>
<hr size="1"><address style="text-align: right;"><small>
Generated by Doxygen for NVIDIA CUDA Library &nbsp;<a
href="http://www.nvidia.com/cuda"><img src="nvidia_logo.jpg" alt="NVIDIA" align="middle" border="0" height="80"></a></small></address>
</body>
</html>