<!--
  Mirror-browser residue (Sophie package browser breadcrumb), preserved but
  commented out so the document begins at the DOCTYPE as valid HTML:

  Sophie

  Sophie

  distrib > Mageia > 2 > i586 > media > nonfree-release > by-pkgid > f86555c654b1f4a4c7ccf47789979868 > files > 1059

  nvidia-cuda-toolkit-devel-4.2.9-2.mga2.nonfree.i586.rpm
-->
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
<title>NVIDIA CUDA Library: cuCtxSetLimit</title>
<link href="customdoxygen.css" rel="stylesheet" type="text/css">
<link href="tabs.css" rel="stylesheet" type="text/css">
</head><body>
<!-- Generated by Doxygen 1.5.8 -->
<div class="navigation" id="top">
  <div class="tabs">
    <ul>
      <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
      <li><a href="modules.html"><span>Modules</span></a></li>
      <li><a href="annotated.html"><span>Data&nbsp;Structures</span></a></li>
      <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
    </ul>
  </div>
</div>
<div class="contents">
  <div class="navpath"><a class="el" href="group__CUDA__CTX.html">Context Management</a>
  </div>
<table cellspacing="0" cellpadding="0" border="0">
  <tr>
   <td valign="top">
      <div class="navtab">
        <table>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g65dc0012348bc84810e2103a40d8e2cf.html#g65dc0012348bc84810e2103a40d8e2cf">cuCtxCreate</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g27a365aebb0eb548166309f58a1e8b8e.html#g27a365aebb0eb548166309f58a1e8b8e">cuCtxDestroy</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g088a90490dafca5893ef6fbebc8de8fb.html#g088a90490dafca5893ef6fbebc8de8fb">cuCtxGetApiVersion</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g40b6b141698f76744dea6e39b9a25360.html#g40b6b141698f76744dea6e39b9a25360">cuCtxGetCacheConfig</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g8f13165846b73750693640fb3e8380d0.html#g8f13165846b73750693640fb3e8380d0">cuCtxGetCurrent</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g4e84b109eba36cdaaade167f34ae881e.html#g4e84b109eba36cdaaade167f34ae881e">cuCtxGetDevice</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g9f2d47d1745752aa16da7ed0d111b6a8.html#g9f2d47d1745752aa16da7ed0d111b6a8">cuCtxGetLimit</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g2fac188026a062d92e91a8687d0a7902.html#g2fac188026a062d92e91a8687d0a7902">cuCtxPopCurrent</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_gb02d4c850eb16f861fe5a29682cc90ba.html#gb02d4c850eb16f861fe5a29682cc90ba">cuCtxPushCurrent</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g54699acf7e2ef27279d013ca2095f4a3.html#g54699acf7e2ef27279d013ca2095f4a3">cuCtxSetCacheConfig</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_gbe562ee6258b4fcc272ca6478ca2a2f7.html#gbe562ee6258b4fcc272ca6478ca2a2f7">cuCtxSetCurrent</a></td></tr>
          <tr><td class="navtab"><a class="qindexHL" href="group__CUDA__CTX_g0651954dfb9788173e60a9af7201e65a.html#g0651954dfb9788173e60a9af7201e65a">cuCtxSetLimit</a></td></tr>
          <tr><td class="navtab"><a class="qindex" href="group__CUDA__CTX_g7a54725f28d34b8c6299f0c6ca579616.html#g7a54725f28d34b8c6299f0c6ca579616">cuCtxSynchronize</a></td></tr>
        </table>
      </div>
   </td>
   <td valign="top">
<a class="anchor" name="g0651954dfb9788173e60a9af7201e65a"></a><!-- doxytag: member="cuda.h::cuCtxSetLimit" ref="g0651954dfb9788173e60a9af7201e65a" args="(CUlimit limit, size_t value)" -->
<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="group__CUDA__TYPES_g09da14df1a751dcbfeccb9cf0073d64c.html#g09da14df1a751dcbfeccb9cf0073d64c">CUresult</a> cuCtxSetLimit           </td>
          <td>(</td>
          <td class="paramtype"><a class="el" href="group__CUDA__TYPES_g535044ee87a9596a955c65a92b3caa27.html#g535044ee87a9596a955c65a92b3caa27">CUlimit</a>&nbsp;</td>
          <td class="paramname"> <em>limit</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_t&nbsp;</td>
          <td class="paramname"> <em>value</em></td><td>&nbsp;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td><td></td>
        </tr>
      </table>
</div>
<div class="memdoc">

<p>
Setting <code>limit</code> to <code>value</code> is a request by the application to update the current limit maintained by the context. The driver is free to modify the requested value to meet h/w requirements (this could be clamping to minimum or maximum values, rounding up to nearest element size, etc). The application can use <a class="el" href="group__CUDA__CTX_g9f2d47d1745752aa16da7ed0d111b6a8.html#g9f2d47d1745752aa16da7ed0d111b6a8" title="Returns resource limits.">cuCtxGetLimit()</a> to find out exactly what the limit has been set to.<p>
Setting each <a class="el" href="group__CUDA__TYPES_g535044ee87a9596a955c65a92b3caa27.html#g535044ee87a9596a955c65a92b3caa27">CUlimit</a> has its own specific restrictions, so each is discussed here.<p>
<ul>
<li><a class="el" href="group__CUDA__TYPES_g8054bb850a48a884875c90659d81bfd8.html#gg8054bb850a48a884875c90659d81bfd8ebe51e384a8b4b79459915bb1c31bc39">CU_LIMIT_STACK_SIZE</a> controls the stack size of each GPU thread. This limit is only applicable to devices of compute capability 2.0 and higher. Attempting to set this limit on devices of compute capability less than 2.0 will result in the error <a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaad10e6e6ef4b01290d2202d43c3ca6821">CUDA_ERROR_UNSUPPORTED_LIMIT</a> being returned.</li></ul>
<p>
<ul>
<li><a class="el" href="group__CUDA__TYPES_g8054bb850a48a884875c90659d81bfd8.html#gg8054bb850a48a884875c90659d81bfd816f25aa2c37a06580ab533d8ae7db948">CU_LIMIT_PRINTF_FIFO_SIZE</a> controls the size of the FIFO used by the printf() device system call. Setting <a class="el" href="group__CUDA__TYPES_g8054bb850a48a884875c90659d81bfd8.html#gg8054bb850a48a884875c90659d81bfd816f25aa2c37a06580ab533d8ae7db948">CU_LIMIT_PRINTF_FIFO_SIZE</a> must be performed before launching any kernel that uses the printf() device system call, otherwise <a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaa90696c86fcee1f536a1ec7d25867feeb">CUDA_ERROR_INVALID_VALUE</a> will be returned. This limit is only applicable to devices of compute capability 2.0 and higher. Attempting to set this limit on devices of compute capability less than 2.0 will result in the error <a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaad10e6e6ef4b01290d2202d43c3ca6821">CUDA_ERROR_UNSUPPORTED_LIMIT</a> being returned.</li></ul>
<p>
<ul>
<li><a class="el" href="group__CUDA__TYPES_g8054bb850a48a884875c90659d81bfd8.html#gg8054bb850a48a884875c90659d81bfd886d01dbc431b04edd5d618257aaa246b">CU_LIMIT_MALLOC_HEAP_SIZE</a> controls the size of the heap used by the malloc() and free() device system calls. Setting <a class="el" href="group__CUDA__TYPES_g8054bb850a48a884875c90659d81bfd8.html#gg8054bb850a48a884875c90659d81bfd886d01dbc431b04edd5d618257aaa246b">CU_LIMIT_MALLOC_HEAP_SIZE</a> must be performed before launching any kernel that uses the malloc() or free() device system calls, otherwise <a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaa90696c86fcee1f536a1ec7d25867feeb">CUDA_ERROR_INVALID_VALUE</a> will be returned. This limit is only applicable to devices of compute capability 2.0 and higher. Attempting to set this limit on devices of compute capability less than 2.0 will result in the error <a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaad10e6e6ef4b01290d2202d43c3ca6821">CUDA_ERROR_UNSUPPORTED_LIMIT</a> being returned.</li></ul>
<p>
<dl compact><dt><b>Parameters:</b></dt><dd>
  <table border="0" cellspacing="2" cellpadding="0">
    <tr><td valign="top"></td><td valign="top"><em>limit</em>&nbsp;</td><td>- Limit to set </td></tr>
    <tr><td valign="top"></td><td valign="top"><em>value</em>&nbsp;</td><td>- Size in bytes of limit</td></tr>
  </table>
</dl>
<dl class="return" compact><dt><b>Returns:</b></dt><dd><a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaaa0eed720f8a87cd1c5fd1c453bc7a03d">CUDA_SUCCESS</a>, <a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaa90696c86fcee1f536a1ec7d25867feeb">CUDA_ERROR_INVALID_VALUE</a>, <a class="el" href="group__CUDA__TYPES_g0cdead942fd5028d157641eef6bdeeaa.html#gg0cdead942fd5028d157641eef6bdeeaad10e6e6ef4b01290d2202d43c3ca6821">CUDA_ERROR_UNSUPPORTED_LIMIT</a> </dd></dl>
<dl class="note" compact><dt><b>Note:</b></dt><dd>Note that this function may also return error codes from previous, asynchronous launches.</dd></dl>
<dl class="see" compact><dt><b>See also:</b></dt><dd><a class="el" href="group__CUDA__CTX_g65dc0012348bc84810e2103a40d8e2cf.html#g65dc0012348bc84810e2103a40d8e2cf" title="Create a CUDA context.">cuCtxCreate</a>, <a class="el" href="group__CUDA__CTX_g27a365aebb0eb548166309f58a1e8b8e.html#g27a365aebb0eb548166309f58a1e8b8e" title="Destroy a CUDA context.">cuCtxDestroy</a>, <a class="el" href="group__CUDA__CTX_g088a90490dafca5893ef6fbebc8de8fb.html#g088a90490dafca5893ef6fbebc8de8fb" title="Gets the context&#39;s API version.">cuCtxGetApiVersion</a>, <a class="el" href="group__CUDA__CTX_g40b6b141698f76744dea6e39b9a25360.html#g40b6b141698f76744dea6e39b9a25360" title="Returns the preferred cache configuration for the current context.">cuCtxGetCacheConfig</a>, <a class="el" href="group__CUDA__CTX_g4e84b109eba36cdaaade167f34ae881e.html#g4e84b109eba36cdaaade167f34ae881e" title="Returns the device ID for the current context.">cuCtxGetDevice</a>, <a class="el" href="group__CUDA__CTX_g9f2d47d1745752aa16da7ed0d111b6a8.html#g9f2d47d1745752aa16da7ed0d111b6a8" title="Returns resource limits.">cuCtxGetLimit</a>, <a class="el" href="group__CUDA__CTX_g2fac188026a062d92e91a8687d0a7902.html#g2fac188026a062d92e91a8687d0a7902" title="Pops the current CUDA context from the current CPU thread.">cuCtxPopCurrent</a>, <a class="el" href="group__CUDA__CTX_gb02d4c850eb16f861fe5a29682cc90ba.html#gb02d4c850eb16f861fe5a29682cc90ba" title="Pushes a context on the current CPU thread.">cuCtxPushCurrent</a>, <a class="el" href="group__CUDA__CTX_g54699acf7e2ef27279d013ca2095f4a3.html#g54699acf7e2ef27279d013ca2095f4a3" title="Sets the preferred cache configuration for the current context.">cuCtxSetCacheConfig</a>, <a class="el" href="group__CUDA__CTX_g7a54725f28d34b8c6299f0c6ca579616.html#g7a54725f28d34b8c6299f0c6ca579616" title="Block for a context&#39;s tasks to complete.">cuCtxSynchronize</a> </dd></dl>

</div>
</div><p>
    </td>
  </tr>
</table>
</div>
<hr size="1"><address style="text-align: right;"><small>
Generated by Doxygen for NVIDIA CUDA Library &nbsp;<a
href="http://www.nvidia.com/cuda"><img src="nvidia_logo.jpg" alt="NVIDIA" align="middle" border="0" height="80"></a></small></address>
</body>
</html>