arm_compute v18.02
[platform/upstream/armcl.git] / documentation / softmax__layer__quantized_8cl_source.xhtml
index a059460..18a8481 100644
@@ -40,7 +40,7 @@
  <tr style="height: 56px;">
   <td style="padding-left: 0.5em;">
    <div id="projectname">Compute Library
-   &#160;<span id="projectnumber">17.12</span>
+   &#160;<span id="projectnumber">18.02</span>
    </div>
   </td>
  </tr>
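The hunk below shows the 17.12 quantized softmax kernels (softmax_layer_max_quantized, softmax_layer_shift_exp_sum_quantized, softmax_layer_norm_quantized) being replaced in 18.02 by VECTOR_SIZE-parameterised variants (softmax_layer_max_shift_exp_sum_quantized_serial and a parallel counterpart) built on the ASYMM_* fixed-point helpers from helpers_asymm.h. As a reading aid, here is a minimal scalar C sketch of the computation both versions perform on one QASYMM8 row. It is not library code: the function name quantized_softmax_row and the beta_scale parameter are illustrative, and the assumption that probability 1.0 maps to 256 before saturating to uchar mirrors the kernels' final rounding-divide-by-pow2 step but is stated here as an assumption.

#include <math.h>
#include <stddef.h>
#include <stdint.h>

/* Scalar reference for one row of QASYMM8 softmax (illustrative sketch only).
 * The OpenCL kernels in this file perform the same three steps in fixed point:
 *   1) reduce the row to its maximum value,
 *   2) exponentiate the scaled differences (data - max) and accumulate the sum,
 *   3) normalise by that sum and saturate the result back to uchar.
 * beta_scale stands in for the beta * input-scale factor that the kernels fold
 * into INPUT_BETA_MULTIPLIER / INPUT_BETA_LEFT_SHIFT (hypothetical parameter). */
static void quantized_softmax_row(const uint8_t *src, uint8_t *dst, size_t width, float beta_scale)
{
    /* 1) Max reduction over the row. */
    uint8_t max_val = 0;
    for (size_t i = 0; i < width; ++i)
    {
        max_val = src[i] > max_val ? src[i] : max_val;
    }

    /* 2) Exponentiate the (non-positive) scaled differences and sum them. */
    float sum = 0.0f;
    for (size_t i = 0; i < width; ++i)
    {
        sum += expf(beta_scale * (float)((int)src[i] - (int)max_val));
    }

    /* 3) Normalise and requantise. Assumed output mapping: probability 1.0 -> 256,
     * saturated to 255, analogous to the kernels' rounding divide by a power of two. */
    for (size_t i = 0; i < width; ++i)
    {
        float prob = expf(beta_scale * (float)((int)src[i] - (int)max_val)) / sum;
        float q    = prob * 256.0f;
        dst[i]     = (uint8_t)(q >= 255.0f ? 255.0f : q);
    }
}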
@@ -117,36 +117,45 @@ $(document).ready(function(){initNavTree('softmax__layer__quantized_8cl_source.x
 <div class="title">softmax_layer_quantized.cl</div>  </div>
 </div><!--header-->
 <div class="contents">
-<a href="softmax__layer__quantized_8cl.xhtml">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno">    1</span>&#160;<span class="comment">/*</span></div><div class="line"><a name="l00002"></a><span class="lineno">    2</span>&#160;<span class="comment"> * Copyright (c) 2017 ARM Limited.</span></div><div class="line"><a name="l00003"></a><span class="lineno">    3</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00004"></a><span class="lineno">    4</span>&#160;<span class="comment"> * SPDX-License-Identifier: MIT</span></div><div class="line"><a name="l00005"></a><span class="lineno">    5</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00006"></a><span class="lineno">    6</span>&#160;<span class="comment"> * Permission is hereby granted, free of charge, to any person obtaining a copy</span></div><div class="line"><a name="l00007"></a><span class="lineno">    7</span>&#160;<span class="comment"> * of this software and associated documentation files (the &quot;Software&quot;), to</span></div><div class="line"><a name="l00008"></a><span class="lineno">    8</span>&#160;<span class="comment"> * deal in the Software without restriction, including without limitation the</span></div><div class="line"><a name="l00009"></a><span class="lineno">    9</span>&#160;<span class="comment"> * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or</span></div><div class="line"><a name="l00010"></a><span class="lineno">   10</span>&#160;<span class="comment"> * sell copies of the Software, and to permit persons to whom the Software is</span></div><div class="line"><a name="l00011"></a><span class="lineno">   11</span>&#160;<span class="comment"> * furnished to do so, subject to the following conditions:</span></div><div class="line"><a name="l00012"></a><span class="lineno">   12</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00013"></a><span class="lineno">   13</span>&#160;<span class="comment"> * The above copyright notice and this permission notice shall be included in all</span></div><div class="line"><a name="l00014"></a><span class="lineno">   14</span>&#160;<span class="comment"> * copies or substantial portions of the Software.</span></div><div class="line"><a name="l00015"></a><span class="lineno">   15</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00016"></a><span class="lineno">   16</span>&#160;<span class="comment"> * THE SOFTWARE IS PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR</span></div><div class="line"><a name="l00017"></a><span class="lineno">   17</span>&#160;<span class="comment"> * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,</span></div><div class="line"><a name="l00018"></a><span class="lineno">   18</span>&#160;<span class="comment"> * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE</span></div><div class="line"><a name="l00019"></a><span class="lineno">   19</span>&#160;<span class="comment"> * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER</span></div><div class="line"><a name="l00020"></a><span class="lineno">   20</span>&#160;<span class="comment"> * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,</span></div><div class="line"><a name="l00021"></a><span class="lineno">   21</span>&#160;<span class="comment"> * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE</span></div><div class="line"><a name="l00022"></a><span class="lineno">   22</span>&#160;<span class="comment"> * SOFTWARE.</span></div><div class="line"><a name="l00023"></a><span class="lineno">   23</span>&#160;<span class="comment"> */</span></div><div class="line"><a name="l00024"></a><span class="lineno">   24</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="asymm__helper_8h.xhtml">asymm_helper.h</a>&quot;</span></div><div class="line"><a name="l00025"></a><span class="lineno">   25</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="helpers_8h.xhtml">helpers.h</a>&quot;</span></div><div class="line"><a name="l00026"></a><span class="lineno">   26</span>&#160;</div><div class="line"><a name="l00027"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">   27</a></span>&#160;<span class="preprocessor">#define MAX_OP(x, y, type, size) max((x), (y))</span></div><div class="line"><a name="l00028"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">   28</a></span>&#160;<span class="preprocessor">#define ADD_OP(x, y, type, size) ((x) + (y))</span></div><div class="line"><a name="l00029"></a><span class="lineno">   29</span>&#160;</div><div class="line"><a name="l00030"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#a4701bcd6d0c0472d7d3e8017a24f2be6">   30</a></span>&#160;__constant uchar16 <a class="code" href="softmax__layer__quantized_8cl.xhtml#a4701bcd6d0c0472d7d3e8017a24f2be6">type_min</a> = 0;</div><div class="line"><a name="l00031"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#a0712735973f172ac9efc7d48a31e47ad">   31</a></span>&#160;__constant uint16 <a class="code" href="softmax__layer__quantized_8cl.xhtml#a0712735973f172ac9efc7d48a31e47ad">idx16</a>     = (uint16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);</div><div class="line"><a name="l00032"></a><span class="lineno">   32</span>&#160;</div><div class="line"><a name="l00055"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#a79bd9092e1a23c550d4dc1aeb9b746ad">   55</a></span>&#160;__kernel <span class="keywordtype">void</span> <a class="code" href="softmax__layer__quantized_8cl.xhtml#a79bd9092e1a23c550d4dc1aeb9b746ad">softmax_layer_max_quantized</a>(</div><div class="line"><a name="l00056"></a><span class="lineno">   56</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(src),</div><div class="line"><a name="l00057"></a><span class="lineno">   57</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>),</div><div 
class="line"><a name="l00058"></a><span class="lineno">   58</span>&#160;    uint width)</div><div class="line"><a name="l00059"></a><span class="lineno">   59</span>&#160;{</div><div class="line"><a name="l00060"></a><span class="lineno">   60</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> src = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(src);</div><div class="line"><a name="l00061"></a><span class="lineno">   61</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a> = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(dst);</div><div class="line"><a name="l00062"></a><span class="lineno">   62</span>&#160;</div><div class="line"><a name="l00063"></a><span class="lineno">   63</span>&#160;    <span class="comment">// Initialize local maximum</span></div><div class="line"><a name="l00064"></a><span class="lineno">   64</span>&#160;    uchar16 max_val = 0;</div><div class="line"><a name="l00065"></a><span class="lineno">   65</span>&#160;</div><div class="line"><a name="l00066"></a><span class="lineno">   66</span>&#160;    <span class="comment">// Calculate max of row</span></div><div class="line"><a name="l00067"></a><span class="lineno">   67</span>&#160;    <span class="keyword">const</span> uint width4 = width &gt;&gt; 4;</div><div class="line"><a name="l00068"></a><span class="lineno">   68</span>&#160;    <span class="keywordflow">for</span>(uint i = 0; i &lt; width4; i++)</div><div class="line"><a name="l00069"></a><span class="lineno">   69</span>&#160;    {</div><div class="line"><a name="l00070"></a><span class="lineno">   70</span>&#160;        uchar16 data = vload16(0, (__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;src, i &lt;&lt; 4, 0));</div><div class="line"><a name="l00071"></a><span class="lineno">   71</span>&#160;        max_val      = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(data, max_val, uchar, 16);</div><div class="line"><a name="l00072"></a><span class="lineno">   72</span>&#160;    }</div><div class="line"><a name="l00073"></a><span class="lineno">   73</span>&#160;</div><div class="line"><a name="l00074"></a><span class="lineno">   74</span>&#160;<span class="preprocessor">#ifdef NON_MULTIPLE_OF_16</span></div><div class="line"><a name="l00075"></a><span class="lineno">   75</span>&#160;    <span class="comment">// Handle non multiple of 16</span></div><div class="line"><a name="l00076"></a><span class="lineno">   76</span>&#160;    uchar16 data = vload16(0, (__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;src, width4 &lt;&lt; 4, 0));</div><div class="line"><a name="l00077"></a><span class="lineno">   77</span>&#160;    uchar16 widx = convert_uchar16(((uint16)(width4 &lt;&lt; 4) + <a class="code" href="softmax__layer__quantized_8cl.xhtml#a0712735973f172ac9efc7d48a31e47ad">idx16</a>) &lt; width);</div><div class="line"><a name="l00078"></a><span class="lineno">   78</span>&#160;    max_val      = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val, select(<a class="code" 
href="softmax__layer__quantized_8cl.xhtml#a4701bcd6d0c0472d7d3e8017a24f2be6">type_min</a>, data, widx), uchar, 16);</div><div class="line"><a name="l00079"></a><span class="lineno">   79</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* NON_MULTIPLE_OF_16 */</span><span class="preprocessor"></span></div><div class="line"><a name="l00080"></a><span class="lineno">   80</span>&#160;</div><div class="line"><a name="l00081"></a><span class="lineno">   81</span>&#160;    <span class="comment">// Perform max reduction</span></div><div class="line"><a name="l00082"></a><span class="lineno">   82</span>&#160;    max_val.s01234567 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val.s01234567, max_val.s89ABCDEF, uchar, 8);</div><div class="line"><a name="l00083"></a><span class="lineno">   83</span>&#160;    max_val.s0123     = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val.s0123, max_val.s4567, uchar, 4);</div><div class="line"><a name="l00084"></a><span class="lineno">   84</span>&#160;    max_val.s01       = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val.s01, max_val.s23, uchar, 2);</div><div class="line"><a name="l00085"></a><span class="lineno">   85</span>&#160;    max_val.s0        = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val.s0, max_val.s1, uchar, 1);</div><div class="line"><a name="l00086"></a><span class="lineno">   86</span>&#160;</div><div class="line"><a name="l00087"></a><span class="lineno">   87</span>&#160;    <span class="comment">// Store result</span></div><div class="line"><a name="l00088"></a><span class="lineno">   88</span>&#160;    *((__global uchar *)dst.<a class="code" href="struct_image.xhtml#acf52c23cbd7424606c10a606524e3e32">ptr</a>) = max_val.s0;</div><div class="line"><a name="l00089"></a><span class="lineno">   89</span>&#160;}</div><div class="line"><a name="l00090"></a><span class="lineno">   90</span>&#160;</div><div class="line"><a name="l00091"></a><span class="lineno">   91</span>&#160;<span class="preprocessor">#if defined(DIFF_MIN)</span></div><div class="line"><a name="l00092"></a><span class="lineno">   92</span>&#160;</div><div class="line"><a name="l00093"></a><span class="lineno">   93</span>&#160;int16 mult_by_quantized_multiplier(int16 data)</div><div class="line"><a name="l00094"></a><span class="lineno">   94</span>&#160;{</div><div class="line"><a name="l00095"></a><span class="lineno">   95</span>&#160;<span class="preprocessor">#if defined(INPUT_BETA_MULTIPLIER) &amp;&amp; defined(INPUT_BETA_LEFT_SHIFT)</span></div><div class="line"><a name="l00096"></a><span class="lineno">   96</span>&#160;    <span class="keywordflow">if</span>(INPUT_BETA_MULTIPLIER &gt; 1)</div><div class="line"><a name="l00097"></a><span class="lineno">   97</span>&#160;    {</div><div class="line"><a name="l00098"></a><span class="lineno">   98</span>&#160;        <span class="keywordflow">return</span> <a class="code" href="asymm__helper_8h.xhtml#ab98c50e16a18dd439b2b7ecdd1ecd81c">asymm_mult</a>(data * (1 &lt;&lt; INPUT_BETA_LEFT_SHIFT), INPUT_BETA_MULTIPLIER);</div><div class="line"><a name="l00099"></a><span class="lineno">   99</span>&#160;    }</div><div class="line"><a name="l00100"></a><span class="lineno">  100</span>&#160;<span class="preprocessor">#endif </span><span 
class="comment">/* defined(INPUT_BETA_MULTIPLIER) &amp;&amp; defined(INPUT_BETA_LEFT_SHIFT) */</span><span class="preprocessor"></span></div><div class="line"><a name="l00101"></a><span class="lineno">  101</span>&#160;    <span class="keywordflow">return</span> data;</div><div class="line"><a name="l00102"></a><span class="lineno">  102</span>&#160;}</div><div class="line"><a name="l00103"></a><span class="lineno">  103</span>&#160;</div><div class="line"><a name="l00145"></a><span class="lineno">  145</span>&#160;__kernel <span class="keywordtype">void</span> softmax_layer_shift_exp_sum_quantized(</div><div class="line"><a name="l00146"></a><span class="lineno">  146</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(src),</div><div class="line"><a name="l00147"></a><span class="lineno">  147</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="namespacearm__compute_1_1test_1_1fixed__point__arithmetic_1_1detail.xhtml#ad91bb73431b4de1f4946ed949d444849">max</a>),</div><div class="line"><a name="l00148"></a><span class="lineno">  148</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>),</div><div class="line"><a name="l00149"></a><span class="lineno">  149</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a>),</div><div class="line"><a name="l00150"></a><span class="lineno">  150</span>&#160;    uint width)</div><div class="line"><a name="l00151"></a><span class="lineno">  151</span>&#160;{</div><div class="line"><a name="l00152"></a><span class="lineno">  152</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> src = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(src);</div><div class="line"><a name="l00153"></a><span class="lineno">  153</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a> = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(dst);</div><div class="line"><a name="l00154"></a><span class="lineno">  154</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="namespacearm__compute_1_1test_1_1fixed__point__arithmetic_1_1detail.xhtml#ad91bb73431b4de1f4946ed949d444849">max</a> = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(max);</div><div class="line"><a name="l00155"></a><span class="lineno">  155</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a> = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(sum);</div><div class="line"><a name="l00156"></a><span class="lineno">  156</span>&#160;</div><div class="line"><a name="l00157"></a><span class="lineno">  157</span>&#160;    <span class="comment">// Load max value of 1D logits vector (row)</span></div><div class="line"><a 
name="l00158"></a><span class="lineno">  158</span>&#160;    <span class="keywordtype">int</span> max_val = convert_int(*((__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;max, 0, 0)));</div><div class="line"><a name="l00159"></a><span class="lineno">  159</span>&#160;</div><div class="line"><a name="l00160"></a><span class="lineno">  160</span>&#160;    <span class="comment">// Set sum vector, Q(EXP_ACCUMULATION_INT_BITS)</span></div><div class="line"><a name="l00161"></a><span class="lineno">  161</span>&#160;    int16 sum1D = 0;</div><div class="line"><a name="l00162"></a><span class="lineno">  162</span>&#160;</div><div class="line"><a name="l00163"></a><span class="lineno">  163</span>&#160;    <span class="comment">// Shift values, exp and sum</span></div><div class="line"><a name="l00164"></a><span class="lineno">  164</span>&#160;    <span class="keyword">const</span> uint width4 = width &gt;&gt; 4;</div><div class="line"><a name="l00165"></a><span class="lineno">  165</span>&#160;    <span class="keywordflow">for</span>(uint i = 0; i &lt; width4; i++)</div><div class="line"><a name="l00166"></a><span class="lineno">  166</span>&#160;    {</div><div class="line"><a name="l00167"></a><span class="lineno">  167</span>&#160;        uchar16 data         = vload16(0, (__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;src, i &lt;&lt; 4, 0));</div><div class="line"><a name="l00168"></a><span class="lineno">  168</span>&#160;        int16 data_fp        = convert_int16(data);</div><div class="line"><a name="l00169"></a><span class="lineno">  169</span>&#160;        int16 data_diff      = data_fp - max_val;</div><div class="line"><a name="l00170"></a><span class="lineno">  170</span>&#160;        int16 data_diff_mult = mult_by_quantized_multiplier(data_diff);</div><div class="line"><a name="l00171"></a><span class="lineno">  171</span>&#160;        data_fp              = <a class="code" href="asymm__helper_8h.xhtml#ad1aa37a99125a65c4f4f3741011677e2">asymm_exp_on_negative_values</a>(data_diff_mult, SCALED_DIFF_INT_BITS);</div><div class="line"><a name="l00172"></a><span class="lineno">  172</span>&#160;        data_fp              = <a class="code" href="asymm__helper_8h.xhtml#a4496e3d044dda78318d27aa7f9c0e810">asymm_rescale</a>(data_fp, 0, EXP_ACCUMULATION_INT_BITS);</div><div class="line"><a name="l00173"></a><span class="lineno">  173</span>&#160;        vstore16(data_diff, 0, (__global <span class="keywordtype">int</span> *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;dst, i &lt;&lt; 4, 0));</div><div class="line"><a name="l00174"></a><span class="lineno">  174</span>&#160;        sum1D = sum1D + select(0, data_fp, data_diff &gt;= (int16)(DIFF_MIN));</div><div class="line"><a name="l00175"></a><span class="lineno">  175</span>&#160;    }</div><div class="line"><a name="l00176"></a><span class="lineno">  176</span>&#160;</div><div class="line"><a name="l00177"></a><span class="lineno">  177</span>&#160;<span class="preprocessor">#ifdef NON_MULTIPLE_OF_16</span></div><div class="line"><a name="l00178"></a><span class="lineno">  178</span>&#160;    <span class="comment">// Handle non multiple of 16</span></div><div class="line"><a name="l00179"></a><span class="lineno">  179</span>&#160;    uchar16 data         = vload16(0, (__global uchar *)<a class="code" 
href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;src, width4 &lt;&lt; 4, 0));</div><div class="line"><a name="l00180"></a><span class="lineno">  180</span>&#160;    int16 data_fp        = convert_int16(data);</div><div class="line"><a name="l00181"></a><span class="lineno">  181</span>&#160;    int16 data_diff      = data_fp - max_val;</div><div class="line"><a name="l00182"></a><span class="lineno">  182</span>&#160;    int16 data_diff_mult = mult_by_quantized_multiplier(data_diff);</div><div class="line"><a name="l00183"></a><span class="lineno">  183</span>&#160;    data_fp              = <a class="code" href="asymm__helper_8h.xhtml#ad1aa37a99125a65c4f4f3741011677e2">asymm_exp_on_negative_values</a>(data_diff_mult, SCALED_DIFF_INT_BITS);</div><div class="line"><a name="l00184"></a><span class="lineno">  184</span>&#160;    data_fp              = <a class="code" href="asymm__helper_8h.xhtml#a4496e3d044dda78318d27aa7f9c0e810">asymm_rescale</a>(data_fp, 0, EXP_ACCUMULATION_INT_BITS);</div><div class="line"><a name="l00185"></a><span class="lineno">  185</span>&#160;    int16 widx           = convert_int16(((uint16)(width4 &lt;&lt; 4) + <a class="code" href="softmax__layer__quantized_8cl.xhtml#a0712735973f172ac9efc7d48a31e47ad">idx16</a>) &lt; width);</div><div class="line"><a name="l00186"></a><span class="lineno">  186</span>&#160;    vstore16(data_diff, 0, (__global <span class="keywordtype">int</span> *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;dst, width4 &lt;&lt; 4, 0));</div><div class="line"><a name="l00187"></a><span class="lineno">  187</span>&#160;    data_fp = select(0, data_fp, data_diff &gt;= (int16)(DIFF_MIN));</div><div class="line"><a name="l00188"></a><span class="lineno">  188</span>&#160;    sum1D   = sum1D + select(0, data_fp, widx);</div><div class="line"><a name="l00189"></a><span class="lineno">  189</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* NON_MULTIPLE_OF_16 */</span><span class="preprocessor"></span></div><div class="line"><a name="l00190"></a><span class="lineno">  190</span>&#160;</div><div class="line"><a name="l00191"></a><span class="lineno">  191</span>&#160;    <span class="comment">// Perform min/max reduction</span></div><div class="line"><a name="l00192"></a><span class="lineno">  192</span>&#160;    sum1D.s01234567 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(sum1D.s01234567, sum1D.s89ABCDEF, <a class="code" href="fixed__point_8h.xhtml#a26a8ca855cd14b1867173d301baf6c4f">qs16</a>, 8);</div><div class="line"><a name="l00193"></a><span class="lineno">  193</span>&#160;    sum1D.s0123     = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(sum1D.s0123, sum1D.s4567, <a class="code" href="fixed__point_8h.xhtml#a26a8ca855cd14b1867173d301baf6c4f">qs16</a>, 4);</div><div class="line"><a name="l00194"></a><span class="lineno">  194</span>&#160;    sum1D.s01       = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(sum1D.s01, sum1D.s23, <a class="code" href="fixed__point_8h.xhtml#a26a8ca855cd14b1867173d301baf6c4f">qs16</a>, 2);</div><div class="line"><a name="l00195"></a><span class="lineno">  195</span>&#160;    sum1D.s0        = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(sum1D.s0, sum1D.s1, <a class="code" 
href="fixed__point_8h.xhtml#a26a8ca855cd14b1867173d301baf6c4f">qs16</a>, 1);</div><div class="line"><a name="l00196"></a><span class="lineno">  196</span>&#160;</div><div class="line"><a name="l00197"></a><span class="lineno">  197</span>&#160;    <span class="comment">// Calculate and store result</span></div><div class="line"><a name="l00198"></a><span class="lineno">  198</span>&#160;    *((__global <span class="keywordtype">int</span> *)sum.<a class="code" href="struct_image.xhtml#acf52c23cbd7424606c10a606524e3e32">ptr</a>) = sum1D.s0;</div><div class="line"><a name="l00199"></a><span class="lineno">  199</span>&#160;}</div><div class="line"><a name="l00200"></a><span class="lineno">  200</span>&#160;</div><div class="line"><a name="l00232"></a><span class="lineno">  232</span>&#160;__kernel <span class="keywordtype">void</span> softmax_layer_norm_quantized(</div><div class="line"><a name="l00233"></a><span class="lineno">  233</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(src),</div><div class="line"><a name="l00234"></a><span class="lineno">  234</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a>),</div><div class="line"><a name="l00235"></a><span class="lineno">  235</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>))</div><div class="line"><a name="l00236"></a><span class="lineno">  236</span>&#160;{</div><div class="line"><a name="l00237"></a><span class="lineno">  237</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> src = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(src);</div><div class="line"><a name="l00238"></a><span class="lineno">  238</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a> = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(dst);</div><div class="line"><a name="l00239"></a><span class="lineno">  239</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a> = <a class="code" href="helpers_8h.xhtml#a071aa45af973feac43b14f62e54a6fce">CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP</a>(sum);</div><div class="line"><a name="l00240"></a><span class="lineno">  240</span>&#160;</div><div class="line"><a name="l00241"></a><span class="lineno">  241</span>&#160;    <span class="comment">// Load max value of 1D logits vector (row)</span></div><div class="line"><a name="l00242"></a><span class="lineno">  242</span>&#160;    <span class="keywordtype">int</span> sum_val = *((__global <span class="keywordtype">int</span> *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;sum, 0, get_global_id(1)));</div><div class="line"><a name="l00243"></a><span class="lineno">  243</span>&#160;</div><div class="line"><a name="l00244"></a><span class="lineno">  244</span>&#160;    <span class="comment">// It will be better to calculate this in prev layer and pass here as 
parameter</span></div><div class="line"><a name="l00245"></a><span class="lineno">  245</span>&#160;    uint  sum_val_u               = convert_uint(sum_val);</div><div class="line"><a name="l00246"></a><span class="lineno">  246</span>&#160;    <span class="keywordtype">int</span>   headroom_plus_one       = <a class="code" href="namespacearm__compute_1_1test_1_1fixed__point__arithmetic_1_1detail.xhtml#aa43c67a536c661ec28398d06bc8ba5af">clz</a>(sum_val_u);</div><div class="line"><a name="l00247"></a><span class="lineno">  247</span>&#160;    <span class="keywordtype">int</span>   num_bits_over_unit      = EXP_ACCUMULATION_INT_BITS - headroom_plus_one;</div><div class="line"><a name="l00248"></a><span class="lineno">  248</span>&#160;    <span class="keywordtype">int</span>   shifted_sum_minus_one_1 = convert_int((sum_val_u &lt;&lt; headroom_plus_one) - (1u &lt;&lt; 31));</div><div class="line"><a name="l00249"></a><span class="lineno">  249</span>&#160;    int16 shifted_sum_minus_one   = shifted_sum_minus_one_1;</div><div class="line"><a name="l00250"></a><span class="lineno">  250</span>&#160;    int16 shifted_scale           = <a class="code" href="asymm__helper_8h.xhtml#adae82358534f6f4a2bcbdd833413ccb8">asymm_one_over_one_plus_x_for_x_in_0_1</a>(shifted_sum_minus_one);</div><div class="line"><a name="l00251"></a><span class="lineno">  251</span>&#160;</div><div class="line"><a name="l00252"></a><span class="lineno">  252</span>&#160;    <span class="comment">// It was already calculated in prev layer, should be stored into tmp output and reused</span></div><div class="line"><a name="l00253"></a><span class="lineno">  253</span>&#160;    int16 data_diff      = vload16(0, (__global <span class="keywordtype">int</span> *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;src, 0, 0));</div><div class="line"><a name="l00254"></a><span class="lineno">  254</span>&#160;    int16 data_diff_mult = mult_by_quantized_multiplier(data_diff);</div><div class="line"><a name="l00255"></a><span class="lineno">  255</span>&#160;    int16 data           = <a class="code" href="asymm__helper_8h.xhtml#ad1aa37a99125a65c4f4f3741011677e2">asymm_exp_on_negative_values</a>(data_diff_mult, SCALED_DIFF_INT_BITS);</div><div class="line"><a name="l00256"></a><span class="lineno">  256</span>&#160;</div><div class="line"><a name="l00257"></a><span class="lineno">  257</span>&#160;    data = <a class="code" href="asymm__helper_8h.xhtml#ab98c50e16a18dd439b2b7ecdd1ecd81c">asymm_mult</a>(shifted_scale, data);</div><div class="line"><a name="l00258"></a><span class="lineno">  258</span>&#160;    data = <a class="code" href="asymm__helper_8h.xhtml#a766e0a81889256cfa161f7c08951c0b8">asymm_rounding_divide_by_pow2</a>(data, num_bits_over_unit + 31 - 8);</div><div class="line"><a name="l00259"></a><span class="lineno">  259</span>&#160;    data = select(0, data, data_diff &gt;= (int16)(DIFF_MIN));</div><div class="line"><a name="l00260"></a><span class="lineno">  260</span>&#160;    vstore16(convert_uchar16_sat(data), 0, (__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;dst, 0, 0));</div><div class="line"><a name="l00261"></a><span class="lineno">  261</span>&#160;}</div><div class="line"><a name="l00262"></a><span class="lineno">  262</span>&#160;</div><div class="line"><a name="l00263"></a><span class="lineno">  263</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* defined(DIFF_MIN) */</span><span 
class="preprocessor"></span></div><div class="ttc" id="helpers_8h_xhtml_a071aa45af973feac43b14f62e54a6fce"><div class="ttname"><a href="helpers_8h.xhtml#a071aa45af973feac43b14f62e54a6fce">CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP</a></div><div class="ttdeci">#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name)</div><div class="ttdef"><b>Definition:</b> <a href="helpers_8h_source.xhtml#l00111">helpers.h:111</a></div></div>
-<div class="ttc" id="asymm__helper_8h_xhtml_ad1aa37a99125a65c4f4f3741011677e2"><div class="ttname"><a href="asymm__helper_8h.xhtml#ad1aa37a99125a65c4f4f3741011677e2">asymm_exp_on_negative_values</a></div><div class="ttdeci">int16 asymm_exp_on_negative_values(int16 a, int k_integer_bits)</div><div class="ttdoc">Calculates  for x &lt; 0. </div><div class="ttdef"><b>Definition:</b> <a href="asymm__helper_8h_source.xhtml#l00200">asymm_helper.h:200</a></div></div>
-<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a4701bcd6d0c0472d7d3e8017a24f2be6"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a4701bcd6d0c0472d7d3e8017a24f2be6">type_min</a></div><div class="ttdeci">__constant uchar16 type_min</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00030">softmax_layer_quantized.cl:30</a></div></div>
-<div class="ttc" id="asymm__helper_8h_xhtml_adae82358534f6f4a2bcbdd833413ccb8"><div class="ttname"><a href="asymm__helper_8h.xhtml#adae82358534f6f4a2bcbdd833413ccb8">asymm_one_over_one_plus_x_for_x_in_0_1</a></div><div class="ttdeci">int16 asymm_one_over_one_plus_x_for_x_in_0_1(int16 a)</div><div class="ttdoc">Calculates  for x in (0, 1). </div><div class="ttdef"><b>Definition:</b> <a href="asymm__helper_8h_source.xhtml#l00243">asymm_helper.h:243</a></div></div>
-<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_abaa48ad818c44e415fd3f9dd0f27bf01"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a></div><div class="ttdeci">#define MAX_OP(x, y, type, size)</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00027">softmax_layer_quantized.cl:27</a></div></div>
+<a href="softmax__layer__quantized_8cl.xhtml">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno">    1</span>&#160;<span class="comment">/*</span></div><div class="line"><a name="l00002"></a><span class="lineno">    2</span>&#160;<span class="comment"> * Copyright (c) 2017-2018 ARM Limited.</span></div><div class="line"><a name="l00003"></a><span class="lineno">    3</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00004"></a><span class="lineno">    4</span>&#160;<span class="comment"> * SPDX-License-Identifier: MIT</span></div><div class="line"><a name="l00005"></a><span class="lineno">    5</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00006"></a><span class="lineno">    6</span>&#160;<span class="comment"> * Permission is hereby granted, free of charge, to any person obtaining a copy</span></div><div class="line"><a name="l00007"></a><span class="lineno">    7</span>&#160;<span class="comment"> * of this software and associated documentation files (the &quot;Software&quot;), to</span></div><div class="line"><a name="l00008"></a><span class="lineno">    8</span>&#160;<span class="comment"> * deal in the Software without restriction, including without limitation the</span></div><div class="line"><a name="l00009"></a><span class="lineno">    9</span>&#160;<span class="comment"> * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or</span></div><div class="line"><a name="l00010"></a><span class="lineno">   10</span>&#160;<span class="comment"> * sell copies of the Software, and to permit persons to whom the Software is</span></div><div class="line"><a name="l00011"></a><span class="lineno">   11</span>&#160;<span class="comment"> * furnished to do so, subject to the following conditions:</span></div><div class="line"><a name="l00012"></a><span class="lineno">   12</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00013"></a><span class="lineno">   13</span>&#160;<span class="comment"> * The above copyright notice and this permission notice shall be included in all</span></div><div class="line"><a name="l00014"></a><span class="lineno">   14</span>&#160;<span class="comment"> * copies or substantial portions of the Software.</span></div><div class="line"><a name="l00015"></a><span class="lineno">   15</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00016"></a><span class="lineno">   16</span>&#160;<span class="comment"> * THE SOFTWARE IS PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR</span></div><div class="line"><a name="l00017"></a><span class="lineno">   17</span>&#160;<span class="comment"> * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,</span></div><div class="line"><a name="l00018"></a><span class="lineno">   18</span>&#160;<span class="comment"> * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE</span></div><div class="line"><a name="l00019"></a><span class="lineno">   19</span>&#160;<span class="comment"> * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER</span></div><div class="line"><a name="l00020"></a><span class="lineno">   20</span>&#160;<span class="comment"> * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,</span></div><div class="line"><a name="l00021"></a><span class="lineno">   21</span>&#160;<span class="comment"> * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE</span></div><div class="line"><a name="l00022"></a><span class="lineno">   22</span>&#160;<span class="comment"> * SOFTWARE.</span></div><div class="line"><a name="l00023"></a><span class="lineno">   23</span>&#160;<span class="comment"> */</span></div><div class="line"><a name="l00024"></a><span class="lineno">   24</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="helpers__asymm_8h.xhtml">helpers_asymm.h</a>&quot;</span></div><div class="line"><a name="l00025"></a><span class="lineno">   25</span>&#160;</div><div class="line"><a name="l00026"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">   26</a></span>&#160;<span class="preprocessor">#define MAX_OP(x, y, type, size) max((x), (y))</span></div><div class="line"><a name="l00027"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">   27</a></span>&#160;<span class="preprocessor">#define ADD_OP(x, y, type, size) ((x) + (y))</span></div><div class="line"><a name="l00028"></a><span class="lineno">   28</span>&#160;</div><div class="line"><a name="l00029"></a><span class="lineno">   29</span>&#160;<span class="comment">/* Number of workitems in dimension 0. 
*/</span></div><div class="line"><a name="l00030"></a><span class="lineno">   30</span>&#160;<span class="preprocessor">#if !defined(GRID_SIZE)</span></div><div class="line"><a name="l00031"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">   31</a></span>&#160;<span class="preprocessor">#define GRID_SIZE 1</span></div><div class="line"><a name="l00032"></a><span class="lineno">   32</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* !defined(GRID_SIZE) */</span><span class="preprocessor"></span></div><div class="line"><a name="l00033"></a><span class="lineno">   33</span>&#160;</div><div class="line"><a name="l00034"></a><span class="lineno">   34</span>&#160;<span class="preprocessor">#if VECTOR_SIZE == 2</span></div><div class="line"><a name="l00035"></a><span class="lineno">   35</span>&#160;__constant uint2 <a class="code" href="softmax__layer__quantized_8cl.xhtml#aa1dd94b8d98f1c6d790bdf0fc5de29e9">idx__</a> = (uint2)(0, 1);</div><div class="line"><a name="l00036"></a><span class="lineno">   36</span>&#160;<span class="preprocessor">#define asymm_mult(a, b) ASYMM_MULT(a, b, 2)</span></div><div class="line"><a name="l00037"></a><span class="lineno">   37</span>&#160;<span class="preprocessor">#define asymm_exp_on_negative_values(a, k_integer_bits) ASYMM_EXP_ON_NEGATIVE_VALUES(a, k_integer_bits, 2)</span></div><div class="line"><a name="l00038"></a><span class="lineno">   38</span>&#160;<span class="preprocessor">#define asymm_rescale(value, src_integer_bits, dst_integer_bits) ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, 2)</span></div><div class="line"><a name="l00039"></a><span class="lineno">   39</span>&#160;</div><div class="line"><a name="l00040"></a><span class="lineno">   40</span>&#160;<span class="preprocessor">#elif VECTOR_SIZE == 4</span></div><div class="line"><a name="l00041"></a><span class="lineno">   41</span>&#160;__constant uint4 <a class="code" href="softmax__layer__quantized_8cl.xhtml#aa1dd94b8d98f1c6d790bdf0fc5de29e9">idx__</a> = (uint4)(0, 1, 2, 3);</div><div class="line"><a name="l00042"></a><span class="lineno">   42</span>&#160;<span class="preprocessor">#define asymm_mult(a, b) ASYMM_MULT(a, b, 4)</span></div><div class="line"><a name="l00043"></a><span class="lineno">   43</span>&#160;<span class="preprocessor">#define asymm_exp_on_negative_values(a, k_integer_bits) ASYMM_EXP_ON_NEGATIVE_VALUES(a, k_integer_bits, 4)</span></div><div class="line"><a name="l00044"></a><span class="lineno">   44</span>&#160;<span class="preprocessor">#define asymm_rescale(value, src_integer_bits, dst_integer_bits) ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, 4)</span></div><div class="line"><a name="l00045"></a><span class="lineno">   45</span>&#160;</div><div class="line"><a name="l00046"></a><span class="lineno">   46</span>&#160;<span class="preprocessor">#elif VECTOR_SIZE == 8</span></div><div class="line"><a name="l00047"></a><span class="lineno">   47</span>&#160;__constant uint8 idx__ = (uint8)(0, 1, 2, 3, 4, 5, 6, 7);</div><div class="line"><a name="l00048"></a><span class="lineno">   48</span>&#160;<span class="preprocessor">#define asymm_mult(a, b) ASYMM_MULT(a, b, 8)</span></div><div class="line"><a name="l00049"></a><span class="lineno">   49</span>&#160;<span class="preprocessor">#define asymm_exp_on_negative_values(a, k_integer_bits) ASYMM_EXP_ON_NEGATIVE_VALUES(a, k_integer_bits, 8)</span></div><div class="line"><a name="l00050"></a><span 
class="lineno">   50</span>&#160;<span class="preprocessor">#define asymm_rescale(value, src_integer_bits, dst_integer_bits) ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, 8)</span></div><div class="line"><a name="l00051"></a><span class="lineno">   51</span>&#160;</div><div class="line"><a name="l00052"></a><span class="lineno">   52</span>&#160;<span class="preprocessor">#else </span><span class="comment">/* VECTOR_SIZE DEFAULT */</span><span class="preprocessor"></span></div><div class="line"><a name="l00053"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#a7c78836761fa3b5b124efea237dac70f">   53</a></span>&#160;<span class="preprocessor">#define VECTOR_SIZE 16</span></div><div class="line"><a name="l00054"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#a372393c380805985b813dbb16d589a64">   54</a></span>&#160;<span class="preprocessor">#define LOG_VECTOR_SIZE 4</span></div><div class="line"><a name="l00055"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#aa1dd94b8d98f1c6d790bdf0fc5de29e9">   55</a></span>&#160;__constant uint16 idx__ = (uint16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);</div><div class="line"><a name="l00056"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#a525a42d38133b1051b8924b456add4a1">   56</a></span>&#160;<span class="preprocessor">#define asymm_mult(a, b) ASYMM_MULT(a, b, 16)</span></div><div class="line"><a name="l00057"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#a54aedfa17c5ac2567107d5f488b0f4af">   57</a></span>&#160;<span class="preprocessor">#define asymm_exp_on_negative_values(a, k_integer_bits) ASYMM_EXP_ON_NEGATIVE_VALUES(a, k_integer_bits, 16)</span></div><div class="line"><a name="l00058"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#ad57ea340cdcfeb2e1375b70c3ae59bae">   58</a></span>&#160;<span class="preprocessor">#define asymm_rescale(value, src_integer_bits, dst_integer_bits) ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, 16)</span></div><div class="line"><a name="l00059"></a><span class="lineno">   59</span>&#160;</div><div class="line"><a name="l00060"></a><span class="lineno">   60</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* VECTOR_SIZE END */</span><span class="preprocessor"></span></div><div class="line"><a name="l00061"></a><span class="lineno">   61</span>&#160;</div><div class="line"><a name="l00062"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#af5987b09a234231612b2b1eded343025">   62</a></span>&#160;<span class="preprocessor">#define VEC_UCHAR VEC_DATA_TYPE(uchar, VECTOR_SIZE)</span></div><div class="line"><a name="l00063"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#a16110bd2b92003141dbaf8a44498ff82">   63</a></span>&#160;<span class="preprocessor">#define VEC_UINT VEC_DATA_TYPE(uint, VECTOR_SIZE)</span></div><div class="line"><a name="l00064"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">   64</a></span>&#160;<span class="preprocessor">#define VEC_INT VEC_DATA_TYPE(int, VECTOR_SIZE)</span></div><div class="line"><a name="l00065"></a><span class="lineno">   65</span>&#160;</div><div class="line"><a name="l00066"></a><span class="lineno">   66</span>&#160;<span class="preprocessor">#if 
defined(DIFF_MIN)</span></div><div class="line"><a name="l00067"></a><span class="lineno">   67</span>&#160;</div><div class="line"><a name="l00068"></a><span class="lineno">   68</span>&#160;<a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> mult_by_quantized_multiplier_serial(<a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> data)</div><div class="line"><a name="l00069"></a><span class="lineno">   69</span>&#160;{</div><div class="line"><a name="l00070"></a><span class="lineno">   70</span>&#160;<span class="preprocessor">#if defined(INPUT_BETA_MULTIPLIER) &amp;&amp; defined(INPUT_BETA_LEFT_SHIFT)</span></div><div class="line"><a name="l00071"></a><span class="lineno">   71</span>&#160;    <span class="keywordflow">if</span>(INPUT_BETA_MULTIPLIER &gt; 1)</div><div class="line"><a name="l00072"></a><span class="lineno">   72</span>&#160;    {</div><div class="line"><a name="l00073"></a><span class="lineno">   73</span>&#160;        <span class="keywordflow">return</span> <a class="code" href="softmax__layer__quantized_8cl.xhtml#a525a42d38133b1051b8924b456add4a1">asymm_mult</a>(data * (1 &lt;&lt; INPUT_BETA_LEFT_SHIFT), INPUT_BETA_MULTIPLIER);</div><div class="line"><a name="l00074"></a><span class="lineno">   74</span>&#160;    }</div><div class="line"><a name="l00075"></a><span class="lineno">   75</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* defined(INPUT_BETA_MULTIPLIER) &amp;&amp; defined(INPUT_BETA_LEFT_SHIFT) */</span><span class="preprocessor"></span></div><div class="line"><a name="l00076"></a><span class="lineno">   76</span>&#160;    <span class="keywordflow">return</span> data;</div><div class="line"><a name="l00077"></a><span class="lineno">   77</span>&#160;}</div><div class="line"><a name="l00078"></a><span class="lineno">   78</span>&#160;</div><div class="line"><a name="l00079"></a><span class="lineno">   79</span>&#160;int4 mult_by_quantized_multiplier_parallel(int4 data)</div><div class="line"><a name="l00080"></a><span class="lineno">   80</span>&#160;{</div><div class="line"><a name="l00081"></a><span class="lineno">   81</span>&#160;<span class="preprocessor">#if defined(INPUT_BETA_MULTIPLIER) &amp;&amp; defined(INPUT_BETA_LEFT_SHIFT)</span></div><div class="line"><a name="l00082"></a><span class="lineno">   82</span>&#160;    <span class="keywordflow">if</span>(INPUT_BETA_MULTIPLIER &gt; 1)</div><div class="line"><a name="l00083"></a><span class="lineno">   83</span>&#160;    {</div><div class="line"><a name="l00084"></a><span class="lineno">   84</span>&#160;        <span class="keywordflow">return</span> <a class="code" href="helpers__asymm_8h.xhtml#a5483aefd5e07244661178bfd3f434448">ASYMM_MULT</a>(data * (1 &lt;&lt; INPUT_BETA_LEFT_SHIFT), INPUT_BETA_MULTIPLIER, 4);</div><div class="line"><a name="l00085"></a><span class="lineno">   85</span>&#160;    }</div><div class="line"><a name="l00086"></a><span class="lineno">   86</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* defined(INPUT_BETA_MULTIPLIER) &amp;&amp; defined(INPUT_BETA_LEFT_SHIFT) */</span><span class="preprocessor"></span></div><div class="line"><a name="l00087"></a><span class="lineno">   87</span>&#160;    <span class="keywordflow">return</span> data;</div><div class="line"><a name="l00088"></a><span class="lineno">   88</span>&#160;}</div><div class="line"><a name="l00089"></a><span class="lineno">   89</span>&#160;</div><div 
class="line"><a name="l00131"></a><span class="lineno">  131</span>&#160;__kernel <span class="keywordtype">void</span> softmax_layer_max_shift_exp_sum_quantized_serial(</div><div class="line"><a name="l00132"></a><span class="lineno">  132</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a6743f0a130e8311e6f5b1a23df102472">src</a>),</div><div class="line"><a name="l00133"></a><span class="lineno">  133</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(maxo),</div><div class="line"><a name="l00134"></a><span class="lineno">  134</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>),</div><div class="line"><a name="l00135"></a><span class="lineno">  135</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a>),</div><div class="line"><a name="l00136"></a><span class="lineno">  136</span>&#160;    uint width)</div><div class="line"><a name="l00137"></a><span class="lineno">  137</span>&#160;{</div><div class="line"><a name="l00138"></a><span class="lineno">  138</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a6743f0a130e8311e6f5b1a23df102472">src</a>  = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(src);</div><div class="line"><a name="l00139"></a><span class="lineno">  139</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>  = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(dst);</div><div class="line"><a name="l00140"></a><span class="lineno">  140</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> maxo = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(maxo);</div><div class="line"><a name="l00141"></a><span class="lineno">  141</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a>  = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(sum);</div><div class="line"><a name="l00142"></a><span class="lineno">  142</span>&#160;</div><div class="line"><a name="l00143"></a><span class="lineno">  143</span>&#160;    <a class="code" href="softmax__layer__quantized_8cl.xhtml#af5987b09a234231612b2b1eded343025">VEC_UCHAR</a> max_val_vec = 0;</div><div class="line"><a name="l00144"></a><span class="lineno">  144</span>&#160;</div><div class="line"><a name="l00145"></a><span class="lineno">  145</span>&#160;    <span class="comment">// Calculate max of row</span></div><div class="line"><a name="l00146"></a><span class="lineno">  146</span>&#160;    <span class="keyword">const</span> uint width4 = width &gt;&gt; <a class="code" 
href="softmax__layer__quantized_8cl.xhtml#a372393c380805985b813dbb16d589a64">LOG_VECTOR_SIZE</a>;</div><div class="line"><a name="l00147"></a><span class="lineno">  147</span>&#160;    <span class="keywordflow">for</span>(uint i = 0; i &lt; width4; i++)</div><div class="line"><a name="l00148"></a><span class="lineno">  148</span>&#160;    {</div><div class="line"><a name="l00149"></a><span class="lineno">  149</span>&#160;        <a class="code" href="softmax__layer__quantized_8cl.xhtml#af5987b09a234231612b2b1eded343025">VEC_UCHAR</a> data = <a class="code" href="helpers_8h.xhtml#a287e2fc366c312b468382c95bb90f91f">VLOAD</a>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a7c78836761fa3b5b124efea237dac70f">VECTOR_SIZE</a>)(0, (__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;src, i &lt;&lt; LOG_VECTOR_SIZE, 0));</div><div class="line"><a name="l00150"></a><span class="lineno">  150</span>&#160;        max_val_vec    = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(data, max_val_vec, uchar, 16);</div><div class="line"><a name="l00151"></a><span class="lineno">  151</span>&#160;    }</div><div class="line"><a name="l00152"></a><span class="lineno">  152</span>&#160;</div><div class="line"><a name="l00153"></a><span class="lineno">  153</span>&#160;<span class="preprocessor">#ifdef NON_MULTIPLE_OF_VECTOR_SIZE</span></div><div class="line"><a name="l00154"></a><span class="lineno">  154</span>&#160;    <span class="comment">// Handle non multiple of 16</span></div><div class="line"><a name="l00155"></a><span class="lineno">  155</span>&#160;    <a class="code" href="softmax__layer__quantized_8cl.xhtml#af5987b09a234231612b2b1eded343025">VEC_UCHAR</a> uchar_min = (<a class="code" href="softmax__layer__quantized_8cl.xhtml#af5987b09a234231612b2b1eded343025">VEC_UCHAR</a>)0;</div><div class="line"><a name="l00156"></a><span class="lineno">  156</span>&#160;    <a class="code" href="softmax__layer__quantized_8cl.xhtml#af5987b09a234231612b2b1eded343025">VEC_UCHAR</a> data      = <a class="code" href="helpers_8h.xhtml#a287e2fc366c312b468382c95bb90f91f">VLOAD</a>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a7c78836761fa3b5b124efea237dac70f">VECTOR_SIZE</a>)(0, (__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;src, width4 &lt;&lt; LOG_VECTOR_SIZE, 0));</div><div class="line"><a name="l00157"></a><span class="lineno">  157</span>&#160;    <a class="code" href="softmax__layer__quantized_8cl.xhtml#af5987b09a234231612b2b1eded343025">VEC_UCHAR</a> widx      = <a class="code" href="fixed__point_8h.xhtml#aa8d95ba04fc73845abc6045952cae5be">CONVERT</a>(((<a class="code" href="softmax__layer__quantized_8cl.xhtml#a16110bd2b92003141dbaf8a44498ff82">VEC_UINT</a>)(width4 &lt;&lt; LOG_VECTOR_SIZE) + idx__) &lt; width, <a class="code" href="softmax__layer__quantized_8cl.xhtml#af5987b09a234231612b2b1eded343025">VEC_UCHAR</a>);</div><div class="line"><a name="l00158"></a><span class="lineno">  158</span>&#160;    max_val_vec         = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val_vec, select(uchar_min, data, widx), uchar, 16);</div><div class="line"><a name="l00159"></a><span class="lineno">  159</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* NON_MULTIPLE_OF_VECTOR_SIZE */</span><span class="preprocessor"></span></div><div 
class="line"><a name="l00160"></a><span class="lineno">  160</span>&#160;</div><div class="line"><a name="l00161"></a><span class="lineno">  161</span>&#160;    <span class="comment">// Perform max reduction</span></div><div class="line"><a name="l00162"></a><span class="lineno">  162</span>&#160;<span class="preprocessor">#if VECTOR_SIZE == 16</span></div><div class="line"><a name="l00163"></a><span class="lineno">  163</span>&#160;    max_val_vec.s01234567 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val_vec.s01234567, max_val_vec.s89ABCDEF, uchar, 8);</div><div class="line"><a name="l00164"></a><span class="lineno">  164</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* VECTOR SIZE 16 END */</span><span class="preprocessor"></span></div><div class="line"><a name="l00165"></a><span class="lineno">  165</span>&#160;<span class="preprocessor">#if VECTOR_SIZE &gt;= 8</span></div><div class="line"><a name="l00166"></a><span class="lineno">  166</span>&#160;    max_val_vec.s0123 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val_vec.s0123, max_val_vec.s4567, uchar, 4);</div><div class="line"><a name="l00167"></a><span class="lineno">  167</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* VECTOR SIZE 8 END */</span><span class="preprocessor"></span></div><div class="line"><a name="l00168"></a><span class="lineno">  168</span>&#160;<span class="preprocessor">#if VECTOR_SIZE &gt;= 4</span></div><div class="line"><a name="l00169"></a><span class="lineno">  169</span>&#160;    max_val_vec.s01 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val_vec.s01, max_val_vec.s23, uchar, 2);</div><div class="line"><a name="l00170"></a><span class="lineno">  170</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* VECTOR SIZE 4 END */</span><span class="preprocessor"></span></div><div class="line"><a name="l00171"></a><span class="lineno">  171</span>&#160;    max_val_vec.s0 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val_vec.s0, max_val_vec.s1, uchar, 1);</div><div class="line"><a name="l00172"></a><span class="lineno">  172</span>&#160;</div><div class="line"><a name="l00173"></a><span class="lineno">  173</span>&#160;    <span class="comment">// Store result</span></div><div class="line"><a name="l00174"></a><span class="lineno">  174</span>&#160;    *((__global uchar *)maxo.<a class="code" href="struct_image.xhtml#acf52c23cbd7424606c10a606524e3e32">ptr</a>) = max_val_vec.s0;</div><div class="line"><a name="l00175"></a><span class="lineno">  175</span>&#160;</div><div class="line"><a name="l00176"></a><span class="lineno">  176</span>&#160;    <span class="comment">// Second part</span></div><div class="line"><a name="l00177"></a><span class="lineno">  177</span>&#160;</div><div class="line"><a name="l00178"></a><span class="lineno">  178</span>&#160;    <span class="comment">// Load max value of 1D logits vector (row)</span></div><div class="line"><a name="l00179"></a><span class="lineno">  179</span>&#160;    <span class="keywordtype">int</span> max_val = convert_int(*((__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;maxo, 0, 0)));</div><div class="line"><a name="l00180"></a><span class="lineno">  
180</span>&#160;</div><div class="line"><a name="l00181"></a><span class="lineno">  181</span>&#160;    <span class="comment">// Set sum vector, Q(EXP_ACCUMULATION_INT_BITS)</span></div><div class="line"><a name="l00182"></a><span class="lineno">  182</span>&#160;    <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> sum1D = 0;</div><div class="line"><a name="l00183"></a><span class="lineno">  183</span>&#160;</div><div class="line"><a name="l00184"></a><span class="lineno">  184</span>&#160;    <span class="comment">// Shift values, exp and sum</span></div><div class="line"><a name="l00185"></a><span class="lineno">  185</span>&#160;    <span class="keywordflow">for</span>(uint i = 0; i &lt; width4; i++)</div><div class="line"><a name="l00186"></a><span class="lineno">  186</span>&#160;    {</div><div class="line"><a name="l00187"></a><span class="lineno">  187</span>&#160;        <a class="code" href="softmax__layer__quantized_8cl.xhtml#af5987b09a234231612b2b1eded343025">VEC_UCHAR</a> data         = <a class="code" href="helpers_8h.xhtml#a287e2fc366c312b468382c95bb90f91f">VLOAD</a>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a7c78836761fa3b5b124efea237dac70f">VECTOR_SIZE</a>)(0, (__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;src, i &lt;&lt; LOG_VECTOR_SIZE, 0));</div><div class="line"><a name="l00188"></a><span class="lineno">  188</span>&#160;        <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> data_fp        = <a class="code" href="fixed__point_8h.xhtml#aa8d95ba04fc73845abc6045952cae5be">CONVERT</a>(data, <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a>);</div><div class="line"><a name="l00189"></a><span class="lineno">  189</span>&#160;        <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> data_diff      = data_fp - max_val;</div><div class="line"><a name="l00190"></a><span class="lineno">  190</span>&#160;        <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> data_diff_mult = mult_by_quantized_multiplier_serial(data_diff);</div><div class="line"><a name="l00191"></a><span class="lineno">  191</span>&#160;        data_fp                = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a54aedfa17c5ac2567107d5f488b0f4af">asymm_exp_on_negative_values</a>(data_diff_mult, SCALED_DIFF_INT_BITS);</div><div class="line"><a name="l00192"></a><span class="lineno">  192</span>&#160;        data_fp                = <a class="code" href="softmax__layer__quantized_8cl.xhtml#ad57ea340cdcfeb2e1375b70c3ae59bae">asymm_rescale</a>(data_fp, 0, EXP_ACCUMULATION_INT_BITS);</div><div class="line"><a name="l00193"></a><span class="lineno">  193</span>&#160;        <a class="code" href="helpers_8h.xhtml#acb282042d1edeeaa3cc979a206f78b54">VSTORE</a>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a7c78836761fa3b5b124efea237dac70f">VECTOR_SIZE</a>)</div><div class="line"><a name="l00194"></a><span class="lineno">  194</span>&#160;        (data_diff, 0, (__global <span class="keywordtype">int</span> *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;dst, i &lt;&lt; LOG_VECTOR_SIZE, 0));</div><div class="line"><a name="l00195"></a><span class="lineno">  195</span>&#160;        sum1D = sum1D + 
select(0, data_fp, data_diff &gt;= (<a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a>)(DIFF_MIN));</div><div class="line"><a name="l00196"></a><span class="lineno">  196</span>&#160;    }</div><div class="line"><a name="l00197"></a><span class="lineno">  197</span>&#160;</div><div class="line"><a name="l00198"></a><span class="lineno">  198</span>&#160;<span class="preprocessor">#ifdef NON_MULTIPLE_OF_VECTOR_SIZE</span></div><div class="line"><a name="l00199"></a><span class="lineno">  199</span>&#160;    <span class="comment">// Handle non multiple of 16</span></div><div class="line"><a name="l00200"></a><span class="lineno">  200</span>&#160;    data                   = <a class="code" href="helpers_8h.xhtml#a287e2fc366c312b468382c95bb90f91f">VLOAD</a>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a7c78836761fa3b5b124efea237dac70f">VECTOR_SIZE</a>)(0, (__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;src, width4 &lt;&lt; LOG_VECTOR_SIZE, 0));</div><div class="line"><a name="l00201"></a><span class="lineno">  201</span>&#160;    <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> data_fp        = <a class="code" href="fixed__point_8h.xhtml#aa8d95ba04fc73845abc6045952cae5be">CONVERT</a>(data, <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a>);</div><div class="line"><a name="l00202"></a><span class="lineno">  202</span>&#160;    <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> data_diff      = data_fp - max_val;</div><div class="line"><a name="l00203"></a><span class="lineno">  203</span>&#160;    <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> data_diff_mult = mult_by_quantized_multiplier_serial(data_diff);</div><div class="line"><a name="l00204"></a><span class="lineno">  204</span>&#160;    data_fp                = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a54aedfa17c5ac2567107d5f488b0f4af">asymm_exp_on_negative_values</a>(data_diff_mult, SCALED_DIFF_INT_BITS);</div><div class="line"><a name="l00205"></a><span class="lineno">  205</span>&#160;    data_fp                = <a class="code" href="softmax__layer__quantized_8cl.xhtml#ad57ea340cdcfeb2e1375b70c3ae59bae">asymm_rescale</a>(data_fp, 0, EXP_ACCUMULATION_INT_BITS);</div><div class="line"><a name="l00206"></a><span class="lineno">  206</span>&#160;    <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> widx_          = <a class="code" href="fixed__point_8h.xhtml#aa8d95ba04fc73845abc6045952cae5be">CONVERT</a>(((<a class="code" href="softmax__layer__quantized_8cl.xhtml#a16110bd2b92003141dbaf8a44498ff82">VEC_UINT</a>)(width4 &lt;&lt; LOG_VECTOR_SIZE) + idx__) &lt; width, <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a>);</div><div class="line"><a name="l00207"></a><span class="lineno">  207</span>&#160;    <a class="code" href="helpers_8h.xhtml#acb282042d1edeeaa3cc979a206f78b54">VSTORE</a>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a7c78836761fa3b5b124efea237dac70f">VECTOR_SIZE</a>)</div><div class="line"><a name="l00208"></a><span class="lineno">  208</span>&#160;    (data_diff, 0, (__global <span class="keywordtype">int</span> *)<a class="code" 
href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;dst, width4 &lt;&lt; LOG_VECTOR_SIZE, 0));</div><div class="line"><a name="l00209"></a><span class="lineno">  209</span>&#160;    data_fp = select(0, data_fp, data_diff &gt;= (<a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a>)(DIFF_MIN));</div><div class="line"><a name="l00210"></a><span class="lineno">  210</span>&#160;    sum1D   = sum1D + select(0, data_fp, widx_);</div><div class="line"><a name="l00211"></a><span class="lineno">  211</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* NON_MULTIPLE_OF_VECTOR_SIZE */</span><span class="preprocessor"></span></div><div class="line"><a name="l00212"></a><span class="lineno">  212</span>&#160;</div><div class="line"><a name="l00213"></a><span class="lineno">  213</span>&#160;    <span class="comment">// Perform sum reduction</span></div><div class="line"><a name="l00214"></a><span class="lineno">  214</span>&#160;<span class="preprocessor">#if VECTOR_SIZE == 16</span></div><div class="line"><a name="l00215"></a><span class="lineno">  215</span>&#160;    sum1D.s01234567 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(sum1D.s01234567, sum1D.s89ABCDEF, uchar, 8);</div><div class="line"><a name="l00216"></a><span class="lineno">  216</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* VECTOR SIZE 16 END */</span><span class="preprocessor"></span></div><div class="line"><a name="l00217"></a><span class="lineno">  217</span>&#160;<span class="preprocessor">#if VECTOR_SIZE &gt;= 8</span></div><div class="line"><a name="l00218"></a><span class="lineno">  218</span>&#160;    sum1D.s0123 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(sum1D.s0123, sum1D.s4567, uchar, 4);</div><div class="line"><a name="l00219"></a><span class="lineno">  219</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* VECTOR SIZE 8 END */</span><span class="preprocessor"></span></div><div class="line"><a name="l00220"></a><span class="lineno">  220</span>&#160;<span class="preprocessor">#if VECTOR_SIZE &gt;= 4</span></div><div class="line"><a name="l00221"></a><span class="lineno">  221</span>&#160;    sum1D.s01 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(sum1D.s01, sum1D.s23, uchar, 2);</div><div class="line"><a name="l00222"></a><span class="lineno">  222</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* VECTOR SIZE 4 END */</span><span class="preprocessor"></span></div><div class="line"><a name="l00223"></a><span class="lineno">  223</span>&#160;    sum1D.s0 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(sum1D.s0, sum1D.s1, uchar, 1);</div><div class="line"><a name="l00224"></a><span class="lineno">  224</span>&#160;</div><div class="line"><a name="l00225"></a><span class="lineno">  225</span>&#160;    <span class="comment">// Calculate and store result</span></div><div class="line"><a name="l00226"></a><span class="lineno">  226</span>&#160;    *((__global <span class="keywordtype">int</span> *)sum.<a class="code" href="struct_image.xhtml#acf52c23cbd7424606c10a606524e3e32">ptr</a>) = sum1D.s0;</div><div class="line"><a name="l00227"></a><span class="lineno">  227</span>&#160;}</div><div class="line"><a 
name="l00228"></a><span class="lineno">  228</span>&#160;</div><div class="line"><a name="l00270"></a><span class="lineno">  270</span>&#160;__kernel <span class="keywordtype">void</span> softmax_layer_max_shift_exp_sum_quantized_parallel(</div><div class="line"><a name="l00271"></a><span class="lineno">  271</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a6743f0a130e8311e6f5b1a23df102472">src</a>),</div><div class="line"><a name="l00272"></a><span class="lineno">  272</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(maxo),</div><div class="line"><a name="l00273"></a><span class="lineno">  273</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>),</div><div class="line"><a name="l00274"></a><span class="lineno">  274</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a>),</div><div class="line"><a name="l00275"></a><span class="lineno">  275</span>&#160;    uint width)</div><div class="line"><a name="l00276"></a><span class="lineno">  276</span>&#160;{</div><div class="line"><a name="l00277"></a><span class="lineno">  277</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a6743f0a130e8311e6f5b1a23df102472">src</a>  = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(src);</div><div class="line"><a name="l00278"></a><span class="lineno">  278</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>  = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(dst);</div><div class="line"><a name="l00279"></a><span class="lineno">  279</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> maxo = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(maxo);</div><div class="line"><a name="l00280"></a><span class="lineno">  280</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a>  = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(sum);</div><div class="line"><a name="l00281"></a><span class="lineno">  281</span>&#160;</div><div class="line"><a name="l00282"></a><span class="lineno">  282</span>&#160;    <span class="keyword">const</span> uint4 <a class="code" href="softmax__layer_8cl.xhtml#a4884a666a1e93fbf8c27bd7d2da3c8bb">idx4</a> = (uint4)(0, 1, 2, 3);</div><div class="line"><a name="l00283"></a><span class="lineno">  283</span>&#160;    <span class="keyword">const</span> uint  lid  = get_local_id(0);</div><div class="line"><a name="l00284"></a><span class="lineno">  284</span>&#160;</div><div class="line"><a name="l00285"></a><span class="lineno">  285</span>&#160;    <span class="comment">// Define 
one temporary vector per work-item.</span></div><div class="line"><a name="l00286"></a><span class="lineno">  286</span>&#160;    __local int4 tmp_local[<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a>];</div><div class="line"><a name="l00287"></a><span class="lineno">  287</span>&#160;    __local uchar max_local;</div><div class="line"><a name="l00288"></a><span class="lineno">  288</span>&#160;</div><div class="line"><a name="l00289"></a><span class="lineno">  289</span>&#160;    uchar4 uchar_min   = (uchar4)0;</div><div class="line"><a name="l00290"></a><span class="lineno">  290</span>&#160;    uchar4 max_val_vec = uchar_min;</div><div class="line"><a name="l00291"></a><span class="lineno">  291</span>&#160;</div><div class="line"><a name="l00292"></a><span class="lineno">  292</span>&#160;    <span class="comment">// Number of elements per work-item.</span></div><div class="line"><a name="l00293"></a><span class="lineno">  293</span>&#160;    <span class="keyword">const</span> uint row = width / <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a>;</div><div class="line"><a name="l00294"></a><span class="lineno">  294</span>&#160;    <span class="comment">// Number of iterations per work-item.</span></div><div class="line"><a name="l00295"></a><span class="lineno">  295</span>&#160;    <span class="keyword">const</span> uint width_ = row &gt;&gt; 2;</div><div class="line"><a name="l00296"></a><span class="lineno">  296</span>&#160;    <span class="comment">// Calculate max of row</span></div><div class="line"><a name="l00297"></a><span class="lineno">  297</span>&#160;    uint i = 0;</div><div class="line"><a name="l00298"></a><span class="lineno">  298</span>&#160;    <span class="keywordflow">for</span>(; i &lt; width_; i++)</div><div class="line"><a name="l00299"></a><span class="lineno">  299</span>&#160;    {</div><div class="line"><a name="l00300"></a><span class="lineno">  300</span>&#160;        uchar4 data_max = vload4(0, (__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;src, i * <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4, 0));</div><div class="line"><a name="l00301"></a><span class="lineno">  301</span>&#160;        max_val_vec     = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(data_max, max_val_vec, uchar, 4);</div><div class="line"><a name="l00302"></a><span class="lineno">  302</span>&#160;    }</div><div class="line"><a name="l00303"></a><span class="lineno">  303</span>&#160;<span class="preprocessor">#ifdef NON_MULTIPLE_OF_GRID_SIZE</span></div><div class="line"><a name="l00304"></a><span class="lineno">  304</span>&#160;    <span class="comment">// How many work-items needed to complete the computation.</span></div><div class="line"><a name="l00305"></a><span class="lineno">  305</span>&#160;    <span class="keywordtype">int</span> boundary_workitems = (width % (<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4)) / 4;</div><div class="line"><a name="l00306"></a><span class="lineno">  306</span>&#160;    <span class="keywordflow">if</span>(lid &lt; boundary_workitems)</div><div class="line"><a name="l00307"></a><span class="lineno">  307</span>&#160;    {</div><div class="line"><a name="l00308"></a><span 
class="lineno">  308</span>&#160;        uchar4 data_max = vload4(0, (__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;src, i * <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4, 0));</div><div class="line"><a name="l00309"></a><span class="lineno">  309</span>&#160;        max_val_vec     = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(data_max, max_val_vec, uchar, 4);</div><div class="line"><a name="l00310"></a><span class="lineno">  310</span>&#160;    }</div><div class="line"><a name="l00311"></a><span class="lineno">  311</span>&#160;<span class="preprocessor">#ifdef NON_MULTIPLE_OF_VECTOR_SIZE</span></div><div class="line"><a name="l00312"></a><span class="lineno">  312</span>&#160;    <span class="keywordflow">if</span>(boundary_workitems == 0)</div><div class="line"><a name="l00313"></a><span class="lineno">  313</span>&#160;    {</div><div class="line"><a name="l00314"></a><span class="lineno">  314</span>&#160;        boundary_workitems = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a>;</div><div class="line"><a name="l00315"></a><span class="lineno">  315</span>&#160;        i--;</div><div class="line"><a name="l00316"></a><span class="lineno">  316</span>&#160;    }</div><div class="line"><a name="l00317"></a><span class="lineno">  317</span>&#160;    <span class="keywordflow">if</span>(lid == (boundary_workitems - 1))</div><div class="line"><a name="l00318"></a><span class="lineno">  318</span>&#160;    {</div><div class="line"><a name="l00319"></a><span class="lineno">  319</span>&#160;        <span class="comment">// Handle non multiple of 4</span></div><div class="line"><a name="l00320"></a><span class="lineno">  320</span>&#160;        uchar4 data_max = vload4(0, (__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;src, (<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * i * 4) + 4, 0));</div><div class="line"><a name="l00321"></a><span class="lineno">  321</span>&#160;        uchar4 widx     = convert_uchar4(((uint4)(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * i * 4) + boundary_workitems * 4 + idx4) &lt; width);</div><div class="line"><a name="l00322"></a><span class="lineno">  322</span>&#160;        max_val_vec     = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val_vec, select(uchar_min, data_max, widx), uchar, 4);</div><div class="line"><a name="l00323"></a><span class="lineno">  323</span>&#160;    }</div><div class="line"><a name="l00324"></a><span class="lineno">  324</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* NON_MULTIPLE_OF_VECTOR_SIZE */</span><span class="preprocessor"></span></div><div class="line"><a name="l00325"></a><span class="lineno">  325</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* NON_MULTIPLE_OF_GRID_SIZE */</span><span class="preprocessor"></span></div><div class="line"><a name="l00326"></a><span class="lineno">  326</span>&#160;    tmp_local[lid] = convert_int4(max_val_vec);</div><div class="line"><a name="l00327"></a><span class="lineno">  327</span>&#160;</div><div class="line"><a name="l00328"></a><span 
class="lineno">  328</span>&#160;    barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00329"></a><span class="lineno">  329</span>&#160;</div><div class="line"><a name="l00330"></a><span class="lineno">  330</span>&#160;    <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 256)</div><div class="line"><a name="l00331"></a><span class="lineno">  331</span>&#160;    {</div><div class="line"><a name="l00332"></a><span class="lineno">  332</span>&#160;        <span class="keywordflow">if</span>(lid &lt; 128)</div><div class="line"><a name="l00333"></a><span class="lineno">  333</span>&#160;        {</div><div class="line"><a name="l00334"></a><span class="lineno">  334</span>&#160;            tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(tmp_local[lid + 128], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00335"></a><span class="lineno">  335</span>&#160;        }</div><div class="line"><a name="l00336"></a><span class="lineno">  336</span>&#160;        barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00337"></a><span class="lineno">  337</span>&#160;    }</div><div class="line"><a name="l00338"></a><span class="lineno">  338</span>&#160;    <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 128)</div><div class="line"><a name="l00339"></a><span class="lineno">  339</span>&#160;    {</div><div class="line"><a name="l00340"></a><span class="lineno">  340</span>&#160;        <span class="keywordflow">if</span>(lid &lt; 64)</div><div class="line"><a name="l00341"></a><span class="lineno">  341</span>&#160;        {</div><div class="line"><a name="l00342"></a><span class="lineno">  342</span>&#160;            tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(tmp_local[lid + 64], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00343"></a><span class="lineno">  343</span>&#160;        }</div><div class="line"><a name="l00344"></a><span class="lineno">  344</span>&#160;        barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00345"></a><span class="lineno">  345</span>&#160;    }</div><div class="line"><a name="l00346"></a><span class="lineno">  346</span>&#160;    <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 64)</div><div class="line"><a name="l00347"></a><span class="lineno">  347</span>&#160;    {</div><div class="line"><a name="l00348"></a><span class="lineno">  348</span>&#160;        <span class="keywordflow">if</span>(lid &lt; 32)</div><div class="line"><a name="l00349"></a><span class="lineno">  349</span>&#160;        {</div><div class="line"><a name="l00350"></a><span class="lineno">  350</span>&#160;            tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(tmp_local[lid + 32], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00351"></a><span class="lineno">  351</span>&#160;        }</div><div class="line"><a name="l00352"></a><span class="lineno">  352</span>&#160;        
barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00353"></a><span class="lineno">  353</span>&#160;    }</div><div class="line"><a name="l00354"></a><span class="lineno">  354</span>&#160;    <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 32)</div><div class="line"><a name="l00355"></a><span class="lineno">  355</span>&#160;    {</div><div class="line"><a name="l00356"></a><span class="lineno">  356</span>&#160;        <span class="keywordflow">if</span>(lid &lt; 16)</div><div class="line"><a name="l00357"></a><span class="lineno">  357</span>&#160;        {</div><div class="line"><a name="l00358"></a><span class="lineno">  358</span>&#160;            tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(tmp_local[lid + 16], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00359"></a><span class="lineno">  359</span>&#160;        }</div><div class="line"><a name="l00360"></a><span class="lineno">  360</span>&#160;        barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00361"></a><span class="lineno">  361</span>&#160;    }</div><div class="line"><a name="l00362"></a><span class="lineno">  362</span>&#160;    <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 16)</div><div class="line"><a name="l00363"></a><span class="lineno">  363</span>&#160;    {</div><div class="line"><a name="l00364"></a><span class="lineno">  364</span>&#160;        <span class="keywordflow">if</span>(lid &lt; 8)</div><div class="line"><a name="l00365"></a><span class="lineno">  365</span>&#160;        {</div><div class="line"><a name="l00366"></a><span class="lineno">  366</span>&#160;            tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(tmp_local[lid + 8], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00367"></a><span class="lineno">  367</span>&#160;        }</div><div class="line"><a name="l00368"></a><span class="lineno">  368</span>&#160;        barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00369"></a><span class="lineno">  369</span>&#160;    }</div><div class="line"><a name="l00370"></a><span class="lineno">  370</span>&#160;    <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 8)</div><div class="line"><a name="l00371"></a><span class="lineno">  371</span>&#160;    {</div><div class="line"><a name="l00372"></a><span class="lineno">  372</span>&#160;        <span class="keywordflow">if</span>(lid &lt; 4)</div><div class="line"><a name="l00373"></a><span class="lineno">  373</span>&#160;        {</div><div class="line"><a name="l00374"></a><span class="lineno">  374</span>&#160;            tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(tmp_local[lid + 4], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00375"></a><span class="lineno">  375</span>&#160;        }</div><div class="line"><a name="l00376"></a><span class="lineno">  376</span>&#160;        barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00377"></a><span 
class="lineno">  377</span>&#160;    }</div><div class="line"><a name="l00378"></a><span class="lineno">  378</span>&#160;    <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 4)</div><div class="line"><a name="l00379"></a><span class="lineno">  379</span>&#160;    {</div><div class="line"><a name="l00380"></a><span class="lineno">  380</span>&#160;        <span class="keywordflow">if</span>(lid &lt; 2)</div><div class="line"><a name="l00381"></a><span class="lineno">  381</span>&#160;        {</div><div class="line"><a name="l00382"></a><span class="lineno">  382</span>&#160;            tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(tmp_local[lid + 2], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00383"></a><span class="lineno">  383</span>&#160;        }</div><div class="line"><a name="l00384"></a><span class="lineno">  384</span>&#160;        barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00385"></a><span class="lineno">  385</span>&#160;    }</div><div class="line"><a name="l00386"></a><span class="lineno">  386</span>&#160;    <span class="keywordflow">if</span>(lid == 0)</div><div class="line"><a name="l00387"></a><span class="lineno">  387</span>&#160;    {</div><div class="line"><a name="l00388"></a><span class="lineno">  388</span>&#160;        max_val_vec     = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(convert_uchar4(tmp_local[lid + 1]), convert_uchar4(tmp_local[lid]), uchar, 4);</div><div class="line"><a name="l00389"></a><span class="lineno">  389</span>&#160;        max_val_vec.s01 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val_vec.s01, max_val_vec.s23, uchar, 2);</div><div class="line"><a name="l00390"></a><span class="lineno">  390</span>&#160;        max_val_vec.s0  = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val_vec.s0, max_val_vec.s1, uchar, 1);</div><div class="line"><a name="l00391"></a><span class="lineno">  391</span>&#160;        max_local       = max_val_vec.s0;</div><div class="line"><a name="l00392"></a><span class="lineno">  392</span>&#160;    }</div><div class="line"><a name="l00393"></a><span class="lineno">  393</span>&#160;    barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00394"></a><span class="lineno">  394</span>&#160;</div><div class="line"><a name="l00395"></a><span class="lineno">  395</span>&#160;    <span class="comment">/* Second section */</span></div><div class="line"><a name="l00396"></a><span class="lineno">  396</span>&#160;</div><div class="line"><a name="l00397"></a><span class="lineno">  397</span>&#160;    <span class="comment">// Set sum vector</span></div><div class="line"><a name="l00398"></a><span class="lineno">  398</span>&#160;    int4 sum1D   = 0;</div><div class="line"><a name="l00399"></a><span class="lineno">  399</span>&#160;    <span class="keywordtype">int</span>  max_val = convert_int(max_local);</div><div class="line"><a name="l00400"></a><span class="lineno">  400</span>&#160;</div><div class="line"><a name="l00401"></a><span class="lineno">  401</span>&#160;    <span class="comment">// Shift values, exp and sum</span></div><div class="line"><a name="l00402"></a><span class="lineno">  
402</span>&#160;    <span class="keywordflow">for</span>(i = 0; i &lt; width_; i++)</div><div class="line"><a name="l00403"></a><span class="lineno">  403</span>&#160;    {</div><div class="line"><a name="l00404"></a><span class="lineno">  404</span>&#160;        uchar4 data         = vload4(0, (__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;src, i * <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4, 0));</div><div class="line"><a name="l00405"></a><span class="lineno">  405</span>&#160;        int4 data_fp        = convert_int4(data);</div><div class="line"><a name="l00406"></a><span class="lineno">  406</span>&#160;        int4 data_diff      = data_fp - max_val;</div><div class="line"><a name="l00407"></a><span class="lineno">  407</span>&#160;        int4 data_diff_mult = mult_by_quantized_multiplier_parallel(data_diff);</div><div class="line"><a name="l00408"></a><span class="lineno">  408</span>&#160;        data_fp             = <a class="code" href="helpers__asymm_8h.xhtml#a3a4f1b5d8f1cd67ac31bc62c9a6f4aa8">ASYMM_EXP_ON_NEGATIVE_VALUES</a>(data_diff_mult, SCALED_DIFF_INT_BITS, 4);</div><div class="line"><a name="l00409"></a><span class="lineno">  409</span>&#160;        data_fp             = <a class="code" href="helpers__asymm_8h.xhtml#a98585f1bb84dea90aecbf59785c46151">ASYMM_RESCALE</a>(data_fp, 0, EXP_ACCUMULATION_INT_BITS, 4);</div><div class="line"><a name="l00410"></a><span class="lineno">  410</span>&#160;        vstore4(data_diff, 0, (__global <span class="keywordtype">int</span> *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;dst, i * GRID_SIZE * 4, 0));</div><div class="line"><a name="l00411"></a><span class="lineno">  411</span>&#160;        sum1D = sum1D + select(0, data_fp, data_diff &gt;= (int4)(DIFF_MIN));</div><div class="line"><a name="l00412"></a><span class="lineno">  412</span>&#160;    }</div><div class="line"><a name="l00413"></a><span class="lineno">  413</span>&#160;<span class="preprocessor">#ifdef NON_MULTIPLE_OF_GRID_SIZE</span></div><div class="line"><a name="l00414"></a><span class="lineno">  414</span>&#160;    boundary_workitems = (width % (<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4)) / 4;</div><div class="line"><a name="l00415"></a><span class="lineno">  415</span>&#160;    <span class="keywordflow">if</span>(lid &lt; boundary_workitems)</div><div class="line"><a name="l00416"></a><span class="lineno">  416</span>&#160;    {</div><div class="line"><a name="l00417"></a><span class="lineno">  417</span>&#160;        uchar4 data         = vload4(0, (__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;src, i * <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4, 0));</div><div class="line"><a name="l00418"></a><span class="lineno">  418</span>&#160;        int4 data_fp        = convert_int4(data);</div><div class="line"><a name="l00419"></a><span class="lineno">  419</span>&#160;        int4 data_diff      = data_fp - max_val;</div><div class="line"><a name="l00420"></a><span class="lineno">  420</span>&#160;        int4 data_diff_mult = mult_by_quantized_multiplier_parallel(data_diff);</div><div class="line"><a name="l00421"></a><span class="lineno">  421</span>&#160;        data_fp             = <a class="code" 
href="helpers__asymm_8h.xhtml#a3a4f1b5d8f1cd67ac31bc62c9a6f4aa8">ASYMM_EXP_ON_NEGATIVE_VALUES</a>(data_diff_mult, SCALED_DIFF_INT_BITS, 4);</div><div class="line"><a name="l00422"></a><span class="lineno">  422</span>&#160;        data_fp             = <a class="code" href="helpers__asymm_8h.xhtml#a98585f1bb84dea90aecbf59785c46151">ASYMM_RESCALE</a>(data_fp, 0, EXP_ACCUMULATION_INT_BITS, 4);</div><div class="line"><a name="l00423"></a><span class="lineno">  423</span>&#160;        vstore4(data_diff, 0, (__global <span class="keywordtype">int</span> *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;dst, i * GRID_SIZE * 4, 0));</div><div class="line"><a name="l00424"></a><span class="lineno">  424</span>&#160;        sum1D = sum1D + select(0, data_fp, data_diff &gt;= (int4)(DIFF_MIN));</div><div class="line"><a name="l00425"></a><span class="lineno">  425</span>&#160;    }</div><div class="line"><a name="l00426"></a><span class="lineno">  426</span>&#160;<span class="preprocessor">#ifdef NON_MULTIPLE_OF_VECTOR_SIZE</span></div><div class="line"><a name="l00427"></a><span class="lineno">  427</span>&#160;    <span class="keywordflow">if</span>(boundary_workitems == 0)</div><div class="line"><a name="l00428"></a><span class="lineno">  428</span>&#160;    {</div><div class="line"><a name="l00429"></a><span class="lineno">  429</span>&#160;        boundary_workitems = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a>;</div><div class="line"><a name="l00430"></a><span class="lineno">  430</span>&#160;        i--;</div><div class="line"><a name="l00431"></a><span class="lineno">  431</span>&#160;    }</div><div class="line"><a name="l00432"></a><span class="lineno">  432</span>&#160;    <span class="keywordflow">if</span>(lid == (boundary_workitems - 1))</div><div class="line"><a name="l00433"></a><span class="lineno">  433</span>&#160;    {</div><div class="line"><a name="l00434"></a><span class="lineno">  434</span>&#160;        <span class="comment">// Handle non multiple of vector size ((GRID_SIZE * i * 4) + 4, 0); move 4 float positions ahead, *4 is due to the stride</span></div><div class="line"><a name="l00435"></a><span class="lineno">  435</span>&#160;        uchar4 data         = vload4(0, (__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;src, i * <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4 + 4, 0));</div><div class="line"><a name="l00436"></a><span class="lineno">  436</span>&#160;        int4 data_fp        = convert_int4(data);</div><div class="line"><a name="l00437"></a><span class="lineno">  437</span>&#160;        int4 data_diff      = data_fp - max_val;</div><div class="line"><a name="l00438"></a><span class="lineno">  438</span>&#160;        int4 data_diff_mult = mult_by_quantized_multiplier_parallel(data_diff);</div><div class="line"><a name="l00439"></a><span class="lineno">  439</span>&#160;        data_fp             = <a class="code" href="helpers__asymm_8h.xhtml#a3a4f1b5d8f1cd67ac31bc62c9a6f4aa8">ASYMM_EXP_ON_NEGATIVE_VALUES</a>(data_diff_mult, SCALED_DIFF_INT_BITS, 4);</div><div class="line"><a name="l00440"></a><span class="lineno">  440</span>&#160;        data_fp             = <a class="code" href="helpers__asymm_8h.xhtml#a98585f1bb84dea90aecbf59785c46151">ASYMM_RESCALE</a>(data_fp, 0, EXP_ACCUMULATION_INT_BITS, 4);</div><div class="line"><a 
name="l00441"></a><span class="lineno">  441</span>&#160;        int4 widx           = convert_int4(((uint4)(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * i * 4) + boundary_workitems * 4 + idx4) &lt; width);</div><div class="line"><a name="l00442"></a><span class="lineno">  442</span>&#160;        data_fp             = select(0, data_fp, widx);</div><div class="line"><a name="l00443"></a><span class="lineno">  443</span>&#160;        vstore4(data_diff, 0, (__global <span class="keywordtype">int</span> *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;dst, i * <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4 + 4, 0));</div><div class="line"><a name="l00444"></a><span class="lineno">  444</span>&#160;        sum1D = sum1D + select(0, data_fp, data_diff &gt;= (int4)(DIFF_MIN));</div><div class="line"><a name="l00445"></a><span class="lineno">  445</span>&#160;    }</div><div class="line"><a name="l00446"></a><span class="lineno">  446</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* NON_MULTIPLE_OF_VECTOR_SIZE */</span><span class="preprocessor"></span></div><div class="line"><a name="l00447"></a><span class="lineno">  447</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* NON_MULTIPLE_OF_GRID_SIZE */</span><span class="preprocessor"></span></div><div class="line"><a name="l00448"></a><span class="lineno">  448</span>&#160;    tmp_local[lid] = sum1D;</div><div class="line"><a name="l00449"></a><span class="lineno">  449</span>&#160;</div><div class="line"><a name="l00450"></a><span class="lineno">  450</span>&#160;    barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00451"></a><span class="lineno">  451</span>&#160;</div><div class="line"><a name="l00452"></a><span class="lineno">  452</span>&#160;    <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 256)</div><div class="line"><a name="l00453"></a><span class="lineno">  453</span>&#160;    {</div><div class="line"><a name="l00454"></a><span class="lineno">  454</span>&#160;        <span class="keywordflow">if</span>(lid &lt; 128)</div><div class="line"><a name="l00455"></a><span class="lineno">  455</span>&#160;        {</div><div class="line"><a name="l00456"></a><span class="lineno">  456</span>&#160;            tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(tmp_local[lid + 128], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00457"></a><span class="lineno">  457</span>&#160;        }</div><div class="line"><a name="l00458"></a><span class="lineno">  458</span>&#160;        barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00459"></a><span class="lineno">  459</span>&#160;    }</div><div class="line"><a name="l00460"></a><span class="lineno">  460</span>&#160;    <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 128)</div><div class="line"><a name="l00461"></a><span class="lineno">  461</span>&#160;    {</div><div class="line"><a name="l00462"></a><span class="lineno">  462</span>&#160;        <span class="keywordflow">if</span>(lid &lt; 64)</div><div class="line"><a 
name="l00463"></a><span class="lineno">  463</span>&#160;        {</div><div class="line"><a name="l00464"></a><span class="lineno">  464</span>&#160;            tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(tmp_local[lid + 64], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00465"></a><span class="lineno">  465</span>&#160;        }</div><div class="line"><a name="l00466"></a><span class="lineno">  466</span>&#160;        barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00467"></a><span class="lineno">  467</span>&#160;    }</div><div class="line"><a name="l00468"></a><span class="lineno">  468</span>&#160;    <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 64)</div><div class="line"><a name="l00469"></a><span class="lineno">  469</span>&#160;    {</div><div class="line"><a name="l00470"></a><span class="lineno">  470</span>&#160;        <span class="keywordflow">if</span>(lid &lt; 32)</div><div class="line"><a name="l00471"></a><span class="lineno">  471</span>&#160;        {</div><div class="line"><a name="l00472"></a><span class="lineno">  472</span>&#160;            tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(tmp_local[lid + 32], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00473"></a><span class="lineno">  473</span>&#160;        }</div><div class="line"><a name="l00474"></a><span class="lineno">  474</span>&#160;        barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00475"></a><span class="lineno">  475</span>&#160;    }</div><div class="line"><a name="l00476"></a><span class="lineno">  476</span>&#160;    <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 32)</div><div class="line"><a name="l00477"></a><span class="lineno">  477</span>&#160;    {</div><div class="line"><a name="l00478"></a><span class="lineno">  478</span>&#160;        <span class="keywordflow">if</span>(lid &lt; 16)</div><div class="line"><a name="l00479"></a><span class="lineno">  479</span>&#160;        {</div><div class="line"><a name="l00480"></a><span class="lineno">  480</span>&#160;            tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(tmp_local[lid + 16], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00481"></a><span class="lineno">  481</span>&#160;        }</div><div class="line"><a name="l00482"></a><span class="lineno">  482</span>&#160;        barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00483"></a><span class="lineno">  483</span>&#160;    }</div><div class="line"><a name="l00484"></a><span class="lineno">  484</span>&#160;    <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 16)</div><div class="line"><a name="l00485"></a><span class="lineno">  485</span>&#160;    {</div><div class="line"><a name="l00486"></a><span class="lineno">  486</span>&#160;        <span class="keywordflow">if</span>(lid &lt; 8)</div><div class="line"><a name="l00487"></a><span class="lineno">  487</span>&#160;        
{</div><div class="line"><a name="l00488"></a><span class="lineno">  488</span>&#160;            tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(tmp_local[lid + 8], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00489"></a><span class="lineno">  489</span>&#160;        }</div><div class="line"><a name="l00490"></a><span class="lineno">  490</span>&#160;        barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00491"></a><span class="lineno">  491</span>&#160;    }</div><div class="line"><a name="l00492"></a><span class="lineno">  492</span>&#160;    <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 8)</div><div class="line"><a name="l00493"></a><span class="lineno">  493</span>&#160;    {</div><div class="line"><a name="l00494"></a><span class="lineno">  494</span>&#160;        <span class="keywordflow">if</span>(lid &lt; 4)</div><div class="line"><a name="l00495"></a><span class="lineno">  495</span>&#160;        {</div><div class="line"><a name="l00496"></a><span class="lineno">  496</span>&#160;            tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(tmp_local[lid + 4], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00497"></a><span class="lineno">  497</span>&#160;        }</div><div class="line"><a name="l00498"></a><span class="lineno">  498</span>&#160;        barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00499"></a><span class="lineno">  499</span>&#160;    }</div><div class="line"><a name="l00500"></a><span class="lineno">  500</span>&#160;    <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 4)</div><div class="line"><a name="l00501"></a><span class="lineno">  501</span>&#160;    {</div><div class="line"><a name="l00502"></a><span class="lineno">  502</span>&#160;        <span class="keywordflow">if</span>(lid &lt; 2)</div><div class="line"><a name="l00503"></a><span class="lineno">  503</span>&#160;        {</div><div class="line"><a name="l00504"></a><span class="lineno">  504</span>&#160;            tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(tmp_local[lid + 2], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00505"></a><span class="lineno">  505</span>&#160;        }</div><div class="line"><a name="l00506"></a><span class="lineno">  506</span>&#160;        barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00507"></a><span class="lineno">  507</span>&#160;    }</div><div class="line"><a name="l00508"></a><span class="lineno">  508</span>&#160;    <span class="keywordflow">if</span>(lid == 0)</div><div class="line"><a name="l00509"></a><span class="lineno">  509</span>&#160;    {</div><div class="line"><a name="l00510"></a><span class="lineno">  510</span>&#160;        sum1D = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(tmp_local[lid + 1], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00511"></a><span class="lineno">  511</span>&#160;        <span class="comment">// Perform max 
reduction</span></div><div class="line"><a name="l00512"></a><span class="lineno">  512</span>&#160;        sum1D.s01                  = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(sum1D.s01, sum1D.s23, <span class="keywordtype">int</span>, 2);</div><div class="line"><a name="l00513"></a><span class="lineno">  513</span>&#160;        sum1D.s0                   = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(sum1D.s0, sum1D.s1, <span class="keywordtype">int</span>, 1);</div><div class="line"><a name="l00514"></a><span class="lineno">  514</span>&#160;        *((__global <span class="keywordtype">int</span> *)sum.<a class="code" href="struct_image.xhtml#acf52c23cbd7424606c10a606524e3e32">ptr</a>) = sum1D.s0;</div><div class="line"><a name="l00515"></a><span class="lineno">  515</span>&#160;    }</div><div class="line"><a name="l00516"></a><span class="lineno">  516</span>&#160;}</div><div class="line"><a name="l00517"></a><span class="lineno">  517</span>&#160;</div><div class="line"><a name="l00549"></a><span class="lineno">  549</span>&#160;__kernel <span class="keywordtype">void</span> softmax_layer_norm_quantized(</div><div class="line"><a name="l00550"></a><span class="lineno">  550</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a6743f0a130e8311e6f5b1a23df102472">src</a>),</div><div class="line"><a name="l00551"></a><span class="lineno">  551</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a>),</div><div class="line"><a name="l00552"></a><span class="lineno">  552</span>&#160;    <a class="code" href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>))</div><div class="line"><a name="l00553"></a><span class="lineno">  553</span>&#160;{</div><div class="line"><a name="l00554"></a><span class="lineno">  554</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a6743f0a130e8311e6f5b1a23df102472">src</a> = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(src);</div><div class="line"><a name="l00555"></a><span class="lineno">  555</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a> = <a class="code" href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(dst);</div><div class="line"><a name="l00556"></a><span class="lineno">  556</span>&#160;    <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a> = <a class="code" href="helpers_8h.xhtml#a071aa45af973feac43b14f62e54a6fce">CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP</a>(sum);</div><div class="line"><a name="l00557"></a><span class="lineno">  557</span>&#160;</div><div class="line"><a name="l00558"></a><span class="lineno">  558</span>&#160;    <span class="comment">// Load max value of 1D 
logits vector (row)</span></div><div class="line"><a name="l00559"></a><span class="lineno">  559</span>&#160;    <span class="keywordtype">int</span> sum_val = *((__global <span class="keywordtype">int</span> *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;sum, 0, get_global_id(1)));</div><div class="line"><a name="l00560"></a><span class="lineno">  560</span>&#160;</div><div class="line"><a name="l00561"></a><span class="lineno">  561</span>&#160;    <span class="comment">// It will be better to calculate this in prev layer and pass here as parameter</span></div><div class="line"><a name="l00562"></a><span class="lineno">  562</span>&#160;    uint  sum_val_u               = convert_uint(sum_val);</div><div class="line"><a name="l00563"></a><span class="lineno">  563</span>&#160;    <span class="keywordtype">int</span>   headroom_plus_one       = <a class="code" href="namespacearm__compute_1_1test_1_1fixed__point__arithmetic_1_1detail.xhtml#aa43c67a536c661ec28398d06bc8ba5af">clz</a>(sum_val_u);</div><div class="line"><a name="l00564"></a><span class="lineno">  564</span>&#160;    <span class="keywordtype">int</span>   num_bits_over_unit      = EXP_ACCUMULATION_INT_BITS - headroom_plus_one;</div><div class="line"><a name="l00565"></a><span class="lineno">  565</span>&#160;    <span class="keywordtype">int</span>   shifted_sum_minus_one_1 = convert_int((sum_val_u &lt;&lt; headroom_plus_one) - (1u &lt;&lt; 31));</div><div class="line"><a name="l00566"></a><span class="lineno">  566</span>&#160;    int16 shifted_sum_minus_one   = shifted_sum_minus_one_1;</div><div class="line"><a name="l00567"></a><span class="lineno">  567</span>&#160;    int16 shifted_scale           = <a class="code" href="helpers__asymm_8h.xhtml#ae77f34e1316d52c1ee84c35be9efb0d8">ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1</a>(shifted_sum_minus_one, 16);</div><div class="line"><a name="l00568"></a><span class="lineno">  568</span>&#160;</div><div class="line"><a name="l00569"></a><span class="lineno">  569</span>&#160;    <span class="comment">// It was already calculated in prev layer, should be stored into tmp output and reused</span></div><div class="line"><a name="l00570"></a><span class="lineno">  570</span>&#160;    int16 data_diff      = vload16(0, (__global <span class="keywordtype">int</span> *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;src, 0, 0));</div><div class="line"><a name="l00571"></a><span class="lineno">  571</span>&#160;    int16 data_diff_mult = data_diff;</div><div class="line"><a name="l00572"></a><span class="lineno">  572</span>&#160;<span class="preprocessor">#if defined(INPUT_BETA_MULTIPLIER) &amp;&amp; defined(INPUT_BETA_LEFT_SHIFT)</span></div><div class="line"><a name="l00573"></a><span class="lineno">  573</span>&#160;    <span class="keywordflow">if</span>(INPUT_BETA_MULTIPLIER &gt; 1)</div><div class="line"><a name="l00574"></a><span class="lineno">  574</span>&#160;    {</div><div class="line"><a name="l00575"></a><span class="lineno">  575</span>&#160;        data_diff_mult = <a class="code" href="helpers__asymm_8h.xhtml#a5483aefd5e07244661178bfd3f434448">ASYMM_MULT</a>(data_diff * (1 &lt;&lt; INPUT_BETA_LEFT_SHIFT), INPUT_BETA_MULTIPLIER, 16);</div><div class="line"><a name="l00576"></a><span class="lineno">  576</span>&#160;    }</div><div class="line"><a name="l00577"></a><span class="lineno">  577</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* 
defined(INPUT_BETA_MULTIPLIER) &amp;&amp; defined(INPUT_BETA_LEFT_SHIFT) */</span><span class="preprocessor"></span></div><div class="line"><a name="l00578"></a><span class="lineno">  578</span>&#160;    int16 data = <a class="code" href="helpers__asymm_8h.xhtml#a3a4f1b5d8f1cd67ac31bc62c9a6f4aa8">ASYMM_EXP_ON_NEGATIVE_VALUES</a>(data_diff_mult, SCALED_DIFF_INT_BITS, 16);</div><div class="line"><a name="l00579"></a><span class="lineno">  579</span>&#160;</div><div class="line"><a name="l00580"></a><span class="lineno">  580</span>&#160;    data = <a class="code" href="helpers__asymm_8h.xhtml#a5483aefd5e07244661178bfd3f434448">ASYMM_MULT</a>(shifted_scale, data, 16);</div><div class="line"><a name="l00581"></a><span class="lineno">  581</span>&#160;    data = <a class="code" href="helpers__asymm_8h.xhtml#aa43fc359dea64362f3016384f4269845">ASYMM_ROUNDING_DIVIDE_BY_POW2</a>(data, num_bits_over_unit + 31 - 8, 16);</div><div class="line"><a name="l00582"></a><span class="lineno">  582</span>&#160;    data = select(0, data, data_diff &gt;= (int16)(DIFF_MIN));</div><div class="line"><a name="l00583"></a><span class="lineno">  583</span>&#160;    vstore16(convert_uchar16_sat(data), 0, (__global uchar *)<a class="code" href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;dst, 0, 0));</div><div class="line"><a name="l00584"></a><span class="lineno">  584</span>&#160;}</div><div class="line"><a name="l00585"></a><span class="lineno">  585</span>&#160;</div><div class="line"><a name="l00586"></a><span class="lineno">  586</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* defined(DIFF_MIN) */</span><span class="preprocessor"></span></div><div class="ttc" id="helpers_8h_xhtml_a071aa45af973feac43b14f62e54a6fce"><div class="ttname"><a href="helpers_8h.xhtml#a071aa45af973feac43b14f62e54a6fce">CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP</a></div><div class="ttdeci">#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name)</div><div class="ttdef"><b>Definition:</b> <a href="helpers_8h_source.xhtml#l00111">helpers.h:111</a></div></div>
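The serial kernel above (softmax_layer_max_shift_exp_sum_quantized) makes two passes over each row of uchar logits: a vectorised max reduction, then a pass that subtracts the max, applies a fixed-point exponential to the non-positive differences, rescales them into the Q(EXP_ACCUMULATION_INT_BITS) accumulator format and accumulates a sum from which entries below DIFF_MIN are masked out. The scalar C sketch below is only a reference model of those two passes under stated assumptions: the ASYMM_* fixed-point helpers are stood in for by plain floating point, beta and the input quantisation scale are folded into a single scale argument, and the function name is illustrative rather than Compute Library API.
<pre class="fragment">
/* Hedged scalar reference for the "max, shift, exp, sum" pass of a quantized
 * softmax row. The library's fixed-point helpers are replaced by floats. */
#include <stdint.h>
#include <stddef.h>
#include <math.h>

static void softmax_row_max_exp_sum_ref(const uint8_t *src, int *diff_out,
                                        float *exp_out, size_t width,
                                        float scale,   /* stands in for beta * input scale */
                                        int diff_min,  /* differences below this are masked */
                                        float *sum_out)
{
    /* First pass: row maximum (the kernel does this VECTOR_SIZE lanes at a time). */
    uint8_t max_val = 0;
    for (size_t i = 0; i < width; i++)
        max_val = src[i] > max_val ? src[i] : max_val;

    /* Second pass: shift by the max, exponentiate, accumulate a masked sum.
     * The kernel stores data_diff to dst and keeps the sum in fixed point;
     * here both stay in plain int/float for clarity. */
    float sum = 0.0f;
    for (size_t i = 0; i < width; i++) {
        int diff    = (int)src[i] - (int)max_val;      /* always <= 0 */
        diff_out[i] = diff;
        float e     = (diff >= diff_min) ? expf(scale * (float)diff) : 0.0f;
        exp_out[i]  = e;
        sum        += e;                               /* mirrors select(0, data_fp, ...) */
    }
    *sum_out = sum;
}
</pre>
The kernel does the same work VECTOR_SIZE elements at a time and, under NON_MULTIPLE_OF_VECTOR_SIZE, masks the out-of-bounds lanes of the last vector with a width comparison; the scalar loop above has no such tail to handle.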
+<div class="ttc" id="fixed__point_8h_xhtml_aa8d95ba04fc73845abc6045952cae5be"><div class="ttname"><a href="fixed__point_8h.xhtml#aa8d95ba04fc73845abc6045952cae5be">CONVERT</a></div><div class="ttdeci">#define CONVERT(x, type)</div><div class="ttdef"><b>Definition:</b> <a href="fixed__point_8h_source.xhtml#l00098">fixed_point.h:98</a></div></div>
+<div class="ttc" id="helpers_8h_xhtml_a287e2fc366c312b468382c95bb90f91f"><div class="ttname"><a href="helpers_8h.xhtml#a287e2fc366c312b468382c95bb90f91f">VLOAD</a></div><div class="ttdeci">#define VLOAD(size)</div><div class="ttdef"><b>Definition:</b> <a href="helpers_8h_source.xhtml#l00040">helpers.h:40</a></div></div>
+<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a16110bd2b92003141dbaf8a44498ff82"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a16110bd2b92003141dbaf8a44498ff82">VEC_UINT</a></div><div class="ttdeci">#define VEC_UINT</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00063">softmax_layer_quantized.cl:63</a></div></div>
+<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a525a42d38133b1051b8924b456add4a1"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a525a42d38133b1051b8924b456add4a1">asymm_mult</a></div><div class="ttdeci">#define asymm_mult(a, b)</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00056">softmax_layer_quantized.cl:56</a></div></div>
+<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_aa1dd94b8d98f1c6d790bdf0fc5de29e9"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#aa1dd94b8d98f1c6d790bdf0fc5de29e9">idx__</a></div><div class="ttdeci">__constant uint16 idx__</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00055">softmax_layer_quantized.cl:55</a></div></div>
+<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a7c78836761fa3b5b124efea237dac70f"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a7c78836761fa3b5b124efea237dac70f">VECTOR_SIZE</a></div><div class="ttdeci">#define VECTOR_SIZE</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00053">softmax_layer_quantized.cl:53</a></div></div>
+<div class="ttc" id="helpers__asymm_8h_xhtml_a5483aefd5e07244661178bfd3f434448"><div class="ttname"><a href="helpers__asymm_8h.xhtml#a5483aefd5e07244661178bfd3f434448">ASYMM_MULT</a></div><div class="ttdeci">#define ASYMM_MULT(a, b, size)</div><div class="ttdef"><b>Definition:</b> <a href="helpers__asymm_8h_source.xhtml#l00295">helpers_asymm.h:295</a></div></div>
+<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_abaa48ad818c44e415fd3f9dd0f27bf01"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a></div><div class="ttdeci">#define MAX_OP(x, y, type, size)</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00026">softmax_layer_quantized.cl:26</a></div></div>
 <div class="ttc" id="reduction__operation_8cl_xhtml_ab0df00f5333da51860deb93deb44a782"><div class="ttname"><a href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a></div><div class="ttdeci">DATA_TYPE sum(__global const DATA_TYPE *input)</div><div class="ttdoc">Calculate sum of a vector. </div><div class="ttdef"><b>Definition:</b> <a href="reduction__operation_8cl_source.xhtml#l00052">reduction_operation.cl:52</a></div></div>
-<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a44206a4e5783c7aabacec88aad878c88"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a></div><div class="ttdeci">#define ADD_OP(x, y, type, size)</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00028">softmax_layer_quantized.cl:28</a></div></div>
+<div class="ttc" id="helpers__asymm_8h_xhtml_aa43fc359dea64362f3016384f4269845"><div class="ttname"><a href="helpers__asymm_8h.xhtml#aa43fc359dea64362f3016384f4269845">ASYMM_ROUNDING_DIVIDE_BY_POW2</a></div><div class="ttdeci">#define ASYMM_ROUNDING_DIVIDE_BY_POW2(x, exponent, size)</div><div class="ttdef"><b>Definition:</b> <a href="helpers__asymm_8h_source.xhtml#l00294">helpers_asymm.h:294</a></div></div>
+<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a44206a4e5783c7aabacec88aad878c88"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a></div><div class="ttdeci">#define ADD_OP(x, y, type, size)</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00027">softmax_layer_quantized.cl:27</a></div></div>
 <div class="ttc" id="helpers_8h_xhtml_a009469e4d9b8fce3b6d5e97d2077827d"><div class="ttname"><a href="helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a></div><div class="ttdeci">__global uchar * offset(const Image *img, int x, int y)</div><div class="ttdoc">Get the pointer position of a Image. </div><div class="ttdef"><b>Definition:</b> <a href="helpers_8h_source.xhtml#l00301">helpers.h:301</a></div></div>
-<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a79bd9092e1a23c550d4dc1aeb9b746ad"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a79bd9092e1a23c550d4dc1aeb9b746ad">softmax_layer_max_quantized</a></div><div class="ttdeci">__kernel void softmax_layer_max_quantized(__global uchar *src_ptr, uint src_stride_x, uint src_step_x, uint src_stride_y, uint src_step_y, uint src_stride_z, uint src_step_z, uint src_offset_first_element_in_bytes, __global uchar *dst_ptr, uint dst_stride_x, uint dst_step_x, uint dst_stride_y, uint dst_step_y, uint dst_stride_z, uint dst_step_z, uint dst_offset_first_element_in_bytes, uint width)</div><div class="ttdoc">Identifies the maximum value across the 1st dimension. </div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00055">softmax_layer_quantized.cl:55</a></div></div>
-<div class="ttc" id="asymm__helper_8h_xhtml_a4496e3d044dda78318d27aa7f9c0e810"><div class="ttname"><a href="asymm__helper_8h.xhtml#a4496e3d044dda78318d27aa7f9c0e810">asymm_rescale</a></div><div class="ttdeci">int16 asymm_rescale(int16 value, int src_integer_bits, int dst_integer_bits)</div><div class="ttdoc">Considering the integer value as fixed-point, change the number of integer bits and update value acco...</div><div class="ttdef"><b>Definition:</b> <a href="asymm__helper_8h_source.xhtml#l00269">asymm_helper.h:269</a></div></div>
-<div class="ttc" id="fixed__point_8h_xhtml_a26a8ca855cd14b1867173d301baf6c4f"><div class="ttname"><a href="fixed__point_8h.xhtml#a26a8ca855cd14b1867173d301baf6c4f">qs16</a></div><div class="ttdeci">short qs16</div><div class="ttdef"><b>Definition:</b> <a href="fixed__point_8h_source.xhtml#l00037">fixed_point.h:37</a></div></div>
-<div class="ttc" id="asymm__helper_8h_xhtml_ab98c50e16a18dd439b2b7ecdd1ecd81c"><div class="ttname"><a href="asymm__helper_8h.xhtml#ab98c50e16a18dd439b2b7ecdd1ecd81c">asymm_mult</a></div><div class="ttdeci">int16 asymm_mult(int16 a, int16 b)</div><div class="ttdoc">Fixed-point multiplication. </div><div class="ttdef"><b>Definition:</b> <a href="asymm__helper_8h_source.xhtml#l00167">asymm_helper.h:167</a></div></div>
-<div class="ttc" id="helpers_8h_xhtml"><div class="ttname"><a href="helpers_8h.xhtml">helpers.h</a></div></div>
-<div class="ttc" id="asymm__helper_8h_xhtml_a766e0a81889256cfa161f7c08951c0b8"><div class="ttname"><a href="asymm__helper_8h.xhtml#a766e0a81889256cfa161f7c08951c0b8">asymm_rounding_divide_by_pow2</a></div><div class="ttdeci">int16 asymm_rounding_divide_by_pow2(int16 x, int exponent)</div><div class="ttdoc">Correctly rounded to nearest division by a power of two. </div><div class="ttdef"><b>Definition:</b> <a href="asymm__helper_8h_source.xhtml#l00082">asymm_helper.h:82</a></div></div>
-<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_adbf67dcee294e673cf796f1ed8aeb6a4"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">arm_compute::test::validation::dst</a></div><div class="ttdeci">Tensor dst</div><div class="ttdef"><b>Definition:</b> <a href="validation_2_n_e_o_n_2_g_e_m_m_8cpp_source.xhtml#l00118">GEMM.cpp:118</a></div></div>
+<div class="ttc" id="helpers_8h_xhtml_acb282042d1edeeaa3cc979a206f78b54"><div class="ttname"><a href="helpers_8h.xhtml#acb282042d1edeeaa3cc979a206f78b54">VSTORE</a></div><div class="ttdeci">#define VSTORE(size)</div><div class="ttdef"><b>Definition:</b> <a href="helpers_8h_source.xhtml#l00043">helpers.h:43</a></div></div>
+<div class="ttc" id="helpers__asymm_8h_xhtml_a98585f1bb84dea90aecbf59785c46151"><div class="ttname"><a href="helpers__asymm_8h.xhtml#a98585f1bb84dea90aecbf59785c46151">ASYMM_RESCALE</a></div><div class="ttdeci">#define ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, size)</div><div class="ttdef"><b>Definition:</b> <a href="helpers__asymm_8h_source.xhtml#l00307">helpers_asymm.h:307</a></div></div>
+<div class="ttc" id="softmax__layer_8cl_xhtml_a4884a666a1e93fbf8c27bd7d2da3c8bb"><div class="ttname"><a href="softmax__layer_8cl.xhtml#a4884a666a1e93fbf8c27bd7d2da3c8bb">idx4</a></div><div class="ttdeci">__constant uint4 idx4</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer_8cl_source.xhtml#l00088">softmax_layer.cl:88</a></div></div>
+<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_adbf67dcee294e673cf796f1ed8aeb6a4"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">arm_compute::test::validation::dst</a></div><div class="ttdeci">CLTensor dst</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_convolution_8cpp_source.xhtml#l00137">Convolution.cpp:137</a></div></div>
+<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_ad57ea340cdcfeb2e1375b70c3ae59bae"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#ad57ea340cdcfeb2e1375b70c3ae59bae">asymm_rescale</a></div><div class="ttdeci">#define asymm_rescale(value, src_integer_bits, dst_integer_bits)</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00058">softmax_layer_quantized.cl:58</a></div></div>
+<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_aee190caf3b3571e939ac129e12c368cd"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a></div><div class="ttdeci">#define VEC_INT</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00064">softmax_layer_quantized.cl:64</a></div></div>
 <div class="ttc" id="struct_image_xhtml"><div class="ttname"><a href="struct_image.xhtml">Image</a></div><div class="ttdoc">Structure to hold Image information. </div><div class="ttdef"><b>Definition:</b> <a href="helpers_8h_source.xhtml#l00140">helpers.h:140</a></div></div>
 <div class="ttc" id="helpers_8h_xhtml_a6b83038822d1ae7ab619b684ed3b7fc0"><div class="ttname"><a href="helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a></div><div class="ttdeci">#define TENSOR3D_DECLARATION(name)</div><div class="ttdef"><b>Definition:</b> <a href="helpers_8h_source.xhtml#l00074">helpers.h:74</a></div></div>
+<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a372393c380805985b813dbb16d589a64"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a372393c380805985b813dbb16d589a64">LOG_VECTOR_SIZE</a></div><div class="ttdeci">#define LOG_VECTOR_SIZE</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00054">softmax_layer_quantized.cl:54</a></div></div>
 <div class="ttc" id="struct_image_xhtml_acf52c23cbd7424606c10a606524e3e32"><div class="ttname"><a href="struct_image.xhtml#acf52c23cbd7424606c10a606524e3e32">Image::ptr</a></div><div class="ttdeci">__global uchar * ptr</div><div class="ttdoc">Pointer to the starting postion of the buffer. </div><div class="ttdef"><b>Definition:</b> <a href="helpers_8h_source.xhtml#l00142">helpers.h:142</a></div></div>
+<div class="ttc" id="helpers__asymm_8h_xhtml"><div class="ttname"><a href="helpers__asymm_8h.xhtml">helpers_asymm.h</a></div></div>
+<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_af5987b09a234231612b2b1eded343025"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#af5987b09a234231612b2b1eded343025">VEC_UCHAR</a></div><div class="ttdeci">#define VEC_UCHAR</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00062">softmax_layer_quantized.cl:62</a></div></div>
+<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a08246606c233e7785a497c09672f366f"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a></div><div class="ttdeci">#define GRID_SIZE</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00031">softmax_layer_quantized.cl:31</a></div></div>
+<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a54aedfa17c5ac2567107d5f488b0f4af"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a54aedfa17c5ac2567107d5f488b0f4af">asymm_exp_on_negative_values</a></div><div class="ttdeci">#define asymm_exp_on_negative_values(a, k_integer_bits)</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00057">softmax_layer_quantized.cl:57</a></div></div>
 <div class="ttc" id="helpers_8h_xhtml_a541f8db866a0fa93ee67d58ea31a7d0c"><div class="ttname"><a href="helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a></div><div class="ttdeci">#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name)</div><div class="ttdef"><b>Definition:</b> <a href="helpers_8h_source.xhtml#l00114">helpers.h:114</a></div></div>
 <div class="ttc" id="namespacearm__compute_1_1test_1_1fixed__point__arithmetic_1_1detail_xhtml_aa43c67a536c661ec28398d06bc8ba5af"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1fixed__point__arithmetic_1_1detail.xhtml#aa43c67a536c661ec28398d06bc8ba5af">arm_compute::test::fixed_point_arithmetic::detail::clz</a></div><div class="ttdeci">constexpr int clz(T value)</div><div class="ttdoc">Count the number of leading zero bits in the given value. </div><div class="ttdef"><b>Definition:</b> <a href="tests_2validation_2_fixed_point_8h_source.xhtml#l00276">FixedPoint.h:276</a></div></div>
-<div class="ttc" id="namespacearm__compute_1_1test_1_1fixed__point__arithmetic_1_1detail_xhtml_ad91bb73431b4de1f4946ed949d444849"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1fixed__point__arithmetic_1_1detail.xhtml#ad91bb73431b4de1f4946ed949d444849">arm_compute::test::fixed_point_arithmetic::detail::max</a></div><div class="ttdeci">fixed_point&lt; T &gt; max(fixed_point&lt; T &gt; x, fixed_point&lt; T &gt; y)</div><div class="ttdef"><b>Definition:</b> <a href="tests_2validation_2_fixed_point_8h_source.xhtml#l00889">FixedPoint.h:889</a></div></div>
-<div class="ttc" id="asymm__helper_8h_xhtml"><div class="ttname"><a href="asymm__helper_8h.xhtml">asymm_helper.h</a></div></div>
-<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a0712735973f172ac9efc7d48a31e47ad"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a0712735973f172ac9efc7d48a31e47ad">idx16</a></div><div class="ttdeci">__constant uint16 idx16</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00031">softmax_layer_quantized.cl:31</a></div></div>
+<div class="ttc" id="helpers__asymm_8h_xhtml_ae77f34e1316d52c1ee84c35be9efb0d8"><div class="ttname"><a href="helpers__asymm_8h.xhtml#ae77f34e1316d52c1ee84c35be9efb0d8">ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1</a></div><div class="ttdeci">#define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1(a, size)</div><div class="ttdef"><b>Definition:</b> <a href="helpers__asymm_8h_source.xhtml#l00304">helpers_asymm.h:304</a></div></div>
+<div class="ttc" id="helpers__asymm_8h_xhtml_a3a4f1b5d8f1cd67ac31bc62c9a6f4aa8"><div class="ttname"><a href="helpers__asymm_8h.xhtml#a3a4f1b5d8f1cd67ac31bc62c9a6f4aa8">ASYMM_EXP_ON_NEGATIVE_VALUES</a></div><div class="ttdeci">#define ASYMM_EXP_ON_NEGATIVE_VALUES(a, k_integer_bits, size)</div><div class="ttdef"><b>Definition:</b> <a href="helpers__asymm_8h_source.xhtml#l00303">helpers_asymm.h:303</a></div></div>
+<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a6743f0a130e8311e6f5b1a23df102472"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a6743f0a130e8311e6f5b1a23df102472">arm_compute::test::validation::src</a></div><div class="ttdeci">convolution configure &amp; src</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_convolution_8cpp_source.xhtml#l00147">Convolution.cpp:147</a></div></div>
 </div><!-- fragment --></div><!-- contents -->
 </div><!-- doc-content -->