/*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin. */
/* All rights reserved. */
/* Redistribution and use in source and binary forms, with or */
/* without modification, are permitted provided that the following */
/* conditions are met: */
/* 1. Redistributions of source code must retain the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer. */
/* 2. Redistributions in binary form must reproduce the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer in the documentation and/or other materials */
/* provided with the distribution. */
/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
/* The views and conclusions contained in the software and */
/* documentation are those of the authors and should not be */
/* interpreted as representing official policies, either expressed */
/* or implied, of The University of Texas at Austin. */
/*********************************************************************/
#define MB __asm__ __volatile__ ("mfence");
#define WMB __asm__ __volatile__ ("sfence");
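/* MB is a full memory barrier (mfence) and WMB a store barrier (sfence);
   they keep earlier memory accesses from being reordered past the point
   where the macro is placed. */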
static void __inline blas_lock(volatile BLASULONG *address){

  while (*address) {YIELDING;};

                       : "=r"(ret), "=m"(*address)
                       : "0"(1), "m"(*address)
static __inline BLASULONG rpcc(void){
  BLASULONG a, d;
  __asm__ __volatile__ ("rdtsc" : "=a" (a), "=d" (d));
  return ((BLASULONG)a + ((BLASULONG)d << 32));
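/* rpcc() returns the CPU time-stamp counter: rdtsc places the low 32 bits
   in EAX and the high 32 bits in EDX, which are recombined here into one
   64-bit tick count. */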
static __inline BLASULONG getstackaddr(void){
  BLASULONG addr;
  __asm__ __volatile__ ("movq %%rsp, %0"
                        : "=r"(addr) : : "memory");

static __inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx){

  __asm__ __volatile__("cpuid"

static inline int WhereAmI(void){
  int eax, ebx, ecx, edx;
  int apicid;
  cpuid(1, &eax, &ebx, &ecx, &edx);
  apicid = BITMASK(ebx, 24, 0xff);
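/* WhereAmI() reports which core the calling thread is running on: CPUID
   leaf 1 returns the initial APIC ID in bits 31..24 of EBX, and BITMASK
   extracts that byte (shift by 24, mask with 0xff). */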
#ifdef CORE_BARCELONA
#define IFLUSH gotoblas_iflush()
#define IFLUSH_HALF gotoblas_iflush_half()

#ifdef ENABLE_SSE_EXCEPTION

#define IDEBUG_START \
  unsigned int fp_sse_mode, new_fp_mode; \
  __asm__ __volatile__ ("stmxcsr %0" : "=m" (fp_sse_mode) : ); \
  new_fp_mode = fp_sse_mode & ~0xd00; \
  __asm__ __volatile__ ("ldmxcsr %0" : : "m" (new_fp_mode) );

  __asm__ __volatile__ ("ldmxcsr %0" : : "m" (fp_sse_mode) ); \
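/* IDEBUG_START saves the current MXCSR (stmxcsr) and clears mask bits
   0xd00 (the denormal, overflow and underflow exception masks) so those
   SSE exceptions trap while debugging; the final ldmxcsr line above
   belongs to the matching IDEBUG_END macro, whose #define line is elided
   here, and restores the saved MXCSR. */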
#define GET_IMAGE(res) __asm__ __volatile__("fstpt %0" : "=m"(res) : : "memory")
#elif defined(DOUBLE)
#define GET_IMAGE(res) __asm__ __volatile__("movsd %%xmm1, %0" : "=m"(res) : : "memory")
#define GET_IMAGE(res) __asm__ __volatile__("movss %%xmm1, %0" : "=m"(res) : : "memory")

#define GET_IMAGE_CANCEL
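/* GET_IMAGE(res) stores the imaginary part of a complex return value into
   res from wherever the active convention leaves it: the x87 register
   stack (fstpt) for extended precision, or %xmm1 (movsd/movss) for double
   and single precision. */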
static __inline blasint blas_quickdivide(blasint x, blasint y){

extern unsigned int blas_quick_divide_table[];

static __inline int blas_quickdivide(unsigned int x, unsigned int y){
  unsigned int result;
  if (y <= 1) return x;
  y = blas_quick_divide_table[y];
  __asm__ __volatile__ ("mull %0" :"=d" (result) :"a"(x), "0" (y));
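/* blas_quickdivide() divides by a small, bounded divisor with a
   reciprocal table: blas_quick_divide_table[y] holds a scaled reciprocal
   of y, and "mull" leaves the high 32 bits of x * table[y] in %edx
   ("=d"), which equals x / y for the divisors the table covers.  The
   blasint overload above is the plain-division fallback used by the
   64-bit-integer build; its body is elided in this excerpt. */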
#define PAGESIZE ( 4 << 10)
#define HUGE_PAGESIZE ( 2 << 20)
#define BUFFER_SIZE (32 << 20)

#ifdef F_INTERFACE_G77
#define RETURN_BY_STACK

#ifdef F_INTERFACE_G95
#define RETURN_BY_PACKED

#ifdef F_INTERFACE_GFORT
#define RETURN_BY_REGS
#define RETURN_BY_STACK
#define RETURN_BY_PACKED

#ifdef F_INTERFACE_INTEL
#define RETURN_BY_STACK

#ifdef F_INTERFACE_FUJITSU
#define RETURN_BY_STACK

#ifdef F_INTERFACE_PGI
#define RETURN_BY_STACK

#ifdef F_INTERFACE_PATHSCALE
#define RETURN_BY_PACKED

#ifdef F_INTERFACE_SUN
#define RETURN_BY_PACKED
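/* The F_INTERFACE_* blocks pick how complex-valued Fortran functions
   (e.g. the dot products) hand back their result for each compiler ABI:
   RETURN_BY_STACK via a hidden result pointer, RETURN_BY_REGS in
   registers, RETURN_BY_PACKED with real and imaginary parts packed into
   one register.  gfortran uses different conventions depending on OS and
   precision, which is why three alternative defines appear under
   F_INTERFACE_GFORT (their selecting conditionals are elided here). */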
#if defined(PILEDRIVER) || defined(BULLDOZER)
// Piledriver and Bulldozer also use the Barcelona optimizations.
#define BARCELONA_OPTIMIZATION
#if defined(HAVE_3DNOW)
#elif defined(HAVE_MMX)

#define BRANCH .byte 0x3e
#define NOBRANCH .byte 0x2e
#define PADDING .byte 0x66
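/* BRANCH and NOBRANCH emit the 0x3e (DS) and 0x2e (CS) segment-override
   prefixes, historically used as branch-taken / branch-not-taken hints,
   and PADDING emits the 0x66 operand-size prefix, used only to pad
   instruction length for alignment. */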
#define LOCAL_BUFFER_SIZE QLOCAL_BUFFER_SIZE
#define LOCAL_BUFFER_SIZE DLOCAL_BUFFER_SIZE
#define LOCAL_BUFFER_SIZE SLOCAL_BUFFER_SIZE

#define LOCAL_BUFFER_SIZE XLOCAL_BUFFER_SIZE
#define LOCAL_BUFFER_SIZE ZLOCAL_BUFFER_SIZE
#define LOCAL_BUFFER_SIZE CLOCAL_BUFFER_SIZE
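/* LOCAL_BUFFER_SIZE is chosen per data type: S/D/Q for single, double and
   extended-precision real, C/Z/X for the corresponding complex types.
   The #ifdef ladder that selects among these six defines is elided in
   this excerpt. */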
#if defined(OS_WINDOWS)
#if LOCAL_BUFFER_SIZE > 16384
#define STACK_TOUCHING \
	movl $0, 4096 * 4(%rsp);\
	movl $0, 4096 * 3(%rsp);\
	movl $0, 4096 * 2(%rsp);\
	movl $0, 4096 * 1(%rsp);
#elif LOCAL_BUFFER_SIZE > 12288
#define STACK_TOUCHING \
	movl $0, 4096 * 3(%rsp);\
	movl $0, 4096 * 2(%rsp);\
	movl $0, 4096 * 1(%rsp);
#elif LOCAL_BUFFER_SIZE > 8192
#define STACK_TOUCHING \
	movl $0, 4096 * 2(%rsp);\
	movl $0, 4096 * 1(%rsp);
#elif LOCAL_BUFFER_SIZE > 4096
#define STACK_TOUCHING \
	movl $0, 4096 * 1(%rsp);
#define STACK_TOUCHING
#define STACK_TOUCHING
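/* STACK_TOUCHING only matters on Windows, where the stack grows by
   faulting on a guard page: before a kernel drops %rsp past a large local
   buffer, each intervening 4 KiB page is written once, from the highest
   offset downward, so the pages are committed in order.  For small
   buffers, and on every other platform, the macro is empty, which is why
   two empty definitions close the block. */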
#define movapd movaps
#define movlpd movlps
#define movhpd movhps
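/* On the cores this aliasing is enabled for (the guarding conditional is
   elided here), the single-precision moves movaps/movlps/movhps are
   functionally equivalent to their *pd counterparts for plain data
   movement and encode one byte shorter, so the double-precision mnemonics
   are simply remapped to them. */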
#define REALNAME ASMNAME
#define REALNAME ASMFNAME

#define PROLOGUE .text;.align 5; .globl REALNAME; REALNAME:
#define EPILOGUE .subsections_via_symbols
#define SAVEREGISTERS \
	movups %xmm6, 0(%rsp);\
	movups %xmm7, 16(%rsp);\
	movups %xmm8, 32(%rsp);\
	movups %xmm9, 48(%rsp);\
	movups %xmm10, 64(%rsp);\
	movups %xmm11, 80(%rsp);\
	movups %xmm12, 96(%rsp);\
	movups %xmm13, 112(%rsp);\
	movups %xmm14, 128(%rsp);\
	movups %xmm15, 144(%rsp)

#define RESTOREREGISTERS \
	movups 0(%rsp), %xmm6;\
	movups 16(%rsp), %xmm7;\
	movups 32(%rsp), %xmm8;\
	movups 48(%rsp), %xmm9;\
	movups 64(%rsp), %xmm10;\
	movups 80(%rsp), %xmm11;\
	movups 96(%rsp), %xmm12;\
	movups 112(%rsp), %xmm13;\
	movups 128(%rsp), %xmm14;\
	movups 144(%rsp), %xmm15;\

#define SAVEREGISTERS
#define RESTOREREGISTERS
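/* The Win64 calling convention treats xmm6-xmm15 as callee-saved, so the
   assembly kernels spill them to a stack scratch area on entry and reload
   them before returning (the %rsp adjustment around these moves is elided
   in this excerpt).  Under the System V ABI all xmm registers are
   caller-saved, hence the empty definitions on other platforms. */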
#if defined(OS_WINDOWS) && !defined(C_PGI)

	.def REALNAME;.scl 2;.type 32;.endef; \

#define EPILOGUE .end REALNAME

#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_NETBSD) || defined(__ELF__) || defined(C_PGI)

	.type REALNAME, @function; \

#define PROFCODE call *mcount@GOTPCREL(%rip)

#define EPILOGUE .size REALNAME, .-REALNAME
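/* PROLOGUE/EPILOGUE wrap each assembly kernel in the symbol boilerplate
   its object format requires: COFF on Windows uses .def/.scl/.type/.endef,
   ELF targets mark the symbol with ".type @function" and record its size
   with .size, and the Mach-O variant above relies on
   .subsections_via_symbols.  PROFCODE inserts the mcount call used when
   profiling is enabled. */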
#elif defined(DOUBLE)

#define CMPEQPD cmpeqpd
#define COMISD comisd
#define MOVUPD movupd

#define CMPEQPD cmpeqps
#define COMISD comiss
#define MOVUPD movups
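/* These upper-case mnemonics let one kernel source serve both precisions:
   under DOUBLE they expand to the double-precision instructions (cmpeqpd,
   comisd, movupd), otherwise to the single-precision forms (cmpeqps,
   comiss, movups); the #else separating the two groups is elided here. */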
#define ALIGN_2 .align 2
#define ALIGN_3 .align 3
#define ALIGN_4 .align 4
#define ALIGN_5 .align 5

#define ALIGN_2 .align 4
#define ALIGN_3 .align 8
#define ALIGN_4 .align 16
#define ALIGN_5 .align 32
#define ALIGN_6 .align 64
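/* Two ALIGN_n variants exist because assemblers disagree on what .align
   means: some (e.g. the Mach-O assembler) treat the operand as a
   power-of-two exponent, so ".align 4" aligns to 16 bytes, while others
   treat it as a byte count, so the same ALIGN_4 must be written as
   ".align 16".  The conditionals choosing between the two sets are elided
   here. */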
// Clang does not support the ffreep mnemonic, so the opcode bytes are emitted directly.
// See http://www.sandpile.org/x86/opc_fpu.htm
#define ffreep .byte 0xdf, 0xc0 #
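/* 0xdf 0xc0 encodes "ffreep %st(0)".  The trailing '#' starts an
   assembler comment, so any register operand written after the ffreep
   macro in existing kernels is ignored instead of being mis-assembled. */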