1 /*********************************************************************/
2 /* Copyright 2009, 2010 The University of Texas at Austin. */
3 /* All rights reserved. */
5 /* Redistribution and use in source and binary forms, with or */
6 /* without modification, are permitted provided that the following */
7 /* conditions are met: */
9 /* 1. Redistributions of source code must retain the above */
10 /* copyright notice, this list of conditions and the following */
13 /* 2. Redistributions in binary form must reproduce the above */
14 /* copyright notice, this list of conditions and the following */
15 /* disclaimer in the documentation and/or other materials */
16 /* provided with the distribution. */
18 /* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
19 /* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
20 /* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
21 /* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
22 /* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
23 /* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
24 /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
25 /* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
26 /* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
27 /* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
28 /* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
29 /* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
30 /* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
31 /* POSSIBILITY OF SUCH DAMAGE. */
33 /* The views and conclusions contained in the software and */
34 /* documentation are those of the authors and should not be */
35 /* interpreted as representing official policies, either expressed */
36 /* or implied, of The University of Texas at Austin. */
37 /*********************************************************************/
/* Memory-barrier macros: MB = full barrier, WMB = write barrier.
   Three alternative definitions follow; the #if/#elif/#else lines that
   select between them are not visible in this excerpt (note the gaps in
   the embedded line numbering).  NOTE(review): confirm the selecting
   conditions against the full file before editing. */
/* Variant 1: real hardware fences (SSE2 mfence / sfence). */
55 #define MB __asm__ __volatile__ ("mfence");
56 #define WMB __asm__ __volatile__ ("sfence");
/* Variant 2: compiler-only barrier -- blocks compiler reordering of
   memory accesses but emits no instruction. */
64 #define MB do { __asm__ __volatile__("": : :"memory"); } while (0)
65 #define WMB do { __asm__ __volatile__("": : :"memory"); } while (0)
/* Variant 3: no-op barriers. */
68 #define MB do {} while (0)
69 #define WMB do {} while (0)
/* Spin-lock acquire on *address.  Only fragments of the body are
   visible here: the local declarations, the atomic-exchange asm opcode
   line, the outer retry loop, and the #ifdef lines choosing between the
   GNU-asm and MSVC paths all fall in lines missing from this excerpt.
   Do not edit without the full file. */
73 static void __inline blas_lock(volatile BLASULONG *address){
/* Spin (yielding the CPU) while the lock appears held, before trying
   the atomic swap. */
83 while (*address) {YIELDING;}
/* Output/input constraint lists of an atomic-exchange asm statement
   whose opcode line is not shown (presumably xchg -- confirm). */
88 : "=r"(ret), "=m"(*address)
89 : "0"(1), "m"(*address)
/* MSVC path: atomically store 1 into *address; the previous value is
   returned in ret (nonzero means someone else held the lock). */
92 ret=InterlockedExchange64((volatile LONG64 *)(address), 1);
97 #define BLAS_LOCK_DEFINED
/* Read the 64-bit time-stamp counter via RDTSC (used as a cheap
   cycle counter).  The declarations of locals a/d and the closing
   brace fall in lines missing from this excerpt. */
99 static __inline BLASULONG rpcc(void){
/* RDTSC leaves the low 32 bits in EAX and the high 32 bits in EDX. */
105 __asm__ __volatile__ ("rdtsc" : "=a" (a), "=d" (d));
/* Recombine EDX:EAX into a single 64-bit value. */
107 return ((BLASULONG)a + ((BLASULONG)d << 32));
/* Return the caller's current stack pointer (RSP).  The declaration of
   `addr` and the return statement are in lines missing from this
   excerpt. */
115 static __inline BLASULONG getstackaddr(void){
118 __asm__ __volatile__ ("movq %%rsp, %0"
119 : "=r"(addr) : : "memory");
/* CPUID wrapper: execute leaf `op` and return EAX/EBX/ECX/EDX through
   the out-pointers.  Two implementations are interleaved below -- the
   MSVC __cpuid-intrinsic path and a GNU inline-asm path; the #ifdef
   lines selecting between them and the result copy-out are missing
   from this excerpt. */
125 static __inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx){
129 __cpuid(cpuinfo, op);
/* GNU path: zero ECX first (sub-leaf 0); remainder of the asm is not
   shown here. */
135 __asm__ __volatile__("mov $0, %%ecx;"
/* Identify which CPU the calling thread is running on, via the initial
   APIC ID reported by CPUID leaf 1 in EBX bits 31..24.  The `apicid`
   declaration and the return statement fall in missing lines. */
149 static __inline int WhereAmI(void){
150 int eax, ebx, ecx, edx;
153 cpuid(1, &eax, &ebx, &ecx, &edx);
/* Extract bits [31:24] of EBX (the initial APIC ID). */
154 apicid = BITMASK(ebx, 24, 0xff);
/* Instruction-cache flush hooks, enabled only for Barcelona cores. */
160 #ifdef CORE_BARCELONA
161 #define IFLUSH gotoblas_iflush()
162 #define IFLUSH_HALF gotoblas_iflush_half()
/* SSE-exception debugging: IDEBUG_START saves MXCSR and clears mask
   bits 0xd00 so those FP exceptions trap; the matching restore
   (IDEBUG_END) and several surrounding lines are missing from this
   excerpt, so no comments are inserted inside the \-continuations. */
165 #ifdef ENABLE_SSE_EXCEPTION
167 #define IDEBUG_START \
169 unsigned int fp_sse_mode, new_fp_mode; \
170 __asm__ __volatile__ ("stmxcsr %0" : "=m" (fp_sse_mode) : ); \
171 new_fp_mode = fp_sse_mode & ~0xd00; \
172 __asm__ __volatile__ ("ldmxcsr %0" : : "m" (new_fp_mode) );
175 __asm__ __volatile__ ("ldmxcsr %0" : : "m" (fp_sse_mode) ); \
181 #define GET_IMAGE(res) __asm__ __volatile__("fstpt %0" : "=m"(res) : : "memory")
/* GET_IMAGE stores the imaginary part of a complex return value from
   the register the ABI leaves it in: x87 st(0) for the extended-
   precision branch above, xmm1 as double, or xmm1 as float below. */
182 #elif defined(DOUBLE)
183 #define GET_IMAGE(res) __asm__ __volatile__("movsd %%xmm1, %0" : "=m"(res) : : "memory")
185 #define GET_IMAGE(res) __asm__ __volatile__("movss %%xmm1, %0" : "=m"(res) : : "memory")
188 #define GET_IMAGE_CANCEL
/* blas_quickdivide(x, y): fast integer x / y for small divisors, used
   when splitting work across CPUs.  Three variants selected by build
   configuration; each body is only partially visible in this excerpt. */
191 #if defined(USE64BITINT)
/* 64-bit-integer build (body not shown -- presumably a plain divide). */
192 static __inline blasint blas_quickdivide(blasint x, blasint y){
195 #elif defined (C_MSVC)
196 static __inline BLASLONG blas_quickdivide(BLASLONG x, BLASLONG y){
/* GNU 32-bit-int path: replace the divide with a multiply by a
   precomputed fixed-point reciprocal. */
200 extern unsigned int blas_quick_divide_table[];
202 static __inline unsigned int blas_quickdivide(unsigned int x, unsigned int y){
204 volatile unsigned int result;
/* y == 0 or 1: return x unchanged (table lookup not needed/valid). */
206 if (y <= 1) return x;
208 #if (MAX_CPU_NUMBER > 64)
/* Fetch the reciprocal multiplier for y (table sized by MAX_CPU_NUMBER). */
215 y = blas_quick_divide_table[y];
/* 32x32 -> 64 multiply; the quotient is the high half, left in EDX. */
217 __asm__ __volatile__ ("mull %0" :"=d" (result), "+a"(x) : "0" (y));
/* Memory-layout constants: 4 KiB normal page, 2 MiB huge page.  The
   two BUFFER_SIZE definitions belong to different #if branches whose
   selecting lines are missing from this excerpt (the second scales by
   the build-time BUFFERSIZE parameter). */
226 #define PAGESIZE ( 4 << 10)
228 #define HUGE_PAGESIZE ( 2 << 20)
231 #define BUFFER_SIZE (32 << 22)
233 #define BUFFER_SIZE (32 << BUFFERSIZE)
/* Per-Fortran-compiler ABI for returning complex values from
   functions: via a hidden pointer argument (RETURN_BY_STACK), packed
   into one register pair (RETURN_BY_PACKED), or in plain registers
   (RETURN_BY_REGS).  The gfortran branch selects among all three; its
   inner #if lines are missing from this excerpt. */
238 #ifdef F_INTERFACE_G77
239 #define RETURN_BY_STACK
243 #ifdef F_INTERFACE_G95
244 #define RETURN_BY_PACKED
247 #ifdef F_INTERFACE_GFORT
250 #define RETURN_BY_REGS
252 #define RETURN_BY_STACK
255 #define RETURN_BY_PACKED
259 #ifdef F_INTERFACE_INTEL
260 #define RETURN_BY_STACK
263 #ifdef F_INTERFACE_FUJITSU
264 #define RETURN_BY_STACK
267 #ifdef F_INTERFACE_FLANG
268 #define RETURN_BY_STACK
271 #ifdef F_INTERFACE_PGI
272 #define RETURN_BY_STACK
275 #ifdef F_INTERFACE_PATHSCALE
276 #define RETURN_BY_PACKED
279 #ifdef F_INTERFACE_SUN
280 #define RETURN_BY_PACKED
285 #if defined(PILEDRIVER) || defined(BULLDOZER) || defined(STEAMROLLER) || defined(EXCAVATOR)
286 // Later AMD families reuse the Barcelona optimization code paths.
287 #define BARCELONA_OPTIMIZATION
/* MMX/3DNow EMMS handling -- the macro bodies for these branches are
   missing from this excerpt. */
290 #if defined(HAVE_3DNOW)
292 #elif defined(HAVE_MMX)
/* Assembler branch-hint / padding prefixes: 0x3e = taken hint,
   0x2e = not-taken hint, 0x66 = operand-size prefix used as padding. */
300 #define BRANCH .byte 0x3e
301 #define NOBRANCH .byte 0x2e
302 #define PADDING .byte 0x66
/* Per-precision scratch-buffer size for assembly kernels; one
   definition per XDOUBLE/DOUBLE/single x real/complex branch (the
   selecting #if lines are missing from this excerpt). */
320 #define LOCAL_BUFFER_SIZE QLOCAL_BUFFER_SIZE
322 #define LOCAL_BUFFER_SIZE DLOCAL_BUFFER_SIZE
324 #define LOCAL_BUFFER_SIZE SLOCAL_BUFFER_SIZE
328 #define LOCAL_BUFFER_SIZE XLOCAL_BUFFER_SIZE
330 #define LOCAL_BUFFER_SIZE ZLOCAL_BUFFER_SIZE
332 #define LOCAL_BUFFER_SIZE CLOCAL_BUFFER_SIZE
/* Windows stack probing: large stack frames must touch each 4 KiB page
   in order so the OS guard page can grow the stack; STACK_TOUCHING
   writes one zero per page the frame will span, sized by
   LOCAL_BUFFER_SIZE.  Non-Windows (and small-frame) builds define it
   empty. */
336 #if defined(OS_WINDOWS)
337 #if LOCAL_BUFFER_SIZE > 16384
338 #define STACK_TOUCHING \
339 movl $0, 4096 * 4(%rsp);\
340 movl $0, 4096 * 3(%rsp);\
341 movl $0, 4096 * 2(%rsp);\
342 movl $0, 4096 * 1(%rsp);
343 #elif LOCAL_BUFFER_SIZE > 12288
344 #define STACK_TOUCHING \
345 movl $0, 4096 * 3(%rsp);\
346 movl $0, 4096 * 2(%rsp);\
347 movl $0, 4096 * 1(%rsp);
348 #elif LOCAL_BUFFER_SIZE > 8192
349 #define STACK_TOUCHING \
350 movl $0, 4096 * 2(%rsp);\
351 movl $0, 4096 * 1(%rsp);
352 #elif LOCAL_BUFFER_SIZE > 4096
353 #define STACK_TOUCHING \
354 movl $0, 4096 * 1(%rsp);
/* Frame fits in one page, or not Windows: no probing needed. */
356 #define STACK_TOUCHING
359 #define STACK_TOUCHING
/* SSE1 fallback: alias the SSE2 double-precision moves to their
   single-precision forms (the guarding #if is missing from this
   excerpt -- presumably a pre-SSE2 target; confirm). */
363 #define movapd movaps
365 #define movlpd movlps
366 #define movhpd movhps
/* Assembly entry-point name, with or without the Fortran-style
   decoration (the selecting #if is not shown). */
370 #define REALNAME ASMNAME
372 #define REALNAME ASMFNAME
/* Mach-O (OS X) function prologue/epilogue for assembly kernels. */
376 #define PROLOGUE .text;.align 5; .globl REALNAME; REALNAME:
377 #define EPILOGUE .subsections_via_symbols
/* Win64 ABI: xmm6-xmm15 are callee-saved, so assembly kernels must
   spill them on entry and reload on exit.  160 bytes of stack are used
   (the surrounding stack-pointer adjustments fall in lines missing
   from this excerpt).  The empty definitions at the end are for ABIs
   where these registers are caller-saved. */
382 #define SAVEREGISTERS \
384 movups %xmm6, 0(%rsp);\
385 movups %xmm7, 16(%rsp);\
386 movups %xmm8, 32(%rsp);\
387 movups %xmm9, 48(%rsp);\
388 movups %xmm10, 64(%rsp);\
389 movups %xmm11, 80(%rsp);\
390 movups %xmm12, 96(%rsp);\
391 movups %xmm13, 112(%rsp);\
392 movups %xmm14, 128(%rsp);\
393 movups %xmm15, 144(%rsp)
/* Reload in the same order; the trailing backslash on the last movups
   continues into a line not shown here. */
395 #define RESTOREREGISTERS \
396 movups 0(%rsp), %xmm6;\
397 movups 16(%rsp), %xmm7;\
398 movups 32(%rsp), %xmm8;\
399 movups 48(%rsp), %xmm9;\
400 movups 64(%rsp), %xmm10;\
401 movups 80(%rsp), %xmm11;\
402 movups 96(%rsp), %xmm12;\
403 movups 112(%rsp), %xmm13;\
404 movups 128(%rsp), %xmm14;\
405 movups 144(%rsp), %xmm15;\
408 #define SAVEREGISTERS
409 #define RESTOREREGISTERS
/* PE/COFF (Windows) assembly prologue/epilogue; most of the PROLOGUE
   definition falls in lines missing from this excerpt. */
412 #if defined(OS_WINDOWS) && !defined(C_PGI)
417 .def REALNAME;.scl 2;.type 32;.endef; \
422 #define EPILOGUE .end
/* ELF targets: typed, sized symbols plus mcount profiling hook and a
   non-executable-stack note section. */
425 #if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_NETBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(__ELF__) || defined(C_PGI)
430 .type REALNAME, @function; \
434 #define PROFCODE call *mcount@GOTPCREL(%rip)
440 .size REALNAME, .-REALNAME; \
441 .section .note.GNU-stack,"",@progbits
/* Precision-generic assembly mnemonics: the same macro names expand to
   the double-precision (…pd/…sd) or single-precision (…ps/…ss) forms
   depending on the DOUBLE branch.  Keeping the *PD names in the float
   branch is deliberate so kernel sources are precision-agnostic. */
452 #elif defined(DOUBLE)
461 #define CMPEQPD cmpeqpd
462 #define COMISD comisd
470 #define MOVUPD movupd
481 #define CMPEQPD cmpeqps
482 #define COMISD comiss
490 #define MOVUPD movups
/* ALIGN_n = align to 2^n bytes.  Two conventions appear because some
   assemblers treat .align's operand as an exponent and others as a
   byte count; the #if lines choosing per-platform are missing from
   this excerpt. */
497 #define ALIGN_2 .align 2
498 #define ALIGN_3 .align 3
499 #define ALIGN_4 .align 4
500 #define ALIGN_5 .align 5
505 #define ALIGN_2 .align 4
509 #define ALIGN_3 .align 8
513 #define ALIGN_4 .align 16
517 #define ALIGN_5 .align 32
521 #define ALIGN_6 .align 64
525 // Clang's integrated assembler does not accept the ffreep mnemonic,
526 // so emit its opcode bytes (DF C0 = ffreep st(0)) directly.
526 // Opcode reference: http://www.sandpile.org/x86/opc_fpu.htm
528 #define ffreep .byte 0xdf, 0xc0 #