2 * (C) Copyright IBM Corporation 2008
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
27 * Real-time assembly generation interface for Cell B.E. SPEs.
29 * \author Ian Romanick <idr@us.ibm.com>
35 #include "pipe/p_compiler.h"
36 #include "util/u_memory.h"
37 #include "rtasm_ppc_spe.h"
42 * SPE instruction types
44 * There are 6 primary instruction encodings used on the Cell's SPEs. Each of
45 * the following unions encodes one type.
48 * If, at some point, we start generating SPE code from a little-endian host
49 * these unions will not work.
53 * Encode one output register with two input registers
67 * Encode one output register with three input registers
82 * Encode one output register with one input reg. and a 7-bit signed immed
96 * Encode one output register with one input reg. and an 8-bit signed immed
110 * Encode one output register with one input reg. and a 10-bit signed immed
112 union spe_inst_RI10 {
124 * Encode one output register with a 16-bit signed immediate
126 union spe_inst_RI16 {
137 * Encode one output register with an 18-bit signed immediate
139 union spe_inst_RI18 {
/* Emit p->indent spaces of leading whitespace for the code listing.
 * NOTE(review): return type and loop body are elided in this view.
 */
151 indent(const struct spe_function *p)
154 for (i = 0; i < p->indent; i++) {
/* Strip the leading "spe_" from a generated function name for printing.
 * NOTE(review): body elided in this view -- presumably returns a pointer
 * past the prefix; confirm against the full source. */
161 rem_prefix(const char *longname)
/* Format register index 'reg' as "$N" for disassembly output.  Rotates
 * through four static buffers so several calls can appear in one printf.
 * NOTE(review): the buffer-index rotation lines are elided in this view. */
177 /* cycle through four buffers to handle multiple calls per printf */
178 static char buf[4][10];
181 sprintf(buf[b], "$%d", reg);
/* Append one 32-bit instruction word to p->store, doubling the buffer
 * when it is full.  On allocation failure the instruction is silently
 * dropped (early return).
 * NOTE(review): several allocation/copy/guard lines are elided in this
 * view; the grow path copies the old instructions then frees the old
 * buffer before installing the new one. */
189 emit_instruction(struct spe_function *p, uint32_t inst_bits)
192 return; /* out of memory, drop the instruction */
194 if (p->num_inst == p->max_inst) {
195 /* allocate larger buffer */
197 p->max_inst *= 2; /* 2x larger */
198 newbuf = align_malloc(p->max_inst * SPE_INST_SIZE, 16);
200 memcpy(newbuf, p->store, p->num_inst * SPE_INST_SIZE);
202 align_free(p->store);
211 p->store[p->num_inst++] = inst_bits;
/* Encode and emit an RR-format instruction (op, rT, rA, rB); prints a
 * disassembly line when p->print is set.
 * NOTE(review): the union field assignments and the if (p->print) guard
 * are elided in this view. */
216 static void emit_RR(struct spe_function *p, unsigned op, int rT,
217 int rA, int rB, const char *name)
219 union spe_inst_RR inst;
224 emit_instruction(p, inst.bits);
227 printf("%s\t%s, %s, %s\n",
228 rem_prefix(name), reg_name(rT), reg_name(rA), reg_name(rB));
/* Encode and emit an RRR-format instruction (op, rT, rA, rB, rC).
 * NOTE(review): union field assignments and print guard elided here. */
233 static void emit_RRR(struct spe_function *p, unsigned op, int rT,
234 int rA, int rB, int rC, const char *name)
236 union spe_inst_RRR inst;
242 emit_instruction(p, inst.bits);
245 printf("%s\t%s, %s, %s, %s\n", rem_prefix(name), reg_name(rT),
246 reg_name(rA), reg_name(rB), reg_name(rC))
/* Encode and emit an RI7-format instruction (7-bit signed immediate).
 * NOTE(review): union field assignments and print guard elided here. */
251 static void emit_RI7(struct spe_function *p, unsigned op, int rT,
252 int rA, int imm, const char *name)
254 union spe_inst_RI7 inst;
259 emit_instruction(p, inst.bits);
262 printf("%s\t%s, %s, 0x%x\n",
263 rem_prefix(name), reg_name(rT), reg_name(rA), imm);
/* Encode and emit an RI8-format instruction (8-bit immediate).
 * NOTE(review): union field assignments and print guard elided here. */
269 static void emit_RI8(struct spe_function *p, unsigned op, int rT,
270 int rA, int imm, const char *name)
272 union spe_inst_RI8 inst;
277 emit_instruction(p, inst.bits);
280 printf("%s\t%s, %s, 0x%x\n",
281 rem_prefix(name), reg_name(rT), reg_name(rA), imm);
/* Encode and emit an RI10-format instruction (10-bit signed immediate).
 * NOTE(review): union field assignments and print guard elided here. */
287 static void emit_RI10(struct spe_function *p, unsigned op, int rT,
288 int rA, int imm, const char *name)
290 union spe_inst_RI10 inst;
295 emit_instruction(p, inst.bits);
298 printf("%s\t%s, %s, 0x%x\n",
299 rem_prefix(name), reg_name(rT), reg_name(rA), imm);
304 /** As above, but do range checking on signed immediate value */
/* NOTE(review): the range-check asserts on imm are elided in this view;
 * only the delegation to emit_RI10 is visible. */
305 static void emit_RI10s(struct spe_function *p, unsigned op, int rT,
306 int rA, int imm, const char *name)
310 emit_RI10(p, op, rT, rA, imm, name);
/* Encode and emit an RI16-format instruction (16-bit signed immediate,
 * no rA field).  NOTE(review): field assignments/print guard elided. */
314 static void emit_RI16(struct spe_function *p, unsigned op, int rT,
315 int imm, const char *name)
317 union spe_inst_RI16 inst;
321 emit_instruction(p, inst.bits);
324 printf("%s\t%s, 0x%x\n", rem_prefix(name), reg_name(rT), imm);
/* Encode and emit an RI18-format instruction (18-bit immediate).
 * NOTE(review): field assignments/print guard elided in this view. */
329 static void emit_RI18(struct spe_function *p, unsigned op, int rT,
330 int imm, const char *name)
332 union spe_inst_RI18 inst;
336 emit_instruction(p, inst.bits);
339 printf("%s\t%s, 0x%x\n", rem_prefix(name), reg_name(rT), imm);
/* Emitter-generating macros.  rtasm_ppc_spe.h is re-included below with
 * these EMIT_* macros defined, which appears to expand each instruction
 * declared in the header into a one-line wrapper around the matching
 * emit_* helper (X-macro technique).  EMIT_RI8 applies a 'bias' so the
 * stored immediate is (bias - imm).
 * NOTE(review): the opening/closing brace lines of each macro body are
 * elided in this view. */
344 #define EMIT(_name, _op) \
345 void _name (struct spe_function *p) \
347 emit_RR(p, _op, 0, 0, 0, __FUNCTION__); \
350 #define EMIT_(_name, _op) \
351 void _name (struct spe_function *p, int rT) \
353 emit_RR(p, _op, rT, 0, 0, __FUNCTION__); \
356 #define EMIT_R(_name, _op) \
357 void _name (struct spe_function *p, int rT, int rA) \
359 emit_RR(p, _op, rT, rA, 0, __FUNCTION__); \
362 #define EMIT_RR(_name, _op) \
363 void _name (struct spe_function *p, int rT, int rA, int rB) \
365 emit_RR(p, _op, rT, rA, rB, __FUNCTION__); \
368 #define EMIT_RRR(_name, _op) \
369 void _name (struct spe_function *p, int rT, int rA, int rB, int rC) \
371 emit_RRR(p, _op, rT, rA, rB, rC, __FUNCTION__); \
374 #define EMIT_RI7(_name, _op) \
375 void _name (struct spe_function *p, int rT, int rA, int imm) \
377 emit_RI7(p, _op, rT, rA, imm, __FUNCTION__); \
380 #define EMIT_RI8(_name, _op, bias) \
381 void _name (struct spe_function *p, int rT, int rA, int imm) \
383 emit_RI8(p, _op, rT, rA, bias - imm, __FUNCTION__); \
386 #define EMIT_RI10(_name, _op) \
387 void _name (struct spe_function *p, int rT, int rA, int imm) \
389 emit_RI10(p, _op, rT, rA, imm, __FUNCTION__); \
392 #define EMIT_RI10s(_name, _op) \
393 void _name (struct spe_function *p, int rT, int rA, int imm) \
395 emit_RI10s(p, _op, rT, rA, imm, __FUNCTION__); \
398 #define EMIT_RI16(_name, _op) \
399 void _name (struct spe_function *p, int rT, int imm) \
401 emit_RI16(p, _op, rT, imm, __FUNCTION__); \
404 #define EMIT_RI18(_name, _op) \
405 void _name (struct spe_function *p, int rT, int imm) \
407 emit_RI18(p, _op, rT, imm, __FUNCTION__); \
410 #define EMIT_I16(_name, _op) \
411 void _name (struct spe_function *p, int imm) \
413 emit_RI16(p, _op, 0, imm, __FUNCTION__); \
416 #include "rtasm_ppc_spe.h"
/* NOTE(review): several setup lines (default size handling, counter
 * resets, loop body marking R80-R127) are elided in this view. */
421 * Initialize an spe_function.
422 * \param code_size initial size of instruction buffer to allocate, in bytes.
423 * If zero, use a default.
425 void spe_init_func(struct spe_function *p, unsigned code_size)
433 p->max_inst = code_size / SPE_INST_SIZE;
434 p->store = align_malloc(code_size, 16);
437 memset(p->regs, 0, SPE_NUM_REGS * sizeof(p->regs[0]));
/* R0 (LR), R1 (SP), R2, and the non-volatile range are reserved so
 * generated code never clobbers them. */
439 /* Conservatively treat R0 - R2 and R80 - R127 as non-volatile.
441 p->regs[0] = p->regs[1] = p->regs[2] = 1;
442 for (i = 80; i <= 127; i++) {
451 void spe_release_func(struct spe_function *p)
453 assert(p->num_inst <= p->max_inst);
454 if (p->store != NULL) {
455 align_free(p->store);
461 /** Return current code size in bytes. */
462 unsigned spe_code_size(const struct spe_function *p)
464 return p->num_inst * SPE_INST_SIZE;
469 * Allocate a SPE register.
470 * \return register index or -1 if none left.
/* NOTE(review): the lines that mark the found register as used and the
 * two return statements are elided in this view. */
472 int spe_allocate_available_register(struct spe_function *p)
475 for (i = 0; i < SPE_NUM_REGS; i++) {
476 if (p->regs[i] == 0) {
487 * Mark the given SPE register as "allocated".
/* Asserts the register index is valid and currently free.
 * NOTE(review): the marking and return lines are elided in this view. */
489 int spe_allocate_register(struct spe_function *p, int reg)
491 assert(reg < SPE_NUM_REGS);
492 assert(p->regs[reg] == 0);
499 * Mark the given SPE register as "unallocated". Note that this should
500 * only be used on registers allocated in the current register set; an
501 * assertion will fail if an attempt is made to deallocate a register
502 * allocated in an earlier register set.
/* NOTE(review): the line clearing p->regs[reg] is elided in this view;
 * the count==1 assert enforces the current-set restriction above. */
504 void spe_release_register(struct spe_function *p, int reg)
507 assert(reg < SPE_NUM_REGS);
508 assert(p->regs[reg] == 1);
514 * Start a new set of registers. This can be called if
515 * it will be difficult later to determine exactly what
516 * registers were actually allocated during a code generation
517 * sequence, and you really just want to deallocate all of them.
/* NOTE(review): the set_count increment and the loop body bumping each
 * allocated register's count are elided in this view. */
519 void spe_allocate_register_set(struct spe_function *p)
523 /* Keep track of the set count. If it ever wraps around to 0,
527 assert(p->set_count > 0);
529 /* Increment the allocation count of all registers currently
530 * allocated. Then any registers that are allocated in this set
531 * will be the only ones with a count of 1; they'll all be released
532 * when the register set is released.
534 for (i = 0; i < SPE_NUM_REGS; i++) {
/* End the current register set: every register allocated since the
 * matching spe_allocate_register_set() call drops back to free.
 * NOTE(review): the set_count decrement and per-register decrement loop
 * body are elided in this view. */
540 void spe_release_register_set(struct spe_function *p)
544 /* If the set count drops below zero, we're in trouble. */
545 assert(p->set_count > 0);
548 /* Drop the allocation level of all registers. Any allocated
549 * during this register set will drop to 0 and then become
552 for (i = 0; i < SPE_NUM_REGS; i++) {
/* Report which caller-visible registers (R2..R79) are currently in use
 * via the 'used' array.  NOTE(review): return type and loop body are
 * elided in this view. */
560 spe_get_registers_used(const struct spe_function *p, ubyte used[])
563 /* only count registers in the range available to callers */
564 for (i = 2; i < 80; i++) {
/* Enable/disable printing of generated assembly (presumably sets
 * p->print = enable; body elided in this view). */
574 spe_print_code(struct spe_function *p, boolean enable)
/* Adjust the listing indentation by 'spaces' (relative; body elided in
 * this view). */
581 spe_indent(struct spe_function *p, int spaces)
/* Print a comment line 's' into the code listing, temporarily shifting
 * the indentation by rel_indent.  NOTE(review): the print-guard and
 * printf lines are elided in this view. */
588 spe_comment(struct spe_function *p, int rel_indent, const char *s)
591 p->indent += rel_indent;
593 p->indent -= rel_indent;
/* Load Quadword (d-form): rT = mem[rA + offset], emitted as RI10 opcode
 * 0x034 with offset/16 as the immediate.  pSave suggests p->print is
 * suppressed around emit_RI10 so the load/store-style line below is
 * printed instead -- the toggle/restore lines are elided in this view. */
601 * NOTE: offset is in bytes and the least significant 4 bits must be zero!
603 void spe_lqd(struct spe_function *p, int rT, int rA, int offset)
605 const boolean pSave = p->print;
607 /* offset must be a multiple of 16 */
608 assert(offset % 16 == 0);
609 /* offset must fit in 10-bit signed int field, after shifting */
610 assert((offset >> 4) <= 511);
611 assert((offset >> 4) >= -512);
614 emit_RI10(p, 0x034, rT, rA, offset >> 4, "spe_lqd");
619 printf("lqd\t%s, %d(%s)\n", reg_name(rT), offset, reg_name(rA));
/* Store Quadword (d-form): mem[rA + offset] = rT, emitted as RI10 opcode
 * 0x024 with offset/16 as the immediate.  Mirrors spe_lqd, including the
 * suppressed default printing (toggle lines elided in this view). */
626 * NOTE: offset is in bytes and the least significant 4 bits must be zero!
628 void spe_stqd(struct spe_function *p, int rT, int rA, int offset)
630 const boolean pSave = p->print;
632 /* offset must be a multiple of 16 */
633 assert(offset % 16 == 0);
634 /* offset must fit in 10-bit signed int field, after shifting */
635 assert((offset >> 4) <= 511);
636 assert((offset >> 4) >= -512);
639 emit_RI10(p, 0x024, rT, rA, offset >> 4, "spe_stqd");
644 printf("stqd\t%s, %d(%s)\n", reg_name(rT), offset, reg_name(rA));
650 * For branch instructions:
651 * \param d if 1, disable interrupts if the branch is taken
652 * \param e if 1, enable interrupts if the branch is taken
653 * If d and e are both zero, the interrupt status is presumably left unchanged (verify against the SPU ISA)
/** Branch Indirect: jump to the address held in rA. */
void spe_bi(struct spe_function *p, int rA, int d, int e)
{
   const int intr_bits = (d << 5) | (e << 4);   /* D/E interrupt-control flags */
   emit_RI7(p, 0x1a8, 0, rA, intr_bits, __FUNCTION__);
}
/** Interrupt Return: resume execution at the address held in rA. */
void spe_iret(struct spe_function *p, int rA, int d, int e)
{
   const int intr_bits = (d << 5) | (e << 4);
   emit_RI7(p, 0x1aa, 0, rA, intr_bits, __FUNCTION__);
}
/** Branch indirect and set link on external data: save PC in rT, jump to
 *  the address in rA.  (Final parameter reconstructed as 'int e' -- the
 *  declaration line is truncated in this view.)
 */
void spe_bisled(struct spe_function *p, int rT, int rA, int d,
                int e)
{
   const int intr_bits = (d << 5) | (e << 4);
   emit_RI7(p, 0x1ab, rT, rA, intr_bits, __FUNCTION__);
}
/** Branch Indirect and Set Link: save PC in rT, jump to the address in
 *  rA.  (Final parameter reconstructed as 'int e' -- the declaration
 *  line is truncated in this view.)
 */
void spe_bisl(struct spe_function *p, int rT, int rA, int d,
              int e)
{
   const int intr_bits = (d << 5) | (e << 4);
   emit_RI7(p, 0x1a9, rT, rA, intr_bits, __FUNCTION__);
}
/** Branch Indirect if Zero word: if rT.word[0] == 0, jump to the address in rA. */
void spe_biz(struct spe_function *p, int rT, int rA, int d, int e)
{
   const int intr_bits = (d << 5) | (e << 4);
   emit_RI7(p, 0x128, rT, rA, intr_bits, __FUNCTION__);
}
/** Branch Indirect if Non-Zero word: if rT.word[0] != 0, jump to the address in rA. */
void spe_binz(struct spe_function *p, int rT, int rA, int d, int e)
{
   const int intr_bits = (d << 5) | (e << 4);
   emit_RI7(p, 0x129, rT, rA, intr_bits, __FUNCTION__);
}
/** Branch Indirect if Zero Halfword: if rT.halfword[1] == 0, jump to rA. */
void spe_bihz(struct spe_function *p, int rT, int rA, int d, int e)
{
   const int intr_bits = (d << 5) | (e << 4);
   emit_RI7(p, 0x12a, rT, rA, intr_bits, __FUNCTION__);
}
/** Branch Indirect if Non-Zero Halfword: if rT.halfword[1] != 0, jump to rA. */
void spe_bihnz(struct spe_function *p, int rT, int rA, int d, int e)
{
   const int intr_bits = (d << 5) | (e << 4);
   emit_RI7(p, 0x12b, rT, rA, intr_bits, __FUNCTION__);
}
707 /* Hint-for-branch instructions
716 /* Control instructions
/* Control-class instructions generated through the EMIT_* macros above:
 * name plus major opcode.  NOTE(review): surrounding entries (e.g. stop,
 * sync, lnop) appear to be elided in this view. */
720 EMIT_RR (spe_stopd, 0x140);
721 EMIT_ (spe_nop, 0x201);
723 EMIT_ (spe_dsync, 0x003);
724 EMIT_R (spe_mfspr, 0x00c);
725 EMIT_R (spe_mtspr, 0x10c);
730 ** Helper / "macro" instructions.
731 ** Use somewhat verbose names as a reminder that these aren't native
/* Load a float immediate into rT.  0.5f, 1.0f, and -1.0f have all-zero
 * low halves, so a single ilhu of the high IEEE-754 half suffices; the
 * general case loads both halves (ilhu + iohl) from what appears to be
 * a float/uint punning union ('bits.u').
 * NOTE(review): the leading x == 0.0f case and the union declaration
 * are elided in this view. */
737 spe_load_float(struct spe_function *p, int rT, float x)
742 else if (x == 0.5f) {
743 spe_ilhu(p, rT, 0x3f00);
745 else if (x == 1.0f) {
746 spe_ilhu(p, rT, 0x3f80);
748 else if (x == -1.0f) {
749 spe_ilhu(p, rT, 0xbf80);
757 spe_ilhu(p, rT, bits.u >> 16);
758 spe_iohl(p, rT, bits.u & 0xffff);
/* Load a signed 32-bit immediate into rT: one instruction when the value
 * fits in 16 bits signed (that branch's body is elided in this view),
 * otherwise ilhu for the high half plus iohl for the low half. */
764 spe_load_int(struct spe_function *p, int rT, int i)
766 if (-32768 <= i && i <= 32767) {
770 spe_ilhu(p, rT, i >> 16);
772 spe_iohl(p, rT, i & 0xffff);
/* NOTE(review): the emit calls for the ila case, the 'mask' variable
 * declaration, and the iohl guard are elided in this view; the comments
 * below describe the intended single/double-instruction strategy. */
776 void spe_load_uint(struct spe_function *p, int rT, uint ui)
778 /* If the whole value is in the lower 18 bits, use ila, which
779 * doesn't sign-extend. Otherwise, if the two halfwords of
780 * the constant are identical, use ilh. Otherwise, if every byte of
781 * the desired value is 0x00 or 0xff, we can use Form Select Mask for
782 * Bytes Immediate (fsmbi) to load the value in a single instruction.
783 * Otherwise, in the general case, we have to use ilhu followed by iohl.
785 if ((ui & 0x0003ffff) == ui) {
788 else if ((ui >> 16) == (ui & 0xffff)) {
789 spe_ilh(p, rT, ui & 0xffff);
792 ((ui & 0x000000ff) == 0 || (ui & 0x000000ff) == 0x000000ff) &&
793 ((ui & 0x0000ff00) == 0 || (ui & 0x0000ff00) == 0x0000ff00) &&
794 ((ui & 0x00ff0000) == 0 || (ui & 0x00ff0000) == 0x00ff0000) &&
795 ((ui & 0xff000000) == 0 || (ui & 0xff000000) == 0xff000000)
798 /* fsmbi duplicates each bit in the given mask eight times,
799 * using a 16-bit value to initialize a 16-byte quadword.
800 * Each 4-bit nybble of the mask corresponds to a full word
801 * of the result; look at the value and figure out the mask
802 * (replicated for each word in the quadword), and then
803 * form the "select mask" to get the value.
805 if ((ui & 0x000000ff) == 0x000000ff) mask |= 0x1111;
806 if ((ui & 0x0000ff00) == 0x0000ff00) mask |= 0x2222;
807 if ((ui & 0x00ff0000) == 0x00ff0000) mask |= 0x4444;
808 if ((ui & 0xff000000) == 0xff000000) mask |= 0x8888;
809 spe_fsmbi(p, rT, mask);
812 /* The general case: this usually uses two instructions, but
813 * may use only one if the low-order 16 bits of each word are 0.
815 spe_ilhu(p, rT, ui >> 16);
817 spe_iohl(p, rT, ui & 0xffff);
822 * This function is constructed identically to spe_xor_uint() below.
823 * Changes to one should be made in the other.
/* rT = rA & ui.  Uses the narrowest immediate AND (word / halfword /
 * byte) that can represent ui; otherwise loads ui into a scratch
 * register and uses the register form.
 * NOTE(review): the early 'return' / flow-control lines between the
 * immediate cases appear to be elided in this view. */
826 spe_and_uint(struct spe_function *p, int rT, int rA, uint ui)
828 /* If we can, emit a single instruction, either And Byte Immediate
829 * (which uses the same constant across each byte), And Halfword Immediate
830 * (which sign-extends a 10-bit immediate to 16 bits and uses that
831 * across each halfword), or And Word Immediate (which sign-extends
832 * a 10-bit immediate to 32 bits).
834 * Otherwise, we'll need to use a temporary register.
838 /* If the upper 23 bits are all 0s or all 1s, sign extension
839 * will work and we can use And Word Immediate
841 tmp = ui & 0xfffffe00;
842 if (tmp == 0xfffffe00 || tmp == 0) {
843 spe_andi(p, rT, rA, ui & 0x000003ff);
847 /* If the ui field is symmetric along halfword boundaries and
848 * the upper 7 bits of each halfword are all 0s or 1s, we
849 * can use And Halfword Immediate
851 tmp = ui & 0xfe00fe00;
852 if ((tmp == 0xfe00fe00 || tmp == 0) && ((ui >> 16) == (ui & 0x0000ffff))) {
853 spe_andhi(p, rT, rA, ui & 0x000003ff);
857 /* If the ui field is symmetric in each byte, then we can use
858 * the And Byte Immediate instruction.
860 tmp = ui & 0x000000ff;
861 if ((ui >> 24) == tmp && ((ui >> 16) & 0xff) == tmp && ((ui >> 8) & 0xff) == tmp) {
862 spe_andbi(p, rT, rA, tmp);
866 /* Otherwise, we'll have to use a temporary register. */
867 int tmp_reg = spe_allocate_available_register(p);
868 spe_load_uint(p, tmp_reg, ui);
869 spe_and(p, rT, rA, tmp_reg);
870 spe_release_register(p, tmp_reg);
875 * This function is constructed identically to spe_and_uint() above.
876 * Changes to one should be made in the other.
/* rT = rA ^ ui.  Same immediate-width selection strategy as
 * spe_and_uint(), with xori/xorhi/xorbi and a scratch-register
 * fallback.  NOTE(review): the early 'return' / flow-control lines
 * between the cases appear to be elided in this view. */
879 spe_xor_uint(struct spe_function *p, int rT, int rA, uint ui)
881 /* If we can, emit a single instruction, either Exclusive Or Byte
882 * Immediate (which uses the same constant across each byte), Exclusive
883 * Or Halfword Immediate (which sign-extends a 10-bit immediate to
884 * 16 bits and uses that across each halfword), or Exclusive Or Word
885 * Immediate (which sign-extends a 10-bit immediate to 32 bits).
887 * Otherwise, we'll need to use a temporary register.
891 /* If the upper 23 bits are all 0s or all 1s, sign extension
892 * will work and we can use Exclusive Or Word Immediate
894 tmp = ui & 0xfffffe00;
895 if (tmp == 0xfffffe00 || tmp == 0) {
896 spe_xori(p, rT, rA, ui & 0x000003ff);
900 /* If the ui field is symmetric along halfword boundaries and
901 * the upper 7 bits of each halfword are all 0s or 1s, we
902 * can use Exclusive Or Halfword Immediate
904 tmp = ui & 0xfe00fe00;
905 if ((tmp == 0xfe00fe00 || tmp == 0) && ((ui >> 16) == (ui & 0x0000ffff))) {
906 spe_xorhi(p, rT, rA, ui & 0x000003ff);
910 /* If the ui field is symmetric in each byte, then we can use
911 * the Exclusive Or Byte Immediate instruction.
913 tmp = ui & 0x000000ff;
914 if ((ui >> 24) == tmp && ((ui >> 16) & 0xff) == tmp && ((ui >> 8) & 0xff) == tmp) {
915 spe_xorbi(p, rT, rA, tmp);
919 /* Otherwise, we'll have to use a temporary register. */
920 int tmp_reg = spe_allocate_available_register(p);
921 spe_load_uint(p, tmp_reg, ui);
922 spe_xor(p, rT, rA, tmp_reg);
923 spe_release_register(p, tmp_reg);
927 spe_compare_equal_uint(struct spe_function *p, int rT, int rA, uint ui)
929 /* If the comparison value is 9 bits or less, it fits inside a
930 * Compare Equal Word Immediate instruction.
932 if ((ui & 0x000001ff) == ui) {
933 spe_ceqi(p, rT, rA, ui);
935 /* Otherwise, we're going to have to load a word first. */
937 int tmp_reg = spe_allocate_available_register(p);
938 spe_load_uint(p, tmp_reg, ui);
939 spe_ceq(p, rT, rA, tmp_reg);
940 spe_release_register(p, tmp_reg);
945 spe_compare_greater_uint(struct spe_function *p, int rT, int rA, uint ui)
947 /* If the comparison value is 10 bits or less, it fits inside a
948 * Compare Logical Greater Than Word Immediate instruction.
950 if ((ui & 0x000003ff) == ui) {
951 spe_clgti(p, rT, rA, ui);
953 /* Otherwise, we're going to have to load a word first. */
955 int tmp_reg = spe_allocate_available_register(p);
956 spe_load_uint(p, tmp_reg, ui);
957 spe_clgt(p, rT, rA, tmp_reg);
958 spe_release_register(p, tmp_reg);
/**
 * Replicate word 0 of rA across all four word slots of rT.
 */
void
spe_splat(struct spe_function *p, int rT, int rA)
{
   /* Stage the shuffle pattern in a scratch register so this still works
    * when rT == rA. */
   const int pattern = spe_allocate_available_register(p);
   /* 0x00010203 = bytes 0..3, i.e. word 0, repeated into every word by
    * shufb */
   spe_ila(p, pattern, 0x00010203);
   spe_shufb(p, rT, rA, rA, pattern);
   spe_release_register(p, pattern);
}
/** Bitwise complement: rT = ~rA, computed as NOR(rA, rA). */
void
spe_complement(struct spe_function *p, int rT, int rA)
{
   spe_nor(p, rT, rA, rA);
}
/* Copy rA into rT.  Emits either shlqbyi-by-0 (odd pipeline) or
 * ori-with-0 (even pipeline) so back-to-back moves can dual-issue.
 * NOTE(review): the selecting condition is elided in this view --
 * presumably it tests the parity of the current instruction address
 * (p->num_inst); confirm against the full source. */
982 spe_move(struct spe_function *p, int rT, int rA)
984 /* Use different instructions depending on the instruction address
985 * to take advantage of the dual pipelines.
988 spe_shlqbyi(p, rT, rA, 0); /* odd pipe */
990 spe_ori(p, rT, rA, 0); /* even pipe */
/** Clear rT to all zeros by XOR-ing it with itself. */
void
spe_zero(struct spe_function *p, int rT)
{
   spe_xor(p, rT, rT, rT);
}
/* Replicate word 'word' (0..3) of rA across all words of rT.  Word 0
 * uses the shufb pattern 66051 (= 0x00010203, bytes 0..3) directly;
 * other words are first rotated into slot 0 with rotqbyi(4*word).
 * NOTE(review): the word==0/else branching, tmp1 allocation, and tmp1
 * release lines are elided in this view. */
1002 spe_splat_word(struct spe_function *p, int rT, int rA, int word)
1009 spe_ila(p, tmp1, 66051);
1010 spe_shufb(p, rT, rA, rA, tmp1);
1013 /* XXX review this, we may not need the rotqbyi instruction */
1015 int tmp2 = spe_allocate_available_register(p);
1017 spe_ila(p, tmp1, 66051);
1018 spe_rotqbyi(p, tmp2, rA, 4 * word);
1019 spe_shufb(p, rT, tmp2, tmp2, tmp1);
1021 spe_release_register(p, tmp2);
/**
 * Per-element float minimum: rT.f[i] = MIN(rA.f[i], rB.f[i]).
 *
 * fcgt fills the mask with ones where rA > rB and zeros where rA <= rB;
 * selb then takes rA where the mask is 0 and rB where it is 1 -- i.e.
 * the smaller element in every slot.
 *
 * A fresh mask register is always allocated rather than reusing rT:
 * the common "x = min(x, a)" idiom makes rT alias rA or rB.
 */
void
spe_float_min(struct spe_function *p, int rT, int rA, int rB)
{
   const int mask = spe_allocate_available_register(p);
   spe_fcgt(p, mask, rA, rB);
   spe_selb(p, rT, rA, rB, mask);
   spe_release_register(p, mask);
}
/**
 * Per-element float maximum: rT.f[i] = MAX(rA.f[i], rB.f[i]).
 *
 * Identical to spe_float_min() except the rA/rB operands of selb are
 * swapped, so the larger element is selected in each slot.
 */
void
spe_float_max(struct spe_function *p, int rT, int rA, int rB)
{
   const int mask = spe_allocate_available_register(p);
   spe_fcgt(p, mask, rA, rB);
   spe_selb(p, rT, rB, rA, mask);
   spe_release_register(p, mask);
}
1067 #endif /* GALLIUM_CELL */