/* midct.s -- RSP 8x8 inverse DCT */

#include <rsp.h>
#include <rcp.h>
#include <sptask.h>

	.base           TASKBASE

/* Vector register aliases for the 1-D transform inputs.
   p0-p3 hold the even-numbered input rows (loaded from rows 0,4,2,6 in
   that order) and q0-q3 the odd-numbered rows (1,5,3,7); the even and
   odd halves are transformed independently and recombined by the
   butterfly stage in idct_loop. */
.name	p0,	$v0
.name	q0,	$v1
.name	p2,	$v2
.name	q2,	$v3
.name	p1,	$v4
.name	q1,	$v5
.name	p3,	$v6
.name	q3,	$v7


/* Transform-matrix registers, loaded once at idct_start:
   a00/a08 are the two 8-coefficient halves of the matrix applied to
   the even rows; a10/a18 the halves applied to the odd rows. */
.name	a00,	$v24
.name	a08,	$v25
.name	a10,	$v26
.name	a18,	$v27

/* dum catches the discarded per-step results of vmulf/vmacf chains
   (only the running accumulator matters between steps); consts holds
   the scale/round constants indexed by the #defines below. */
.name	dum,	$v31
.name	consts,	$v30

/* Element indices into the consts vector.  The values themselves are
   loaded from caddr at idct_start; presumably SCALEDN = 1<<(15-6) =
   0x200 and ONE = 1 (see the rounding derivation before the final
   scaling pass) -- confirm against the constant table at caddr. */
#define SCALEUP	0
#define ZERO	1
#define NEG1	2
#define SCALEDN	3
#define ONE	4

/* Scalar registers holding DMEM addresses of the two halves of each
   transform matrix and of the constants vector.  They are assumed to
   be initialized before idct_start -- set up outside this file. */
.name	amat0l,	$20
.name	amat0h,	$21
.name	amat1l,	$22
.name	amat1h,	$23

.name	caddr,	$24

/* DMEM addresses of the eight 16-byte rows of the 8x8 coefficient
   block; read at the top of idct_loop and overwritten by the final
   sqv stores.  Initialized outside this file. */
.name	row0,	$1
.name	row1,	$2
.name	row2,	$3
.name	row3,	$4
.name	row4,	$5
.name	row5,	$6
.name	row6,	$7
.name	row7,	$8

/* Working vectors: x00-x03 / x10-x13 receive the even/odd half
   results of each 1-D pass; x20-x27 hold the loaded rows and then
   the butterfly outputs. */
.name	x00,	$v8
.name	x01,	$v9
.name	x02,	$v10
.name	x03,	$v11

.name	x10,	$v12
.name	x11,	$v13
.name	x12,	$v14
.name	x13,	$v15

.name	x20,	$v16
.name	x21,	$v17
.name	x22,	$v18
.name	x23,	$v19
.name	x24,	$v20
.name	x25,	$v21
.name	x26,	$v22
.name	x27,	$v23

/* idct_start: one-time setup.  Loads the two 8-coefficient halves of
   each transform matrix and the constants vector from DMEM.  The
   address registers (amat0l/h, amat1l/h, caddr) are assumed to hold
   valid DMEM addresses already -- TODO confirm against the caller. */
idct_start:	lqv a00,    0(amat0l)
		lqv a08,    0(amat0h)

		lqv a10,    0(amat1l)
		lqv a18,    0(amat1h)

		lqv consts, 0(caddr)

	/* nop padding -- presumably covering load latency before the
	   first use in idct_loop; confirm against RSP hazard rules. */
	nop
	nop
	nop
	nop

/* idct_loop: one complete 2-D 8x8 inverse DCT per iteration.
     1. 1-D transform of the pre-scaled input rows (even/odd split).
     2. Register-file transpose via stv/ltv.
     3. Second 1-D pass over the other axis.
     4. Symmetric rounding and >>6 scaling.
     5. Transpose back and store over the original rows.
   NOTE(review): the jump at the bottom has no exit condition -- the
   loop runs forever.  Presumably the RSP task is halted externally or
   the row pointers are rewritten by code outside this view; confirm. */
idct_loop:
	nop
	nop
	nop
	nop
	nop

	/* Even-numbered input rows (0,4,2,6) ... */
	lqv x20, 0(row0)
	lqv x21, 0(row4)
	lqv x22, 0(row2)
	lqv x23, 0(row6)

	/* ... and odd-numbered input rows (1,5,3,7). */
	lqv x24, 0(row1)
	lqv x25, 0(row5)
	lqv x26, 0(row3)
	lqv x27, 0(row7)

	/* Pre-scale every coefficient by consts[SCALEUP] (vmudh:
	   signed integer multiply) so the Q15 vmulf/vmacf products
	   below keep enough fraction bits.  Presumably SCALEUP is a
	   power of two -- confirm against the constant table. */
	vmudh p0, x20, consts[SCALEUP]
	vmudh p1, x21, consts[SCALEUP]
	vmudh p2, x22, consts[SCALEUP]
	vmudh p3, x23, consts[SCALEUP]

	vmudh q0, x24, consts[SCALEUP]
	vmudh q1, x25, consts[SCALEUP]
	vmudh q2, x26, consts[SCALEUP]
	vmudh q3, x27, consts[SCALEUP]

	/* Even half: each vmulf + 3*vmacf group forms, per column
	   lane, a 4-term dot product of p0..p3 with one set of a00/a08
	   scalars.  Intermediate destinations are discarded into dum
	   (only the running accumulator matters); the final vmacf
	   writes the clamped accumulator result to x0n. */
	vmulf dum, p0, a00[0]
	vmacf dum, p1, a00[1]
	vmacf dum, p2, a00[2]
	vmacf x00, p3, a00[3]

	vmulf dum, p0, a00[4]
	vmacf dum, p1, a00[5]
	vmacf dum, p2, a00[6]
	vmacf x01, p3, a00[7]

	vmulf dum, p0, a08[0]
	vmacf dum, p1, a08[1]
	vmacf dum, p2, a08[2]
	vmacf x02, p3, a08[3]

	vmulf dum, p0, a08[4]
	vmacf dum, p1, a08[5]
	vmacf dum, p2, a08[6]
	vmacf x03, p3, a08[7]


	/* Odd half: same dot-product pattern, q0..q3 against the
	   a10/a18 matrix, producing x10..x13. */
	vmulf dum, q0, a10[0]		/* 2nd Matrix */
	vmacf dum, q1, a10[1]
	vmacf dum, q2, a10[2]
	vmacf x10, q3, a10[3]

	vmulf dum, q0, a10[4]
	vmacf dum, q1, a10[5]
	vmacf dum, q2, a10[6]
	vmacf x11, q3, a10[7]

	vmulf dum, q0, a18[0]
	vmacf dum, q1, a18[1]
	vmacf dum, q2, a18[2]
	vmacf x12, q3, a18[3]

	vmulf dum, q0, a18[4]
	vmacf dum, q1, a18[5]
	vmacf dum, q2, a18[6]
	vmacf x13, q3, a18[7]

	/* Butterfly recombination of the even (x00-x03) and odd
	   (x10-x13) halves: sums feed x20-x23, differences x24-x27. */
	vadd x20, x00, x13		/* Butterflies with no rounding */
	vadd x21, x01, x12
	vadd x22, x02, x11
	vadd x23, x03, x10
	vsub x24, x03, x10
	vsub x25, x02, x11
	vsub x26, x01, x12
	vsub x27, x00, x13

		/* Transpose */

/* Scratch DMEM addresses used as stv/ltv targets for the transpose;
   initialized outside this file -- presumably eight consecutive
   16-byte rows.  TODO confirm. */
.name	at0,	$10
.name	at1,	$11
.name	at2,	$12
.name	at3,	$13
.name	at4,	$14
.name	at5,	$15
.name	at6,	$16
.name	at7,	$17

	nop
	nop
	nop
	nop

	/* stv writes each vector as a diagonal "slice"; the stepped
	   element offsets 0,2,4,...,14 rotate successive rows so the
	   ltv loads below gather columns -- the usual RSP in-register
	   transpose idiom.  (Element numbering not re-derived here;
	   confirm against the RSP ltv/stv specification.) */
	stv	x20[0], 0(at0)
	stv	x21[2], 0(at1)
	stv	x22[4], 0(at2)
	stv	x23[6], 0(at3)
	stv	x24[8], 0(at4)
	stv	x25[10], 0(at5)
	stv	x26[12], 0(at6)
	stv	x27[14], 0(at7)

	nop
	nop
	nop
	nop

	/* Reload with complementary descending element offsets,
	   leaving the transposed data directly in p0,q0,p2,q2,p1,q1,
	   p3,q3 -- the even/odd register layout pass 2 consumes. */
	ltv	p0[0], 0(at0)
	ltv	q0[14], 0(at1)
	ltv	p2[12], 0(at2)
	ltv	q2[10], 0(at3)
	ltv	p1[8], 0(at4)
	ltv	q1[6], 0(at5)
	ltv	p3[4], 0(at6)
	ltv	q3[2], 0(at7)

		/* 2nd Pass */

	nop
	nop
	nop
	nop

	/* Second 1-D pass: identical dot-product structure over the
	   transposed data.  No SCALEUP pre-scale this time -- the
	   pass-1 scaling already supplied the extra fraction bits. */
	vmulf dum, p0, a00[0]
	vmacf dum, p1, a00[1]
	vmacf dum, p2, a00[2]
	vmacf x00, p3, a00[3]

	vmulf dum, p0, a00[4]
	vmacf dum, p1, a00[5]
	vmacf dum, p2, a00[6]
	vmacf x01, p3, a00[7]

	vmulf dum, p0, a08[0]
	vmacf dum, p1, a08[1]
	vmacf dum, p2, a08[2]
	vmacf x02, p3, a08[3]

	vmulf dum, p0, a08[4]
	vmacf dum, p1, a08[5]
	vmacf dum, p2, a08[6]
	vmacf x03, p3, a08[7]


	vmulf dum, q0, a10[0]		/* 2nd Matrix */
	vmacf dum, q1, a10[1]
	vmacf dum, q2, a10[2]
	vmacf x10, q3, a10[3]

	vmulf dum, q0, a10[4]
	vmacf dum, q1, a10[5]
	vmacf dum, q2, a10[6]
	vmacf x11, q3, a10[7]

	vmulf dum, q0, a18[0]
	vmacf dum, q1, a18[1]
	vmacf dum, q2, a18[2]
	vmacf x12, q3, a18[3]

	vmulf dum, q0, a18[4]
	vmacf dum, q1, a18[5]
	vmacf dum, q2, a18[6]
	vmacf x13, q3, a18[7]

	vadd x20, x00, x13		/* Butterflies with no rounding */
	vadd x21, x01, x12
	vadd x22, x02, x11
	vadd x23, x03, x10
	vsub x24, x03, x10
	vsub x25, x02, x11
	vsub x26, x01, x12
	vsub x27, x00, x13

/*

	Final Rounding and Scaling
*/

/* Reuse the pass-result registers x00..x13 (now dead) as scratch for
   the per-lane sign-correction terms. */
#define bit0	x00
#define bit1	x01
#define bit2	x02
#define bit3	x03

#define bit4	x10
#define bit5	x11
#define bit6	x12
#define bit7	x13

	/*  The x2's now have 6 bits of fraction that needs to be
	    properly rounded.  Symmetric rounding is very important
	    here since a bias will affect about 1% (1/128) of the
	    values.

	    The following gives symmetric rounding:

	    if( x2 < 0 )
		result = (x2 + 0x1f) >> 6;
	    else
		result = (x2 + 0x20) >> 6;
	    
	    I have implemented this via:

	    accum = 0
	    accum += x2*(-1)
	    bit   = accum>>15;	(-1 if x2 > 0, else 0 )

	    accum = (x2 + 0x20) * (1<<(15-6));
	    accum += bit
	    result = accum >> 15;

	    NOTE(review): the +0x20 term is presumably supplied by
	    vmulf's implicit +0.5 accumulator rounding bias combined
	    with SCALEDN = 1<<(15-6); confirm against the RSP vmulf
	    specification and the constant table.
	*/

	/* bit_n = -1 where x2n > 0, else 0: vmudh by ZERO clears the
	   accumulator, then vmacf folds in x2n * (-1). */
	vmudh bit0, x20, consts[ZERO]
	vmacf bit0, x20, consts[NEG1]	/* Sets bit0 to -1 if x20>0 */
	vmudh bit1, x21, consts[ZERO]
	vmacf bit1, x21, consts[NEG1]
	vmudh bit2, x22, consts[ZERO]
	vmacf bit2, x22, consts[NEG1]
	vmudh bit3, x23, consts[ZERO]
	vmacf bit3, x23, consts[NEG1]
	vmudh bit4, x24, consts[ZERO]
	vmacf bit4, x24, consts[NEG1]
	vmudh bit5, x25, consts[ZERO]
	vmacf bit5, x25, consts[NEG1]
	vmudh bit6, x26, consts[ZERO]
	vmacf bit6, x26, consts[NEG1]
	vmudh bit7, x27, consts[ZERO]
	vmacf bit7, x27, consts[NEG1]

	/* result = round(x2n >> 6): vmulf seeds the accumulator with
	   x2n * SCALEDN (plus the implicit rounding bias), then vmacf
	   adds bit_n and writes the clamped result back to x2n. */
	vmulf dum,  x20, consts[SCALEDN]
	vmacf x20, bit0, consts[ONE]	/* Decrements (x20>>6) if x20>0 */
	vmulf dum,  x21, consts[SCALEDN]
	vmacf x21, bit1, consts[ONE]
	vmulf dum,  x22, consts[SCALEDN]
	vmacf x22, bit2, consts[ONE]
	vmulf dum,  x23, consts[SCALEDN]
	vmacf x23, bit3, consts[ONE]
	vmulf dum,  x24, consts[SCALEDN]
	vmacf x24, bit4, consts[ONE]
	vmulf dum,  x25, consts[SCALEDN]
	vmacf x25, bit5, consts[ONE]
	vmulf dum,  x26, consts[SCALEDN]
	vmacf x26, bit6, consts[ONE]
	vmulf dum,  x27, consts[SCALEDN]
	vmacf x27, bit7, consts[ONE]

	nop
	nop
	nop
	nop

		/* Transpose */

	/* Second transpose: identical stv/ltv pattern to the first,
	   restoring row order before the final stores. */
	stv	x20[0], 0(at0)
	stv	x21[2], 0(at1)
	stv	x22[4], 0(at2)
	stv	x23[6], 0(at3)
	stv	x24[8], 0(at4)
	stv	x25[10], 0(at5)
	stv	x26[12], 0(at6)
	stv	x27[14], 0(at7)
	
	nop
	nop
	nop
	nop

	ltv	p0[0], 0(at0)
	ltv	q0[14], 0(at1)
	ltv	p2[12], 0(at2)
	ltv	q2[10], 0(at3)
	ltv	p1[8], 0(at4)
	ltv	q1[6], 0(at5)
	ltv	p3[4], 0(at6)
	ltv	q3[2], 0(at7)

	nop
	nop
	nop
	nop

	/* Store the finished rows back over the input block:
	   p0,q0,p2,q2,p1,q1,p3,q3 map to rows 0..7 in order. */
	sqv	p0[0], 0(row0)
	sqv	q0[0], 0(row1)
	sqv	p2[0], 0(row2)
	sqv	q2[0], 0(row3)
	sqv	p1[0], 0(row4)
	sqv	q1[0], 0(row5)
	sqv	p3[0], 0(row6)
	sqv	q3[0], 0(row7)

	nop
	nop
	nop
	nop

		/* Save it back or blend in reference */
	
	/* nop placeholder -- presumably where DMA-out or reference-
	   frame blending would be inserted; confirm. */
	nop
	nop
	nop
	nop
	nop

	/* Loop back unconditionally; the nop fills the branch delay
	   slot.  See the NOTE at idct_loop about the missing exit. */
	j	idct_loop
	nop
	nop
	nop