SH4 FIPR Optimizations

Yo, guys. At like 1AM @ian micheal got me looking at pl_mpeg's audio decoder to see if I could spot any potential gainz... So here is its innermost, hottest audio synthesis loop:

for (int i = 32; i; --i) {
    float u;
    u = pl_fipr(d[0], d[1], d[2], d[3], v1[0], v2[0], v1[128], v2[128]);
    u += pl_fipr(d[4], d[5], d[6], d[7], v1[256], v2[256], v1[384], v2[384]);
    u += pl_fipr(d[8], d[9], d[10], d[11], v1[512], v2[512], v1[640], v2[640]);
    u += pl_fipr(d[12], d[13], d[14], d[15], v1[768], v2[768], v1[896], v2[896]);
    d += 32;
    v1++;
    v2++;
    *out++ = (short)((int)u >> 16);
}

Which... you'd think would be preeeetty efficient, right? 4 back-to-back FIPRs? I mean, it is hella gainzy compared to not using FIPR.
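
(For reference, since pl_fipr itself isn't shown here: a wrapper like it on SH4 usually looks something like the sketch below. This is a made-up name and a minimal reconstruction, NOT pl_mpeg's actual code. The register pinning is what lines the 8 floats up as fv0/fv4, and fipr fv0, fv4 drops the dot product into fr7, the last register of the destination vector.)

static inline float shz_fipr(float ax, float ay, float az, float aw,
                             float bx, float by, float bz, float bw) {
    /* Pin the operands to fr0-fr7 so they form vectors fv0 and fv4. */
    register float rx1 __asm__("fr0") = ax;
    register float ry1 __asm__("fr1") = ay;
    register float rz1 __asm__("fr2") = az;
    register float rw1 __asm__("fr3") = aw;
    register float rx2 __asm__("fr4") = bx;
    register float ry2 __asm__("fr5") = by;
    register float rz2 __asm__("fr6") = bz;
    register float rw2 __asm__("fr7") = bw;

    __asm__("fipr fv0, fv4"
            : "+f" (rw2)  /* result lands in fr7 */
            : "f" (rx1), "f" (ry1), "f" (rz1), "f" (rw1),
              "f" (rx2), "f" (ry2), "f" (rz2));
    return rw2;
}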

But there are two problems with back-to-back FIPRs that I wanna teach anyone interested about:

1) Very often one of the vector arguments stays constant between FIPR calls, but unfortunately the compiler is too dumb to notice and reloads all 8 registers between calls regardless.

  • LUCKILY every argument to these FIPRs is unique, so this one doesn't apply here... but very often it's a perf destroyer. (There's a sketch of what fixing it looks like further down, right before the new routine.)

2) THE COMPILER CANNOT PIPELINE FIPR FOR SHIT.

  • VERY applicable here. You know what the ASM looks like for these FIPR calls? Something like this:
! load first vector arg into fv0 (nothing wrong with this)
fmov.s @%[d]+, fr0
fmov.s @%[d]+, fr1
fmov.s @%[d]+, fr2
fmov.s @%[d]+, fr3

! load second vector arg into fv4 (nothing wrong with this)
fmov.s @%[v1], fr4
add    %[offset], %[v1]
fmov.s @%[v2], fr5
add    %[offset], %[v2]
fmov.s @%[v1], fr6
fmov.s @%[v2], fr7

! issue actual FIPR calculation
fipr fv0, fv4

! THE VERY NEXT INSTRUCTION TRIES TO STORE THE RESULT
fmov.s fr7, @%[result] ! PIPELINE STALL!!!!

Now this is very very bad. FIPR has 4-5 cycles of latency, so for every fucking FIPR call, since the very next instruction tries to use the result before it's been calculated, the entire pipeline must stall waiting for the result... FOR EVERY FIPR CALL. So you're losing MASSIVE perf benefits there. The solution? You have to pipeline your FIPRs so that while the previous FIPR call is still calculating, you're loading up and issuing the next FIPR call.
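
(Quick illustration of problem 1 from the list, since it bites so often: below is a hedged sketch, with made-up names and the same raw-string inline ASM style as the real routine further down, that fuses two dot products sharing one constant vector. The shared vector gets loaded into fv0 exactly once instead of being reloaded per call, and it shows the pipelining fix in miniature: the first result isn't stored until after the second FIPR has been issued.)

/* HYPOTHETICAL example, not from pl_mpeg: computes q.a and q.b with one
   load of q into fv0. Separate wrapper calls would reload q both times. */
static inline void shz_fipr_shared(const float *q, const float *a,
                                   const float *b, float *qa, float *qb) {
    asm volatile(R"(
        frchg                   ! back bank, same trick as the main routine
        fmov.s  @%[q]+, fr0     ! q loaded ONCE into fv0
        fmov.s  @%[q]+, fr1
        fmov.s  @%[q]+, fr2
        fmov.s  @%[q]+, fr3
        fmov.s  @%[a]+, fr4     ! a into fv4
        fmov.s  @%[a]+, fr5
        fmov.s  @%[a]+, fr6
        fmov.s  @%[a]+, fr7
        fipr    fv0, fv4        ! q . a -> fr7, but don't read it yet
        fmov.s  @%[b]+, fr8     ! load b into fv8 while the FIPR cooks
        fmov.s  @%[b]+, fr9
        fmov.s  @%[b]+, fr10
        fmov.s  @%[b]+, fr11
        fipr    fv0, fv8        ! q . b -> fr11, fv0 reused as-is
        fmov.s  fr7, @%[qa]     ! first result is ready by now
        fmov.s  fr11, @%[qb]
        frchg
    )"
    : [q] "+r" (q), [a] "+r" (a), [b] "+r" (b)
    : [qa] "r" (qa), [qb] "r" (qb)
    : "memory");
}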

So I wrote a new routine that replaces that inner loop body with manually pipelined FIPR calls... This should be way better:

for (int i = 32; i; --i) {
#if 0 // Old FIPR path which didn't pipeline for shit.
    float u;
    u = pl_fipr(d[0], d[1], d[2], d[3], v1[0], v2[0], v1[128], v2[128]);
    u += pl_fipr(d[4], d[5], d[6], d[7], v1[256], v2[256], v1[384], v2[384]);
    u += pl_fipr(d[8], d[9], d[10], d[11], v1[512], v2[512], v1[640], v2[640]);
    u += pl_fipr(d[12], d[13], d[14], d[15], v1[768], v2[768], v1[896], v2[896]);
#else // New hand-written FIPR path with manual pipelining
    float u = shz_pl_inner_loop(d, v1, v2);
#endif
    d += 32;
    v1++;
    v2++;
    *out++ = (short)((int)u >> 16);
}

Where the new implementation is this inline ASM:

#include <stdint.h> /* uint32_t */

__always_inline
float shz_pl_inner_loop(const float *d, const float *v1, const float *v2) {
    float fp_scratch[2];
    float *r = fp_scratch; /* the "+r" asm operand needs a scalar lvalue, not the array name */
    uint32_t int_scratch;

    asm volatile(R"(
        ! Swap to back-bank so we don't need to clobber any FP regs.
        frchg

        ! Load first vector into fv0 for the first FIPR, interleaving the int
        ! setup: s = (64+64)<<2 = 512 bytes (the 128-float stride between
        ! v1/v2 taps), and r moves 8 bytes past the scratch base so the
        ! pre-decrement stores below land on fp_scratch[1], then fp_scratch[0].
        xor     %[s], %[s]
        fmov.s  @%[d]+, fr0
        add     #64, %[s]
        fmov.s  @%[d]+, fr1
        add     #64, %[s]
        fmov.s  @%[d]+, fr2
        shll2   %[s]
        fmov.s  @%[d]+, fr3
        add     #8, %[r]

        ! Load second vector into fv4 for first FIPR
        fmov.s  @%[v1], fr4
        add     %[s], %[v1]
        fmov.s  @%[v2], fr5
        add     %[s], %[v2]
        fmov.s  @%[v1], fr6
        add     %[s], %[v1]
        fmov.s  @%[v2], fr7
        add     %[s], %[v2]

        ! Issue first FIPR
        fipr    fv0, fv4
        ! DO NOT SAVE THE RESULT YET

        ! Load first vector into fv8 for second FIPR.
        fmov.s  @%[d]+, fr8
        fmov.s  @%[d]+, fr9
        fmov.s  @%[d]+, fr10
        fmov.s  @%[d]+, fr11

        ! Load second vector into fv12 for second FIPR.
        fmov.s  @%[v1], fr12
        add     %[s], %[v1]
        fmov.s  @%[v2], fr13
        add     %[s], %[v2]
        fmov.s  @%[v1], fr14
        add     %[s], %[v1]
        fmov.s  @%[v2], fr15
        add     %[s], %[v2]

        ! Issue second FIPR
        fipr    fv8, fv12
        ! Store result from FIRST FIPR now that it's ready
        fmov.s  fr7, @-%[r]

        ! Load first vector into fv0 for third FIPR
        fmov.s  @%[d]+, fr0
        fmov.s  @%[d]+, fr1
        fmov.s  @%[d]+, fr2
        fmov.s  @%[d]+, fr3

        ! Load second vector into fv4 for third FIPR
        fmov.s  @%[v1], fr4
        add     %[s], %[v1]
        fmov.s  @%[v2], fr5
        add     %[s], %[v2]
        fmov.s  @%[v1], fr6
        add     %[s], %[v1]
        fmov.s  @%[v2], fr7
        add     %[s], %[v2]
        
        ! Issue third FIPR
        fipr    fv0, fv4
        ! Store result from SECOND FIPR now that it's ready.
        fmov.s  fr15, @-%[r]

        ! Load first vector into fv8 for fourth FIPR
        fmov.s  @%[d]+, fr8
        fmov.s  @%[d]+, fr9
        fmov.s  @%[d]+, fr10
        fmov.s  @%[d]+, fr11

        ! Load second vector into fv12 for fourth FIPR
        fmov.s  @%[v1], fr12
        add     %[s], %[v1]
        fmov.s  @%[v2], fr13
        add     %[s], %[v2]
        fmov.s  @%[v1], fr14
        fmov.s  @%[v2], fr15

        ! Issue fourth FIPR
        fipr    fv8, fv12

        ! Add up the first three results while the fourth FIPR is in flight
        fmov.s  @%[r]+, fr0     ! fr0 = second FIPR's result
        fmov.s  @%[r]+, fr1     ! fr1 = first FIPR's result
        fadd    fr1, fr0
        fadd    fr7, fr0        ! third FIPR's result is still live in fr7
        add     #-8, %[r]       ! rewind r to fp_scratch[0] for the store

        ! Add result from fourth FIPR now that it's ready
        fadd    fr15, fr0

        ! Store final result
        fmov.s  fr0, @%[r]

        ! Swap back to primary FP register bank
        frchg
    )"
    : [d] "+&r" (d), [v1] "+r" (v1), [v2] "+r" (v2),
      [r] "+r" (r), [s] "=&r" (int_scratch),
      "=m" (*fp_scratch));

    return fp_scratch[0];
}
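
(If you wanna sanity-check a routine like this, here's a hedged little harness — made-up names, plain C reference, nothing from pl_mpeg — that compares it against the naive scalar math. Note that FIPR trades precision for speed, so compare with a tolerance rather than bit-exactly:)

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

/* Plain C reference for the same 16-tap sum the asm computes:
   u = sum over k = 0..7 of d[2k]*v1[k*128] + d[2k+1]*v2[k*128]. */
static float ref_inner_loop(const float *d, const float *v1, const float *v2) {
    float u = 0.0f;
    for (int k = 0; k < 8; ++k)
        u += d[2 * k] * v1[k * 128] + d[2 * k + 1] * v2[k * 128];
    return u;
}

int main(void) {
    static float d[16], v1[897], v2[897]; /* v1[896]/v2[896] are the last taps */
    for (int i = 0; i < 16; ++i)  d[i]  = (float)rand() / RAND_MAX - 0.5f;
    for (int i = 0; i < 897; ++i) v1[i] = (float)rand() / RAND_MAX - 0.5f;
    for (int i = 0; i < 897; ++i) v2[i] = (float)rand() / RAND_MAX - 0.5f;

    float got  = shz_pl_inner_loop(d, v1, v2);
    float want = ref_inner_loop(d, v1, v2);
    /* FIPR is a reduced-precision dot product, so allow some slop. */
    printf("%s: got %f, want %f\n",
           fabsf(got - want) < 1e-3f ? "OK" : "MISMATCH", got, want);
    return 0;
}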