ARM: Use fewer registers in NEON put_pixels _y2 and _xy2
author    conrad <conrad@9553f0bf-9b14-0410-a0b8-cfaf0461ba5b>
          Wed, 29 Apr 2009 11:38:09 +0000 (11:38 +0000)
committer conrad <conrad@9553f0bf-9b14-0410-a0b8-cfaf0461ba5b>
          Wed, 29 Apr 2009 11:38:09 +0000 (11:38 +0000)
Approved by Mans on IRC

git-svn-id: file:///var/local/repositories/ffmpeg/trunk@18713 9553f0bf-9b14-0410-a0b8-cfaf0461ba5b
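
The diff below replaces the two-pointer walk (r1 plus ip = r1 + stride, both
advanced by the doubled stride held in lr) with a single pointer advanced by
the stride itself, so ip and lr stay free and the push {lr}/pop {pc} pair
becomes a plain bx lr. For reference, here is a minimal scalar sketch of what
the _y2 macros compute, assuming the usual FFmpeg half-pel semantics; the
function name and signature are illustrative, not FFmpeg's own:

#include <stdint.h>

/* Scalar model of pixels16_y2 with vrhadd (rounding average):
 * each output row is the average of two vertically adjacent
 * source rows.  One source pointer stepping by `stride` is
 * enough -- the same observation the patch applies to the
 * NEON loop. */
static void put_pixels16_y2_ref(uint8_t *dst, const uint8_t *src,
                                int stride, int h)
{
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 16; x++)
            dst[x] = (src[x] + src[x + stride] + 1) >> 1;
        dst += stride;
        src += stride;
    }
}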

libavcodec/arm/dsputil_neon_s.S

index a55e05f3dc4d2fd20221743d64e2520808ccb2d7..303b11c089fd8c8d8067538050c98c180a9d182e 100644 (file)
         .endm
 
         .macro pixels16_y2 vhadd=vrhadd.u8
-        push            {lr}
-        add             ip,  r1,  r2
-        lsl             lr,  r2,  #1
-        vld1.64         {d0, d1},  [r1], lr
-        vld1.64         {d2, d3},  [ip], lr
+        vld1.64         {d0, d1},  [r1], r2
+        vld1.64         {d2, d3},  [r1], r2
 1:      subs            r3,  r3,  #2
         \vhadd          q2,  q0,  q1
-        vld1.64         {d0, d1},  [r1],      lr
+        vld1.64         {d0, d1},  [r1], r2
         \vhadd          q3,  q0,  q1
-        vld1.64         {d2, d3},  [ip],      lr
+        vld1.64         {d2, d3},  [r1], r2
         pld             [r1]
-        pld             [ip]
+        pld             [r1, r2]
         vst1.64         {d4, d5},  [r0,:128], r2
         vst1.64         {d6, d7},  [r0,:128], r2
         bne             1b
-        pop             {pc}
+        bx              lr
         .endm
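
With a single source pointer, the prefetch of the second row becomes the
offset form pld [r1, r2] instead of a plain pld through ip. A rough C
equivalent of that pair of hints, using GCC/Clang's __builtin_prefetch purely
as illustration (the original code is assembly, not C):

#include <stdint.h>

/* Hint the next two source rows into cache, mirroring
 * pld [r1] / pld [r1, r2] in the patched loop. */
static inline void prefetch_rows(const uint8_t *src, int stride)
{
    __builtin_prefetch(src);           /* row at r1      */
    __builtin_prefetch(src + stride);  /* row at r1 + r2 */
}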
 
         .macro pixels16_xy2 vshrn=vrshrn.u16 no_rnd=0
-        push            {lr}
-        lsl             lr,  r2,  #1
-        add             ip,  r1,  r2
-        vld1.64         {d0-d2},   [r1], lr
-        vld1.64         {d4-d6},   [ip], lr
+        vld1.64         {d0-d2},   [r1], r2
+        vld1.64         {d4-d6},   [r1], r2
 .if \no_rnd
         vmov.i16        q13, #1
 .endif
         pld             [r1]
-        pld             [ip]
+        pld             [r1, r2]
         vext.8          q1,  q0,  q1,  #1
         vext.8          q3,  q2,  q3,  #1
         vaddl.u8        q8,  d0,  d2
         vaddl.u8        q9,  d4,  d6
         vaddl.u8        q11, d5,  d7
 1:      subs            r3,  r3,  #2
-        vld1.64         {d0-d2},   [r1], lr
+        vld1.64         {d0-d2},   [r1], r2
         vadd.u16        q12, q8,  q9
         pld             [r1]
 .if \no_rnd
 .endif
         \vshrn          d29, q1,  #2
         vaddl.u8        q8,  d0,  d30
-        vld1.64         {d2-d4},   [ip], lr
+        vld1.64         {d2-d4},   [r1], r2
         vaddl.u8        q10, d1,  d31
         vst1.64         {d28,d29}, [r0,:128], r2
         vadd.u16        q12, q8,  q9
-        pld             [ip]
+        pld             [r1, r2]
 .if \no_rnd
         vadd.u16        q12, q12, q13
 .endif
         vaddl.u8        q11, d3,  d5
         vst1.64         {d30,d31}, [r0,:128], r2
         bgt             1b
-        pop             {pc}
+        bx              lr
         .endm
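
For reference, a scalar sketch of what the _xy2 macros compute: each output
pixel averages a 2x2 source neighbourhood. The rounding variant (vrshrn)
works out to (sum + 2) >> 2, while the no_rnd variant adds the constant 1
(vmov.i16 ... #1) and truncates with vshrn, giving (sum + 1) >> 2. Name and
signature are again illustrative, not FFmpeg's:

#include <stdint.h>

/* Scalar model of pixels16_xy2: half-pel interpolation in both
 * directions.  `no_rnd` selects the smaller rounding bias,
 * matching the vshrn + "vmov.i16 q13, #1" path in the macro. */
static void put_pixels16_xy2_ref(uint8_t *dst, const uint8_t *src,
                                 int stride, int h, int no_rnd)
{
    int bias = no_rnd ? 1 : 2;
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 16; x++)
            dst[x] = (src[x] + src[x + 1] +
                      src[x + stride] + src[x + stride + 1] + bias) >> 2;
        dst += stride;
        src += stride;
    }
}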
 
         .macro pixels8
         .endm
 
         .macro pixels8_y2 vhadd=vrhadd.u8
-        push            {lr}
-        add             ip,  r1,  r2
-        lsl             lr,  r2,  #1
-        vld1.64         {d0},      [r1], lr
-        vld1.64         {d1},      [ip], lr
+        vld1.64         {d0},      [r1], r2
+        vld1.64         {d1},      [r1], r2
 1:      subs            r3,  r3,  #2
         \vhadd          d4,  d0,  d1
-        vld1.64         {d0},      [r1],     lr
+        vld1.64         {d0},      [r1], r2
         \vhadd          d5,  d0,  d1
-        vld1.64         {d1},      [ip],     lr
+        vld1.64         {d1},      [r1], r2
         pld             [r1]
-        pld             [ip]
+        pld             [r1, r2]
         vst1.64         {d4},      [r0,:64], r2
         vst1.64         {d5},      [r0,:64], r2
         bne             1b
-        pop             {pc}
+        bx              lr
         .endm
 
         .macro pixels8_xy2 vshrn=vrshrn.u16 no_rnd=0
-        push            {lr}
-        lsl             lr,  r2,  #1
-        add             ip,  r1,  r2
-        vld1.64         {d0, d1},  [r1], lr
-        vld1.64         {d2, d3},  [ip], lr
+        vld1.64         {d0, d1},  [r1], r2
+        vld1.64         {d2, d3},  [r1], r2
 .if \no_rnd
         vmov.i16        q11, #1
 .endif
         pld             [r1]
-        pld             [ip]
+        pld             [r1, r2]
         vext.8          d4,  d0,  d1,  #1
         vext.8          d6,  d2,  d3,  #1
         vaddl.u8        q8,  d0,  d4
         vaddl.u8        q9,  d2,  d6
 1:      subs            r3,  r3,  #2
-        vld1.64         {d0, d1},  [r1], lr
+        vld1.64         {d0, d1},  [r1], r2
         pld             [r1]
         vadd.u16        q10, q8,  q9
         vext.8          d4,  d0,  d1,  #1
 .endif
         vaddl.u8        q8,  d0,  d4
         \vshrn          d5,  q10, #2
-        vld1.64         {d2, d3},  [ip], lr
+        vld1.64         {d2, d3},  [r1], r2
         vadd.u16        q10, q8,  q9
-        pld             [ip]
+        pld             [r1, r2]
 .if \no_rnd
         vadd.u16        q10, q10, q11
 .endif
         vaddl.u8        q9,  d2,  d6
         vst1.64         {d7},      [r0,:64], r2
         bgt             1b
-        pop             {pc}
+        bx              lr
         .endm
 
         .macro pixfunc pfx name suf rnd_op args:vararg