From ccde1257952d2c073e51ecba6180060570ffa41f Mon Sep 17 00:00:00 2001 From: Martin Storsjo Date: Fri, 13 Apr 2012 14:16:54 +0300 Subject: [PATCH] avcenc: Properly indent assembly blocks Also line break multiline assembly blocks - previously they were virtually unreadable. Change-Id: Icb269909b78746e26b28ab7dcb6979c4655a0b0c --- .../codecs/avc/enc/src/sad_halfpel_inline.h | 16 +++++- .../libstagefright/codecs/avc/enc/src/sad_inline.h | 50 +++++++++++++++--- .../codecs/avc/enc/src/sad_mb_offset.h | 60 ++++++++++++++++++---- 3 files changed, 108 insertions(+), 18 deletions(-) diff --git a/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h b/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h index bb4a5108c1..4eb9b004e9 100644 --- a/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h +++ b/media/libstagefright/codecs/avc/enc/src/sad_halfpel_inline.h @@ -77,14 +77,26 @@ extern "C" __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2) { -__asm__ volatile("rsbs %1, %1, %2, asr #1\n\trsbmi %1, %1, #0\n\tadd %0, %0, %1": "=r"(sad), "=r"(tmp): "r"(tmp2)); + __asm__ volatile( + "rsbs %1, %1, %2, asr #1\n\t" + "rsbmi %1, %1, #0\n\t" + "add %0, %0, %1" + : "=r"(sad), "=r"(tmp) + : "r"(tmp2) + ); return sad; } __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2) { -__asm__ volatile("rsbs %1, %2, %1, asr #2\n\trsbmi %1, %1, #0\n\tadd %0, %0, %1": "=r"(sad), "=r"(tmp): "r"(tmp2)); + __asm__ volatile( + "rsbs %1, %2, %1, asr #2\n\t" + "rsbmi %1, %1, #0\n\t" + "add %0, %0, %1" + : "=r"(sad), "=r"(tmp) + : "r"(tmp2) + ); return sad; } diff --git a/media/libstagefright/codecs/avc/enc/src/sad_inline.h b/media/libstagefright/codecs/avc/enc/src/sad_inline.h index f6c3554434..6695d630fa 100644 --- a/media/libstagefright/codecs/avc/enc/src/sad_inline.h +++ b/media/libstagefright/codecs/avc/enc/src/sad_inline.h @@ -343,7 +343,13 @@ SadMBOffset1: __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2) { -__asm__ volatile("rsbs %1, %1, %2\n\trsbmi %1, %1, #0\n\tadd %0, %0, %1": "=r"(sad): "r"(tmp), "r"(tmp2)); + __asm__ volatile( + "rsbs %1, %1, %2\n\t" + "rsbmi %1, %1, #0\n\t" + "add %0, %0, %1" + : "=r"(sad) + : "r"(tmp), "r"(tmp2) + ); return sad; } @@ -351,7 +357,18 @@ __asm__ volatile("rsbs %1, %1, %2\n\trsbmi %1, %1, #0\n\tadd %0, %0, %1": "=r"(s { int32 x7; -__asm__ volatile("EOR %1, %2, %0\n\tSUBS %0, %2, %0\n\tEOR %1, %1, %0\n\tAND %1, %3, %1, lsr #1\n\tORRCC %1, %1, #0x80000000\n\tRSB %1, %1, %1, lsl #8\n\tADD %0, %0, %1, asr #7\n\tEOR %0, %0, %1, asr #7": "=r"(src1), "=&r"(x7): "r"(src2), "r"(mask)); + __asm__ volatile( + "EOR %1, %2, %0\n\t" + "SUBS %0, %2, %0\n\t" + "EOR %1, %1, %0\n\t" + "AND %1, %3, %1, lsr #1\n\t" + "ORRCC %1, %1, #0x80000000\n\t" + "RSB %1, %1, %1, lsl #8\n\t" + "ADD %0, %0, %1, asr #7\n\t" + "EOR %0, %0, %1, asr #7" + : "=r"(src1), "=&r"(x7) + : "r"(src2), "r"(mask) + ); return src1; } @@ -360,12 +377,31 @@ __asm__ volatile("EOR %1, %2, %0\n\tSUBS %0, %2, %0\n\tEOR %1, %1, %0\n\tAND % { int32 x7; -__asm__ volatile("EOR %1, %2, %0\n\tADDS %0, %2, %0\n\tEOR %1, %1, %0\n\tANDS %1, %3, %1, rrx\n\tRSB %1, %1, %1, lsl #8\n\tSUB %0, %0, %1, asr #7\n\tEOR %0, %0, %1, asr #7": "=r"(src1), "=&r"(x7): "r"(src2), "r"(mask)); + __asm__ volatile( + "EOR %1, %2, %0\n\t" + "ADDS %0, %2, %0\n\t" + "EOR %1, %1, %0\n\t" + "ANDS %1, %3, %1, rrx\n\t" + "RSB %1, %1, %1, lsl #8\n\t" + "SUB %0, %0, %1, asr #7\n\t" + "EOR %0, %0, %1, asr #7" + : "=r"(src1), "=&r"(x7) + : "r"(src2), "r"(mask) + ); return src1; } -#define sum_accumulate __asm__ 
volatile("SBC %0, %0, %1\n\tBIC %1, %4, %1\n\tADD %2, %2, %1, lsr #8\n\tSBC %0, %0, %3\n\tBIC %3, %4, %3\n\tADD %2, %2, %3, lsr #8": "=&r" (x5), "=&r" (x10), "=&r" (x4), "=&r" (x11): "r" (x6)); +#define sum_accumulate __asm__ volatile( \ + "SBC %0, %0, %1\n\t" \ + "BIC %1, %4, %1\n\t" \ + "ADD %2, %2, %1, lsr #8\n\t" \ + "SBC %0, %0, %3\n\t" \ + "BIC %3, %4, %3\n\t" \ + "ADD %2, %2, %3, lsr #8" \ + : "=&r" (x5), "=&r" (x10), "=&r" (x4), "=&r" (x11) \ + : "r" (x6) \ + ); #define NUMBER 3 #define SHIFT 24 @@ -407,7 +443,7 @@ __asm__ volatile("EOR %1, %2, %0\n\tADDS %0, %2, %0\n\tEOR %1, %1, %0\n\tANDS x8 = 16; /// -__asm__ volatile("MVN %0, #0xFF00": "=r"(x6)); + __asm__ volatile("MVN %0, #0xFF00": "=r"(x6)); LOOP_SAD0: /****** process 8 pixels ******/ @@ -431,10 +467,10 @@ LOOP_SAD0: /****** process 8 pixels ******/ x11 = *((int32*)(ref + 4)); -__asm__ volatile("LDR %0, [%1], %2": "=&r"(x10), "=r"(ref): "r"(lx)); + __asm__ volatile("LDR %0, [%1], %2": "=&r"(x10), "=r"(ref): "r"(lx)); //x10 = *((int32*)ref); ref+=lx; x14 = *((int32*)(blk + 4)); -__asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "=r"(blk)); + __asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "=r"(blk)); /* process x11 & x14 */ x11 = sad_4pixel(x11, x14, x9); diff --git a/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h b/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h index 8a7fe22ac8..0165360249 100644 --- a/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h +++ b/media/libstagefright/codecs/avc/enc/src/sad_mb_offset.h @@ -230,7 +230,7 @@ __inline int32 sad_mb_offset1(uint8 *ref, uint8 *blk, int lx, int dmin) x4 = x5 = 0; x8 = 16; //<<===========******* -__asm__ volatile("MVN %0, #0xFF0000": "=r"(x6)); + __asm__ volatile("MVN %0, #0xFF0000": "=r"(x6)); #if (NUMBER==3) LOOP_SAD3: @@ -239,7 +239,7 @@ LOOP_SAD2: #elif (NUMBER==1) LOOP_SAD1: #endif -__asm__ volatile("BIC %0, %0, #3": "=r"(ref)); + __asm__ volatile("BIC %0, %0, #3": "=r"(ref)); /****** process 8 pixels ******/ x11 = *((int32*)(ref + 12)); x12 = *((int32*)(ref + 16)); @@ -247,11 +247,32 @@ __asm__ volatile("BIC %0, %0, #3": "=r"(ref)); x14 = *((int32*)(blk + 12)); #if (SHIFT==8) -__asm__ volatile("MVN %0, %0, lsr #8\n\tBIC %0, %0, %1,lsl #24\n\tMVN %1, %1,lsr #8\n\tBIC %1, %1, %2,lsl #24": "=&r"(x10), "=&r"(x11): "r"(x12)); + __asm__ volatile( + "MVN %0, %0, lsr #8\n\t" + "BIC %0, %0, %1, lsl #24\n\t" + "MVN %1, %1, lsr #8\n\t" + "BIC %1, %1, %2, lsl #24" + : "=&r"(x10), "=&r"(x11) + : "r"(x12) + ); #elif (SHIFT==16) -__asm__ volatile("MVN %0, %0, lsr #16\n\tBIC %0, %0, %1,lsl #16\n\tMVN %1, %1,lsr #16\n\tBIC %1, %1, %2,lsl #16": "=&r"(x10), "=&r"(x11): "r"(x12)); + __asm__ volatile( + "MVN %0, %0, lsr #16\n\t" + "BIC %0, %0, %1, lsl #16\n\t" + "MVN %1, %1, lsr #16\n\t" + "BIC %1, %1, %2, lsl #16" + : "=&r"(x10), "=&r"(x11) + : "r"(x12) + ); #elif (SHIFT==24) -__asm__ volatile("MVN %0, %0, lsr #24\n\tBIC %0, %0, %1,lsl #8\n\tMVN %1, %1,lsr #24\n\tBIC %1, %1, %2,lsl #8": "=&r"(x10), "=&r"(x11): "r"(x12)); + __asm__ volatile( + "MVN %0, %0, lsr #24\n\t" + "BIC %0, %0, %1, lsl #8\n\t" + "MVN %1, %1, lsr #24\n\t" + "BIC %1, %1, %2, lsl #8" + : "=&r"(x10), "=&r"(x11) + : "r"(x12) + ); #endif x12 = *((int32*)(blk + 8)); @@ -271,13 +292,34 @@ __asm__ volatile("MVN %0, %0, lsr #24\n\tBIC %0, %0, %1,lsl #8\n\tMVN %1, x14 = *((int32*)(blk + 4)); #if (SHIFT==8) -__asm__ volatile("MVN %0, %0, lsr #8\n\tBIC %0, %0, %1,lsl #24\n\tMVN %1, %1,lsr #8\n\tBIC %1, %1, %2,lsl #24": "=&r"(x10), "=&r"(x11): "r"(x12)); + __asm__ volatile( + "MVN %0, %0, lsr #8\n\t" + 
"BIC %0, %0, %1, lsl #24\n\t" + "MVN %1, %1, lsr #8\n\t" + "BIC %1, %1, %2, lsl #24" + : "=&r"(x10), "=&r"(x11) + : "r"(x12) + ); #elif (SHIFT==16) -__asm__ volatile("MVN %0, %0, lsr #16\n\tBIC %0, %0, %1,lsl #16\n\tMVN %1, %1,lsr #16\n\tBIC %1, %1, %2,lsl #16": "=&r"(x10), "=&r"(x11): "r"(x12)); + __asm__ volatile( + "MVN %0, %0, lsr #16\n\t" + "BIC %0, %0, %1, lsl #16\n\t" + "MVN %1, %1, lsr #16\n\t" + "BIC %1, %1, %2, lsl #16" + : "=&r"(x10), "=&r"(x11) + : "r"(x12) + ); #elif (SHIFT==24) -__asm__ volatile("MVN %0, %0, lsr #24\n\tBIC %0, %0, %1,lsl #8\n\tMVN %1, %1,lsr #24\n\tBIC %1, %1, %2,lsl #8": "=&r"(x10), "=&r"(x11): "r"(x12)); + __asm__ volatile( + "MVN %0, %0, lsr #24\n\t" + "BIC %0, %0, %1, lsl #8\n\t" + "MVN %1, %1, lsr #24\n\t" + "BIC %1, %1, %2, lsl #8" + : "=&r"(x10), "=&r"(x11) + : "r"(x12) + ); #endif -__asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "=r"(blk)); + __asm__ volatile("LDR %0, [%1], #16": "=&r"(x12), "=r"(blk)); /* process x11 & x14 */ x11 = sad_4pixelN(x11, x14, x9); -- 2.11.0