#include "swscale.h"
#include "swscale_internal.h"
- DECLARE_ALIGNED(8, const uint8_t, dither_2x2_4)[][8]={
-DECLARE_ALIGNED(8, static const uint8_t, dither_2x2_4)[2][8]={
++DECLARE_ALIGNED(8, const uint8_t, ff_dither_2x2_4)[][8] = {
{ 1, 3, 1, 3, 1, 3, 1, 3, },
{ 2, 0, 2, 0, 2, 0, 2, 0, },
+{ 1, 3, 1, 3, 1, 3, 1, 3, },
};
- DECLARE_ALIGNED(8, const uint8_t, dither_2x2_8)[][8]={
-DECLARE_ALIGNED(8, static const uint8_t, dither_2x2_8)[2][8]={
++DECLARE_ALIGNED(8, const uint8_t, ff_dither_2x2_8)[][8] = {
{ 6, 2, 6, 2, 6, 2, 6, 2, },
{ 0, 4, 0, 4, 0, 4, 0, 4, },
+{ 6, 2, 6, 2, 6, 2, 6, 2, },
};
- DECLARE_ALIGNED(8, const uint8_t, dither_4x4_16)[][8]={
-DECLARE_ALIGNED(8, const uint8_t, ff_dither_4x4_16)[4][8] = {
++DECLARE_ALIGNED(8, const uint8_t, ff_dither_4x4_16)[][8] = {
{ 8, 4, 11, 7, 8, 4, 11, 7, },
{ 2, 14, 1, 13, 2, 14, 1, 13, },
{ 10, 6, 9, 5, 10, 6, 9, 5, },
{ 0, 12, 3, 15, 0, 12, 3, 15, },
+{ 8, 4, 11, 7, 8, 4, 11, 7, },
};
/* NOTE(review): unresolved combined-diff markers ('-'/'++'/'+') — this span is
 * merge residue, not compilable C. Resolution would drop the '-' lines and
 * keep the '++'/'+' lines (ff_-prefixed name, unsized first dimension, first
 * row repeated at the end). Additionally, only 6 distinct rows of what should
 * be an 8x8 matrix are present here — two interior rows appear to have been
 * lost from this fragment; restore them from the upstream table before use. */
- DECLARE_ALIGNED(8, const uint8_t, dither_8x8_32)[][8]={
-DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_32)[8][8] = {
++DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_32)[][8] = {
{ 17, 9, 23, 15, 16, 8, 22, 14, },
{ 5, 29, 3, 27, 4, 28, 2, 26, },
{ 21, 13, 19, 11, 20, 12, 18, 10, },
{ 4, 28, 2, 26, 5, 29, 3, 27, },
{ 20, 12, 18, 10, 21, 13, 19, 11, },
{ 1, 25, 7, 31, 0, 24, 6, 30, },
+{ 17, 9, 23, 15, 16, 8, 22, 14, },
};
/* NOTE(review): unresolved combined-diff markers, and the table is truncated —
 * only 3 rows of what the declarations ([8][8] on the '-' line) indicate is an
 * 8x8 matrix survive in this fragment. Do not resolve from this text alone;
 * the missing rows must come from the upstream source. */
- DECLARE_ALIGNED(8, const uint8_t, dither_8x8_73)[][8]={
-DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_73)[8][8] = {
++DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_73)[][8] = {
{ 0, 55, 14, 68, 3, 58, 17, 72, },
{ 37, 18, 50, 32, 40, 22, 54, 35, },
{ 9, 64, 5, 59, 13, 67, 8, 63, },
};
/* NOTE(review): preprocessor chain selecting one of four ff_dither_8x8_220
 * variants (the active '#if 1' branch, plus three gamma-corrected alternatives
 * per the comments below). Every variant still carries unresolved diff markers
 * and is truncated to 3 of its 8 rows; the final '#else' branch is cut off
 * mid-table — its closing '};' and the '#endif' are missing from this
 * fragment. Restore all of this from upstream before attempting a build. */
#if 1
- DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[][8]={
-DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[8][8] = {
++DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
{117, 62, 158, 103, 113, 58, 155, 100, },
{ 34, 199, 21, 186, 31, 196, 17, 182, },
{144, 89, 131, 76, 141, 86, 127, 72, },
};
#elif 1
// tries to correct a gamma of 1.5
- DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[][8]={
-DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[8][8] = {
++DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
{ 0, 143, 18, 200, 2, 156, 25, 215, },
{ 78, 28, 125, 64, 89, 36, 138, 74, },
{ 10, 180, 3, 161, 16, 195, 8, 175, },
};
#elif 1
// tries to correct a gamma of 2.0
- DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[][8]={
-DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[8][8] = {
++DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
{ 0, 124, 8, 193, 0, 140, 12, 213, },
{ 55, 14, 104, 42, 66, 19, 119, 52, },
{ 3, 168, 1, 145, 6, 187, 3, 162, },
};
#else
// tries to correct a gamma of 2.5
- DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[][8]={
-DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[8][8] = {
++DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
{ 0, 107, 3, 187, 0, 125, 6, 212, },
{ 39, 7, 86, 28, 49, 11, 102, 36, },
{ 1, 158, 0, 131, 3, 180, 1, 151, },
/* NOTE(review): fragment — the function's return type, name, and leading
 * parameters are missing (text resumes mid-signature). The parameter names
 * (alpSrc, dest, dstW, y, target) and the d128 row lookup suggest a
 * monochrome output writer using the 8x8/220 dither table — TODO confirm
 * against the full file. Diff markers below are unresolved: the '+' lines
 * (ff_-prefixed table, new 'err' accumulator) are the intended final state. */
const int16_t **alpSrc, uint8_t *dest, int dstW,
int y, enum AVPixelFormat target)
{
- const uint8_t * const d128=dither_8x8_220[y&7];
+ const uint8_t * const d128 = ff_dither_8x8_220[y&7];
int i;
unsigned acc = 0;
+ int err = 0;
for (i = 0; i < dstW; i += 2) {
int j;
/* NOTE(review): fragment — starts mid-signature and ends mid-body; diff
 * markers are unresolved ('+' lines are the intended final state). The added
 * code introduces an SWS_DITHER_ED branch: an error-diffusion path that
 * spreads quantization error through c->dither_error[0][] with 7/1/5/3
 * weights (the constants match Floyd–Steinberg-style kernels — TODO confirm),
 * packs one output bit per luma sample into 'acc', and emits a byte every 8
 * pixels via output_pixel(). The else-branch keeps the ordered-dither loop,
 * now rounding with '+ 64' before the '>> 7' — the rounding fix is part of
 * the same change. */
const int16_t *abuf0, uint8_t *dest, int dstW,
int uvalpha, int y, enum AVPixelFormat target)
{
- const uint8_t * const d128 = dither_8x8_220[y & 7];
+ const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
int i;
+ if (c->dither == SWS_DITHER_ED) {
+ int err = 0;
+ int acc = 0;
+ for (i = 0; i < dstW; i +=2) {
+ int Y;
+
+ Y = ((buf0[i + 0] + 64) >> 7);
+ Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
+ c->dither_error[0][i] = err;
+ acc = 2*acc + (Y >= 128);
+ Y -= 220*(acc&1);
+
+ err = ((buf0[i + 1] + 64) >> 7);
+ err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
+ c->dither_error[0][i+1] = Y;
+ acc = 2*acc + (err >= 128);
+ err -= 220*(acc&1);
+
+ if ((i & 7) == 6)
+ output_pixel(*dest++, acc);
+ }
+ c->dither_error[0][i] = err;
+ } else {
for (i = 0; i < dstW; i += 8) {
int acc = 0;
-
- accumulate_bit(acc, (buf0[i + 0] >> 7) + d128[0]);
- accumulate_bit(acc, (buf0[i + 1] >> 7) + d128[1]);
- accumulate_bit(acc, (buf0[i + 2] >> 7) + d128[2]);
- accumulate_bit(acc, (buf0[i + 3] >> 7) + d128[3]);
- accumulate_bit(acc, (buf0[i + 4] >> 7) + d128[4]);
- accumulate_bit(acc, (buf0[i + 5] >> 7) + d128[5]);
- accumulate_bit(acc, (buf0[i + 6] >> 7) + d128[6]);
- accumulate_bit(acc, (buf0[i + 7] >> 7) + d128[7]);
+ accumulate_bit(acc, ((buf0[i + 0] + 64) >> 7) + d128[0]);
+ accumulate_bit(acc, ((buf0[i + 1] + 64) >> 7) + d128[1]);
+ accumulate_bit(acc, ((buf0[i + 2] + 64) >> 7) + d128[2]);
+ accumulate_bit(acc, ((buf0[i + 3] + 64) >> 7) + d128[3]);
+ accumulate_bit(acc, ((buf0[i + 4] + 64) >> 7) + d128[4]);
+ accumulate_bit(acc, ((buf0[i + 5] + 64) >> 7) + d128[5]);
+ accumulate_bit(acc, ((buf0[i + 6] + 64) >> 7) + d128[6]);
+ accumulate_bit(acc, ((buf0[i + 7] + 64) >> 7) + d128[7]);
output_pixel(*dest++, acc);
}
/* NOTE(review): fragment from inside an RGB output writer — selects per-pixel
 * dither offsets (dr/dg/db for two horizontally adjacent pixels) from the 2x2
 * tables for 565/555 targets and the 4x4 table otherwise. Diff markers are
 * unresolved; the '++'/'+' lines (ff_-prefixed table names) are the intended
 * final state. The surrounding loop body and function boundaries are not
 * visible here. */
int dr1, dg1, db1, dr2, dg2, db2;
if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565) {
-- dr1 = dither_2x2_8[ y & 1 ][0];
-- dg1 = dither_2x2_4[ y & 1 ][0];
-- db1 = dither_2x2_8[(y & 1) ^ 1][0];
-- dr2 = dither_2x2_8[ y & 1 ][1];
-- dg2 = dither_2x2_4[ y & 1 ][1];
-- db2 = dither_2x2_8[(y & 1) ^ 1][1];
++ dr1 = ff_dither_2x2_8[ y & 1 ][0];
++ dg1 = ff_dither_2x2_4[ y & 1 ][0];
++ db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
++ dr2 = ff_dither_2x2_8[ y & 1 ][1];
++ dg2 = ff_dither_2x2_4[ y & 1 ][1];
++ db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
} else if (target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555) {
-- dr1 = dither_2x2_8[ y & 1 ][0];
-- dg1 = dither_2x2_8[ y & 1 ][1];
-- db1 = dither_2x2_8[(y & 1) ^ 1][0];
-- dr2 = dither_2x2_8[ y & 1 ][1];
-- dg2 = dither_2x2_8[ y & 1 ][0];
-- db2 = dither_2x2_8[(y & 1) ^ 1][1];
++ dr1 = ff_dither_2x2_8[ y & 1 ][0];
++ dg1 = ff_dither_2x2_8[ y & 1 ][1];
++ db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
++ dr2 = ff_dither_2x2_8[ y & 1 ][1];
++ dg2 = ff_dither_2x2_8[ y & 1 ][0];
++ db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
} else {
- dr1 = dither_4x4_16[ y & 3 ][0];
- dg1 = dither_4x4_16[ y & 3 ][1];
- db1 = dither_4x4_16[(y & 3) ^ 3][0];
- dr2 = dither_4x4_16[ y & 3 ][1];
- dg2 = dither_4x4_16[ y & 3 ][0];
- db2 = dither_4x4_16[(y & 3) ^ 3][1];
+ dr1 = ff_dither_4x4_16[ y & 3 ][0];
+ dg1 = ff_dither_4x4_16[ y & 3 ][1];
+ db1 = ff_dither_4x4_16[(y & 3) ^ 3][0];
+ dr2 = ff_dither_4x4_16[ y & 3 ][1];
+ dg2 = ff_dither_4x4_16[ y & 3 ][0];
+ db2 = ff_dither_4x4_16[(y & 3) ^ 3][1];
}
dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
/* NOTE(review): top-of-file fragment from a second source file (yuv2rgb).
 * The diff adds the pixdesc.h include and deletes the old non-prefixed
 * 'extern const uint8_t dither_*' declarations — consistent with the tables
 * being renamed to ff_* and declared in a shared header instead. The
 * ff_yuv2rgb_coeffs table is truncated here (2 of its 8 rows visible). */
#include "rgb2rgb.h"
#include "swscale.h"
#include "swscale_internal.h"
+#include "libavutil/pixdesc.h"
- extern const uint8_t dither_2x2_4[3][8];
- extern const uint8_t dither_2x2_8[3][8];
- extern const uint8_t dither_4x4_16[5][8];
- extern const uint8_t dither_8x8_32[9][8];
- extern const uint8_t dither_8x8_73[9][8];
- extern const uint8_t dither_8x8_220[9][8];
-
const int32_t ff_yuv2rgb_coeffs[8][4] = {
{ 117504, 138453, 13954, 34903 }, /* no sequence_display_extension */
{ 117504, 138453, 13954, 34903 }, /* ITU-R Rec. 709 (1990) */
/* NOTE(review): diff hunk renaming yuv2rgb_c_16 to
 * yuv2rgb_c_16_ordered_dither and adding yuv2rgb_c_15_ordered_dither, both
 * built from the YUV2RGBFUNC/CLOSEYUV2RGBFUNC macros defined elsewhere in the
 * file (not visible here). Each defines a PUTRGB16/PUTRGB15 macro that adds
 * per-pixel offsets from the 2x2 dither rows (d16/e16/f16) before the r/g/b
 * table lookups; the 'o' argument alternates 0/8 between the two output rows
 * and steps by 2 per chroma pair. Diff markers are unresolved — '+'/'++'
 * lines are the intended final state. The first two lines below are the tail
 * of the preceding (BGR24) function. */
PUTBGR24(dst_2, py_2, 0);
ENDYUV2RGBFUNC()
-// This is exactly the same code as yuv2rgb_c_32 except for the types of
-// r, g, b, dst_1, dst_2
-YUV2RGBFUNC(yuv2rgb_c_16, uint16_t, 0)
+YUV2RGBFUNC(yuv2rgb_c_16_ordered_dither, uint16_t, 0)
- const uint8_t *d16 = dither_2x2_8[y & 1];
- const uint8_t *e16 = dither_2x2_4[y & 1];
- const uint8_t *f16 = dither_2x2_8[(y & 1)^1];
++ const uint8_t *d16 = ff_dither_2x2_8[y & 1];
++ const uint8_t *e16 = ff_dither_2x2_4[y & 1];
++ const uint8_t *f16 = ff_dither_2x2_8[(y & 1)^1];
+
+#define PUTRGB16(dst, src, i, o) \
+ Y = src[2 * i]; \
+ dst[2 * i] = r[Y + d16[0 + o]] + \
+ g[Y + e16[0 + o]] + \
+ b[Y + f16[0 + o]]; \
+ Y = src[2 * i + 1]; \
+ dst[2 * i + 1] = r[Y + d16[1 + o]] + \
+ g[Y + e16[1 + o]] + \
+ b[Y + f16[1 + o]];
LOADCHROMA(0);
- PUTRGB(dst_1, py_1, 0);
- PUTRGB(dst_2, py_2, 0);
+ PUTRGB16(dst_1, py_1, 0, 0);
+ PUTRGB16(dst_2, py_2, 0, 0 + 8);
LOADCHROMA(1);
- PUTRGB(dst_2, py_2, 1);
- PUTRGB(dst_1, py_1, 1);
+ PUTRGB16(dst_2, py_2, 1, 2 + 8);
+ PUTRGB16(dst_1, py_1, 1, 2);
LOADCHROMA(2);
- PUTRGB(dst_1, py_1, 2);
- PUTRGB(dst_2, py_2, 2);
+ PUTRGB16(dst_1, py_1, 2, 4);
+ PUTRGB16(dst_2, py_2, 2, 4 + 8);
LOADCHROMA(3);
- PUTRGB(dst_2, py_2, 3);
- PUTRGB(dst_1, py_1, 3);
+ PUTRGB16(dst_2, py_2, 3, 6 + 8);
+ PUTRGB16(dst_1, py_1, 3, 6);
+CLOSEYUV2RGBFUNC(8)
+
+YUV2RGBFUNC(yuv2rgb_c_15_ordered_dither, uint16_t, 0)
- const uint8_t *d16 = dither_2x2_8[y & 1];
- const uint8_t *e16 = dither_2x2_8[(y & 1)^1];
++ const uint8_t *d16 = ff_dither_2x2_8[y & 1];
++ const uint8_t *e16 = ff_dither_2x2_8[(y & 1)^1];
+
+#define PUTRGB15(dst, src, i, o) \
+ Y = src[2 * i]; \
+ dst[2 * i] = r[Y + d16[0 + o]] + \
+ g[Y + d16[1 + o]] + \
+ b[Y + e16[0 + o]]; \
+ Y = src[2 * i + 1]; \
+ dst[2 * i + 1] = r[Y + d16[1 + o]] + \
+ g[Y + d16[0 + o]] + \
+ b[Y + e16[1 + o]];
+ LOADCHROMA(0);
+ PUTRGB15(dst_1, py_1, 0, 0);
+ PUTRGB15(dst_2, py_2, 0, 0 + 8);
+
+ LOADCHROMA(1);
+ PUTRGB15(dst_2, py_2, 1, 2 + 8);
+ PUTRGB15(dst_1, py_1, 1, 2);
+
+ LOADCHROMA(2);
+ PUTRGB15(dst_1, py_1, 2, 4);
+ PUTRGB15(dst_2, py_2, 2, 4 + 8);
+
+ LOADCHROMA(3);
+ PUTRGB15(dst_2, py_2, 3, 6 + 8);
+ PUTRGB15(dst_1, py_1, 3, 6);
CLOSEYUV2RGBFUNC(8)
/* NOTE(review): fragment — start of yuv2rgb_c_1_ordered_dither (1-bit output
 * with the 8x8/220 dither row). The diff renames the table to
 * ff_dither_8x8_220 and offsets the grey-chroma table lookups by
 * YUVRGB_TABLE_HEADROOM (the tables evidently gained headroom entries —
 * TODO confirm against the table-setup code). Text is cut off inside the
 * PUTRGB1 macro definition; the rest of the function is not visible. */
// r, g, b, dst_1, dst_2
CLOSEYUV2RGBFUNC(8)
YUV2RGBFUNC(yuv2rgb_c_1_ordered_dither, uint8_t, 0)
- const uint8_t *d128 = dither_8x8_220[y & 7];
+ const uint8_t *d128 = ff_dither_8x8_220[y & 7];
char out_1 = 0, out_2 = 0;
- g = c->table_gU[128] + c->table_gV[128];
+ g = c->table_gU[128 + YUVRGB_TABLE_HEADROOM] + c->table_gV[128 + YUVRGB_TABLE_HEADROOM];
#define PUTRGB1(out, src, i, o) \
Y = src[2 * i]; \