00031 #include "avcodec.h"
00032 #include "get_bits.h"
00033 #include "put_bits.h"
00034 #include "dsputil.h"
00035 #include "thread.h"
00036
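/* Number of bits resolved by the first-level VLC lookup; joint codes whose
 * combined length exceeds this fall back to per-symbol decoding. */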
00037 #define VLC_BITS 11
00038
00039 #if HAVE_BIGENDIAN
00040 #define B 3
00041 #define G 2
00042 #define R 1
00043 #define A 0
00044 #else
00045 #define B 0
00046 #define G 1
00047 #define R 2
00048 #define A 3
00049 #endif
00050
00051 typedef enum Predictor{
00052 LEFT= 0,
00053 PLANE,
00054 MEDIAN,
00055 } Predictor;
00056
00057 typedef struct HYuvContext{
00058 AVCodecContext *avctx;
00059 Predictor predictor;
00060 GetBitContext gb;
00061 PutBitContext pb;
00062 int interlaced;
00063 int decorrelate;
00064 int bitstream_bpp;
00065 int version;
00066 int yuy2;
00067 int bgr32;
00068 int width, height;
00069 int flags;
00070 int context;
00071 int picture_number;
00072 int last_slice_end;
00073 uint8_t *temp[3];
00074 uint64_t stats[3][256];
00075 uint8_t len[3][256];
00076 uint32_t bits[3][256];
00077 uint32_t pix_bgr_map[1<<VLC_BITS];
00078 VLC vlc[6];
00079 AVFrame picture;
00080 uint8_t *bitstream_buffer;
00081 unsigned int bitstream_buffer_size;
00082 DSPContext dsp;
00083 }HYuvContext;
00084
00085 #define classic_shift_luma_table_size 42
00086 static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
00087 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
00088 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
00089 69,68, 0
00090 };
00091
00092 #define classic_shift_chroma_table_size 59
00093 static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
00094 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
00095 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
00096 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
00097 };
00098
00099 static const unsigned char classic_add_luma[256] = {
00100 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
00101 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
00102 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
00103 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
00104 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
00105 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
00106 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
00107 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
00108 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
00109 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
00110 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
00111 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
00112 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
00113 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
00114 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
00115 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
00116 };
00117
00118 static const unsigned char classic_add_chroma[256] = {
00119 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
00120 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
00121 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
00122 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
00123 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
00124 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
00125 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
00126 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
00127 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
00128 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
00129 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
00130 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
00131 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
00132 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
00133 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
00134 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
00135 };
00136
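/* Left-predict (delta against the previous pixel) one row for the encoder.
 * The first 16 pixels are handled in C and the remainder through
 * dsp.diff_bytes(), presumably so optimized diff_bytes() implementations see
 * conveniently offset data; returns the value to carry over as the left
 * neighbour for the next call. */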
00137 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
00138 int i;
00139 if(w<32){
00140 for(i=0; i<w; i++){
00141 const int temp= src[i];
00142 dst[i]= temp - left;
00143 left= temp;
00144 }
00145 return left;
00146 }else{
00147 for(i=0; i<16; i++){
00148 const int temp= src[i];
00149 dst[i]= temp - left;
00150 left= temp;
00151 }
00152 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
00153 return src[w-1];
00154 }
00155 }
00156
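/* Same as sub_left_prediction() but for packed 32-bit RGB: R, G and B are
 * each differenced against the previous pixel, and the last pixel's
 * components are returned through *red, *green and *blue. */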
00157 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
00158 int i;
00159 int r,g,b;
00160 r= *red;
00161 g= *green;
00162 b= *blue;
00163 for(i=0; i<FFMIN(w,4); i++){
00164 const int rt= src[i*4+R];
00165 const int gt= src[i*4+G];
00166 const int bt= src[i*4+B];
00167 dst[i*4+R]= rt - r;
00168 dst[i*4+G]= gt - g;
00169 dst[i*4+B]= bt - b;
00170 r = rt;
00171 g = gt;
00172 b = bt;
00173 }
00174 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
00175 *red= src[(w-1)*4+R];
00176 *green= src[(w-1)*4+G];
00177 *blue= src[(w-1)*4+B];
00178 }
00179
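/* Read a run-length coded table of 256 Huffman code lengths: each group is a
 * 3-bit repeat count (0 means an explicit 8-bit count follows) and a 5-bit
 * length value. */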
00180 static int read_len_table(uint8_t *dst, GetBitContext *gb){
00181 int i, val, repeat;
00182
00183 for(i=0; i<256;){
00184 repeat= get_bits(gb, 3);
00185 val = get_bits(gb, 5);
00186 if(repeat==0)
00187 repeat= get_bits(gb, 8);
00188
00189 if(i+repeat > 256 || get_bits_left(gb) < 0) {
00190 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
00191 return -1;
00192 }
00193 while (repeat--)
00194 dst[i++] = val;
00195 }
00196 return 0;
00197 }
00198
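/* Derive canonical Huffman codes from the code lengths, assigning codes from
 * the longest length down; fails if the lengths do not form a valid prefix
 * code. */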
00199 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
00200 int len, index;
00201 uint32_t bits=0;
00202
00203 for(len=32; len>0; len--){
00204 for(index=0; index<256; index++){
00205 if(len_table[index]==len)
00206 dst[index]= bits++;
00207 }
00208 if(bits & 1){
00209 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
00210 return -1;
00211 }
00212 bits >>= 1;
00213 }
00214 return 0;
00215 }
00216
00217 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
00218 typedef struct {
00219 uint64_t val;
00220 int name;
00221 } HeapElem;
00222
00223 static void heap_sift(HeapElem *h, int root, int size)
00224 {
00225 while(root*2+1 < size) {
00226 int child = root*2+1;
00227 if(child < size-1 && h[child].val > h[child+1].val)
00228 child++;
00229 if(h[root].val > h[child].val) {
00230 FFSWAP(HeapElem, h[root], h[child]);
00231 root = child;
00232 } else
00233 break;
00234 }
00235 }
00236
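/* Build Huffman code lengths from the symbol statistics with a min-heap.
 * If any length reaches 32 bits, the whole construction is repeated with a
 * progressively larger bias added to the counts, flattening the distribution
 * until every code fits. */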
00237 static void generate_len_table(uint8_t *dst, const uint64_t *stats){
00238 HeapElem h[256];
00239 int up[2*256];
00240 int len[2*256];
00241 int offset, i, next;
00242 int size = 256;
00243
00244 for(offset=1; ; offset<<=1){
00245 for(i=0; i<size; i++){
00246 h[i].name = i;
00247 h[i].val = (stats[i] << 8) + offset;
00248 }
00249 for(i=size/2-1; i>=0; i--)
00250 heap_sift(h, i, size);
00251
00252 for(next=size; next<size*2-1; next++){
00253
00254 uint64_t min1v = h[0].val;
00255 up[h[0].name] = next;
00256 h[0].val = INT64_MAX;
00257 heap_sift(h, 0, size);
00258 up[h[0].name] = next;
00259 h[0].name = next;
00260 h[0].val += min1v;
00261 heap_sift(h, 0, size);
00262 }
00263
00264 len[2*size-2] = 0;
00265 for(i=2*size-3; i>=size; i--)
00266 len[i] = len[up[i]] + 1;
00267 for(i=0; i<size; i++) {
00268 dst[i] = len[up[i]] + 1;
00269 if(dst[i] >= 32) break;
00270 }
00271 if(i==size) break;
00272 }
00273 }
00274 #endif
00275
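/* Build the joint VLC tables used by the fast decode paths: for YUV, tables
 * of Y/Y, Y/U and Y/V code pairs whose combined length fits in VLC_BITS; for
 * RGB, a single table of G/B/R triplets plus pix_bgr_map, which stores the
 * fully reconstructed pixel value for each joint code (with any
 * decorrelation already undone). */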
00276 static int generate_joint_tables(HYuvContext *s){
00277 uint16_t symbols[1<<VLC_BITS];
00278 uint16_t bits[1<<VLC_BITS];
00279 uint8_t len[1<<VLC_BITS];
00280 int ret;
00281
00282 if(s->bitstream_bpp < 24){
00283 int p, i, y, u;
00284 for(p=0; p<3; p++){
00285 for(i=y=0; y<256; y++){
00286 int len0 = s->len[0][y];
00287 int limit = VLC_BITS - len0;
00288 if(limit <= 0)
00289 continue;
00290 for(u=0; u<256; u++){
00291 int len1 = s->len[p][u];
00292 if(len1 > limit)
00293 continue;
00294 len[i] = len0 + len1;
00295 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
00296 symbols[i] = (y<<8) + u;
00297 if(symbols[i] != 0xffff)
00298 i++;
00299 }
00300 }
00301 ff_free_vlc(&s->vlc[3+p]);
00302 if ((ret = ff_init_vlc_sparse(&s->vlc[3 + p], VLC_BITS, i, len, 1, 1,
00303 bits, 2, 2, symbols, 2, 2, 0)) < 0)
00304 return ret;
00305 }
00306 }else{
00307 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
00308 int i, b, g, r, code;
00309 int p0 = s->decorrelate;
00310 int p1 = !s->decorrelate;
00311
00312
00313
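/* Only component values in [-16,16) are enumerated here: that is essentially
 * enough to cover every triplet whose joint code fits in VLC_BITS, and
 * anything missed simply falls back to per-component decoding in
 * decode_bgr_1(). */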
00314 for(i=0, g=-16; g<16; g++){
00315 int len0 = s->len[p0][g&255];
00316 int limit0 = VLC_BITS - len0;
00317 if(limit0 < 2)
00318 continue;
00319 for(b=-16; b<16; b++){
00320 int len1 = s->len[p1][b&255];
00321 int limit1 = limit0 - len1;
00322 if(limit1 < 1)
00323 continue;
00324 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
00325 for(r=-16; r<16; r++){
00326 int len2 = s->len[2][r&255];
00327 if(len2 > limit1)
00328 continue;
00329 len[i] = len0 + len1 + len2;
00330 bits[i] = (code << len2) + s->bits[2][r&255];
00331 if(s->decorrelate){
00332 map[i][G] = g;
00333 map[i][B] = g+b;
00334 map[i][R] = g+r;
00335 }else{
00336 map[i][B] = g;
00337 map[i][G] = b;
00338 map[i][R] = r;
00339 }
00340 i++;
00341 }
00342 }
00343 }
00344 ff_free_vlc(&s->vlc[3]);
00345 if ((ret = init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1,
00346 bits, 2, 2, 0)) < 0)
00347 return ret;
00348 }
00349 return 0;
00350 }
00351
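/* Read the three per-plane length tables (from extradata, or from the frame
 * header when context modelling is used), rebuild the codes and VLCs, and
 * return the number of bytes consumed. */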
00352 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
00353 GetBitContext gb;
00354 int i, ret;
00355
00356 if ((ret = init_get_bits(&gb, src, length * 8)) < 0)
00357 return ret;
00358
00359 for(i=0; i<3; i++){
00360 if ((ret = read_len_table(s->len[i], &gb)) < 0)
00361 return ret;
00362 if ((ret = generate_bits_table(s->bits[i], s->len[i])) < 0)
00363 return ret;
00364 ff_free_vlc(&s->vlc[i]);
00365 if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
00366 s->bits[i], 4, 4, 0)) < 0)
00367 return ret;
00368 }
00369
00370 if ((ret = generate_joint_tables(s)) < 0)
00371 return ret;
00372
00373 return (get_bits_count(&gb)+7)/8;
00374 }
00375
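/* Set up the fixed "classic" tables used when the stream carries no Huffman
 * tables of its own (version < 2). */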
00376 static int read_old_huffman_tables(HYuvContext *s){
00378 GetBitContext gb;
00379 int i, ret;
00380
00381 if ((ret = init_get_bits(&gb, classic_shift_luma,
00382 classic_shift_luma_table_size * 8)) < 0)
00383 return ret;
00384 if ((ret = read_len_table(s->len[0], &gb)) < 0)
00385 return ret;
00386 if ((ret = init_get_bits(&gb, classic_shift_chroma,
00387 classic_shift_chroma_table_size * 8)) < 0)
00388 return ret;
00389 if ((ret = read_len_table(s->len[1], &gb)) < 0)
00390 return ret;
00391
00392 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
00393 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
00394
00395 if(s->bitstream_bpp >= 24){
00396 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
00397 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
00398 }
00399 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
00400 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
00401
00402 for(i=0; i<3; i++){
00403 ff_free_vlc(&s->vlc[i]);
00404 if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
00405 s->bits[i], 4, 4, 0)) < 0)
00406 return ret;
00407 }
00408
00409 if ((ret = generate_joint_tables(s)) < 0)
00410 return ret;
00411
00412 return 0;
00417 }
00418
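/* Scratch buffers holding one row of residuals per plane (or one packed RGB
 * row). */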
00419 static av_cold void alloc_temp(HYuvContext *s){
00420 int i;
00421
00422 if(s->bitstream_bpp<24){
00423 for(i=0; i<3; i++){
00424 s->temp[i]= av_malloc(s->width + 16);
00425 }
00426 }else{
00427 s->temp[0]= av_mallocz(4*s->width + 16);
00428 }
00429 }
00430
00431 static av_cold int common_init(AVCodecContext *avctx){
00432 HYuvContext *s = avctx->priv_data;
00433
00434 s->avctx= avctx;
00435 s->flags= avctx->flags;
00436
00437 dsputil_init(&s->dsp, avctx);
00438
00439 s->width= avctx->width;
00440 s->height= avctx->height;
00441 assert(s->width>0 && s->height>0);
00442
00443 return 0;
00444 }
00445
00446 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
00447 static av_cold int decode_init(AVCodecContext *avctx)
00448 {
00449 HYuvContext *s = avctx->priv_data;
00450 int ret;
00451
00452 common_init(avctx);
00453 memset(s->vlc, 0, sizeof(s->vlc));
00454
00455 avctx->coded_frame= &s->picture;
00456 s->interlaced= s->height > 288;
00457
00458 s->bgr32=1;
00459
00460
00461 if(avctx->extradata_size){
00462 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
00463 s->version=1;
00464 else
00465 s->version=2;
00466 }else
00467 s->version=0;
00468
00469 if(s->version==2){
00470 int method, interlace;
00471
00472 if (avctx->extradata_size < 4)
00473 return -1;
00474
00475 method= ((uint8_t*)avctx->extradata)[0];
00476 s->decorrelate= method&64 ? 1 : 0;
00477 s->predictor= method&63;
00478 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
00479 if(s->bitstream_bpp==0)
00480 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
00481 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
00482 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
00483 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
00484
00485 if ((ret = read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
00486 avctx->extradata_size - 4)) < 0)
00487 return ret;
00488 }else{
00489 switch(avctx->bits_per_coded_sample&7){
00490 case 1:
00491 s->predictor= LEFT;
00492 s->decorrelate= 0;
00493 break;
00494 case 2:
00495 s->predictor= LEFT;
00496 s->decorrelate= 1;
00497 break;
00498 case 3:
00499 s->predictor= PLANE;
00500 s->decorrelate= avctx->bits_per_coded_sample >= 24;
00501 break;
00502 case 4:
00503 s->predictor= MEDIAN;
00504 s->decorrelate= 0;
00505 break;
00506 default:
00507 s->predictor= LEFT;
00508 s->decorrelate= 0;
00509 break;
00510 }
00511 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
00512 s->context= 0;
00513
00514 if ((ret = read_old_huffman_tables(s)) < 0)
00515 return ret;
00516 }
00517
00518 switch(s->bitstream_bpp){
00519 case 12:
00520 avctx->pix_fmt = PIX_FMT_YUV420P;
00521 break;
00522 case 16:
00523 if(s->yuy2){
00524 avctx->pix_fmt = PIX_FMT_YUYV422;
00525 }else{
00526 avctx->pix_fmt = PIX_FMT_YUV422P;
00527 }
00528 break;
00529 case 24:
00530 case 32:
00531 if(s->bgr32){
00532 avctx->pix_fmt = PIX_FMT_RGB32;
00533 }else{
00534 avctx->pix_fmt = PIX_FMT_BGR24;
00535 }
00536 break;
00537 default:
00538 return AVERROR_INVALIDDATA;
00539 }
00540
00541 if (s->predictor == MEDIAN && avctx->pix_fmt == PIX_FMT_YUV422P &&
00542 avctx->width % 4) {
00543 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 4 "
00544 "for this combination of colorspace and predictor type.\n");
00545 return AVERROR_INVALIDDATA;
00546 }
00547
00548 alloc_temp(s);
00549
00550
00551
00552 return 0;
00553 }
00554
00555 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
00556 {
00557 HYuvContext *s = avctx->priv_data;
00558 int i, ret;
00559
00560 avctx->coded_frame= &s->picture;
00561 alloc_temp(s);
00562
00563 for (i = 0; i < 6; i++)
00564 s->vlc[i].table = NULL;
00565
00566 if(s->version==2){
00567 if ((ret = read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
00568 avctx->extradata_size - 4)) < 0)
00569 return ret;
00570 }else{
00571 if ((ret = read_old_huffman_tables(s)) < 0)
00572 return ret;
00573 }
00574
00575 return 0;
00576 }
00577 #endif
00578
00579 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
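/* Run-length encode one table of code lengths into buf, using the same
 * 3+5 bit / explicit 8-bit format that read_len_table() parses; returns the
 * number of bytes written. */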
00580 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
00581 int i;
00582 int index= 0;
00583
00584 for(i=0; i<256;){
00585 int val= len[i];
00586 int repeat=0;
00587
00588 for(; i<256 && len[i]==val && repeat<255; i++)
00589 repeat++;
00590
00591 assert(val < 32 && val >0 && repeat<256 && repeat>0);
00592 if(repeat>7){
00593 buf[index++]= val;
00594 buf[index++]= repeat;
00595 }else{
00596 buf[index++]= val | (repeat<<5);
00597 }
00598 }
00599
00600 return index;
00601 }
00602
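/* The 4-byte extradata header written below is:
 *   [0] predictor | (decorrelate << 6)
 *   [1] bits per pixel of the bitstream
 *   [2] 0x10 if interlaced, 0x20 if progressive, plus 0x40 when context
 *       modelling is enabled
 *   [3] 0 (reserved)
 * followed by the three run-length coded length tables. */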
00603 static av_cold int encode_init(AVCodecContext *avctx)
00604 {
00605 HYuvContext *s = avctx->priv_data;
00606 int i, j;
00607
00608 common_init(avctx);
00609
00610 avctx->extradata= av_mallocz(1024*30);
00611 avctx->stats_out= av_mallocz(1024*30);
00612 s->version=2;
00613
00614 avctx->coded_frame= &s->picture;
00615
00616 switch(avctx->pix_fmt){
00617 case PIX_FMT_YUV420P:
00618 s->bitstream_bpp= 12;
00619 break;
00620 case PIX_FMT_YUV422P:
00621 s->bitstream_bpp= 16;
00622 break;
00623 case PIX_FMT_RGB32:
00624 s->bitstream_bpp= 24;
00625 break;
00626 default:
00627 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
00628 return -1;
00629 }
00630 avctx->bits_per_coded_sample= s->bitstream_bpp;
00631 s->decorrelate= s->bitstream_bpp >= 24;
00632 s->predictor= avctx->prediction_method;
00633 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
00634 if(avctx->context_model==1){
00635 s->context= avctx->context_model;
00636 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
00637 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
00638 return -1;
00639 }
00640 }else s->context= 0;
00641
00642 if(avctx->codec->id==CODEC_ID_HUFFYUV){
00643 if(avctx->pix_fmt==PIX_FMT_YUV420P){
00644 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
00645 return -1;
00646 }
00647 if(avctx->context_model){
00648 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
00649 return -1;
00650 }
00651 if(s->interlaced != ( s->height > 288 ))
00652 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
00653 }
00654
00655 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
00656 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
00657 return -1;
00658 }
00659
00660 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
00661 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
00662 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
00663 if(s->context)
00664 ((uint8_t*)avctx->extradata)[2]|= 0x40;
00665 ((uint8_t*)avctx->extradata)[3]= 0;
00666 s->avctx->extradata_size= 4;
00667
00668 if(avctx->stats_in){
00669 char *p= avctx->stats_in;
00670
00671 for(i=0; i<3; i++)
00672 for(j=0; j<256; j++)
00673 s->stats[i][j]= 1;
00674
00675 for(;;){
00676 for(i=0; i<3; i++){
00677 char *next;
00678
00679 for(j=0; j<256; j++){
00680 s->stats[i][j]+= strtol(p, &next, 0);
00681 if(next==p) return -1;
00682 p=next;
00683 }
00684 }
00685 if(p[0]==0 || p[1]==0 || p[2]==0) break;
00686 }
00687 }else{
00688 for(i=0; i<3; i++)
00689 for(j=0; j<256; j++){
00690 int d= FFMIN(j, 256-j);
00691
00692 s->stats[i][j]= 100000000/(d+1);
00693 }
00694 }
00695
00696 for(i=0; i<3; i++){
00697 generate_len_table(s->len[i], s->stats[i]);
00698
00699 if(generate_bits_table(s->bits[i], s->len[i])<0){
00700 return -1;
00701 }
00702
00703 s->avctx->extradata_size+=
00704 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
00705 }
00706
00707 if(s->context){
00708 for(i=0; i<3; i++){
00709 int pels = s->width*s->height / (i?40:10);
00710 for(j=0; j<256; j++){
00711 int d= FFMIN(j, 256-j);
00712 s->stats[i][j]= pels/(d+1);
00713 }
00714 }
00715 }else{
00716 for(i=0; i<3; i++)
00717 for(j=0; j<256; j++)
00718 s->stats[i][j]= 0;
00719 }
00720
00721
00722
00723 alloc_temp(s);
00724
00725 s->picture_number=0;
00726
00727 return 0;
00728 }
00729 #endif
00730
00731
00732
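/* Decode two symbols at once through the joint table; the 0xffff sentinel
 * marks pairs that are not in the table, in which case each symbol is
 * decoded with its own VLC. */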
00733 #define READ_2PIX(dst0, dst1, plane1){\
00734 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
00735 if(code != 0xffff){\
00736 dst0 = code>>8;\
00737 dst1 = code;\
00738 }else{\
00739 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
00740 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
00741 }\
00742 }
00743
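/* In the two functions below, one loop iteration reads at most four codes
 * (two in the gray case) of up to 31 bits each, so the per-iteration bounds
 * check is only needed when the remaining input might not cover count
 * worst-case iterations. */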
00744 static void decode_422_bitstream(HYuvContext *s, int count){
00745 int i;
00746
00747 count/=2;
00748
00749 if(count >= (get_bits_left(&s->gb))/(31*4)){
00750 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
00751 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
00752 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
00753 }
00754 }else{
00755 for(i=0; i<count; i++){
00756 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
00757 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
00758 }
00759 }
00760 }
00761
00762 static void decode_gray_bitstream(HYuvContext *s, int count){
00763 int i;
00764
00765 count/=2;
00766
00767 if(count >= (get_bits_left(&s->gb))/(31*2)){
00768 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
00769 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
00770 }
00771 }else{
00772 for(i=0; i<count; i++){
00773 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
00774 }
00775 }
00776 }
00777
00778 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
00779 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
00780 int i;
00781 const uint8_t *y = s->temp[0] + offset;
00782 const uint8_t *u = s->temp[1] + offset/2;
00783 const uint8_t *v = s->temp[2] + offset/2;
00784
00785 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
00786 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
00787 return -1;
00788 }
00789
00790 #define LOAD4\
00791 int y0 = y[2*i];\
00792 int y1 = y[2*i+1];\
00793 int u0 = u[i];\
00794 int v0 = v[i];
00795
00796 count/=2;
00797 if(s->flags&CODEC_FLAG_PASS1){
00798 for(i=0; i<count; i++){
00799 LOAD4;
00800 s->stats[0][y0]++;
00801 s->stats[1][u0]++;
00802 s->stats[0][y1]++;
00803 s->stats[2][v0]++;
00804 }
00805 }
00806 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
00807 return 0;
00808 if(s->context){
00809 for(i=0; i<count; i++){
00810 LOAD4;
00811 s->stats[0][y0]++;
00812 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
00813 s->stats[1][u0]++;
00814 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
00815 s->stats[0][y1]++;
00816 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
00817 s->stats[2][v0]++;
00818 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
00819 }
00820 }else{
00821 for(i=0; i<count; i++){
00822 LOAD4;
00823 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
00824 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
00825 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
00826 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
00827 }
00828 }
00829 return 0;
00830 }
00831
00832 static int encode_gray_bitstream(HYuvContext *s, int count){
00833 int i;
00834
00835 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
00836 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
00837 return -1;
00838 }
00839
00840 #define LOAD2\
00841 int y0 = s->temp[0][2*i];\
00842 int y1 = s->temp[0][2*i+1];
00843 #define STAT2\
00844 s->stats[0][y0]++;\
00845 s->stats[0][y1]++;
00846 #define WRITE2\
00847 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
00848 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
00849
00850 count/=2;
00851 if(s->flags&CODEC_FLAG_PASS1){
00852 for(i=0; i<count; i++){
00853 LOAD2;
00854 STAT2;
00855 }
00856 }
00857 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
00858 return 0;
00859
00860 if(s->context){
00861 for(i=0; i<count; i++){
00862 LOAD2;
00863 STAT2;
00864 WRITE2;
00865 }
00866 }else{
00867 for(i=0; i<count; i++){
00868 LOAD2;
00869 WRITE2;
00870 }
00871 }
00872 return 0;
00873 }
00874 #endif
00875
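/* Decode count packed pixels: try the joint B/G/R table first, falling back
 * to per-component VLCs. With decorrelate set, B and R are coded as
 * differences against G; the alpha component, when present, reuses the
 * plane-2 tables. */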
00876 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
00877 int i;
00878 for(i=0; i<count; i++){
00879 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
00880 if(code != -1){
00881 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
00882 }else if(decorrelate){
00883 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
00884 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
00885 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
00886 }else{
00887 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
00888 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
00889 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
00890 }
00891 if(alpha)
00892 s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
00893 }
00894 }
00895
00896 static void decode_bgr_bitstream(HYuvContext *s, int count){
00897 if(s->decorrelate){
00898 if(s->bitstream_bpp==24)
00899 decode_bgr_1(s, count, 1, 0);
00900 else
00901 decode_bgr_1(s, count, 1, 1);
00902 }else{
00903 if(s->bitstream_bpp==24)
00904 decode_bgr_1(s, count, 0, 0);
00905 else
00906 decode_bgr_1(s, count, 0, 1);
00907 }
00908 }
00909
00910 static int encode_bgr_bitstream(HYuvContext *s, int count){
00911 int i;
00912
00913 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
00914 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
00915 return -1;
00916 }
00917
00918 #define LOAD3\
00919 int g= s->temp[0][4*i+G];\
00920 int b= (s->temp[0][4*i+B] - g) & 0xff;\
00921 int r= (s->temp[0][4*i+R] - g) & 0xff;
00922 #define STAT3\
00923 s->stats[0][b]++;\
00924 s->stats[1][g]++;\
00925 s->stats[2][r]++;
00926 #define WRITE3\
00927 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
00928 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
00929 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
00930
00931 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
00932 for(i=0; i<count; i++){
00933 LOAD3;
00934 STAT3;
00935 }
00936 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
00937 for(i=0; i<count; i++){
00938 LOAD3;
00939 STAT3;
00940 WRITE3;
00941 }
00942 }else{
00943 for(i=0; i<count; i++){
00944 LOAD3;
00945 WRITE3;
00946 }
00947 }
00948 return 0;
00949 }
00950
00951 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
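/* Pass the rows finished since the last call to the application through
 * draw_horiz_band(). */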
00952 static void draw_slice(HYuvContext *s, int y){
00953 int h, cy, i;
00954 int offset[AV_NUM_DATA_POINTERS];
00955
00956 if(s->avctx->draw_horiz_band==NULL)
00957 return;
00958
00959 h= y - s->last_slice_end;
00960 y -= h;
00961
00962 if(s->bitstream_bpp==12){
00963 cy= y>>1;
00964 }else{
00965 cy= y;
00966 }
00967
00968 offset[0] = s->picture.linesize[0]*y;
00969 offset[1] = s->picture.linesize[1]*cy;
00970 offset[2] = s->picture.linesize[2]*cy;
00971 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
00972 offset[i] = 0;
00973 emms_c();
00974
00975 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
00976
00977 s->last_slice_end= y + h;
00978 }
00979
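/* Decode one frame: the input is byte-swapped into bitstream_buffer, the
 * per-frame tables are read when context modelling is enabled, then each row
 * is entropy-decoded and run through the inverse of the selected predictor
 * (left, plane or median). */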
00980 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
00981 const uint8_t *buf = avpkt->data;
00982 int buf_size = avpkt->size;
00983 HYuvContext *s = avctx->priv_data;
00984 const int width= s->width;
00985 const int width2= s->width>>1;
00986 const int height= s->height;
00987 int fake_ystride, fake_ustride, fake_vstride;
00988 AVFrame * const p= &s->picture;
00989 int table_size = 0, ret;
00990
00991 AVFrame *picture = data;
00992
00993 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
00994 if (!s->bitstream_buffer)
00995 return AVERROR(ENOMEM);
00996
00997 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
00998 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
00999
01000 if(p->data[0])
01001 ff_thread_release_buffer(avctx, p);
01002
01003 p->reference= 0;
01004 if ((ret = ff_thread_get_buffer(avctx, p)) < 0) {
01005 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
01006 return ret;
01007 }
01008
01009 if(s->context){
01010 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
01011 if(table_size < 0)
01012 return table_size;
01013 }
01014
01015 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
01016 return -1;
01017
01018 if ((ret = init_get_bits(&s->gb, s->bitstream_buffer + table_size,
01019 (buf_size - table_size) * 8)) < 0)
01020 return ret;
01021
01022 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
01023 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
01024 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
01025
01026 s->last_slice_end= 0;
01027
01028 if(s->bitstream_bpp<24){
01029 int y, cy;
01030 int lefty, leftu, leftv;
01031 int lefttopy, lefttopu, lefttopv;
01032
01033 if(s->yuy2){
01034 p->data[0][3]= get_bits(&s->gb, 8);
01035 p->data[0][2]= get_bits(&s->gb, 8);
01036 p->data[0][1]= get_bits(&s->gb, 8);
01037 p->data[0][0]= get_bits(&s->gb, 8);
01038
01039 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
01040 return -1;
01041 }else{
01042
01043 leftv= p->data[2][0]= get_bits(&s->gb, 8);
01044 lefty= p->data[0][1]= get_bits(&s->gb, 8);
01045 leftu= p->data[1][0]= get_bits(&s->gb, 8);
01046 p->data[0][0]= get_bits(&s->gb, 8);
01047
01048 switch(s->predictor){
01049 case LEFT:
01050 case PLANE:
01051 decode_422_bitstream(s, width-2);
01052 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
01053 if(!(s->flags&CODEC_FLAG_GRAY)){
01054 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
01055 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
01056 }
01057
01058 for(cy=y=1; y<s->height; y++,cy++){
01059 uint8_t *ydst, *udst, *vdst;
01060
01061 if(s->bitstream_bpp==12){
01062 decode_gray_bitstream(s, width);
01063
01064 ydst= p->data[0] + p->linesize[0]*y;
01065
01066 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
01067 if(s->predictor == PLANE){
01068 if(y>s->interlaced)
01069 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
01070 }
01071 y++;
01072 if(y>=s->height) break;
01073 }
01074
01075 draw_slice(s, y);
01076
01077 ydst= p->data[0] + p->linesize[0]*y;
01078 udst= p->data[1] + p->linesize[1]*cy;
01079 vdst= p->data[2] + p->linesize[2]*cy;
01080
01081 decode_422_bitstream(s, width);
01082 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
01083 if(!(s->flags&CODEC_FLAG_GRAY)){
01084 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
01085 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
01086 }
01087 if(s->predictor == PLANE){
01088 if(cy>s->interlaced){
01089 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
01090 if(!(s->flags&CODEC_FLAG_GRAY)){
01091 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
01092 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
01093 }
01094 }
01095 }
01096 }
01097 draw_slice(s, height);
01098
01099 break;
01100 case MEDIAN:
01101
01102 decode_422_bitstream(s, width-2);
01103 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
01104 if(!(s->flags&CODEC_FLAG_GRAY)){
01105 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
01106 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
01107 }
01108
01109 cy=y=1;
01110
01111
01112 if(s->interlaced){
01113 decode_422_bitstream(s, width);
01114 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
01115 if(!(s->flags&CODEC_FLAG_GRAY)){
01116 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
01117 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
01118 }
01119 y++; cy++;
01120 }
01121
01122
01123 decode_422_bitstream(s, 4);
01124 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
01125 if(!(s->flags&CODEC_FLAG_GRAY)){
01126 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
01127 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
01128 }
01129
01130
01131 lefttopy= p->data[0][3];
01132 decode_422_bitstream(s, width-4);
01133 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
01134 if(!(s->flags&CODEC_FLAG_GRAY)){
01135 lefttopu= p->data[1][1];
01136 lefttopv= p->data[2][1];
01137 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
01138 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
01139 }
01140 y++; cy++;
01141
01142 for(; y<height; y++,cy++){
01143 uint8_t *ydst, *udst, *vdst;
01144
01145 if(s->bitstream_bpp==12){
01146 while(2*cy > y){
01147 decode_gray_bitstream(s, width);
01148 ydst= p->data[0] + p->linesize[0]*y;
01149 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
01150 y++;
01151 }
01152 if(y>=height) break;
01153 }
01154 draw_slice(s, y);
01155
01156 decode_422_bitstream(s, width);
01157
01158 ydst= p->data[0] + p->linesize[0]*y;
01159 udst= p->data[1] + p->linesize[1]*cy;
01160 vdst= p->data[2] + p->linesize[2]*cy;
01161
01162 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
01163 if(!(s->flags&CODEC_FLAG_GRAY)){
01164 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
01165 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
01166 }
01167 }
01168
01169 draw_slice(s, height);
01170 break;
01171 }
01172 }
01173 }else{
01174 int y;
01175 int leftr, leftg, leftb, lefta;
01176 const int last_line= (height-1)*p->linesize[0];
01177
01178 if(s->bitstream_bpp==32){
01179 lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
01180 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
01181 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
01182 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
01183 }else{
01184 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
01185 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
01186 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
01187 lefta= p->data[0][last_line+A]= 255;
01188 skip_bits(&s->gb, 8);
01189 }
01190
01191 if(s->bgr32){
01192 switch(s->predictor){
01193 case LEFT:
01194 case PLANE:
01195 decode_bgr_bitstream(s, width-1);
01196 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
01197
01198 for(y=s->height-2; y>=0; y--){
01199 decode_bgr_bitstream(s, width);
01200
01201 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
01202 if(s->predictor == PLANE){
01203 if(s->bitstream_bpp!=32) lefta=0;
01204 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
01205 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
01206 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
01207 }
01208 }
01209 }
01210 draw_slice(s, height);
01211 break;
01212 default:
01213 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
01214 }
01215 }else{
01216
01217 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
01218 return -1;
01219 }
01220 }
01221 emms_c();
01222
01223 *picture= *p;
01224 *data_size = sizeof(AVFrame);
01225
01226 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
01227 }
01228 #endif
01229
01230 static int common_end(HYuvContext *s){
01231 int i;
01232
01233 for(i=0; i<3; i++){
01234 av_freep(&s->temp[i]);
01235 }
01236 return 0;
01237 }
01238
01239 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
01240 static av_cold int decode_end(AVCodecContext *avctx)
01241 {
01242 HYuvContext *s = avctx->priv_data;
01243 int i;
01244
01245 if (s->picture.data[0])
01246 avctx->release_buffer(avctx, &s->picture);
01247
01248 common_end(s);
01249 av_freep(&s->bitstream_buffer);
01250
01251 for(i=0; i<6; i++){
01252 ff_free_vlc(&s->vlc[i]);
01253 }
01254
01255 return 0;
01256 }
01257 #endif
01258
01259 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
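/* Encode one frame, mirroring the decoder: rows are predicted (left, plane
 * or median), the residuals entropy-coded, and with context modelling the
 * updated tables are stored ahead of the coded data; the finished buffer is
 * byte-swapped into HuffYUV's 32-bit word order. */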
01260 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
01261 HYuvContext *s = avctx->priv_data;
01262 AVFrame *pict = data;
01263 const int width= s->width;
01264 const int width2= s->width>>1;
01265 const int height= s->height;
01266 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
01267 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
01268 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
01269 AVFrame * const p= &s->picture;
01270 int i, j, size=0;
01271
01272 *p = *pict;
01273 p->pict_type= AV_PICTURE_TYPE_I;
01274 p->key_frame= 1;
01275
01276 if(s->context){
01277 for(i=0; i<3; i++){
01278 generate_len_table(s->len[i], s->stats[i]);
01279 if(generate_bits_table(s->bits[i], s->len[i])<0)
01280 return -1;
01281 size+= store_table(s, s->len[i], &buf[size]);
01282 }
01283
01284 for(i=0; i<3; i++)
01285 for(j=0; j<256; j++)
01286 s->stats[i][j] >>= 1;
01287 }
01288
01289 init_put_bits(&s->pb, buf+size, buf_size-size);
01290
01291 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
01292 int lefty, leftu, leftv, y, cy;
01293
01294 put_bits(&s->pb, 8, leftv= p->data[2][0]);
01295 put_bits(&s->pb, 8, lefty= p->data[0][1]);
01296 put_bits(&s->pb, 8, leftu= p->data[1][0]);
01297 put_bits(&s->pb, 8, p->data[0][0]);
01298
01299 lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
01300 leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
01301 leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
01302
01303 encode_422_bitstream(s, 2, width-2);
01304
01305 if(s->predictor==MEDIAN){
01306 int lefttopy, lefttopu, lefttopv;
01307 cy=y=1;
01308 if(s->interlaced){
01309 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
01310 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
01311 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
01312
01313 encode_422_bitstream(s, 0, width);
01314 y++; cy++;
01315 }
01316
01317 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
01318 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
01319 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
01320
01321 encode_422_bitstream(s, 0, 4);
01322
01323 lefttopy= p->data[0][3];
01324 lefttopu= p->data[1][1];
01325 lefttopv= p->data[2][1];
01326 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
01327 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
01328 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
01329 encode_422_bitstream(s, 0, width-4);
01330 y++; cy++;
01331
01332 for(; y<height; y++,cy++){
01333 uint8_t *ydst, *udst, *vdst;
01334
01335 if(s->bitstream_bpp==12){
01336 while(2*cy > y){
01337 ydst= p->data[0] + p->linesize[0]*y;
01338 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
01339 encode_gray_bitstream(s, width);
01340 y++;
01341 }
01342 if(y>=height) break;
01343 }
01344 ydst= p->data[0] + p->linesize[0]*y;
01345 udst= p->data[1] + p->linesize[1]*cy;
01346 vdst= p->data[2] + p->linesize[2]*cy;
01347
01348 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
01349 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
01350 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
01351
01352 encode_422_bitstream(s, 0, width);
01353 }
01354 }else{
01355 for(cy=y=1; y<height; y++,cy++){
01356 uint8_t *ydst, *udst, *vdst;
01357
01358
01359 if(s->bitstream_bpp==12){
01360 ydst= p->data[0] + p->linesize[0]*y;
01361
01362 if(s->predictor == PLANE && s->interlaced < y){
01363 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
01364
01365 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
01366 }else{
01367 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
01368 }
01369 encode_gray_bitstream(s, width);
01370 y++;
01371 if(y>=height) break;
01372 }
01373
01374 ydst= p->data[0] + p->linesize[0]*y;
01375 udst= p->data[1] + p->linesize[1]*cy;
01376 vdst= p->data[2] + p->linesize[2]*cy;
01377
01378 if(s->predictor == PLANE && s->interlaced < cy){
01379 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
01380 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
01381 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
01382
01383 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
01384 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
01385 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
01386 }else{
01387 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
01388 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
01389 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
01390 }
01391
01392 encode_422_bitstream(s, 0, width);
01393 }
01394 }
01395 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
01396 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
01397 const int stride = -p->linesize[0];
01398 const int fake_stride = -fake_ystride;
01399 int y;
01400 int leftr, leftg, leftb;
01401
01402 put_bits(&s->pb, 8, leftr= data[R]);
01403 put_bits(&s->pb, 8, leftg= data[G]);
01404 put_bits(&s->pb, 8, leftb= data[B]);
01405 put_bits(&s->pb, 8, 0);
01406
01407 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
01408 encode_bgr_bitstream(s, width-1);
01409
01410 for(y=1; y<s->height; y++){
01411 uint8_t *dst = data + y*stride;
01412 if(s->predictor == PLANE && s->interlaced < y){
01413 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
01414 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
01415 }else{
01416 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
01417 }
01418 encode_bgr_bitstream(s, width);
01419 }
01420 }else{
01421 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
01422 }
01423 emms_c();
01424
01425 size+= (put_bits_count(&s->pb)+31)/8;
01426 put_bits(&s->pb, 16, 0);
01427 put_bits(&s->pb, 15, 0);
01428 size/= 4;
01429
01430 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
01431 int j;
01432 char *p= avctx->stats_out;
01433 char *end= p + 1024*30;
01434 for(i=0; i<3; i++){
01435 for(j=0; j<256; j++){
01436 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
01437 p+= strlen(p);
01438 s->stats[i][j]= 0;
01439 }
01440 snprintf(p, end-p, "\n");
01441 p++;
01442 }
01443 } else
01444 avctx->stats_out[0] = '\0';
01445 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
01446 flush_put_bits(&s->pb);
01447 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
01448 }
01449
01450 s->picture_number++;
01451
01452 return size*4;
01453 }
01454
01455 static av_cold int encode_end(AVCodecContext *avctx)
01456 {
01457 HYuvContext *s = avctx->priv_data;
01458
01459 common_end(s);
01460
01461 av_freep(&avctx->extradata);
01462 av_freep(&avctx->stats_out);
01463
01464 return 0;
01465 }
01466 #endif
01467
01468 #if CONFIG_HUFFYUV_DECODER
01469 AVCodec ff_huffyuv_decoder = {
01470 .name = "huffyuv",
01471 .type = AVMEDIA_TYPE_VIDEO,
01472 .id = CODEC_ID_HUFFYUV,
01473 .priv_data_size = sizeof(HYuvContext),
01474 .init = decode_init,
01475 .close = decode_end,
01476 .decode = decode_frame,
01477 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
01478 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
01479 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
01480 };
01481 #endif
01482
01483 #if CONFIG_FFVHUFF_DECODER
01484 AVCodec ff_ffvhuff_decoder = {
01485 .name = "ffvhuff",
01486 .type = AVMEDIA_TYPE_VIDEO,
01487 .id = CODEC_ID_FFVHUFF,
01488 .priv_data_size = sizeof(HYuvContext),
01489 .init = decode_init,
01490 .close = decode_end,
01491 .decode = decode_frame,
01492 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
01493 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
01494 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
01495 };
01496 #endif
01497
01498 #if CONFIG_HUFFYUV_ENCODER
01499 AVCodec ff_huffyuv_encoder = {
01500 .name = "huffyuv",
01501 .type = AVMEDIA_TYPE_VIDEO,
01502 .id = CODEC_ID_HUFFYUV,
01503 .priv_data_size = sizeof(HYuvContext),
01504 .init = encode_init,
01505 .encode = encode_frame,
01506 .close = encode_end,
01507 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
01508 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
01509 };
01510 #endif
01511
01512 #if CONFIG_FFVHUFF_ENCODER
01513 AVCodec ff_ffvhuff_encoder = {
01514 .name = "ffvhuff",
01515 .type = AVMEDIA_TYPE_VIDEO,
01516 .id = CODEC_ID_FFVHUFF,
01517 .priv_data_size = sizeof(HYuvContext),
01518 .init = encode_init,
01519 .encode = encode_frame,
01520 .close = encode_end,
01521 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
01522 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
01523 };
01524 #endif