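/**
 * @file
 * The simplest mpeg encoder (well, it was the simplest!).
 */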
00030 #include "libavutil/intmath.h"
00031 #include "libavutil/mathematics.h"
00032 #include "libavutil/opt.h"
00033 #include "avcodec.h"
00034 #include "dsputil.h"
00035 #include "mpegvideo.h"
00036 #include "mpegvideo_common.h"
00037 #include "h263.h"
00038 #include "mjpegenc.h"
00039 #include "msmpeg4.h"
00040 #include "faandct.h"
00041 #include "thread.h"
00042 #include "aandcttab.h"
00043 #include "flv.h"
00044 #include "mpeg4video.h"
00045 #include "internal.h"
00046 #include <limits.h>
00051 static int encode_picture(MpegEncContext *s, int picture_number);
00052 static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
00053 static int sse_mb(MpegEncContext *s);
00054 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
00055 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
00062 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
00063 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
00064
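/* Build the fixed-point quantization tables: for every qscale in [qmin, qmax],
 * qmat[] (and, on the SIMD path, qmat16[] with its rounding bias) holds a
 * scaled reciprocal of qscale * quant_matrix[], so the quantizers can replace
 * the per-coefficient division by a multiply and shift. */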
00065 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
00066 uint16_t (*qmat16)[2][64],
00067 const uint16_t *quant_matrix,
00068 int bias, int qmin, int qmax, int intra)
00069 {
00070 int qscale;
00071 int shift = 0;
00072
00073 for (qscale = qmin; qscale <= qmax; qscale++) {
00074 int i;
00075 if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
00076 dsp->fdct == ff_jpeg_fdct_islow_10
00077 #ifdef FAAN_POSTSCALE
00078 || dsp->fdct == ff_faandct
00079 #endif
00080 ) {
00081 for (i = 0; i < 64; i++) {
00082 const int j = dsp->idct_permutation[i];
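/* Accurate (unscaled) fDCTs: only qscale * quant_matrix[] has to be folded
 * into the reciprocal. */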
00089 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
00090 (qscale * quant_matrix[j]));
00091 }
00092 } else if (dsp->fdct == fdct_ifast
00093 #ifndef FAAN_POSTSCALE
00094 || dsp->fdct == ff_faandct
00095 #endif
00096 ) {
00097 for (i = 0; i < 64; i++) {
00098 const int j = dsp->idct_permutation[i];
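/* fdct_ifast (and faandct without FAAN_POSTSCALE) leaves the AAN scale factors
 * in the coefficients, so ff_aanscales[] (a 2^14 fixed-point table) is divided
 * out here as well, hence the extra 14 bits of precision. */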
00105 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
00106 (ff_aanscales[i] * qscale *
00107 quant_matrix[j]));
00108 }
00109 } else {
00110 for (i = 0; i < 64; i++) {
00111 const int j = dsp->idct_permutation[i];
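/* Default path: build both the 32-bit table and the 16-bit table (plus
 * rounding bias) used by the SIMD quantizer. */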
00117 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
00118 (qscale * quant_matrix[j]));
00119
00120
00121 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
00122 (qscale * quant_matrix[j]);
00123
00124 if (qmat16[qscale][0][i] == 0 ||
00125 qmat16[qscale][0][i] == 128 * 256)
00126 qmat16[qscale][0][i] = 128 * 256 - 1;
00127 qmat16[qscale][1][i] =
00128 ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
00129 qmat16[qscale][0][i]);
00130 }
00131 }
00132
00133 for (i = intra; i < 64; i++) {
00134 int64_t max = 8191;
00135 if (dsp->fdct == fdct_ifast
00136 #ifndef FAAN_POSTSCALE
00137 || dsp->fdct == ff_faandct
00138 #endif
00139 ) {
00140 max = (8191LL * ff_aanscales[i]) >> 14;
00141 }
00142 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
00143 shift++;
00144 }
00145 }
00146 }
00147 if (shift) {
00148 av_log(NULL, AV_LOG_INFO,
00149 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
00150 QMAT_SHIFT - shift);
00151 }
00152 }
00153
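/* Derive the quantizer from the current lambda: roughly
 * qscale = lambda / FF_QP2LAMBDA, clipped to [qmin, qmax], and keep lambda2
 * in sync for the rate-distortion decisions. */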
00154 static inline void update_qscale(MpegEncContext *s)
00155 {
00156 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
00157 (FF_LAMBDA_SHIFT + 7);
00158 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
00159
00160 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
00161 FF_LAMBDA_SHIFT;
00162 }
00163
00164 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
00165 {
00166 int i;
00167
00168 if (matrix) {
00169 put_bits(pb, 1, 1);
00170 for (i = 0; i < 64; i++) {
00171 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
00172 }
00173 } else
00174 put_bits(pb, 1, 0);
00175 }
00176
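/**
 * Initialize s->current_picture.qscale_table from s->lambda_table, using the
 * same lambda -> qscale mapping as update_qscale().
 */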
00180 void ff_init_qscale_tab(MpegEncContext *s)
00181 {
00182 int8_t * const qscale_table = s->current_picture.f.qscale_table;
00183 int i;
00184
00185 for (i = 0; i < s->mb_num; i++) {
00186 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
00187 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
00188 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
00189 s->avctx->qmax);
00190 }
00191 }
00192
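/* Copy the frame properties (type, quality, picture numbers, pts, interlacing
 * flags and, when motion-estimation thresholds are in use, the motion data)
 * from src to dst. */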
00193 static void copy_picture_attributes(MpegEncContext *s,
00194 AVFrame *dst,
00195 AVFrame *src)
00196 {
00197 int i;
00198
00199 dst->pict_type = src->pict_type;
00200 dst->quality = src->quality;
00201 dst->coded_picture_number = src->coded_picture_number;
00202 dst->display_picture_number = src->display_picture_number;
00203
00204 dst->pts = src->pts;
00205 dst->interlaced_frame = src->interlaced_frame;
00206 dst->top_field_first = src->top_field_first;
00207
00208 if (s->avctx->me_threshold) {
00209 if (!src->motion_val[0])
00210 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
00211 if (!src->mb_type)
00212 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
00213 if (!src->ref_index[0])
00214 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
00215 if (src->motion_subsample_log2 != dst->motion_subsample_log2)
00216 av_log(s->avctx, AV_LOG_ERROR,
00217 "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
00218 src->motion_subsample_log2, dst->motion_subsample_log2);
00219
00220 memcpy(dst->mb_type, src->mb_type,
00221 s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
00222
00223 for (i = 0; i < 2; i++) {
00224 int stride = ((16 * s->mb_width ) >>
00225 src->motion_subsample_log2) + 1;
00226 int height = ((16 * s->mb_height) >> src->motion_subsample_log2);
00227
00228 if (src->motion_val[i] &&
00229 src->motion_val[i] != dst->motion_val[i]) {
00230 memcpy(dst->motion_val[i], src->motion_val[i],
00231 2 * stride * height * sizeof(int16_t));
00232 }
00233 if (src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]) {
00234 memcpy(dst->ref_index[i], src->ref_index[i],
00235 s->mb_stride * 4 * s->mb_height * sizeof(int8_t));
00236 }
00237 }
00238 }
00239 }
00240
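/* Sync the frame-level fields (picture type, f/b codes, lambda/qscale and GOP
 * state) from src into a duplicate slice context. */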
00241 static void update_duplicate_context_after_me(MpegEncContext *dst,
00242 MpegEncContext *src)
00243 {
00244 #define COPY(a) dst->a= src->a
00245 COPY(pict_type);
00246 COPY(current_picture);
00247 COPY(f_code);
00248 COPY(b_code);
00249 COPY(qscale);
00250 COPY(lambda);
00251 COPY(lambda2);
00252 COPY(picture_in_gop_number);
00253 COPY(gop_picture_number);
00254 COPY(frame_pred_frame_dct);
00255 COPY(progressive_frame);
00256 COPY(partitioned_frame);
00257 #undef COPY
00258 }
00259
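/**
 * Set the given MpegEncContext to defaults for encoding.
 * The changed fields will not depend upon the prior state of the MpegEncContext.
 */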
00264 static void MPV_encode_defaults(MpegEncContext *s)
00265 {
00266 int i;
00267 MPV_common_defaults(s);
00268
00269 for (i = -16; i < 16; i++) {
00270 default_fcode_tab[i + MAX_MV] = 1;
00271 }
00272 s->me.mv_penalty = default_mv_penalty;
00273 s->fcode_tab = default_fcode_tab;
00274 }
00275
00276
00277 av_cold int MPV_encode_init(AVCodecContext *avctx)
00278 {
00279 MpegEncContext *s = avctx->priv_data;
00280 int i;
00281 int chroma_h_shift, chroma_v_shift;
00282
00283 MPV_encode_defaults(s);
00284
00285 switch (avctx->codec_id) {
00286 case CODEC_ID_MPEG2VIDEO:
00287 if (avctx->pix_fmt != PIX_FMT_YUV420P &&
00288 avctx->pix_fmt != PIX_FMT_YUV422P) {
00289 av_log(avctx, AV_LOG_ERROR,
00290 "only YUV420 and YUV422 are supported\n");
00291 return -1;
00292 }
00293 break;
00294 case CODEC_ID_LJPEG:
00295 if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
00296 avctx->pix_fmt != PIX_FMT_YUVJ422P &&
00297 avctx->pix_fmt != PIX_FMT_YUVJ444P &&
00298 avctx->pix_fmt != PIX_FMT_BGRA &&
00299 ((avctx->pix_fmt != PIX_FMT_YUV420P &&
00300 avctx->pix_fmt != PIX_FMT_YUV422P &&
00301 avctx->pix_fmt != PIX_FMT_YUV444P) ||
00302 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
00303 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
00304 return -1;
00305 }
00306 break;
00307 case CODEC_ID_MJPEG:
00308 if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
00309 avctx->pix_fmt != PIX_FMT_YUVJ422P &&
00310 ((avctx->pix_fmt != PIX_FMT_YUV420P &&
00311 avctx->pix_fmt != PIX_FMT_YUV422P) ||
00312 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
00313 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
00314 return -1;
00315 }
00316 break;
00317 default:
00318 if (avctx->pix_fmt != PIX_FMT_YUV420P) {
00319 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
00320 return -1;
00321 }
00322 }
00323
00324 switch (avctx->pix_fmt) {
00325 case PIX_FMT_YUVJ422P:
00326 case PIX_FMT_YUV422P:
00327 s->chroma_format = CHROMA_422;
00328 break;
00329 case PIX_FMT_YUVJ420P:
00330 case PIX_FMT_YUV420P:
00331 default:
00332 s->chroma_format = CHROMA_420;
00333 break;
00334 }
00335
00336 s->bit_rate = avctx->bit_rate;
00337 s->width = avctx->width;
00338 s->height = avctx->height;
00339 if (avctx->gop_size > 600 &&
00340 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
00341 av_log(avctx, AV_LOG_ERROR,
00342 "Warning keyframe interval too large! reducing it ...\n");
00343 avctx->gop_size = 600;
00344 }
00345 s->gop_size = avctx->gop_size;
00346 s->avctx = avctx;
00347 s->flags = avctx->flags;
00348 s->flags2 = avctx->flags2;
00349 s->max_b_frames = avctx->max_b_frames;
00350 s->codec_id = avctx->codec->id;
00351 s->luma_elim_threshold = avctx->luma_elim_threshold;
00352 s->chroma_elim_threshold = avctx->chroma_elim_threshold;
00353 s->strict_std_compliance = avctx->strict_std_compliance;
00354 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
00355 if (avctx->flags & CODEC_FLAG_PART)
00356 s->data_partitioning = 1;
00357 #endif
00358 s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
00359 s->mpeg_quant = avctx->mpeg_quant;
00360 s->rtp_mode = !!avctx->rtp_payload_size;
00361 s->intra_dc_precision = avctx->intra_dc_precision;
00362 s->user_specified_pts = AV_NOPTS_VALUE;
00363
00364 if (s->gop_size <= 1) {
00365 s->intra_only = 1;
00366 s->gop_size = 12;
00367 } else {
00368 s->intra_only = 0;
00369 }
00370
00371 s->me_method = avctx->me_method;
00372
00373
00374 s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
00375
00376 s->adaptive_quant = (s->avctx->lumi_masking ||
00377 s->avctx->dark_masking ||
00378 s->avctx->temporal_cplx_masking ||
00379 s->avctx->spatial_cplx_masking ||
00380 s->avctx->p_masking ||
00381 s->avctx->border_masking ||
00382 (s->flags & CODEC_FLAG_QP_RD)) &&
00383 !s->fixed_qscale;
00384
00385 s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
00386 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
00387 s->alternate_scan = !!(s->flags & CODEC_FLAG_ALT_SCAN);
00388 s->intra_vlc_format = !!(s->flags2 & CODEC_FLAG2_INTRA_VLC);
00389 s->q_scale_type = !!(s->flags2 & CODEC_FLAG2_NON_LINEAR_QUANT);
00390 s->obmc = !!(s->flags & CODEC_FLAG_OBMC);
00391 #endif
00392
00393 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
00394 av_log(avctx, AV_LOG_ERROR,
00395 "a vbv buffer size is needed, "
00396 "for encoding with a maximum bitrate\n");
00397 return -1;
00398 }
00399
00400 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
00401 av_log(avctx, AV_LOG_INFO,
00402 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
00403 }
00404
00405 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
00406 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
00407 return -1;
00408 }
00409
00410 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
00411 av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
00412 return -1;
00413 }
00414
00415 if (avctx->rc_max_rate &&
00416 avctx->rc_max_rate == avctx->bit_rate &&
00417 avctx->rc_max_rate != avctx->rc_min_rate) {
00418 av_log(avctx, AV_LOG_INFO,
00419 "impossible bitrate constraints, this will fail\n");
00420 }
00421
00422 if (avctx->rc_buffer_size &&
00423 avctx->bit_rate * (int64_t)avctx->time_base.num >
00424 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
00425 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
00426 return -1;
00427 }
00428
00429 if (!s->fixed_qscale &&
00430 avctx->bit_rate * av_q2d(avctx->time_base) >
00431 avctx->bit_rate_tolerance) {
00432 av_log(avctx, AV_LOG_ERROR,
00433 "bitrate tolerance too small for bitrate\n");
00434 return -1;
00435 }
00436
00437 if (s->avctx->rc_max_rate &&
00438 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
00439 (s->codec_id == CODEC_ID_MPEG1VIDEO ||
00440 s->codec_id == CODEC_ID_MPEG2VIDEO) &&
00441 90000LL * (avctx->rc_buffer_size - 1) >
00442 s->avctx->rc_max_rate * 0xFFFFLL) {
00443 av_log(avctx, AV_LOG_INFO,
00444 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
00445 "specified vbv buffer is too large for the given bitrate!\n");
00446 }
00447
00448 if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4 &&
00449 s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P &&
00450 s->codec_id != CODEC_ID_FLV1) {
00451 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
00452 return -1;
00453 }
00454
00455 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
00456 av_log(avctx, AV_LOG_ERROR,
00457 "OBMC is only supported with simple mb decision\n");
00458 return -1;
00459 }
00460
00461 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
00462 if (s->obmc && s->codec_id != CODEC_ID_H263 &&
00463 s->codec_id != CODEC_ID_H263P) {
00464 av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n");
00465 return -1;
00466 }
00467 #endif
00468
00469 if (s->quarter_sample && s->codec_id != CODEC_ID_MPEG4) {
00470 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
00471 return -1;
00472 }
00473
00474 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
00475 if (s->data_partitioning && s->codec_id != CODEC_ID_MPEG4) {
00476 av_log(avctx, AV_LOG_ERROR,
00477 "data partitioning not supported by codec\n");
00478 return -1;
00479 }
00480 #endif
00481
00482 if (s->max_b_frames &&
00483 s->codec_id != CODEC_ID_MPEG4 &&
00484 s->codec_id != CODEC_ID_MPEG1VIDEO &&
00485 s->codec_id != CODEC_ID_MPEG2VIDEO) {
00486 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
00487 return -1;
00488 }
00489
00490 if ((s->codec_id == CODEC_ID_MPEG4 ||
00491 s->codec_id == CODEC_ID_H263 ||
00492 s->codec_id == CODEC_ID_H263P) &&
00493 (avctx->sample_aspect_ratio.num > 255 ||
00494 avctx->sample_aspect_ratio.den > 255)) {
00495 av_log(avctx, AV_LOG_ERROR,
00496 "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
00497 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
00498 return -1;
00499 }
00500
00501 if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME
00502 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
00503 | CODEC_FLAG_ALT_SCAN
00504 #endif
00505 )) &&
00506 s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO) {
00507 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
00508 return -1;
00509 }
00510
00511
00512 if (s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4) {
00513 av_log(avctx, AV_LOG_ERROR,
00514 "mpeg2 style quantization not supported by codec\n");
00515 return -1;
00516 }
00517
00518 if ((s->flags & CODEC_FLAG_CBP_RD) && !avctx->trellis) {
00519 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
00520 return -1;
00521 }
00522
00523 if ((s->flags & CODEC_FLAG_QP_RD) &&
00524 s->avctx->mb_decision != FF_MB_DECISION_RD) {
00525 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
00526 return -1;
00527 }
00528
00529 if (s->avctx->scenechange_threshold < 1000000000 &&
00530 (s->flags & CODEC_FLAG_CLOSED_GOP)) {
00531 av_log(avctx, AV_LOG_ERROR,
00532 "closed gop with scene change detection are not supported yet, "
00533 "set threshold to 1000000000\n");
00534 return -1;
00535 }
00536
00537 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
00538 if ((s->flags2 & CODEC_FLAG2_INTRA_VLC) &&
00539 s->codec_id != CODEC_ID_MPEG2VIDEO) {
00540 av_log(avctx, AV_LOG_ERROR,
00541 "intra vlc table not supported by codec\n");
00542 return -1;
00543 }
00544 #endif
00545
00546 if (s->flags & CODEC_FLAG_LOW_DELAY) {
00547 if (s->codec_id != CODEC_ID_MPEG2VIDEO) {
00548 av_log(avctx, AV_LOG_ERROR,
00549 "low delay forcing is only available for mpeg2\n");
00550 return -1;
00551 }
00552 if (s->max_b_frames != 0) {
00553 av_log(avctx, AV_LOG_ERROR,
00554 "b frames cannot be used with low delay\n");
00555 return -1;
00556 }
00557 }
00558
00559 if (s->q_scale_type == 1) {
00560 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
00561 if (s->codec_id != CODEC_ID_MPEG2VIDEO) {
00562 av_log(avctx, AV_LOG_ERROR,
00563 "non linear quant is only available for mpeg2\n");
00564 return -1;
00565 }
00566 #endif
00567 if (avctx->qmax > 12) {
00568 av_log(avctx, AV_LOG_ERROR,
00569 "non linear quant only supports qmax <= 12 currently\n");
00570 return -1;
00571 }
00572 }
00573
00574 if (s->avctx->thread_count > 1 &&
00575 s->codec_id != CODEC_ID_MPEG4 &&
00576 s->codec_id != CODEC_ID_MPEG1VIDEO &&
00577 s->codec_id != CODEC_ID_MPEG2VIDEO &&
00578 (s->codec_id != CODEC_ID_H263P
00579 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
00580 || !(s->flags & CODEC_FLAG_H263P_SLICE_STRUCT)
00581 #endif
00582 )) {
00583 av_log(avctx, AV_LOG_ERROR,
00584 "multi threaded encoding not supported by codec\n");
00585 return -1;
00586 }
00587
00588 if (s->avctx->thread_count < 1) {
00589 av_log(avctx, AV_LOG_ERROR,
00590 "automatic thread number detection not supported by codec,"
00591 "patch welcome\n");
00592 return -1;
00593 }
00594
00595 if (s->avctx->thread_count > 1)
00596 s->rtp_mode = 1;
00597
00598 if (!avctx->time_base.den || !avctx->time_base.num) {
00599 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
00600 return -1;
00601 }
00602
00603 i = (INT_MAX / 2 + 128) >> 8;
00604 if (avctx->me_threshold >= i) {
00605 av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n",
00606 i - 1);
00607 return -1;
00608 }
00609 if (avctx->mb_threshold >= i) {
00610 av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
00611 i - 1);
00612 return -1;
00613 }
00614
00615 if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
00616 av_log(avctx, AV_LOG_INFO,
00617 "notice: b_frame_strategy only affects the first pass\n");
00618 avctx->b_frame_strategy = 0;
00619 }
00620
00621 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
00622 if (i > 1) {
00623 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
00624 avctx->time_base.den /= i;
00625 avctx->time_base.num /= i;
00626
00627 }
00628
00629 if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG1VIDEO ||
00630 s->codec_id == CODEC_ID_MPEG2VIDEO || s->codec_id == CODEC_ID_MJPEG) {
00631
00632 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
00633 s->inter_quant_bias = 0;
00634 } else {
00635 s->intra_quant_bias = 0;
00636
00637 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
00638 }
00639
00640 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
00641 s->intra_quant_bias = avctx->intra_quant_bias;
00642 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
00643 s->inter_quant_bias = avctx->inter_quant_bias;
00644
00645 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
00646 &chroma_v_shift);
00647
00648 if (avctx->codec_id == CODEC_ID_MPEG4 &&
00649 s->avctx->time_base.den > (1 << 16) - 1) {
00650 av_log(avctx, AV_LOG_ERROR,
00651 "timebase %d/%d not supported by MPEG 4 standard, "
00652 "the maximum admitted value for the timebase denominator "
00653 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
00654 (1 << 16) - 1);
00655 return -1;
00656 }
00657 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
00658
00659 switch (avctx->codec->id) {
00660 case CODEC_ID_MPEG1VIDEO:
00661 s->out_format = FMT_MPEG1;
00662 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
00663 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
00664 break;
00665 case CODEC_ID_MPEG2VIDEO:
00666 s->out_format = FMT_MPEG1;
00667 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
00668 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
00669 s->rtp_mode = 1;
00670 break;
00671 case CODEC_ID_LJPEG:
00672 case CODEC_ID_MJPEG:
00673 s->out_format = FMT_MJPEG;
00674 s->intra_only = 1;
00675 if (avctx->codec->id == CODEC_ID_LJPEG &&
00676 avctx->pix_fmt == PIX_FMT_BGRA) {
00677 s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
00678 s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
00679 s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
00680 } else {
00681 s->mjpeg_vsample[0] = 2;
00682 s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
00683 s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
00684 s->mjpeg_hsample[0] = 2;
00685 s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
00686 s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
00687 }
00688 if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
00689 ff_mjpeg_encode_init(s) < 0)
00690 return -1;
00691 avctx->delay = 0;
00692 s->low_delay = 1;
00693 break;
00694 case CODEC_ID_H261:
00695 if (!CONFIG_H261_ENCODER)
00696 return -1;
00697 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
00698 av_log(avctx, AV_LOG_ERROR,
00699 "The specified picture size of %dx%d is not valid for the "
00700 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
00701 s->width, s->height);
00702 return -1;
00703 }
00704 s->out_format = FMT_H261;
00705 avctx->delay = 0;
00706 s->low_delay = 1;
00707 break;
00708 case CODEC_ID_H263:
00709 if (!CONFIG_H263_ENCODER)
00710 return -1;
00711 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
00712 s->width, s->height) == 8) {
00713 av_log(avctx, AV_LOG_INFO,
00714 "The specified picture size of %dx%d is not valid for "
00715 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
00716 "352x288, 704x576, and 1408x1152."
00717 "Try H.263+.\n", s->width, s->height);
00718 return -1;
00719 }
00720 s->out_format = FMT_H263;
00721 avctx->delay = 0;
00722 s->low_delay = 1;
00723 break;
00724 case CODEC_ID_H263P:
00725 s->out_format = FMT_H263;
00726 s->h263_plus = 1;
00727
00728 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
00729 if (avctx->flags & CODEC_FLAG_H263P_UMV)
00730 s->umvplus = 1;
00731 if (avctx->flags & CODEC_FLAG_H263P_AIV)
00732 s->alt_inter_vlc = 1;
00733 if (avctx->flags & CODEC_FLAG_H263P_SLICE_STRUCT)
00734 s->h263_slice_structured = 1;
00735 #endif
00736 s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
00737 s->modified_quant = s->h263_aic;
00738 s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
00739 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
00740
00741
00742
00743 avctx->delay = 0;
00744 s->low_delay = 1;
00745 break;
00746 case CODEC_ID_FLV1:
00747 s->out_format = FMT_H263;
00748 s->h263_flv = 2;
00749 s->unrestricted_mv = 1;
00750 s->rtp_mode = 0;
00751 avctx->delay = 0;
00752 s->low_delay = 1;
00753 break;
00754 case CODEC_ID_RV10:
00755 s->out_format = FMT_H263;
00756 avctx->delay = 0;
00757 s->low_delay = 1;
00758 break;
00759 case CODEC_ID_RV20:
00760 s->out_format = FMT_H263;
00761 avctx->delay = 0;
00762 s->low_delay = 1;
00763 s->modified_quant = 1;
00764 s->h263_aic = 1;
00765 s->h263_plus = 1;
00766 s->loop_filter = 1;
00767 s->unrestricted_mv = 0;
00768 break;
00769 case CODEC_ID_MPEG4:
00770 s->out_format = FMT_H263;
00771 s->h263_pred = 1;
00772 s->unrestricted_mv = 1;
00773 s->low_delay = s->max_b_frames ? 0 : 1;
00774 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
00775 break;
00776 case CODEC_ID_MSMPEG4V2:
00777 s->out_format = FMT_H263;
00778 s->h263_pred = 1;
00779 s->unrestricted_mv = 1;
00780 s->msmpeg4_version = 2;
00781 avctx->delay = 0;
00782 s->low_delay = 1;
00783 break;
00784 case CODEC_ID_MSMPEG4V3:
00785 s->out_format = FMT_H263;
00786 s->h263_pred = 1;
00787 s->unrestricted_mv = 1;
00788 s->msmpeg4_version = 3;
00789 s->flipflop_rounding = 1;
00790 avctx->delay = 0;
00791 s->low_delay = 1;
00792 break;
00793 case CODEC_ID_WMV1:
00794 s->out_format = FMT_H263;
00795 s->h263_pred = 1;
00796 s->unrestricted_mv = 1;
00797 s->msmpeg4_version = 4;
00798 s->flipflop_rounding = 1;
00799 avctx->delay = 0;
00800 s->low_delay = 1;
00801 break;
00802 case CODEC_ID_WMV2:
00803 s->out_format = FMT_H263;
00804 s->h263_pred = 1;
00805 s->unrestricted_mv = 1;
00806 s->msmpeg4_version = 5;
00807 s->flipflop_rounding = 1;
00808 avctx->delay = 0;
00809 s->low_delay = 1;
00810 break;
00811 default:
00812 return -1;
00813 }
00814
00815 avctx->has_b_frames = !s->low_delay;
00816
00817 s->encoding = 1;
00818
00819 s->progressive_frame =
00820 s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
00821 CODEC_FLAG_INTERLACED_ME) ||
00822 s->alternate_scan);
00823
00824
00825 if (MPV_common_init(s) < 0)
00826 return -1;
00827
00828 if (!s->dct_quantize)
00829 s->dct_quantize = dct_quantize_c;
00830 if (!s->denoise_dct)
00831 s->denoise_dct = denoise_dct_c;
00832 s->fast_dct_quantize = s->dct_quantize;
00833 if (avctx->trellis)
00834 s->dct_quantize = dct_quantize_trellis_c;
00835
00836 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
00837 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
00838
00839 s->quant_precision = 5;
00840
00841 ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
00842 ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
00843
00844 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
00845 ff_h261_encode_init(s);
00846 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
00847 ff_h263_encode_init(s);
00848 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
00849 ff_msmpeg4_encode_init(s);
00850 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
00851 && s->out_format == FMT_MPEG1)
00852 ff_mpeg1_encode_init(s);
00853
00854
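/* Pick the default quantization matrices (MPEG-4 or MPEG-1 style depending on
 * the codec) and apply any user-supplied intra/inter matrices, stored in IDCT
 * permutation order. */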
00855 for (i = 0; i < 64; i++) {
00856 int j = s->dsp.idct_permutation[i];
00857 if (CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4 &&
00858 s->mpeg_quant) {
00859 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
00860 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
00861 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
00862 s->intra_matrix[j] =
00863 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
00864 } else {
00865
00866 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
00867 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
00868 }
00869 if (s->avctx->intra_matrix)
00870 s->intra_matrix[j] = s->avctx->intra_matrix[i];
00871 if (s->avctx->inter_matrix)
00872 s->inter_matrix[j] = s->avctx->inter_matrix[i];
00873 }
00874
00875
00876
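/* Precompute the fixed-point quantization tables; MJPEG is skipped because it
 * keeps qscale inside the matrix itself. */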
00877 if (s->out_format != FMT_MJPEG) {
00878 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
00879 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
00880 31, 1);
00881 ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
00882 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
00883 31, 0);
00884 }
00885
00886 if (ff_rate_control_init(s) < 0)
00887 return -1;
00888
00889 return 0;
00890 }
00891
00892 av_cold int MPV_encode_end(AVCodecContext *avctx)
00893 {
00894 MpegEncContext *s = avctx->priv_data;
00895
00896 ff_rate_control_uninit(s);
00897
00898 MPV_common_end(s);
00899 if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
00900 s->out_format == FMT_MJPEG)
00901 ff_mjpeg_encode_close(s);
00902
00903 av_freep(&avctx->extradata);
00904
00905 return 0;
00906 }
00907
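/* Sum of absolute differences of a 16x16 block against a constant value, used
 * below to estimate how well a macroblock is described by its mean (i.e. how
 * intra-friendly it is). */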
00908 static int get_sae(uint8_t *src, int ref, int stride)
00909 {
00910 int x,y;
00911 int acc = 0;
00912
00913 for (y = 0; y < 16; y++) {
00914 for (x = 0; x < 16; x++) {
00915 acc += FFABS(src[x + y * stride] - ref);
00916 }
00917 }
00918
00919 return acc;
00920 }
00921
00922 static int get_intra_count(MpegEncContext *s, uint8_t *src,
00923 uint8_t *ref, int stride)
00924 {
00925 int x, y, w, h;
00926 int acc = 0;
00927
00928 w = s->width & ~15;
00929 h = s->height & ~15;
00930
00931 for (y = 0; y < h; y += 16) {
00932 for (x = 0; x < w; x += 16) {
00933 int offset = x + y * stride;
00934 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
00935 16);
00936 int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
00937 int sae = get_sae(src + offset, mean, stride);
00938
00939 acc += sae + 500 < sad;
00940 }
00941 }
00942 return acc;
00943 }
00944
00945
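/* Queue a user-supplied frame for encoding: validate or guess its pts, then
 * either reference the user buffer directly or copy it into an internal
 * Picture, and append it to the input_picture FIFO. */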
00946 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
00947 {
00948 AVFrame *pic = NULL;
00949 int64_t pts;
00950 int i;
00951 const int encoding_delay = s->max_b_frames;
00952 int direct = 1;
00953
00954 if (pic_arg) {
00955 pts = pic_arg->pts;
00956 pic_arg->display_picture_number = s->input_picture_number++;
00957
00958 if (pts != AV_NOPTS_VALUE) {
00959 if (s->user_specified_pts != AV_NOPTS_VALUE) {
00960 int64_t time = pts;
00961 int64_t last = s->user_specified_pts;
00962
00963 if (time <= last) {
00964 av_log(s->avctx, AV_LOG_ERROR,
00965 "Error, Invalid timestamp=%"PRId64", "
00966 "last=%"PRId64"\n", pts, s->user_specified_pts);
00967 return -1;
00968 }
00969 }
00970 s->user_specified_pts = pts;
00971 } else {
00972 if (s->user_specified_pts != AV_NOPTS_VALUE) {
00973 s->user_specified_pts =
00974 pts = s->user_specified_pts + 1;
00975 av_log(s->avctx, AV_LOG_INFO,
00976 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
00977 pts);
00978 } else {
00979 pts = pic_arg->display_picture_number;
00980 }
00981 }
00982 }
00983
00984 if (pic_arg) {
00985 if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
00986 direct = 0;
00987 if (pic_arg->linesize[0] != s->linesize)
00988 direct = 0;
00989 if (pic_arg->linesize[1] != s->uvlinesize)
00990 direct = 0;
00991 if (pic_arg->linesize[2] != s->uvlinesize)
00992 direct = 0;
00997 if (direct) {
00998 i = ff_find_unused_picture(s, 1);
00999 if (i < 0)
01000 return i;
01001
01002 pic = (AVFrame *) &s->picture[i];
01003 pic->reference = 3;
01004
01005 for (i = 0; i < 4; i++) {
01006 pic->data[i] = pic_arg->data[i];
01007 pic->linesize[i] = pic_arg->linesize[i];
01008 }
01009 if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
01010 return -1;
01011 }
01012 } else {
01013 i = ff_find_unused_picture(s, 0);
01014 if (i < 0)
01015 return i;
01016
01017 pic = (AVFrame *) &s->picture[i];
01018 pic->reference = 3;
01019
01020 if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
01021 return -1;
01022 }
01023
01024 if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
01025 pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
01026 pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
01027
01028 } else {
01029 int h_chroma_shift, v_chroma_shift;
01030 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift,
01031 &v_chroma_shift);
01032
01033 for (i = 0; i < 3; i++) {
01034 int src_stride = pic_arg->linesize[i];
01035 int dst_stride = i ? s->uvlinesize : s->linesize;
01036 int h_shift = i ? h_chroma_shift : 0;
01037 int v_shift = i ? v_chroma_shift : 0;
01038 int w = s->width >> h_shift;
01039 int h = s->height >> v_shift;
01040 uint8_t *src = pic_arg->data[i];
01041 uint8_t *dst = pic->data[i];
01042
01043 if (!s->avctx->rc_buffer_size)
01044 dst += INPLACE_OFFSET;
01045
01046 if (src_stride == dst_stride)
01047 memcpy(dst, src, src_stride * h);
01048 else {
01049 while (h--) {
01050 memcpy(dst, src, w);
01051 dst += dst_stride;
01052 src += src_stride;
01053 }
01054 }
01055 }
01056 }
01057 }
01058 copy_picture_attributes(s, pic, pic_arg);
01059 pic->pts = pts;
01060 }
01061
01062
01063 for (i = 1; i < MAX_PICTURE_COUNT ; i++)
01064 s->input_picture[i - 1] = s->input_picture[i];
01065
01066 s->input_picture[encoding_delay] = (Picture*) pic;
01067
01068 return 0;
01069 }
01070
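/* Decide whether the current frame may be dropped entirely, comparing it
 * against the last coded frame with the frame_skip_exp/threshold/factor
 * settings. */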
01071 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
01072 {
01073 int x, y, plane;
01074 int score = 0;
01075 int64_t score64 = 0;
01076
01077 for (plane = 0; plane < 3; plane++) {
01078 const int stride = p->f.linesize[plane];
01079 const int bw = plane ? 1 : 2;
01080 for (y = 0; y < s->mb_height * bw; y++) {
01081 for (x = 0; x < s->mb_width * bw; x++) {
01082 int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
01083 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
01084 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
01085 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
01086
01087 switch (s->avctx->frame_skip_exp) {
01088 case 0: score = FFMAX(score, v); break;
01089 case 1: score += FFABS(v); break;
01090 case 2: score += v * v; break;
01091 case 3: score64 += FFABS(v * v * (int64_t)v); break;
01092 case 4: score64 += v * v * (int64_t)(v * v); break;
01093 }
01094 }
01095 }
01096 }
01097
01098 if (score)
01099 score64 = score;
01100
01101 if (score64 < s->avctx->frame_skip_threshold)
01102 return 1;
01103 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
01104 return 1;
01105 return 0;
01106 }
01107
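/* b_frame_strategy 2: encode the queued frames at reduced resolution with
 * every possible number of consecutive B-frames and return the count with the
 * lowest rate-distortion cost. */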
01108 static int estimate_best_b_count(MpegEncContext *s)
01109 {
01110 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
01111 AVCodecContext *c = avcodec_alloc_context3(NULL);
01112 AVFrame input[FF_MAX_B_FRAMES + 2];
01113 const int scale = s->avctx->brd_scale;
01114 int i, j, out_size, p_lambda, b_lambda, lambda2;
01115 int outbuf_size = s->width * s->height;
01116 uint8_t *outbuf = av_malloc(outbuf_size);
01117 int64_t best_rd = INT64_MAX;
01118 int best_b_count = -1;
01119
01120 assert(scale >= 0 && scale <= 3);
01121
01122
01123
01124 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
01125
01126 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
01127 if (!b_lambda)
01128 b_lambda = p_lambda;
01129 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
01130 FF_LAMBDA_SHIFT;
01131
01132 c->width = s->width >> scale;
01133 c->height = s->height >> scale;
01134 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
01135 CODEC_FLAG_INPUT_PRESERVED ;
01136 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
01137 c->mb_decision = s->avctx->mb_decision;
01138 c->me_cmp = s->avctx->me_cmp;
01139 c->mb_cmp = s->avctx->mb_cmp;
01140 c->me_sub_cmp = s->avctx->me_sub_cmp;
01141 c->pix_fmt = PIX_FMT_YUV420P;
01142 c->time_base = s->avctx->time_base;
01143 c->max_b_frames = s->max_b_frames;
01144
01145 if (avcodec_open2(c, codec, NULL) < 0)
01146 return -1;
01147
01148 for (i = 0; i < s->max_b_frames + 2; i++) {
01149 int ysize = c->width * c->height;
01150 int csize = (c->width / 2) * (c->height / 2);
01151 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
01152 s->next_picture_ptr;
01153
01154 avcodec_get_frame_defaults(&input[i]);
01155 input[i].data[0] = av_malloc(ysize + 2 * csize);
01156 input[i].data[1] = input[i].data[0] + ysize;
01157 input[i].data[2] = input[i].data[1] + csize;
01158 input[i].linesize[0] = c->width;
01159 input[i].linesize[1] =
01160 input[i].linesize[2] = c->width / 2;
01161
01162 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
01163 pre_input = *pre_input_ptr;
01164
01165 if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
01166 pre_input.f.data[0] += INPLACE_OFFSET;
01167 pre_input.f.data[1] += INPLACE_OFFSET;
01168 pre_input.f.data[2] += INPLACE_OFFSET;
01169 }
01170
01171 s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
01172 pre_input.f.data[0], pre_input.f.linesize[0],
01173 c->width, c->height);
01174 s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
01175 pre_input.f.data[1], pre_input.f.linesize[1],
01176 c->width >> 1, c->height >> 1);
01177 s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
01178 pre_input.f.data[2], pre_input.f.linesize[2],
01179 c->width >> 1, c->height >> 1);
01180 }
01181 }
01182
01183 for (j = 0; j < s->max_b_frames + 1; j++) {
01184 int64_t rd = 0;
01185
01186 if (!s->input_picture[j])
01187 break;
01188
01189 c->error[0] = c->error[1] = c->error[2] = 0;
01190
01191 input[0].pict_type = AV_PICTURE_TYPE_I;
01192 input[0].quality = 1 * FF_QP2LAMBDA;
01193 out_size = avcodec_encode_video(c, outbuf,
01194 outbuf_size, &input[0]);
01195
01196
01197 for (i = 0; i < s->max_b_frames + 1; i++) {
01198 int is_p = i % (j + 1) == j || i == s->max_b_frames;
01199
01200 input[i + 1].pict_type = is_p ?
01201 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
01202 input[i + 1].quality = is_p ? p_lambda : b_lambda;
01203 out_size = avcodec_encode_video(c, outbuf, outbuf_size,
01204 &input[i + 1]);
01205 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
01206 }
01207
01208
01209 while (out_size) {
01210 out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
01211 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
01212 }
01213
01214 rd += c->error[0] + c->error[1] + c->error[2];
01215
01216 if (rd < best_rd) {
01217 best_rd = rd;
01218 best_b_count = j;
01219 }
01220 }
01221
01222 av_freep(&outbuf);
01223 avcodec_close(c);
01224 av_freep(&c);
01225
01226 for (i = 0; i < s->max_b_frames + 2; i++) {
01227 av_freep(&input[i].data[0]);
01228 }
01229
01230 return best_b_count;
01231 }
01232
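/* Pick the next picture to code: run skip detection, decide how many B-frames
 * to use, reorder the input accordingly and set up new_picture and
 * current_picture for the actual encode. */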
01233 static int select_input_picture(MpegEncContext *s)
01234 {
01235 int i;
01236
01237 for (i = 1; i < MAX_PICTURE_COUNT; i++)
01238 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
01239 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
01240
01241
01242 if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
if (s->next_picture_ptr == NULL || s->intra_only) {
01245 s->reordered_input_picture[0] = s->input_picture[0];
01246 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
01247 s->reordered_input_picture[0]->f.coded_picture_number =
01248 s->coded_picture_number++;
01249 } else {
01250 int b_frames;
01251
01252 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
01253 if (s->picture_in_gop_number < s->gop_size &&
01254 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
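/* The frame can be dropped: release or unreference its buffer and tell the
 * rate control that 0 bits were spent on it. */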
01260 if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
01261 for (i = 0; i < 4; i++)
01262 s->input_picture[0]->f.data[i] = NULL;
01263 s->input_picture[0]->f.type = 0;
01264 } else {
01265 assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
01266 s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
01267
01268 s->avctx->release_buffer(s->avctx,
01269 (AVFrame *) s->input_picture[0]);
01270 }
01271
01272 emms_c();
01273 ff_vbv_update(s, 0);
01274
01275 goto no_output_pic;
01276 }
01277 }
01278
01279 if (s->flags & CODEC_FLAG_PASS2) {
01280 for (i = 0; i < s->max_b_frames + 1; i++) {
01281 int pict_num = s->input_picture[0]->f.display_picture_number + i;
01282
01283 if (pict_num >= s->rc_context.num_entries)
01284 break;
01285 if (!s->input_picture[i]) {
01286 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
01287 break;
01288 }
01289
01290 s->input_picture[i]->f.pict_type =
01291 s->rc_context.entry[pict_num].new_pict_type;
01292 }
01293 }
01294
01295 if (s->avctx->b_frame_strategy == 0) {
01296 b_frames = s->max_b_frames;
01297 while (b_frames && !s->input_picture[b_frames])
01298 b_frames--;
01299 } else if (s->avctx->b_frame_strategy == 1) {
01300 for (i = 1; i < s->max_b_frames + 1; i++) {
01301 if (s->input_picture[i] &&
01302 s->input_picture[i]->b_frame_score == 0) {
01303 s->input_picture[i]->b_frame_score =
01304 get_intra_count(s,
01305 s->input_picture[i ]->f.data[0],
01306 s->input_picture[i - 1]->f.data[0],
01307 s->linesize) + 1;
01308 }
01309 }
01310 for (i = 0; i < s->max_b_frames + 1; i++) {
01311 if (s->input_picture[i] == NULL ||
01312 s->input_picture[i]->b_frame_score - 1 >
01313 s->mb_num / s->avctx->b_sensitivity)
01314 break;
01315 }
01316
01317 b_frames = FFMAX(0, i - 1);
01318
01319
01320 for (i = 0; i < b_frames + 1; i++) {
01321 s->input_picture[i]->b_frame_score = 0;
01322 }
01323 } else if (s->avctx->b_frame_strategy == 2) {
01324 b_frames = estimate_best_b_count(s);
01325 } else {
01326 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
01327 b_frames = 0;
01328 }
01329
01330 emms_c();
01335 for (i = b_frames - 1; i >= 0; i--) {
01336 int type = s->input_picture[i]->f.pict_type;
01337 if (type && type != AV_PICTURE_TYPE_B)
01338 b_frames = i;
01339 }
01340 if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
01341 b_frames == s->max_b_frames) {
01342 av_log(s->avctx, AV_LOG_ERROR,
01343 "warning, too many b frames in a row\n");
01344 }
01345
01346 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
01347 if ((s->flags2 & CODEC_FLAG2_STRICT_GOP) &&
01348 s->gop_size > s->picture_in_gop_number) {
01349 b_frames = s->gop_size - s->picture_in_gop_number - 1;
01350 } else {
01351 if (s->flags & CODEC_FLAG_CLOSED_GOP)
01352 b_frames = 0;
01353 s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
01354 }
01355 }
01356
01357 if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
01358 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
01359 b_frames--;
01360
01361 s->reordered_input_picture[0] = s->input_picture[b_frames];
01362 if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
01363 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
01364 s->reordered_input_picture[0]->f.coded_picture_number =
01365 s->coded_picture_number++;
01366 for (i = 0; i < b_frames; i++) {
01367 s->reordered_input_picture[i + 1] = s->input_picture[i];
01368 s->reordered_input_picture[i + 1]->f.pict_type =
01369 AV_PICTURE_TYPE_B;
01370 s->reordered_input_picture[i + 1]->f.coded_picture_number =
01371 s->coded_picture_number++;
01372 }
01373 }
01374 }
01375 no_output_pic:
01376 if (s->reordered_input_picture[0]) {
01377 s->reordered_input_picture[0]->f.reference =
01378 s->reordered_input_picture[0]->f.pict_type !=
01379 AV_PICTURE_TYPE_B ? 3 : 0;
01380
01381 ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
01382
01383 if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
01384 s->avctx->rc_buffer_size) {
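/* The input is a shared user buffer (or VBV retries may re-read the source),
 * so it must not be written to: allocate a separate Picture to reconstruct
 * into and release the input buffer for reuse. */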
01388 Picture *pic;
01389 int i = ff_find_unused_picture(s, 0);
01390 if (i < 0)
01391 return i;
01392 pic = &s->picture[i];
01393
01394 pic->f.reference = s->reordered_input_picture[0]->f.reference;
01395 if (ff_alloc_picture(s, pic, 0) < 0) {
01396 return -1;
01397 }
01398
01399
01400 if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
01401 s->avctx->release_buffer(s->avctx,
01402 (AVFrame *) s->reordered_input_picture[0]);
01403 for (i = 0; i < 4; i++)
01404 s->reordered_input_picture[0]->f.data[i] = NULL;
01405 s->reordered_input_picture[0]->f.type = 0;
01406
01407 copy_picture_attributes(s, (AVFrame *) pic,
01408 (AVFrame *) s->reordered_input_picture[0]);
01409
01410 s->current_picture_ptr = pic;
01411 } else {
01412
01413
01414 assert(s->reordered_input_picture[0]->f.type ==
01415 FF_BUFFER_TYPE_USER ||
01416 s->reordered_input_picture[0]->f.type ==
01417 FF_BUFFER_TYPE_INTERNAL);
01418
01419 s->current_picture_ptr = s->reordered_input_picture[0];
01420 for (i = 0; i < 4; i++) {
01421 s->new_picture.f.data[i] += INPLACE_OFFSET;
01422 }
01423 }
01424 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
01425
01426 s->picture_number = s->new_picture.f.display_picture_number;
01427
01428 } else {
01429 memset(&s->new_picture, 0, sizeof(Picture));
01430 }
01431 return 0;
01432 }
01433
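/* Main encoding entry point: load and select the input picture, encode it
 * (retrying with a higher quantizer on VBV overflow), then add any stuffing,
 * patch vbv_delay for CBR and return the number of bytes written. */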
01434 int MPV_encode_picture(AVCodecContext *avctx,
01435 unsigned char *buf, int buf_size, void *data)
01436 {
01437 MpegEncContext *s = avctx->priv_data;
01438 AVFrame *pic_arg = data;
01439 int i, stuffing_count;
01440 int context_count = s->slice_context_count;
01441
01442 for (i = 0; i < context_count; i++) {
01443 int start_y = s->thread_context[i]->start_mb_y;
01444 int end_y = s->thread_context[i]-> end_mb_y;
01445 int h = s->mb_height;
01446 uint8_t *start = buf + (size_t)(((int64_t) buf_size) * start_y / h);
01447 uint8_t *end = buf + (size_t)(((int64_t) buf_size) * end_y / h);
01448
01449 init_put_bits(&s->thread_context[i]->pb, start, end - start);
01450 }
01451
01452 s->picture_in_gop_number++;
01453
01454 if (load_input_picture(s, pic_arg) < 0)
01455 return -1;
01456
01457 if (select_input_picture(s) < 0) {
01458 return -1;
01459 }
01460
01461
01462 if (s->new_picture.f.data[0]) {
01463 s->pict_type = s->new_picture.f.pict_type;
01467 MPV_frame_start(s, avctx);
01468 vbv_retry:
01469 if (encode_picture(s, s->picture_number) < 0)
01470 return -1;
01471
01472 avctx->header_bits = s->header_bits;
01473 avctx->mv_bits = s->mv_bits;
01474 avctx->misc_bits = s->misc_bits;
01475 avctx->i_tex_bits = s->i_tex_bits;
01476 avctx->p_tex_bits = s->p_tex_bits;
01477 avctx->i_count = s->i_count;
01478
01479 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
01480 avctx->skip_count = s->skip_count;
01481
01482 MPV_frame_end(s);
01483
01484 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
01485 ff_mjpeg_encode_picture_trailer(s);
01486
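/* VBV check: if the coded frame overflows the rate-control buffer, raise
 * lambda (and the per-MB lambda table), rewind the bit writers and re-encode
 * the picture. */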
01487 if (avctx->rc_buffer_size) {
01488 RateControlContext *rcc = &s->rc_context;
01489 int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
01490
01491 if (put_bits_count(&s->pb) > max_size &&
01492 s->lambda < s->avctx->lmax) {
01493 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
01494 (s->qscale + 1) / s->qscale);
01495 if (s->adaptive_quant) {
01496 int i;
01497 for (i = 0; i < s->mb_height * s->mb_stride; i++)
01498 s->lambda_table[i] =
01499 FFMAX(s->lambda_table[i] + 1,
01500 s->lambda_table[i] * (s->qscale + 1) /
01501 s->qscale);
01502 }
01503 s->mb_skipped = 0;
01504
01505 if (s->pict_type == AV_PICTURE_TYPE_P) {
01506 if (s->flipflop_rounding ||
01507 s->codec_id == CODEC_ID_H263P ||
01508 s->codec_id == CODEC_ID_MPEG4)
01509 s->no_rounding ^= 1;
01510 }
01511 if (s->pict_type != AV_PICTURE_TYPE_B) {
01512 s->time_base = s->last_time_base;
01513 s->last_non_b_time = s->time - s->pp_time;
01514 }
01515
01516 for (i = 0; i < context_count; i++) {
01517 PutBitContext *pb = &s->thread_context[i]->pb;
01518 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
01519 }
01520 goto vbv_retry;
01521 }
01522
01523 assert(s->avctx->rc_max_rate);
01524 }
01525
01526 if (s->flags & CODEC_FLAG_PASS1)
01527 ff_write_pass1_stats(s);
01528
01529 for (i = 0; i < 4; i++) {
01530 s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
01531 avctx->error[i] += s->current_picture_ptr->f.error[i];
01532 }
01533
01534 if (s->flags & CODEC_FLAG_PASS1)
01535 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
01536 avctx->i_tex_bits + avctx->p_tex_bits ==
01537 put_bits_count(&s->pb));
01538 flush_put_bits(&s->pb);
01539 s->frame_bits = put_bits_count(&s->pb);
01540
01541 stuffing_count = ff_vbv_update(s, s->frame_bits);
01542 if (stuffing_count) {
01543 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
01544 stuffing_count + 50) {
01545 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
01546 return -1;
01547 }
01548
01549 switch (s->codec_id) {
01550 case CODEC_ID_MPEG1VIDEO:
01551 case CODEC_ID_MPEG2VIDEO:
01552 while (stuffing_count--) {
01553 put_bits(&s->pb, 8, 0);
01554 }
01555 break;
01556 case CODEC_ID_MPEG4:
01557 put_bits(&s->pb, 16, 0);
01558 put_bits(&s->pb, 16, 0x1C3);
01559 stuffing_count -= 4;
01560 while (stuffing_count--) {
01561 put_bits(&s->pb, 8, 0xFF);
01562 }
01563 break;
01564 default:
01565 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
01566 }
01567 flush_put_bits(&s->pb);
01568 s->frame_bits = put_bits_count(&s->pb);
01569 }
01570
01571
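/* For CBR MPEG-1/2 (min_rate == max_rate) compute vbv_delay from the buffer
 * fullness and patch it into the already written picture header. */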
01572 if (s->avctx->rc_max_rate &&
01573 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
01574 s->out_format == FMT_MPEG1 &&
01575 90000LL * (avctx->rc_buffer_size - 1) <=
01576 s->avctx->rc_max_rate * 0xFFFFLL) {
01577 int vbv_delay, min_delay;
01578 double inbits = s->avctx->rc_max_rate *
01579 av_q2d(s->avctx->time_base);
01580 int minbits = s->frame_bits - 8 *
01581 (s->vbv_delay_ptr - s->pb.buf - 1);
01582 double bits = s->rc_context.buffer_index + minbits - inbits;
01583
01584 if (bits < 0)
01585 av_log(s->avctx, AV_LOG_ERROR,
01586 "Internal error, negative bits\n");
01587
01588 assert(s->repeat_first_field == 0);
01589
01590 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
01591 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
01592 s->avctx->rc_max_rate;
01593
01594 vbv_delay = FFMAX(vbv_delay, min_delay);
01595
01596 assert(vbv_delay < 0xFFFF);
01597
01598 s->vbv_delay_ptr[0] &= 0xF8;
01599 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
01600 s->vbv_delay_ptr[1] = vbv_delay >> 5;
01601 s->vbv_delay_ptr[2] &= 0x07;
01602 s->vbv_delay_ptr[2] |= vbv_delay << 3;
01603 avctx->vbv_delay = vbv_delay * 300;
01604 }
01605 s->total_bits += s->frame_bits;
01606 avctx->frame_bits = s->frame_bits;
01607 } else {
01608 assert((put_bits_ptr(&s->pb) == s->pb.buf));
01609 s->frame_bits = 0;
01610 }
01611 assert((s->frame_bits & 7) == 0);
01612
01613 return s->frame_bits / 8;
01614 }
01615
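/* Zero a block that only contains a few +-1 coefficients when the estimated
 * cost of coding them exceeds the threshold (luma/chroma single coefficient
 * elimination). */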
01616 static inline void dct_single_coeff_elimination(MpegEncContext *s,
01617 int n, int threshold)
01618 {
01619 static const char tab[64] = {
01620 3, 2, 2, 1, 1, 1, 1, 1,
01621 1, 1, 1, 1, 1, 1, 1, 1,
01622 1, 1, 1, 1, 1, 1, 1, 1,
01623 0, 0, 0, 0, 0, 0, 0, 0,
01624 0, 0, 0, 0, 0, 0, 0, 0,
01625 0, 0, 0, 0, 0, 0, 0, 0,
01626 0, 0, 0, 0, 0, 0, 0, 0,
01627 0, 0, 0, 0, 0, 0, 0, 0
01628 };
01629 int score = 0;
01630 int run = 0;
01631 int i;
01632 DCTELEM *block = s->block[n];
01633 const int last_index = s->block_last_index[n];
01634 int skip_dc;
01635
01636 if (threshold < 0) {
01637 skip_dc = 0;
01638 threshold = -threshold;
01639 } else
01640 skip_dc = 1;
01641
01642
01643 if (last_index <= skip_dc - 1)
01644 return;
01645
01646 for (i = 0; i <= last_index; i++) {
01647 const int j = s->intra_scantable.permutated[i];
01648 const int level = FFABS(block[j]);
01649 if (level == 1) {
01650 if (skip_dc && i == 0)
01651 continue;
01652 score += tab[run];
01653 run = 0;
01654 } else if (level > 1) {
01655 return;
01656 } else {
01657 run++;
01658 }
01659 }
01660 if (score >= threshold)
01661 return;
01662 for (i = skip_dc; i <= last_index; i++) {
01663 const int j = s->intra_scantable.permutated[i];
01664 block[j] = 0;
01665 }
01666 if (block[0])
01667 s->block_last_index[n] = 0;
01668 else
01669 s->block_last_index[n] = -1;
01670 }
01671
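/* Clamp quantized coefficients to the [min_qcoeff, max_qcoeff] range the
 * entropy coder can represent, warning when clipping actually happens. */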
01672 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block,
01673 int last_index)
01674 {
01675 int i;
01676 const int maxlevel = s->max_qcoeff;
01677 const int minlevel = s->min_qcoeff;
01678 int overflow = 0;
01679
01680 if (s->mb_intra) {
01681 i = 1;
01682 } else
01683 i = 0;
01684
01685 for (; i <= last_index; i++) {
01686 const int j = s->intra_scantable.permutated[i];
01687 int level = block[j];
01688
01689 if (level > maxlevel) {
01690 level = maxlevel;
01691 overflow++;
01692 } else if (level < minlevel) {
01693 level = minlevel;
01694 overflow++;
01695 }
01696
01697 block[j] = level;
01698 }
01699
01700 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
01701 av_log(s->avctx, AV_LOG_INFO,
01702 "warning, clipping %d dct coefficients to %d..%d\n",
01703 overflow, minlevel, maxlevel);
01704 }
01705
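/* Compute per-coefficient noise-shaping weights from the local variance of the
 * 8x8 source block (only used with quantizer_noise_shaping). */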
01706 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
01707 {
01708 int x, y;
01709
01710 for (y = 0; y < 8; y++) {
01711 for (x = 0; x < 8; x++) {
01712 int x2, y2;
01713 int sum = 0;
01714 int sqr = 0;
01715 int count = 0;
01716
01717 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
01718 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
01719 int v = ptr[x2 + y2 * stride];
01720 sum += v;
01721 sqr += v * v;
01722 count++;
01723 }
01724 }
01725 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
01726 }
01727 }
01728 }
01729
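/* Encode one macroblock: fetch (or motion compensate) the source pixels, run
 * the forward DCT and quantization per 8x8 block, and skip blocks whose
 * prediction error is already negligible. */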
01730 static av_always_inline void encode_mb_internal(MpegEncContext *s,
01731 int motion_x, int motion_y,
01732 int mb_block_height,
01733 int mb_block_count)
01734 {
01735 int16_t weight[8][64];
01736 DCTELEM orig[8][64];
01737 const int mb_x = s->mb_x;
01738 const int mb_y = s->mb_y;
01739 int i;
01740 int skip_dct[8];
01741 int dct_offset = s->linesize * 8;
01742 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
01743 int wrap_y, wrap_c;
01744
01745 for (i = 0; i < mb_block_count; i++)
01746 skip_dct[i] = s->skipdct;
01747
01748 if (s->adaptive_quant) {
01749 const int last_qp = s->qscale;
01750 const int mb_xy = mb_x + mb_y * s->mb_stride;
01751
01752 s->lambda = s->lambda_table[mb_xy];
01753 update_qscale(s);
01754
01755 if (!(s->flags & CODEC_FLAG_QP_RD)) {
01756 s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
01757 s->dquant = s->qscale - last_qp;
01758
01759 if (s->out_format == FMT_H263) {
01760 s->dquant = av_clip(s->dquant, -2, 2);
01761
01762 if (s->codec_id == CODEC_ID_MPEG4) {
01763 if (!s->mb_intra) {
01764 if (s->pict_type == AV_PICTURE_TYPE_B) {
01765 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
01766 s->dquant = 0;
01767 }
01768 if (s->mv_type == MV_TYPE_8X8)
01769 s->dquant = 0;
01770 }
01771 }
01772 }
01773 }
01774 ff_set_qscale(s, last_qp + s->dquant);
01775 } else if (s->flags & CODEC_FLAG_QP_RD)
01776 ff_set_qscale(s, s->qscale + s->dquant);
01777
01778 wrap_y = s->linesize;
01779 wrap_c = s->uvlinesize;
01780 ptr_y = s->new_picture.f.data[0] +
01781 (mb_y * 16 * wrap_y) + mb_x * 16;
01782 ptr_cb = s->new_picture.f.data[1] +
01783 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
01784 ptr_cr = s->new_picture.f.data[2] +
01785 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
01786
01787 if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
01788 uint8_t *ebuf = s->edge_emu_buffer + 32;
01789 s->dsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
01790 mb_y * 16, s->width, s->height);
01791 ptr_y = ebuf;
01792 s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
01793 mb_block_height, mb_x * 8, mb_y * 8,
01794 s->width >> 1, s->height >> 1);
01795 ptr_cb = ebuf + 18 * wrap_y;
01796 s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
01797 mb_block_height, mb_x * 8, mb_y * 8,
01798 s->width >> 1, s->height >> 1);
01799 ptr_cr = ebuf + 18 * wrap_y + 8;
01800 }
01801
01802 if (s->mb_intra) {
01803 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
01804 int progressive_score, interlaced_score;
01805
01806 s->interlaced_dct = 0;
01807 progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
01808 NULL, wrap_y, 8) +
01809 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
01810 NULL, wrap_y, 8) - 400;
01811
01812 if (progressive_score > 0) {
01813 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
01814 NULL, wrap_y * 2, 8) +
01815 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
01816 NULL, wrap_y * 2, 8);
01817 if (progressive_score > interlaced_score) {
01818 s->interlaced_dct = 1;
01819
01820 dct_offset = wrap_y;
01821 wrap_y <<= 1;
01822 if (s->chroma_format == CHROMA_422)
01823 wrap_c <<= 1;
01824 }
01825 }
01826 }
01827
01828 s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
01829 s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
01830 s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
01831 s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
01832
01833 if (s->flags & CODEC_FLAG_GRAY) {
01834 skip_dct[4] = 1;
01835 skip_dct[5] = 1;
01836 } else {
01837 s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
01838 s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
01839 if (!s->chroma_y_shift) {
01840 s->dsp.get_pixels(s->block[6],
01841 ptr_cb + (dct_offset >> 1), wrap_c);
01842 s->dsp.get_pixels(s->block[7],
01843 ptr_cr + (dct_offset >> 1), wrap_c);
01844 }
01845 }
01846 } else {
01847 op_pixels_func (*op_pix)[4];
01848 qpel_mc_func (*op_qpix)[16];
01849 uint8_t *dest_y, *dest_cb, *dest_cr;
01850
01851 dest_y = s->dest[0];
01852 dest_cb = s->dest[1];
01853 dest_cr = s->dest[2];
01854
01855 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
01856 op_pix = s->dsp.put_pixels_tab;
01857 op_qpix = s->dsp.put_qpel_pixels_tab;
01858 } else {
01859 op_pix = s->dsp.put_no_rnd_pixels_tab;
01860 op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
01861 }
01862
01863 if (s->mv_dir & MV_DIR_FORWARD) {
01864 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data,
01865 op_pix, op_qpix);
01866 op_pix = s->dsp.avg_pixels_tab;
01867 op_qpix = s->dsp.avg_qpel_pixels_tab;
01868 }
01869 if (s->mv_dir & MV_DIR_BACKWARD) {
01870 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data,
01871 op_pix, op_qpix);
01872 }
01873
01874 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
01875 int progressive_score, interlaced_score;
01876
01877 s->interlaced_dct = 0;
01878 progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
01879 ptr_y, wrap_y,
01880 8) +
01881 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
01882 ptr_y + wrap_y * 8, wrap_y,
01883 8) - 400;
01884
01885 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
01886 progressive_score -= 400;
01887
01888 if (progressive_score > 0) {
01889 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
01890 ptr_y,
01891 wrap_y * 2, 8) +
01892 s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
01893 ptr_y + wrap_y,
01894 wrap_y * 2, 8);
01895
01896 if (progressive_score > interlaced_score) {
01897 s->interlaced_dct = 1;
01898
01899 dct_offset = wrap_y;
01900 wrap_y <<= 1;
01901 if (s->chroma_format == CHROMA_422)
01902 wrap_c <<= 1;
01903 }
01904 }
01905 }
01906
01907 s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
01908 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
01909 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
01910 dest_y + dct_offset, wrap_y);
01911 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
01912 dest_y + dct_offset + 8, wrap_y);
01913
01914 if (s->flags & CODEC_FLAG_GRAY) {
01915 skip_dct[4] = 1;
01916 skip_dct[5] = 1;
01917 } else {
01918 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
01919 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
01920 if (!s->chroma_y_shift) {
01921 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
01922 dest_cb + (dct_offset >> 1), wrap_c);
01923 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
01924 dest_cr + (dct_offset >> 1), wrap_c);
01925 }
01926 }
01927
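            /* if the motion-compensated variance of this MB is already low,
             * cheap per-block SAD tests decide which 8x8 blocks can skip the
             * DCT/quantization entirely */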
01928 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
01929 2 * s->qscale * s->qscale) {
01930
01931 if (s->dsp.sad[1](NULL, ptr_y , dest_y,
01932 wrap_y, 8) < 20 * s->qscale)
01933 skip_dct[0] = 1;
01934 if (s->dsp.sad[1](NULL, ptr_y + 8,
01935 dest_y + 8, wrap_y, 8) < 20 * s->qscale)
01936 skip_dct[1] = 1;
01937 if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
01938 dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
01939 skip_dct[2] = 1;
01940 if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
01941 dest_y + dct_offset + 8,
01942 wrap_y, 8) < 20 * s->qscale)
01943 skip_dct[3] = 1;
01944 if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
01945 wrap_c, 8) < 20 * s->qscale)
01946 skip_dct[4] = 1;
01947 if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
01948 wrap_c, 8) < 20 * s->qscale)
01949 skip_dct[5] = 1;
01950 if (!s->chroma_y_shift) {
01951 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
01952 dest_cb + (dct_offset >> 1),
01953 wrap_c, 8) < 20 * s->qscale)
01954 skip_dct[6] = 1;
01955 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
01956 dest_cr + (dct_offset >> 1),
01957 wrap_c, 8) < 20 * s->qscale)
01958 skip_dct[7] = 1;
01959 }
01960 }
01961 }
01962
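        /* for quantizer noise shaping, keep per-block visual weights and a
         * copy of the unquantized coefficients for the later refinement pass
         * (dct_quantize_refine) */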
01963 if (s->avctx->quantizer_noise_shaping) {
01964 if (!skip_dct[0])
01965 get_visual_weight(weight[0], ptr_y , wrap_y);
01966 if (!skip_dct[1])
01967 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
01968 if (!skip_dct[2])
01969 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
01970 if (!skip_dct[3])
01971 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
01972 if (!skip_dct[4])
01973 get_visual_weight(weight[4], ptr_cb , wrap_c);
01974 if (!skip_dct[5])
01975 get_visual_weight(weight[5], ptr_cr , wrap_c);
01976 if (!s->chroma_y_shift) {
01977 if (!skip_dct[6])
01978 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
01979 wrap_c);
01980 if (!skip_dct[7])
01981 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
01982 wrap_c);
01983 }
01984 memcpy(orig[0], s->block[0], sizeof(DCTELEM) * 64 * mb_block_count);
01985 }
01986
01987
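        /* DCT and quantization of all non-skipped blocks; coefficients that
         * overflow the codec's range are clipped, then optional noise shaping
         * and single-coefficient elimination are applied */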
01988 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
01989 {
01990 for (i = 0; i < mb_block_count; i++) {
01991 if (!skip_dct[i]) {
01992 int overflow;
01993 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
01994
01995
01996
01997
01998
01999 if (overflow)
02000 clip_coeffs(s, s->block[i], s->block_last_index[i]);
02001 } else
02002 s->block_last_index[i] = -1;
02003 }
02004 if (s->avctx->quantizer_noise_shaping) {
02005 for (i = 0; i < mb_block_count; i++) {
02006 if (!skip_dct[i]) {
02007 s->block_last_index[i] =
02008 dct_quantize_refine(s, s->block[i], weight[i],
02009 orig[i], i, s->qscale);
02010 }
02011 }
02012 }
02013
02014 if (s->luma_elim_threshold && !s->mb_intra)
02015 for (i = 0; i < 4; i++)
02016 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
02017 if (s->chroma_elim_threshold && !s->mb_intra)
02018 for (i = 4; i < mb_block_count; i++)
02019 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
02020
02021 if (s->flags & CODEC_FLAG_CBP_RD) {
02022 for (i = 0; i < mb_block_count; i++) {
02023 if (s->block_last_index[i] == -1)
02024 s->coded_score[i] = INT_MAX / 256;
02025 }
02026 }
02027 }
02028
02029 if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
02030 s->block_last_index[4] =
02031 s->block_last_index[5] = 0;
02032 s->block[4][0] =
02033 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
02034 }
02035
02036
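        /* the optimized (non-C) quantizers can report an incorrect
         * block_last_index when alternate scan is used, so recompute it from
         * the permuted scantable */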
02037 if (s->alternate_scan && s->dct_quantize != dct_quantize_c) {
02038 for (i = 0; i < mb_block_count; i++) {
02039 int j;
02040 if (s->block_last_index[i] > 0) {
02041 for (j = 63; j > 0; j--) {
02042 if (s->block[i][s->intra_scantable.permutated[j]])
02043 break;
02044 }
02045 s->block_last_index[i] = j;
02046 }
02047 }
02048 }
02049
02050
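        /* entropy-code the macroblock with the codec-specific writer */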
02051 switch(s->codec_id){
02052 case CODEC_ID_MPEG1VIDEO:
02053 case CODEC_ID_MPEG2VIDEO:
02054 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
02055 mpeg1_encode_mb(s, s->block, motion_x, motion_y);
02056 break;
02057 case CODEC_ID_MPEG4:
02058 if (CONFIG_MPEG4_ENCODER)
02059 mpeg4_encode_mb(s, s->block, motion_x, motion_y);
02060 break;
02061 case CODEC_ID_MSMPEG4V2:
02062 case CODEC_ID_MSMPEG4V3:
02063 case CODEC_ID_WMV1:
02064 if (CONFIG_MSMPEG4_ENCODER)
02065 msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
02066 break;
02067 case CODEC_ID_WMV2:
02068 if (CONFIG_WMV2_ENCODER)
02069 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
02070 break;
02071 case CODEC_ID_H261:
02072 if (CONFIG_H261_ENCODER)
02073 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
02074 break;
02075 case CODEC_ID_H263:
02076 case CODEC_ID_H263P:
02077 case CODEC_ID_FLV1:
02078 case CODEC_ID_RV10:
02079 case CODEC_ID_RV20:
02080 if (CONFIG_H263_ENCODER)
02081 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
02082 break;
02083 case CODEC_ID_MJPEG:
02084 if (CONFIG_MJPEG_ENCODER)
02085 ff_mjpeg_encode_mb(s, s->block);
02086 break;
02087 default:
02088 assert(0);
02089 }
02090 }
02091
02092 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
02093 {
02094 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
02095 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
02096 }
02097
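/* copy_context_before_encode()/copy_context_after_encode() snapshot and
 * restore the parts of the encoder state that a trial macroblock encoding
 * modifies, so several candidate MB types can be tried from the same state */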
02098 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
02099 int i;
02100
02101 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int));
02102
02103
02104 d->mb_skip_run= s->mb_skip_run;
02105 for(i=0; i<3; i++)
02106 d->last_dc[i] = s->last_dc[i];
02107
02108
02109 d->mv_bits= s->mv_bits;
02110 d->i_tex_bits= s->i_tex_bits;
02111 d->p_tex_bits= s->p_tex_bits;
02112 d->i_count= s->i_count;
02113 d->f_count= s->f_count;
02114 d->b_count= s->b_count;
02115 d->skip_count= s->skip_count;
02116 d->misc_bits= s->misc_bits;
02117 d->last_bits= 0;
02118
02119 d->mb_skipped= 0;
02120 d->qscale= s->qscale;
02121 d->dquant= s->dquant;
02122
02123 d->esc3_level_length= s->esc3_level_length;
02124 }
02125
02126 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
02127 int i;
02128
02129 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
02130 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int));
02131
02132
02133 d->mb_skip_run= s->mb_skip_run;
02134 for(i=0; i<3; i++)
02135 d->last_dc[i] = s->last_dc[i];
02136
02137
02138 d->mv_bits= s->mv_bits;
02139 d->i_tex_bits= s->i_tex_bits;
02140 d->p_tex_bits= s->p_tex_bits;
02141 d->i_count= s->i_count;
02142 d->f_count= s->f_count;
02143 d->b_count= s->b_count;
02144 d->skip_count= s->skip_count;
02145 d->misc_bits= s->misc_bits;
02146
02147 d->mb_intra= s->mb_intra;
02148 d->mb_skipped= s->mb_skipped;
02149 d->mv_type= s->mv_type;
02150 d->mv_dir= s->mv_dir;
02151 d->pb= s->pb;
02152 if(s->data_partitioning){
02153 d->pb2= s->pb2;
02154 d->tex_pb= s->tex_pb;
02155 }
02156 d->block= s->block;
02157 for(i=0; i<8; i++)
02158 d->block_last_index[i]= s->block_last_index[i];
02159 d->interlaced_dct= s->interlaced_dct;
02160 d->qscale= s->qscale;
02161
02162 d->esc3_level_length= s->esc3_level_length;
02163 }
02164
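/* encode_mb_hq(): encode one candidate MB into a scratch bit buffer,
 * optionally reconstruct it to add an SSE distortion term
 * (FF_MB_DECISION_RD), and keep the variant with the lowest score in
 * *best / *dmin */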
02165 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
02166 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
02167 int *dmin, int *next_block, int motion_x, int motion_y)
02168 {
02169 int score;
02170 uint8_t *dest_backup[3];
02171
02172 copy_context_before_encode(s, backup, type);
02173
02174 s->block= s->blocks[*next_block];
02175 s->pb= pb[*next_block];
02176 if(s->data_partitioning){
02177 s->pb2 = pb2 [*next_block];
02178 s->tex_pb= tex_pb[*next_block];
02179 }
02180
02181 if(*next_block){
02182 memcpy(dest_backup, s->dest, sizeof(s->dest));
02183 s->dest[0] = s->rd_scratchpad;
02184 s->dest[1] = s->rd_scratchpad + 16*s->linesize;
02185 s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
02186 assert(s->linesize >= 32);
02187 }
02188
02189 encode_mb(s, motion_x, motion_y);
02190
02191 score= put_bits_count(&s->pb);
02192 if(s->data_partitioning){
02193 score+= put_bits_count(&s->pb2);
02194 score+= put_bits_count(&s->tex_pb);
02195 }
02196
02197 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
02198 MPV_decode_mb(s, s->block);
02199
02200 score *= s->lambda2;
02201 score += sse_mb(s) << FF_LAMBDA_SHIFT;
02202 }
02203
02204 if(*next_block){
02205 memcpy(s->dest, dest_backup, sizeof(s->dest));
02206 }
02207
02208 if(score<*dmin){
02209 *dmin= score;
02210 *next_block^=1;
02211
02212 copy_context_after_encode(best, s, type);
02213 }
02214 }
02215
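/* sum of squared errors between two blocks of arbitrary size, with DSP fast
 * paths for the common 16x16 and 8x8 cases */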
02216 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
02217 uint32_t *sq = ff_squareTbl + 256;
02218 int acc=0;
02219 int x,y;
02220
02221 if(w==16 && h==16)
02222 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
02223 else if(w==8 && h==8)
02224 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
02225
02226 for(y=0; y<h; y++){
02227 for(x=0; x<w; x++){
02228 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
02229 }
02230 }
02231
02232 assert(acc>=0);
02233
02234 return acc;
02235 }
02236
02237 static int sse_mb(MpegEncContext *s){
02238 int w= 16;
02239 int h= 16;
02240
02241 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
02242 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
02243
02244     if(w==16 && h==16){
02245 if(s->avctx->mb_cmp == FF_CMP_NSSE){
02246 return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
02247 +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
02248 +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
02249 }else{
02250 return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
02251 +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
02252 +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
02253 }
02254     }else
02255 return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
02256 +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
02257 +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
02258 }
02259
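/* slice-threaded motion estimation workers: each context runs over its
 * assigned [start_mb_y, end_mb_y) row range */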
02260 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
02261 MpegEncContext *s= *(void**)arg;
02262
02263
02264 s->me.pre_pass=1;
02265 s->me.dia_size= s->avctx->pre_dia_size;
02266 s->first_slice_line=1;
02267 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
02268 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
02269 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
02270 }
02271 s->first_slice_line=0;
02272 }
02273
02274 s->me.pre_pass=0;
02275
02276 return 0;
02277 }
02278
02279 static int estimate_motion_thread(AVCodecContext *c, void *arg){
02280 MpegEncContext *s= *(void**)arg;
02281
02282 ff_check_alignment();
02283
02284 s->me.dia_size= s->avctx->dia_size;
02285 s->first_slice_line=1;
02286 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
02287 s->mb_x=0;
02288 ff_init_block_index(s);
02289 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
02290 s->block_index[0]+=2;
02291 s->block_index[1]+=2;
02292 s->block_index[2]+=2;
02293 s->block_index[3]+=2;
02294
02295
02296 if(s->pict_type==AV_PICTURE_TYPE_B)
02297 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
02298 else
02299 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
02300 }
02301 s->first_slice_line=0;
02302 }
02303 return 0;
02304 }
02305
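/* mb_var_thread(): per-MB spatial variance and mean of the source picture,
 * accumulated into mb_var_sum_temp; used for intra pictures when the
 * quantizer is not fixed */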
02306 static int mb_var_thread(AVCodecContext *c, void *arg){
02307 MpegEncContext *s= *(void**)arg;
02308 int mb_x, mb_y;
02309
02310 ff_check_alignment();
02311
02312 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
02313 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
02314 int xx = mb_x * 16;
02315 int yy = mb_y * 16;
02316 uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
02317 int varc;
02318 int sum = s->dsp.pix_sum(pix, s->linesize);
02319
02320 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
02321
02322 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
02323 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
02324 s->me.mb_var_sum_temp += varc;
02325 }
02326 }
02327 return 0;
02328 }
02329
02330 static void write_slice_end(MpegEncContext *s){
02331 if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4){
02332 if(s->partitioned_frame){
02333 ff_mpeg4_merge_partitions(s);
02334 }
02335
02336 ff_mpeg4_stuffing(&s->pb);
02337 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
02338 ff_mjpeg_encode_stuffing(&s->pb);
02339 }
02340
02341 avpriv_align_put_bits(&s->pb);
02342 flush_put_bits(&s->pb);
02343
02344 if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
02345 s->misc_bits+= get_bits_diff(s);
02346 }
02347
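/* encode_thread(): encode all macroblocks of one slice context, handling
 * resync markers / GOB headers, rate-distortion mode decision and PSNR
 * accounting */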
02348 static int encode_thread(AVCodecContext *c, void *arg){
02349 MpegEncContext *s= *(void**)arg;
02350 int mb_x, mb_y, pdif = 0;
02351 int chr_h= 16>>s->chroma_y_shift;
02352 int i, j;
02353 MpegEncContext best_s, backup_s;
02354 uint8_t bit_buf[2][MAX_MB_BYTES];
02355 uint8_t bit_buf2[2][MAX_MB_BYTES];
02356 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
02357 PutBitContext pb[2], pb2[2], tex_pb[2];
02358
02359
02360 ff_check_alignment();
02361
02362 for(i=0; i<2; i++){
02363 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
02364 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
02365 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
02366 }
02367
02368 s->last_bits= put_bits_count(&s->pb);
02369 s->mv_bits=0;
02370 s->misc_bits=0;
02371 s->i_tex_bits=0;
02372 s->p_tex_bits=0;
02373 s->i_count=0;
02374 s->f_count=0;
02375 s->b_count=0;
02376 s->skip_count=0;
02377
02378 for(i=0; i<3; i++){
02379
02380
02381 s->last_dc[i] = 128 << s->intra_dc_precision;
02382
02383 s->current_picture.f.error[i] = 0;
02384 }
02385 s->mb_skip_run = 0;
02386 memset(s->last_mv, 0, sizeof(s->last_mv));
02387
02388 s->last_mv_dir = 0;
02389
02390 switch(s->codec_id){
02391 case CODEC_ID_H263:
02392 case CODEC_ID_H263P:
02393 case CODEC_ID_FLV1:
02394 if (CONFIG_H263_ENCODER)
02395 s->gob_index = ff_h263_get_gob_height(s);
02396 break;
02397 case CODEC_ID_MPEG4:
02398 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
02399 ff_mpeg4_init_partitions(s);
02400 break;
02401 }
02402
02403 s->resync_mb_x=0;
02404 s->resync_mb_y=0;
02405 s->first_slice_line = 1;
02406 s->ptr_lastgob = s->pb.buf;
02407 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
02408
02409 s->mb_x=0;
02410 s->mb_y= mb_y;
02411
02412 ff_set_qscale(s, s->qscale);
02413 ff_init_block_index(s);
02414
02415 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
02416 int xy= mb_y*s->mb_stride + mb_x;
02417 int mb_type= s->mb_type[xy];
02418
02419 int dmin= INT_MAX;
02420 int dir;
02421
02422 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
02423 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
02424 return -1;
02425 }
02426 if(s->data_partitioning){
02427 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
02428 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
02429 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
02430 return -1;
02431 }
02432 }
02433
02434 s->mb_x = mb_x;
02435 s->mb_y = mb_y;
02436 ff_update_block_index(s);
02437
02438 if(CONFIG_H261_ENCODER && s->codec_id == CODEC_ID_H261){
02439 ff_h261_reorder_mb_index(s);
02440 xy= s->mb_y*s->mb_stride + s->mb_x;
02441 mb_type= s->mb_type[xy];
02442 }
02443
02444
02445 if(s->rtp_mode){
02446 int current_packet_size, is_gob_start;
02447
02448 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
02449
02450 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
02451
02452 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
02453
02454 switch(s->codec_id){
02455 case CODEC_ID_H263:
02456 case CODEC_ID_H263P:
02457 if(!s->h263_slice_structured)
02458 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
02459 break;
02460 case CODEC_ID_MPEG2VIDEO:
02462                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1; /* fall through */
02462 case CODEC_ID_MPEG1VIDEO:
02463 if(s->mb_skip_run) is_gob_start=0;
02464 break;
02465 }
02466
02467 if(is_gob_start){
02468 if(s->start_mb_y != mb_y || mb_x!=0){
02469 write_slice_end(s);
02470
02471 if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){
02472 ff_mpeg4_init_partitions(s);
02473 }
02474 }
02475
02476 assert((put_bits_count(&s->pb)&7) == 0);
02477 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
02478
02479 if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
02480 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
02481 int d= 100 / s->avctx->error_rate;
02482 if(r % d == 0){
02483 current_packet_size=0;
02484 s->pb.buf_ptr= s->ptr_lastgob;
02485 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
02486 }
02487 }
02488
02489 if (s->avctx->rtp_callback){
02490 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
02491 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
02492 }
02493
02494 switch(s->codec_id){
02495 case CODEC_ID_MPEG4:
02496 if (CONFIG_MPEG4_ENCODER) {
02497 ff_mpeg4_encode_video_packet_header(s);
02498 ff_mpeg4_clean_buffers(s);
02499 }
02500 break;
02501 case CODEC_ID_MPEG1VIDEO:
02502 case CODEC_ID_MPEG2VIDEO:
02503 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
02504 ff_mpeg1_encode_slice_header(s);
02505 ff_mpeg1_clean_buffers(s);
02506 }
02507 break;
02508 case CODEC_ID_H263:
02509 case CODEC_ID_H263P:
02510 if (CONFIG_H263_ENCODER)
02511 ff_h263_encode_gob_header(s, mb_y);
02512 break;
02513 }
02514
02515 if(s->flags&CODEC_FLAG_PASS1){
02516 int bits= put_bits_count(&s->pb);
02517 s->misc_bits+= bits - s->last_bits;
02518 s->last_bits= bits;
02519 }
02520
02521 s->ptr_lastgob += current_packet_size;
02522 s->first_slice_line=1;
02523 s->resync_mb_x=mb_x;
02524 s->resync_mb_y=mb_y;
02525 }
02526 }
02527
02528 if( (s->resync_mb_x == s->mb_x)
02529 && s->resync_mb_y+1 == s->mb_y){
02530 s->first_slice_line=0;
02531 }
02532
02533 s->mb_skipped=0;
02534 s->dquant=0;
02535
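            /* more than one candidate MB type (mb_type has several bits set)
             * or QP RD: trial-encode every candidate with encode_mb_hq() and
             * keep the cheapest; otherwise encode the single candidate
             * directly below */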
02536 if(mb_type & (mb_type-1) || (s->flags & CODEC_FLAG_QP_RD)){
02537 int next_block=0;
02538 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
02539
02540 copy_context_before_encode(&backup_s, s, -1);
02541 backup_s.pb= s->pb;
02542 best_s.data_partitioning= s->data_partitioning;
02543 best_s.partitioned_frame= s->partitioned_frame;
02544 if(s->data_partitioning){
02545 backup_s.pb2= s->pb2;
02546 backup_s.tex_pb= s->tex_pb;
02547 }
02548
02549 if(mb_type&CANDIDATE_MB_TYPE_INTER){
02550 s->mv_dir = MV_DIR_FORWARD;
02551 s->mv_type = MV_TYPE_16X16;
02552 s->mb_intra= 0;
02553 s->mv[0][0][0] = s->p_mv_table[xy][0];
02554 s->mv[0][0][1] = s->p_mv_table[xy][1];
02555 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
02556 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
02557 }
02558 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
02559 s->mv_dir = MV_DIR_FORWARD;
02560 s->mv_type = MV_TYPE_FIELD;
02561 s->mb_intra= 0;
02562 for(i=0; i<2; i++){
02563 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
02564 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
02565 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
02566 }
02567 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
02568 &dmin, &next_block, 0, 0);
02569 }
02570 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
02571 s->mv_dir = MV_DIR_FORWARD;
02572 s->mv_type = MV_TYPE_16X16;
02573 s->mb_intra= 0;
02574 s->mv[0][0][0] = 0;
02575 s->mv[0][0][1] = 0;
02576 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
02577 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
02578 }
02579 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
02580 s->mv_dir = MV_DIR_FORWARD;
02581 s->mv_type = MV_TYPE_8X8;
02582 s->mb_intra= 0;
02583 for(i=0; i<4; i++){
02584 s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
02585 s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
02586 }
02587 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
02588 &dmin, &next_block, 0, 0);
02589 }
02590 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
02591 s->mv_dir = MV_DIR_FORWARD;
02592 s->mv_type = MV_TYPE_16X16;
02593 s->mb_intra= 0;
02594 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
02595 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
02596 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
02597 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
02598 }
02599 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
02600 s->mv_dir = MV_DIR_BACKWARD;
02601 s->mv_type = MV_TYPE_16X16;
02602 s->mb_intra= 0;
02603 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
02604 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
02605 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
02606 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
02607 }
02608 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
02609 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
02610 s->mv_type = MV_TYPE_16X16;
02611 s->mb_intra= 0;
02612 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
02613 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
02614 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
02615 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
02616 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
02617 &dmin, &next_block, 0, 0);
02618 }
02619 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
02620 s->mv_dir = MV_DIR_FORWARD;
02621 s->mv_type = MV_TYPE_FIELD;
02622 s->mb_intra= 0;
02623 for(i=0; i<2; i++){
02624 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
02625 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
02626 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
02627 }
02628 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
02629 &dmin, &next_block, 0, 0);
02630 }
02631 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
02632 s->mv_dir = MV_DIR_BACKWARD;
02633 s->mv_type = MV_TYPE_FIELD;
02634 s->mb_intra= 0;
02635 for(i=0; i<2; i++){
02636 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
02637 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
02638 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
02639 }
02640 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
02641 &dmin, &next_block, 0, 0);
02642 }
02643 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
02644 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
02645 s->mv_type = MV_TYPE_FIELD;
02646 s->mb_intra= 0;
02647 for(dir=0; dir<2; dir++){
02648 for(i=0; i<2; i++){
02649 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
02650 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
02651 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
02652 }
02653 }
02654 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
02655 &dmin, &next_block, 0, 0);
02656 }
02657 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
02658 s->mv_dir = 0;
02659 s->mv_type = MV_TYPE_16X16;
02660 s->mb_intra= 1;
02661 s->mv[0][0][0] = 0;
02662 s->mv[0][0][1] = 0;
02663 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
02664 &dmin, &next_block, 0, 0);
02665 if(s->h263_pred || s->h263_aic){
02666 if(best_s.mb_intra)
02667 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
02668 else
02669 ff_clean_intra_table_entries(s);
02670 }
02671 }
02672
02673 if((s->flags & CODEC_FLAG_QP_RD) && dmin < INT_MAX){
02674 if(best_s.mv_type==MV_TYPE_16X16){
02675 const int last_qp= backup_s.qscale;
02676 int qpi, qp, dc[6];
02677 DCTELEM ac[6][16];
02678 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
02679 static const int dquant_tab[4]={-1,1,-2,2};
02680
02681 assert(backup_s.dquant == 0);
02682
02683
02684 s->mv_dir= best_s.mv_dir;
02685 s->mv_type = MV_TYPE_16X16;
02686 s->mb_intra= best_s.mb_intra;
02687 s->mv[0][0][0] = best_s.mv[0][0][0];
02688 s->mv[0][0][1] = best_s.mv[0][0][1];
02689 s->mv[1][0][0] = best_s.mv[1][0][0];
02690 s->mv[1][0][1] = best_s.mv[1][0][1];
02691
02692 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
02693 for(; qpi<4; qpi++){
02694 int dquant= dquant_tab[qpi];
02695 qp= last_qp + dquant;
02696 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
02697 continue;
02698 backup_s.dquant= dquant;
02699 if(s->mb_intra && s->dc_val[0]){
02700 for(i=0; i<6; i++){
02701 dc[i]= s->dc_val[0][ s->block_index[i] ];
02702 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
02703 }
02704 }
02705
02706 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER , pb, pb2, tex_pb,
02707 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
02708 if(best_s.qscale != qp){
02709 if(s->mb_intra && s->dc_val[0]){
02710 for(i=0; i<6; i++){
02711 s->dc_val[0][ s->block_index[i] ]= dc[i];
02712 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
02713 }
02714 }
02715 }
02716 }
02717 }
02718 }
02719 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
02720 int mx= s->b_direct_mv_table[xy][0];
02721 int my= s->b_direct_mv_table[xy][1];
02722
02723 backup_s.dquant = 0;
02724 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
02725 s->mb_intra= 0;
02726 ff_mpeg4_set_direct_mv(s, mx, my);
02727 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
02728 &dmin, &next_block, mx, my);
02729 }
02730 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
02731 backup_s.dquant = 0;
02732 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
02733 s->mb_intra= 0;
02734 ff_mpeg4_set_direct_mv(s, 0, 0);
02735 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
02736 &dmin, &next_block, 0, 0);
02737 }
02738 if(!best_s.mb_intra && s->flags2&CODEC_FLAG2_SKIP_RD){
02739 int coded=0;
02740 for(i=0; i<6; i++)
02741 coded |= s->block_last_index[i];
02742 if(coded){
02743 int mx,my;
02744 memcpy(s->mv, best_s.mv, sizeof(s->mv));
02745 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
02746 mx=my=0;
02747 ff_mpeg4_set_direct_mv(s, mx, my);
02748 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
02749 mx= s->mv[1][0][0];
02750 my= s->mv[1][0][1];
02751 }else{
02752 mx= s->mv[0][0][0];
02753 my= s->mv[0][0][1];
02754 }
02755
02756 s->mv_dir= best_s.mv_dir;
02757 s->mv_type = best_s.mv_type;
02758 s->mb_intra= 0;
02759
02760
02761
02762
02763 backup_s.dquant= 0;
02764 s->skipdct=1;
02765 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER , pb, pb2, tex_pb,
02766 &dmin, &next_block, mx, my);
02767 s->skipdct=0;
02768 }
02769 }
02770
02771 s->current_picture.f.qscale_table[xy] = best_s.qscale;
02772
02773 copy_context_after_encode(s, &best_s, -1);
02774
02775 pb_bits_count= put_bits_count(&s->pb);
02776 flush_put_bits(&s->pb);
02777 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
02778 s->pb= backup_s.pb;
02779
02780 if(s->data_partitioning){
02781 pb2_bits_count= put_bits_count(&s->pb2);
02782 flush_put_bits(&s->pb2);
02783 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
02784 s->pb2= backup_s.pb2;
02785
02786 tex_pb_bits_count= put_bits_count(&s->tex_pb);
02787 flush_put_bits(&s->tex_pb);
02788 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
02789 s->tex_pb= backup_s.tex_pb;
02790 }
02791 s->last_bits= put_bits_count(&s->pb);
02792
02793 if (CONFIG_H263_ENCODER &&
02794 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
02795 ff_h263_update_motion_val(s);
02796
02797 if(next_block==0){
02798 s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
02799 s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
02800 s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
02801 }
02802
02803 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
02804 MPV_decode_mb(s, s->block);
02805 } else {
02806 int motion_x = 0, motion_y = 0;
02807 s->mv_type=MV_TYPE_16X16;
02808
02809
02810 switch(mb_type){
02811 case CANDIDATE_MB_TYPE_INTRA:
02812 s->mv_dir = 0;
02813 s->mb_intra= 1;
02814 motion_x= s->mv[0][0][0] = 0;
02815 motion_y= s->mv[0][0][1] = 0;
02816 break;
02817 case CANDIDATE_MB_TYPE_INTER:
02818 s->mv_dir = MV_DIR_FORWARD;
02819 s->mb_intra= 0;
02820 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
02821 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
02822 break;
02823 case CANDIDATE_MB_TYPE_INTER_I:
02824 s->mv_dir = MV_DIR_FORWARD;
02825 s->mv_type = MV_TYPE_FIELD;
02826 s->mb_intra= 0;
02827 for(i=0; i<2; i++){
02828 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
02829 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
02830 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
02831 }
02832 break;
02833 case CANDIDATE_MB_TYPE_INTER4V:
02834 s->mv_dir = MV_DIR_FORWARD;
02835 s->mv_type = MV_TYPE_8X8;
02836 s->mb_intra= 0;
02837 for(i=0; i<4; i++){
02838 s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
02839 s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
02840 }
02841 break;
02842 case CANDIDATE_MB_TYPE_DIRECT:
02843 if (CONFIG_MPEG4_ENCODER) {
02844 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
02845 s->mb_intra= 0;
02846 motion_x=s->b_direct_mv_table[xy][0];
02847 motion_y=s->b_direct_mv_table[xy][1];
02848 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
02849 }
02850 break;
02851 case CANDIDATE_MB_TYPE_DIRECT0:
02852 if (CONFIG_MPEG4_ENCODER) {
02853 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
02854 s->mb_intra= 0;
02855 ff_mpeg4_set_direct_mv(s, 0, 0);
02856 }
02857 break;
02858 case CANDIDATE_MB_TYPE_BIDIR:
02859 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
02860 s->mb_intra= 0;
02861 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
02862 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
02863 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
02864 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
02865 break;
02866 case CANDIDATE_MB_TYPE_BACKWARD:
02867 s->mv_dir = MV_DIR_BACKWARD;
02868 s->mb_intra= 0;
02869 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
02870 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
02871 break;
02872 case CANDIDATE_MB_TYPE_FORWARD:
02873 s->mv_dir = MV_DIR_FORWARD;
02874 s->mb_intra= 0;
02875 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
02876 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
02877
02878 break;
02879 case CANDIDATE_MB_TYPE_FORWARD_I:
02880 s->mv_dir = MV_DIR_FORWARD;
02881 s->mv_type = MV_TYPE_FIELD;
02882 s->mb_intra= 0;
02883 for(i=0; i<2; i++){
02884 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
02885 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
02886 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
02887 }
02888 break;
02889 case CANDIDATE_MB_TYPE_BACKWARD_I:
02890 s->mv_dir = MV_DIR_BACKWARD;
02891 s->mv_type = MV_TYPE_FIELD;
02892 s->mb_intra= 0;
02893 for(i=0; i<2; i++){
02894 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
02895 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
02896 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
02897 }
02898 break;
02899 case CANDIDATE_MB_TYPE_BIDIR_I:
02900 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
02901 s->mv_type = MV_TYPE_FIELD;
02902 s->mb_intra= 0;
02903 for(dir=0; dir<2; dir++){
02904 for(i=0; i<2; i++){
02905 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
02906 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
02907 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
02908 }
02909 }
02910 break;
02911 default:
02912 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
02913 }
02914
02915 encode_mb(s, motion_x, motion_y);
02916
02917
02918 s->last_mv_dir = s->mv_dir;
02919
02920 if (CONFIG_H263_ENCODER &&
02921 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
02922 ff_h263_update_motion_val(s);
02923
02924 MPV_decode_mb(s, s->block);
02925 }
02926
02927
02928 if(s->mb_intra ){
02929 s->p_mv_table[xy][0]=0;
02930 s->p_mv_table[xy][1]=0;
02931 }
02932
02933 if(s->flags&CODEC_FLAG_PSNR){
02934 int w= 16;
02935 int h= 16;
02936
02937 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
02938 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
02939
02940 s->current_picture.f.error[0] += sse(
02941 s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
02942 s->dest[0], w, h, s->linesize);
02943 s->current_picture.f.error[1] += sse(
02944 s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
02945 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
02946 s->current_picture.f.error[2] += sse(
02947 s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
02948 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
02949 }
02950 if(s->loop_filter){
02951 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
02952 ff_h263_loop_filter(s);
02953 }
02954
02955 }
02956 }
02957
02958
02959 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
02960 msmpeg4_encode_ext_header(s);
02961
02962 write_slice_end(s);
02963
02964
02965 if (s->avctx->rtp_callback) {
02966 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
02967 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
02968
02969 emms_c();
02970 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
02971 }
02972
02973 return 0;
02974 }
02975
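/* merge the statistics and bitstreams of the slice-thread contexts back into
 * the main context once the workers have finished */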
02976 #define MERGE(field) dst->field += src->field; src->field=0
02977 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
02978 MERGE(me.scene_change_score);
02979 MERGE(me.mc_mb_var_sum_temp);
02980 MERGE(me.mb_var_sum_temp);
02981 }
02982
02983 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
02984 int i;
02985
02986 MERGE(dct_count[0]);
02987 MERGE(dct_count[1]);
02988 MERGE(mv_bits);
02989 MERGE(i_tex_bits);
02990 MERGE(p_tex_bits);
02991 MERGE(i_count);
02992 MERGE(f_count);
02993 MERGE(b_count);
02994 MERGE(skip_count);
02995 MERGE(misc_bits);
02996 MERGE(error_count);
02997 MERGE(padding_bug_score);
02998 MERGE(current_picture.f.error[0]);
02999 MERGE(current_picture.f.error[1]);
03000 MERGE(current_picture.f.error[2]);
03001
03002 if(dst->avctx->noise_reduction){
03003 for(i=0; i<64; i++){
03004 MERGE(dct_error_sum[0][i]);
03005 MERGE(dct_error_sum[1][i]);
03006 }
03007 }
03008
03009 assert(put_bits_count(&src->pb) % 8 ==0);
03010 assert(put_bits_count(&dst->pb) % 8 ==0);
03011 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
03012 flush_put_bits(&dst->pb);
03013 }
03014
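/* estimate_qp(): pick the picture quantizer, either from a previously stored
 * s->next_lambda or from the rate controller (ff_rate_estimate_qscale), then
 * clean up the per-MB qscale table when adaptive quantization is used */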
03015 static int estimate_qp(MpegEncContext *s, int dry_run){
03016 if (s->next_lambda){
03017 s->current_picture_ptr->f.quality =
03018 s->current_picture.f.quality = s->next_lambda;
03019 if(!dry_run) s->next_lambda= 0;
03020 } else if (!s->fixed_qscale) {
03021 s->current_picture_ptr->f.quality =
03022 s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
03023 if (s->current_picture.f.quality < 0)
03024 return -1;
03025 }
03026
03027 if(s->adaptive_quant){
03028 switch(s->codec_id){
03029 case CODEC_ID_MPEG4:
03030 if (CONFIG_MPEG4_ENCODER)
03031 ff_clean_mpeg4_qscales(s);
03032 break;
03033 case CODEC_ID_H263:
03034 case CODEC_ID_H263P:
03035 case CODEC_ID_FLV1:
03036 if (CONFIG_H263_ENCODER)
03037 ff_clean_h263_qscales(s);
03038 break;
03039 default:
03040 ff_init_qscale_tab(s);
03041 }
03042
03043 s->lambda= s->lambda_table[0];
03044
03045 }else
03046 s->lambda = s->current_picture.f.quality;
03047
03048 update_qscale(s);
03049 return 0;
03050 }
03051
03052
03053 static void set_frame_distances(MpegEncContext * s){
03054 assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
03055 s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
03056
03057 if(s->pict_type==AV_PICTURE_TYPE_B){
03058 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
03059 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
03060 }else{
03061 s->pp_time= s->time - s->last_non_b_time;
03062 s->last_non_b_time= s->time;
03063 assert(s->picture_number==0 || s->pp_time > 0);
03064 }
03065 }
03066
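/* encode_picture(): top-level per-picture flow: motion estimation (or MB
 * variance for intra pictures), scene-change handling, f_code/b_code
 * selection, rate control, header writing, then the threaded slice encoders */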
03067 static int encode_picture(MpegEncContext *s, int picture_number)
03068 {
03069 int i;
03070 int bits;
03071 int context_count = s->slice_context_count;
03072
03073 s->picture_number = picture_number;
03074
03075
03076 s->me.mb_var_sum_temp =
03077 s->me.mc_mb_var_sum_temp = 0;
03078
03079
03080
03081 if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
03082 set_frame_distances(s);
03083 if(CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4)
03084 ff_set_mpeg4_time(s);
03085
03086 s->me.scene_change_score=0;
03087
03088
03089
03090 if(s->pict_type==AV_PICTURE_TYPE_I){
03091 if(s->msmpeg4_version >= 3) s->no_rounding=1;
03092 else s->no_rounding=0;
03093 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
03094 if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
03095 s->no_rounding ^= 1;
03096 }
03097
03098 if(s->flags & CODEC_FLAG_PASS2){
03099 if (estimate_qp(s,1) < 0)
03100 return -1;
03101 ff_get_2pass_fcode(s);
03102 }else if(!(s->flags & CODEC_FLAG_QSCALE)){
03103 if(s->pict_type==AV_PICTURE_TYPE_B)
03104 s->lambda= s->last_lambda_for[s->pict_type];
03105 else
03106 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
03107 update_qscale(s);
03108 }
03109
03110 s->mb_intra=0;
03111 for(i=1; i<context_count; i++){
03112 ff_update_duplicate_context(s->thread_context[i], s);
03113 }
03114
03115 if(ff_init_me(s)<0)
03116 return -1;
03117
03118
03119 if(s->pict_type != AV_PICTURE_TYPE_I){
03120 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
03121 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
03122 if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
03123 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
03124 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
03125 }
03126 }
03127
03128 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
03129 }else {
03130
03131 for(i=0; i<s->mb_stride*s->mb_height; i++)
03132 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
03133
03134 if(!s->fixed_qscale){
03135
03136 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
03137 }
03138 }
03139 for(i=1; i<context_count; i++){
03140 merge_context_after_me(s, s->thread_context[i]);
03141 }
03142 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
03143 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
03144 emms_c();
03145
03146 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
03147 s->pict_type= AV_PICTURE_TYPE_I;
03148 for(i=0; i<s->mb_stride*s->mb_height; i++)
03149 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
03150
03151 }
03152
03153 if(!s->umvplus){
03154 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
03155 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
03156
03157 if(s->flags & CODEC_FLAG_INTERLACED_ME){
03158 int a,b;
03159 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I);
03160 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
03161 s->f_code= FFMAX3(s->f_code, a, b);
03162 }
03163
03164 ff_fix_long_p_mvs(s);
03165 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
03166 if(s->flags & CODEC_FLAG_INTERLACED_ME){
03167 int j;
03168 for(i=0; i<2; i++){
03169 for(j=0; j<2; j++)
03170 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
03171 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
03172 }
03173 }
03174 }
03175
03176 if(s->pict_type==AV_PICTURE_TYPE_B){
03177 int a, b;
03178
03179 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
03180 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
03181 s->f_code = FFMAX(a, b);
03182
03183 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
03184 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
03185 s->b_code = FFMAX(a, b);
03186
03187 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
03188 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
03189 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
03190 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
03191 if(s->flags & CODEC_FLAG_INTERLACED_ME){
03192 int dir, j;
03193 for(dir=0; dir<2; dir++){
03194 for(i=0; i<2; i++){
03195 for(j=0; j<2; j++){
03196 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
03197 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
03198 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
03199 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
03200 }
03201 }
03202 }
03203 }
03204 }
03205 }
03206
03207 if (estimate_qp(s, 0) < 0)
03208 return -1;
03209
03210 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
03211 s->qscale= 3;
03212
03213 if (s->out_format == FMT_MJPEG) {
03214
03215 for(i=1;i<64;i++){
03216 int j= s->dsp.idct_permutation[i];
03217
03218 s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
03219 }
03220 s->y_dc_scale_table=
03221 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
03222 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
03223 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
03224 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
03225 s->qscale= 8;
03226 }
03227
03228
03229 s->current_picture_ptr->f.key_frame =
03230 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
03231 s->current_picture_ptr->f.pict_type =
03232 s->current_picture.f.pict_type = s->pict_type;
03233
03234 if (s->current_picture.f.key_frame)
03235 s->picture_in_gop_number=0;
03236
03237 s->last_bits= put_bits_count(&s->pb);
03238 switch(s->out_format) {
03239 case FMT_MJPEG:
03240 if (CONFIG_MJPEG_ENCODER)
03241 ff_mjpeg_encode_picture_header(s);
03242 break;
03243 case FMT_H261:
03244 if (CONFIG_H261_ENCODER)
03245 ff_h261_encode_picture_header(s, picture_number);
03246 break;
03247 case FMT_H263:
03248 if (CONFIG_WMV2_ENCODER && s->codec_id == CODEC_ID_WMV2)
03249 ff_wmv2_encode_picture_header(s, picture_number);
03250 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
03251 msmpeg4_encode_picture_header(s, picture_number);
03252 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
03253 mpeg4_encode_picture_header(s, picture_number);
03254 else if (CONFIG_RV10_ENCODER && s->codec_id == CODEC_ID_RV10)
03255 rv10_encode_picture_header(s, picture_number);
03256 else if (CONFIG_RV20_ENCODER && s->codec_id == CODEC_ID_RV20)
03257 rv20_encode_picture_header(s, picture_number);
03258 else if (CONFIG_FLV_ENCODER && s->codec_id == CODEC_ID_FLV1)
03259 ff_flv_encode_picture_header(s, picture_number);
03260 else if (CONFIG_H263_ENCODER)
03261 ff_h263_encode_picture_header(s, picture_number);
03262 break;
03263 case FMT_MPEG1:
03264 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
03265 mpeg1_encode_picture_header(s, picture_number);
03266 break;
03267 case FMT_H264:
03268 break;
03269 default:
03270 assert(0);
03271 }
03272 bits= put_bits_count(&s->pb);
03273 s->header_bits= bits - s->last_bits;
03274
03275 for(i=1; i<context_count; i++){
03276 update_duplicate_context_after_me(s->thread_context[i], s);
03277 }
03278 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
03279 for(i=1; i<context_count; i++){
03280 merge_context_after_encode(s, s->thread_context[i]);
03281 }
03282 emms_c();
03283 return 0;
03284 }
03285
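/* denoise_dct_c(): accumulate per-coefficient statistics and pull each
 * coefficient towards zero by the learned dct_offset (noise reduction) */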
03286 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
03287 const int intra= s->mb_intra;
03288 int i;
03289
03290 s->dct_count[intra]++;
03291
03292 for(i=0; i<64; i++){
03293 int level= block[i];
03294
03295 if(level){
03296 if(level>0){
03297 s->dct_error_sum[intra][i] += level;
03298 level -= s->dct_offset[intra][i];
03299 if(level<0) level=0;
03300 }else{
03301 s->dct_error_sum[intra][i] -= level;
03302 level += s->dct_offset[intra][i];
03303 if(level>0) level=0;
03304 }
03305 block[i]= level;
03306 }
03307 }
03308 }
03309
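/* dct_quantize_trellis_c(): rate-distortion optimal quantization; a trellis
 * over the scan positions keeps the cheapest run/level chain ending at each
 * position under the cost distortion + lambda * bits */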
03310 static int dct_quantize_trellis_c(MpegEncContext *s,
03311 DCTELEM *block, int n,
03312 int qscale, int *overflow){
03313 const int *qmat;
03314 const uint8_t *scantable= s->intra_scantable.scantable;
03315 const uint8_t *perm_scantable= s->intra_scantable.permutated;
03316 int max=0;
03317 unsigned int threshold1, threshold2;
03318 int bias=0;
03319 int run_tab[65];
03320 int level_tab[65];
03321 int score_tab[65];
03322 int survivor[65];
03323 int survivor_count;
03324 int last_run=0;
03325 int last_level=0;
03326 int last_score= 0;
03327 int last_i;
03328 int coeff[2][64];
03329 int coeff_count[64];
03330 int qmul, qadd, start_i, last_non_zero, i, dc;
03331 const int esc_length= s->ac_esc_length;
03332 uint8_t * length;
03333 uint8_t * last_length;
03334 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
03335
03336 s->dsp.fdct (block);
03337
03338 if(s->dct_error_sum)
03339 s->denoise_dct(s, block);
03340 qmul= qscale*16;
03341 qadd= ((qscale-1)|1)*8;
03342
03343 if (s->mb_intra) {
03344 int q;
03345 if (!s->h263_aic) {
03346 if (n < 4)
03347 q = s->y_dc_scale;
03348 else
03349 q = s->c_dc_scale;
03350 q = q << 3;
03351 } else{
03352
03353 q = 1 << 3;
03354 qadd=0;
03355 }
03356
03357
03358 block[0] = (block[0] + (q >> 1)) / q;
03359 start_i = 1;
03360 last_non_zero = 0;
03361 qmat = s->q_intra_matrix[qscale];
03362 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
03363 bias= 1<<(QMAT_SHIFT-1);
03364 length = s->intra_ac_vlc_length;
03365 last_length= s->intra_ac_vlc_last_length;
03366 } else {
03367 start_i = 0;
03368 last_non_zero = -1;
03369 qmat = s->q_inter_matrix[qscale];
03370 length = s->inter_ac_vlc_length;
03371 last_length= s->inter_ac_vlc_last_length;
03372 }
03373 last_i= start_i;
03374
03375 threshold1= (1<<QMAT_SHIFT) - bias - 1;
03376 threshold2= (threshold1<<1);
03377
03378 for(i=63; i>=start_i; i--) {
03379 const int j = scantable[i];
03380 int level = block[j] * qmat[j];
03381
03382 if(((unsigned)(level+threshold1))>threshold2){
03383 last_non_zero = i;
03384 break;
03385 }
03386 }
03387
03388 for(i=start_i; i<=last_non_zero; i++) {
03389 const int j = scantable[i];
03390 int level = block[j] * qmat[j];
03391
03392
03393
03394 if(((unsigned)(level+threshold1))>threshold2){
03395 if(level>0){
03396 level= (bias + level)>>QMAT_SHIFT;
03397 coeff[0][i]= level;
03398 coeff[1][i]= level-1;
03399
03400 }else{
03401 level= (bias - level)>>QMAT_SHIFT;
03402 coeff[0][i]= -level;
03403 coeff[1][i]= -level+1;
03404
03405 }
03406 coeff_count[i]= FFMIN(level, 2);
03407 assert(coeff_count[i]);
03408 max |=level;
03409 }else{
03410 coeff[0][i]= (level>>31)|1;
03411 coeff_count[i]= 1;
03412 }
03413 }
03414
03415 *overflow= s->max_qcoeff < max;
03416
03417 if(last_non_zero < start_i){
03418 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
03419 return last_non_zero;
03420 }
03421
03422 score_tab[start_i]= 0;
03423 survivor[0]= start_i;
03424 survivor_count= 1;
03425
03426 for(i=start_i; i<=last_non_zero; i++){
03427 int level_index, j, zero_distortion;
03428 int dct_coeff= FFABS(block[ scantable[i] ]);
03429 int best_score=256*256*256*120;
03430
03431 if ( s->dsp.fdct == fdct_ifast
03432 #ifndef FAAN_POSTSCALE
03433 || s->dsp.fdct == ff_faandct
03434 #endif
03435 )
03436 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
03437 zero_distortion= dct_coeff*dct_coeff;
03438
03439 for(level_index=0; level_index < coeff_count[i]; level_index++){
03440 int distortion;
03441 int level= coeff[level_index][i];
03442 const int alevel= FFABS(level);
03443 int unquant_coeff;
03444
03445 assert(level);
03446
03447 if(s->out_format == FMT_H263){
03448 unquant_coeff= alevel*qmul + qadd;
03449 }else{
03450 j= s->dsp.idct_permutation[ scantable[i] ];
03451 if(s->mb_intra){
03452 unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
03453 unquant_coeff = (unquant_coeff - 1) | 1;
03454 }else{
03455 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
03456 unquant_coeff = (unquant_coeff - 1) | 1;
03457 }
03458 unquant_coeff<<= 3;
03459 }
03460
03461 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
03462 level+=64;
03463 if((level&(~127)) == 0){
03464 for(j=survivor_count-1; j>=0; j--){
03465 int run= i - survivor[j];
03466 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
03467 score += score_tab[i-run];
03468
03469 if(score < best_score){
03470 best_score= score;
03471 run_tab[i+1]= run;
03472 level_tab[i+1]= level-64;
03473 }
03474 }
03475
03476 if(s->out_format == FMT_H263){
03477 for(j=survivor_count-1; j>=0; j--){
03478 int run= i - survivor[j];
03479 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
03480 score += score_tab[i-run];
03481 if(score < last_score){
03482 last_score= score;
03483 last_run= run;
03484 last_level= level-64;
03485 last_i= i+1;
03486 }
03487 }
03488 }
03489 }else{
03490 distortion += esc_length*lambda;
03491 for(j=survivor_count-1; j>=0; j--){
03492 int run= i - survivor[j];
03493 int score= distortion + score_tab[i-run];
03494
03495 if(score < best_score){
03496 best_score= score;
03497 run_tab[i+1]= run;
03498 level_tab[i+1]= level-64;
03499 }
03500 }
03501
03502 if(s->out_format == FMT_H263){
03503 for(j=survivor_count-1; j>=0; j--){
03504 int run= i - survivor[j];
03505 int score= distortion + score_tab[i-run];
03506 if(score < last_score){
03507 last_score= score;
03508 last_run= run;
03509 last_level= level-64;
03510 last_i= i+1;
03511 }
03512 }
03513 }
03514 }
03515 }
03516
03517 score_tab[i+1]= best_score;
03518
03519
03520 if(last_non_zero <= 27){
03521 for(; survivor_count; survivor_count--){
03522 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
03523 break;
03524 }
03525 }else{
03526 for(; survivor_count; survivor_count--){
03527 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
03528 break;
03529 }
03530 }
03531
03532 survivor[ survivor_count++ ]= i+1;
03533 }
03534
03535 if(s->out_format != FMT_H263){
03536 last_score= 256*256*256*120;
03537 for(i= survivor[0]; i<=last_non_zero + 1; i++){
03538 int score= score_tab[i];
03539 if(i) score += lambda*2;
03540
03541 if(score < last_score){
03542 last_score= score;
03543 last_i= i;
03544 last_level= level_tab[i];
03545 last_run= run_tab[i];
03546 }
03547 }
03548 }
03549
03550 s->coded_score[n] = last_score;
03551
03552 dc= FFABS(block[0]);
03553 last_non_zero= last_i - 1;
03554 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
03555
03556 if(last_non_zero < start_i)
03557 return last_non_zero;
03558
03559 if(last_non_zero == 0 && start_i == 0){
03560 int best_level= 0;
03561 int best_score= dc * dc;
03562
03563 for(i=0; i<coeff_count[0]; i++){
03564 int level= coeff[i][0];
03565 int alevel= FFABS(level);
03566 int unquant_coeff, score, distortion;
03567
03568 if(s->out_format == FMT_H263){
03569 unquant_coeff= (alevel*qmul + qadd)>>3;
03570 }else{
03571 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
03572 unquant_coeff = (unquant_coeff - 1) | 1;
03573 }
03574 unquant_coeff = (unquant_coeff + 4) >> 3;
03575 unquant_coeff<<= 3 + 3;
03576
03577 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
03578 level+=64;
03579 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
03580 else score= distortion + esc_length*lambda;
03581
03582 if(score < best_score){
03583 best_score= score;
03584 best_level= level - 64;
03585 }
03586 }
03587 block[0]= best_level;
03588 s->coded_score[n] = best_score - dc*dc;
03589 if(best_level == 0) return -1;
03590 else return last_non_zero;
03591 }
03592
03593 i= last_i;
03594 assert(last_level);
03595
03596 block[ perm_scantable[last_non_zero] ]= last_level;
03597 i -= last_run + 1;
03598
03599 for(; i>start_i; i -= run_tab[i] + 1){
03600 block[ perm_scantable[i-1] ]= level_tab[i];
03601 }
03602
03603 return last_non_zero;
03604 }
03605
03606
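/* basis[] holds the 64 scaled 8x8 DCT basis functions (BASIS_SHIFT fixed
 * point, stored in IDCT permutation order); build_basis() fills it once on
 * first use */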
03607 static int16_t basis[64][64];
03608
03609 static void build_basis(uint8_t *perm){
03610 int i, j, x, y;
03611 emms_c();
03612 for(i=0; i<8; i++){
03613 for(j=0; j<8; j++){
03614 for(y=0; y<8; y++){
03615 for(x=0; x<8; x++){
03616 double s= 0.25*(1<<BASIS_SHIFT);
03617 int index= 8*i + j;
03618 int perm_index= perm[index];
03619 if(i==0) s*= sqrt(0.5);
03620 if(j==0) s*= sqrt(0.5);
03621 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
03622 }
03623 }
03624 }
03625 }
03626 }
03627
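/* dct_quantize_refine(): greedy refinement of an already quantized block:
 * repeatedly try +-1 changes to individual coefficients and accept the most
 * beneficial change, trading the visually weighted pixel-domain error against
 * the estimated rate */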
03628 static int dct_quantize_refine(MpegEncContext *s,
03629 DCTELEM *block, int16_t *weight, DCTELEM *orig,
03630 int n, int qscale){
03631 int16_t rem[64];
03632 LOCAL_ALIGNED_16(DCTELEM, d1, [64]);
03633 const uint8_t *scantable= s->intra_scantable.scantable;
03634 const uint8_t *perm_scantable= s->intra_scantable.permutated;
03635
03636
03637 int run_tab[65];
03638 int prev_run=0;
03639 int prev_level=0;
03640 int qmul, qadd, start_i, last_non_zero, i, dc;
03641 uint8_t * length;
03642 uint8_t * last_length;
03643 int lambda;
03644 int rle_index, run, q = 1, sum;
03645 #ifdef REFINE_STATS
03646 static int count=0;
03647 static int after_last=0;
03648 static int to_zero=0;
03649 static int from_zero=0;
03650 static int raise=0;
03651 static int lower=0;
03652 static int messed_sign=0;
03653 #endif
03654
03655 if(basis[0][0] == 0)
03656 build_basis(s->dsp.idct_permutation);
03657
03658 qmul= qscale*2;
03659 qadd= (qscale-1)|1;
03660 if (s->mb_intra) {
03661 if (!s->h263_aic) {
03662 if (n < 4)
03663 q = s->y_dc_scale;
03664 else
03665 q = s->c_dc_scale;
03666 } else{
03667
03668 q = 1;
03669 qadd=0;
03670 }
03671 q <<= RECON_SHIFT-3;
03672
03673 dc= block[0]*q;
03674
03675 start_i = 1;
03676
03677
03678 length = s->intra_ac_vlc_length;
03679 last_length= s->intra_ac_vlc_last_length;
03680 } else {
03681 dc= 0;
03682 start_i = 0;
03683 length = s->inter_ac_vlc_length;
03684 last_length= s->inter_ac_vlc_last_length;
03685 }
03686 last_non_zero = s->block_last_index[n];
03687
03688 #ifdef REFINE_STATS
03689 {START_TIMER
03690 #endif
03691 dc += (1<<(RECON_SHIFT-1));
03692 for(i=0; i<64; i++){
03693 rem[i]= dc - (orig[i]<<RECON_SHIFT);
03694 }
03695 #ifdef REFINE_STATS
03696 STOP_TIMER("memset rem[]")}
03697 #endif
03698 sum=0;
03699 for(i=0; i<64; i++){
03700 int one= 36;
03701 int qns=4;
03702 int w;
03703
03704 w= FFABS(weight[i]) + qns*one;
03705 w= 15 + (48*qns*one + w/2)/w;
03706
03707 weight[i] = w;
03708
03709
03710 assert(w>0);
03711 assert(w<(1<<6));
03712 sum += w*w;
03713 }
03714 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
03715 #ifdef REFINE_STATS
03716 {START_TIMER
03717 #endif
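/* build the run-length table of the current coefficients and add their
 * contributions to the error accumulator rem[] */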
03718 run=0;
03719 rle_index=0;
03720 for(i=start_i; i<=last_non_zero; i++){
03721 int j= perm_scantable[i];
03722 const int level= block[j];
03723 int coeff;
03724
03725 if(level){
03726 if(level<0) coeff= qmul*level - qadd;
03727 else coeff= qmul*level + qadd;
03728 run_tab[rle_index++]=run;
03729 run=0;
03730
03731 s->dsp.add_8x8basis(rem, basis[j], coeff);
03732 }else{
03733 run++;
03734 }
03735 }
03736 #ifdef REFINE_STATS
03737 if(last_non_zero>0){
03738 STOP_TIMER("init rem[]")
03739 }
03740 }
03741
03742 {START_TIMER
03743 #endif
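/* main refinement loop: each pass evaluates every candidate +-1 change and
 * applies only the single best one; it terminates when no change improves
 * the rate-distortion score */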
03744 for(;;){
03745 int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
03746 int best_coeff=0;
03747 int best_change=0;
03748 int run2, best_unquant_change=0, analyze_gradient;
03749 #ifdef REFINE_STATS
03750 {START_TIMER
03751 #endif
03752 analyze_gradient = last_non_zero > 2 || s->avctx->quantizer_noise_shaping >= 3;
03753
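/* optionally compute the DCT of the weighted spatial error; a new non-zero
 * coefficient is later only considered if its sign opposes this gradient */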
03754 if(analyze_gradient){
03755 #ifdef REFINE_STATS
03756 {START_TIMER
03757 #endif
03758 for(i=0; i<64; i++){
03759 int w= weight[i];
03760
03761 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
03762 }
03763 #ifdef REFINE_STATS
03764 STOP_TIMER("rem*w*w")}
03765 {START_TIMER
03766 #endif
03767 s->dsp.fdct(d1);
03768 #ifdef REFINE_STATS
03769 STOP_TIMER("dct")}
03770 #endif
03771 }
03772
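/* intra blocks (start_i == 1): also try changing the DC coefficient;
 * best_coeff == 0 then marks a DC change */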
03773 if(start_i){
03774 const int level= block[0];
03775 int change, old_coeff;
03776
03777 assert(s->mb_intra);
03778
03779 old_coeff= q*level;
03780
03781 for(change=-1; change<=1; change+=2){
03782 int new_level= level + change;
03783 int score, new_coeff;
03784
03785 new_coeff= q*new_level;
03786 if(new_coeff >= 2048 || new_coeff < 0)
03787 continue;
03788
03789 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
03790 if(score<best_score){
03791 best_score= score;
03792 best_coeff= 0;
03793 best_change= change;
03794 best_unquant_change= new_coeff - old_coeff;
03795 }
03796 }
03797 }
03798
03799 run=0;
03800 rle_index=0;
03801 run2= run_tab[rle_index++];
03802 prev_level=0;
03803 prev_run=0;
03804
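/* try a +-1 change on every coefficient position: "score" accumulates the
 * VLC length difference (scaled by lambda) plus the distortion change
 * reported by try_8x8basis() */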
03805 for(i=start_i; i<64; i++){
03806 int j= perm_scantable[i];
03807 const int level= block[j];
03808 int change, old_coeff;
03809
03810 if(s->avctx->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
03811 break;
03812
03813 if(level){
03814 if(level<0) old_coeff= qmul*level - qadd;
03815 else old_coeff= qmul*level + qadd;
03816 run2= run_tab[rle_index++];
03817 }else{
03818 old_coeff=0;
03819 run2--;
03820 assert(run2>=0 || i >= last_non_zero );
03821 }
03822
03823 for(change=-1; change<=1; change+=2){
03824 int new_level= level + change;
03825 int score, new_coeff, unquant_change;
03826
03827 score=0;
03828 if(s->avctx->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
03829 continue;
03830
03831 if(new_level){
03832 if(new_level<0) new_coeff= qmul*new_level - qadd;
03833 else new_coeff= qmul*new_level + qadd;
03834 if(new_coeff >= 2048 || new_coeff <= -2048)
03835 continue;
03836
03837
03838 if(level){
03839 if(level < 63 && level > -63){
03840 if(i < last_non_zero)
03841 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
03842 - length[UNI_AC_ENC_INDEX(run, level+64)];
03843 else
03844 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
03845 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
03846 }
03847 }else{
03848 assert(FFABS(new_level)==1);
03849
03850 if(analyze_gradient){
03851 int g= d1[ scantable[i] ];
03852 if(g && (g^new_level) >= 0)
03853 continue;
03854 }
03855
03856 if(i < last_non_zero){
03857 int next_i= i + run2 + 1;
03858 int next_level= block[ perm_scantable[next_i] ] + 64;
03859
03860 if(next_level&(~127))
03861 next_level= 0;
03862
03863 if(next_i < last_non_zero)
03864 score += length[UNI_AC_ENC_INDEX(run, 65)]
03865 + length[UNI_AC_ENC_INDEX(run2, next_level)]
03866 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
03867 else
03868 score += length[UNI_AC_ENC_INDEX(run, 65)]
03869 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
03870 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
03871 }else{
03872 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
03873 if(prev_level){
03874 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
03875 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
03876 }
03877 }
03878 }
03879 }else{
03880 new_coeff=0;
03881 assert(FFABS(level)==1);
03882
03883 if(i < last_non_zero){
03884 int next_i= i + run2 + 1;
03885 int next_level= block[ perm_scantable[next_i] ] + 64;
03886
03887 if(next_level&(~127))
03888 next_level= 0;
03889
03890 if(next_i < last_non_zero)
03891 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
03892 - length[UNI_AC_ENC_INDEX(run2, next_level)]
03893 - length[UNI_AC_ENC_INDEX(run, 65)];
03894 else
03895 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
03896 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
03897 - length[UNI_AC_ENC_INDEX(run, 65)];
03898 }else{
03899 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
03900 if(prev_level){
03901 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
03902 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
03903 }
03904 }
03905 }
03906
03907 score *= lambda;
03908
03909 unquant_change= new_coeff - old_coeff;
03910 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
03911
03912 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
03913 if(score<best_score){
03914 best_score= score;
03915 best_coeff= i;
03916 best_change= change;
03917 best_unquant_change= unquant_change;
03918 }
03919 }
03920 if(level){
03921 prev_level= level + 64;
03922 if(prev_level&(~127))
03923 prev_level= 0;
03924 prev_run= run;
03925 run=0;
03926 }else{
03927 run++;
03928 }
03929 }
03930 #ifdef REFINE_STATS
03931 STOP_TIMER("iterative step")}
03932 #endif
03933
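/* apply the best change of this pass, update last_non_zero and the run
 * table, and fold the change into rem[]; if nothing improved, stop */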
03934 if(best_change){
03935 int j= perm_scantable[ best_coeff ];
03936
03937 block[j] += best_change;
03938
03939 if(best_coeff > last_non_zero){
03940 last_non_zero= best_coeff;
03941 assert(block[j]);
03942 #ifdef REFINE_STATS
03943 after_last++;
03944 #endif
03945 }else{
03946 #ifdef REFINE_STATS
03947 if(block[j]){
03948 if(block[j] - best_change){
03949 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
03950 raise++;
03951 }else{
03952 lower++;
03953 }
03954 }else{
03955 from_zero++;
03956 }
03957 }else{
03958 to_zero++;
03959 }
03960 #endif
03961 for(; last_non_zero>=start_i; last_non_zero--){
03962 if(block[perm_scantable[last_non_zero]])
03963 break;
03964 }
03965 }
03966 #ifdef REFINE_STATS
03967 count++;
03968 if(256*256*256*64 % count == 0){
03969 printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
03970 }
03971 #endif
03972 run=0;
03973 rle_index=0;
03974 for(i=start_i; i<=last_non_zero; i++){
03975 int j= perm_scantable[i];
03976 const int level= block[j];
03977
03978 if(level){
03979 run_tab[rle_index++]=run;
03980 run=0;
03981 }else{
03982 run++;
03983 }
03984 }
03985
03986 s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
03987 }else{
03988 break;
03989 }
03990 }
03991 #ifdef REFINE_STATS
03992 if(last_non_zero>0){
03993 STOP_TIMER("iterative search")
03994 }
03995 }
03996 #endif
03997
03998 return last_non_zero;
03999 }
04000
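/**
 * Default C implementation of the block quantizer: forward DCT, optional
 * DCT-domain denoising, then scalar quantization with a bias-controlled
 * dead zone. The block is scanned from the highest frequency downwards to
 * find the last coefficient above the threshold; everything beyond it is
 * cleared. *overflow is set when a quantized level exceeds s->max_qcoeff.
 * @return index of the last non-zero coefficient in scan order
 */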
04001 int dct_quantize_c(MpegEncContext *s,
04002 DCTELEM *block, int n,
04003 int qscale, int *overflow)
04004 {
04005 int i, j, level, last_non_zero, q, start_i;
04006 const int *qmat;
04007 const uint8_t *scantable= s->intra_scantable.scantable;
04008 int bias;
04009 int max=0;
04010 unsigned int threshold1, threshold2;
04011
04012 s->dsp.fdct (block);
04013
04014 if(s->dct_error_sum)
04015 s->denoise_dct(s, block);
04016
04017 if (s->mb_intra) {
04018 if (!s->h263_aic) {
04019 if (n < 4)
04020 q = s->y_dc_scale;
04021 else
04022 q = s->c_dc_scale;
04023 q = q << 3;
04024 } else
04025
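/* with AIC the intra DC is not scaled by the DC scale tables */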
04026 q = 1 << 3;
04027
04028
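/* intra DC: rounded division by the DC scale (block[0] is assumed to be
 * non-negative here) */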
04029 block[0] = (block[0] + (q >> 1)) / q;
04030 start_i = 1;
04031 last_non_zero = 0;
04032 qmat = s->q_intra_matrix[qscale];
04033 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
04034 } else {
04035 start_i = 0;
04036 last_non_zero = -1;
04037 qmat = s->q_inter_matrix[qscale];
04038 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
04039 }
04040 threshold1= (1<<QMAT_SHIFT) - bias - 1;
04041 threshold2= (threshold1<<1);
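/* dead-zone thresholding: a coefficient survives only if
 * |block[j] * qmat[j]| exceeds (1<<QMAT_SHIFT) - bias - 1; the unsigned
 * comparison below tests both signs at once */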
04042 for(i=63;i>=start_i;i--) {
04043 j = scantable[i];
04044 level = block[j] * qmat[j];
04045
04046 if(((unsigned)(level+threshold1))>threshold2){
04047 last_non_zero = i;
04048 break;
04049 }else{
04050 block[j]=0;
04051 }
04052 }
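/* quantize the surviving range start_i..last_non_zero: add the bias, shift
 * down by QMAT_SHIFT and OR the levels together for the overflow check */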
04053 for(i=start_i; i<=last_non_zero; i++) {
04054 j = scantable[i];
04055 level = block[j] * qmat[j];
04056
04057
04058
04059 if(((unsigned)(level+threshold1))>threshold2){
04060 if(level>0){
04061 level= (bias + level)>>QMAT_SHIFT;
04062 block[j]= level;
04063 }else{
04064 level= (bias - level)>>QMAT_SHIFT;
04065 block[j]= -level;
04066 }
04067 max |=level;
04068 }else{
04069 block[j]=0;
04070 }
04071 }
04072 *overflow= s->max_qcoeff < max;
04073
04074
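/* reorder the non-zero coefficients so that they match the permutation
 * expected by the IDCT implementation */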
04075 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
04076 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
04077
04078 return last_non_zero;
04079 }
04080
04081 #define OFFSET(x) offsetof(MpegEncContext, x)
04082 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
04083 static const AVOption h263_options[] = {
04084 { "obmc", "Use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
04085 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE},
04086 { NULL },
04087 };
04088
04089 static const AVClass h263_class = {
04090 .class_name = "H.263 encoder",
04091 .item_name = av_default_item_name,
04092 .option = h263_options,
04093 .version = LIBAVUTIL_VERSION_INT,
04094 };
04095
04096 AVCodec ff_h263_encoder = {
04097 .name = "h263",
04098 .type = AVMEDIA_TYPE_VIDEO,
04099 .id = CODEC_ID_H263,
04100 .priv_data_size = sizeof(MpegEncContext),
04101 .init = MPV_encode_init,
04102 .encode = MPV_encode_picture,
04103 .close = MPV_encode_end,
04104 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
04105 .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
04106 .priv_class = &h263_class,
04107 };
04108
04109 static const AVOption h263p_options[] = {
04110 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
04111 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
04112 { "obmc", "Use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
04113 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE},
04114 { NULL },
04115 };
04116 static const AVClass h263p_class = {
04117 .class_name = "H.263p encoder",
04118 .item_name = av_default_item_name,
04119 .option = h263p_options,
04120 .version = LIBAVUTIL_VERSION_INT,
04121 };
04122
04123 AVCodec ff_h263p_encoder = {
04124 .name = "h263p",
04125 .type = AVMEDIA_TYPE_VIDEO,
04126 .id = CODEC_ID_H263P,
04127 .priv_data_size = sizeof(MpegEncContext),
04128 .init = MPV_encode_init,
04129 .encode = MPV_encode_picture,
04130 .close = MPV_encode_end,
04131 .capabilities = CODEC_CAP_SLICE_THREADS,
04132 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
04133 .long_name= NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
04134 .priv_class = &h263p_class,
04135 };
04136
04137 AVCodec ff_msmpeg4v2_encoder = {
04138 .name = "msmpeg4v2",
04139 .type = AVMEDIA_TYPE_VIDEO,
04140 .id = CODEC_ID_MSMPEG4V2,
04141 .priv_data_size = sizeof(MpegEncContext),
04142 .init = MPV_encode_init,
04143 .encode = MPV_encode_picture,
04144 .close = MPV_encode_end,
04145 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
04146 .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
04147 };
04148
04149 AVCodec ff_msmpeg4v3_encoder = {
04150 .name = "msmpeg4",
04151 .type = AVMEDIA_TYPE_VIDEO,
04152 .id = CODEC_ID_MSMPEG4V3,
04153 .priv_data_size = sizeof(MpegEncContext),
04154 .init = MPV_encode_init,
04155 .encode = MPV_encode_picture,
04156 .close = MPV_encode_end,
04157 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
04158 .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
04159 };
04160
04161 AVCodec ff_wmv1_encoder = {
04162 .name = "wmv1",
04163 .type = AVMEDIA_TYPE_VIDEO,
04164 .id = CODEC_ID_WMV1,
04165 .priv_data_size = sizeof(MpegEncContext),
04166 .init = MPV_encode_init,
04167 .encode = MPV_encode_picture,
04168 .close = MPV_encode_end,
04169 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
04170 .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
04171 };