*/
/**
- * @file cavs.c
+ * @file libavcodec/cavs.c
* Chinese AVS video (AVS1-P2, JiZhun profile) decoder
* @author Stefan Gehrer <stefan.gehrer@gmx.de>
*/
#include "avcodec.h"
-#include "bitstream.h"
+#include "get_bits.h"
#include "golomb.h"
+#include "mathops.h"
#include "cavs.h"
#include "cavsdata.h"
*
****************************************************************************/
-static inline int get_bs(vector_t *mvP, vector_t *mvQ, int b) {
+static inline int get_bs(cavs_vector *mvP, cavs_vector *mvQ, int b) {
if((mvP->ref == REF_INTRA) || (mvQ->ref == REF_INTRA))
return 2;
if( (abs(mvP->x - mvQ->x) >= 4) || (abs(mvP->y - mvQ->y) >= 4) )
* ---------
*
*/
-void ff_cavs_filter(AVSContext *h, enum mb_t mb_type) {
+void ff_cavs_filter(AVSContext *h, enum cavs_mb mb_type) {
DECLARE_ALIGNED_8(uint8_t, bs[8]);
int qp_avg, alpha, beta, tc;
int i;
*
****************************************************************************/
+/**
+ * Gather the neighbouring samples needed by the luma intra predictors
+ * for one 8x8 block of the current macroblock.
+ *
+ * Fills top[0..17] (top[1..16] = samples above the block, top[0] the
+ * corner sample, top[17] a right-side extension) and points *left at
+ * the matching left-border array, extending/duplicating samples where
+ * the A (left), B (top) or C (top-right) neighbours are unavailable.
+ *
+ * @param h     decoder context
+ * @param top   output array of (at least) 18 top-neighbour samples
+ * @param left  output: set to the left-neighbour sample array to use
+ * @param block index of the 8x8 luma block inside the MB, raster order:
+ *              0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right
+ */
+void ff_cavs_load_intra_pred_luma(AVSContext *h, uint8_t *top,
+                                  uint8_t **left, int block) {
+    int i;
+
+    switch(block) {
+    case 0:
+        /* top-left 8x8: neighbours come from the MB borders */
+        *left = h->left_border_y;
+        h->left_border_y[0] = h->left_border_y[1];
+        memset(&h->left_border_y[17],h->left_border_y[16],9);
+        memcpy(&top[1],&h->top_border_y[h->mbx*16],16);
+        top[17] = top[16];
+        top[0] = top[1];
+        /* true top-left corner sample only valid if both A and B exist */
+        if((h->flags & A_AVAIL) && (h->flags & B_AVAIL))
+            h->left_border_y[0] = top[0] = h->topleft_border_y;
+        break;
+    case 1:
+        /* top-right 8x8: left neighbours are column 7 of this MB */
+        *left = h->intern_border_y;
+        for(i=0;i<8;i++)
+            h->intern_border_y[i+1] = *(h->cy + 7 + i*h->l_stride);
+        memset(&h->intern_border_y[9],h->intern_border_y[8],9);
+        h->intern_border_y[0] = h->intern_border_y[1];
+        memcpy(&top[1],&h->top_border_y[h->mbx*16+8],8);
+        /* top-right samples come from MB C when available, else replicate */
+        if(h->flags & C_AVAIL)
+            memcpy(&top[9],&h->top_border_y[(h->mbx + 1)*16],8);
+        else
+            memset(&top[9],top[8],9);
+        top[17] = top[16];
+        top[0] = top[1];
+        if(h->flags & B_AVAIL)
+            h->intern_border_y[0] = top[0] = h->top_border_y[h->mbx*16+7];
+        break;
+    case 2:
+        /* bottom-left 8x8: top neighbours are row 7 of this MB */
+        *left = &h->left_border_y[8];
+        memcpy(&top[1],h->cy + 7*h->l_stride,16);
+        top[17] = top[16];
+        top[0] = top[1];
+        if(h->flags & A_AVAIL)
+            top[0] = h->left_border_y[8];
+        break;
+    case 3:
+        /* bottom-right 8x8: both neighbours come from inside this MB */
+        *left = &h->intern_border_y[8];
+        for(i=0;i<8;i++)
+            h->intern_border_y[i+9] = *(h->cy + 7 + (i+8)*h->l_stride);
+        memset(&h->intern_border_y[17],h->intern_border_y[16],9);
+        memcpy(&top[0],h->cy + 7 + 7*h->l_stride,9);
+        memset(&top[9],top[8],9);
+        break;
+    }
+}
+
+/**
+ * Prepare the chroma border sample arrays for intra prediction of the
+ * current macroblock: extend each border by one pixel and fill in the
+ * top-left corner sample (from the saved topleft border when both the
+ * left and top neighbours exist, otherwise by replication).
+ */
+void ff_cavs_load_intra_pred_chroma(AVSContext *h) {
+    /* extend borders by one pixel */
+    h->left_border_u[9] = h->left_border_u[8];
+    h->left_border_v[9] = h->left_border_v[8];
+    h->top_border_u[h->mbx*10+9] = h->top_border_u[h->mbx*10+8];
+    h->top_border_v[h->mbx*10+9] = h->top_border_v[h->mbx*10+8];
+    /* corner sample: real topleft neighbour only exists away from the
+       first row/column; otherwise duplicate the nearest border sample */
+    if(h->mbx && h->mby) {
+        h->top_border_u[h->mbx*10] = h->left_border_u[0] = h->topleft_border_u;
+        h->top_border_v[h->mbx*10] = h->left_border_v[0] = h->topleft_border_v;
+    } else {
+        h->left_border_u[0] = h->left_border_u[1];
+        h->left_border_v[0] = h->left_border_v[1];
+        h->top_border_u[h->mbx*10] = h->top_border_u[h->mbx*10+1];
+        h->top_border_v[h->mbx*10] = h->top_border_v[h->mbx*10+1];
+    }
+}
+
static void intra_pred_vert(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
int y;
- uint64_t a = unaligned64(&top[1]);
+ uint64_t a = AV_RN64(&top[1]);
for(y=0;y<8;y++) {
*((uint64_t *)(d+y*stride)) = a;
}
#undef LOWPASS
+/**
+ * Finalize luma/chroma intra prediction modes for an I-macroblock:
+ * saves the bottom-row modes as predictors for the next MB row, then
+ * remaps modes that reference unavailable left (A) or top (B)
+ * neighbour samples via the ff_*_modifier tables.
+ *
+ * @param h            decoder context
+ * @param pred_mode_uv in/out: chroma prediction mode, remapped in place
+ */
+void ff_cavs_modify_mb_i(AVSContext *h, int *pred_mode_uv) {
+    /* save pred modes before they get modified */
+    h->pred_mode_Y[3] = h->pred_mode_Y[5];
+    h->pred_mode_Y[6] = h->pred_mode_Y[8];
+    h->top_pred_Y[h->mbx*2+0] = h->pred_mode_Y[7];
+    h->top_pred_Y[h->mbx*2+1] = h->pred_mode_Y[8];
+
+    /* modify pred modes according to availability of neighbour samples */
+    if(!(h->flags & A_AVAIL)) {
+        modify_pred(ff_left_modifier_l, &h->pred_mode_Y[4] );
+        modify_pred(ff_left_modifier_l, &h->pred_mode_Y[7] );
+        modify_pred(ff_left_modifier_c, pred_mode_uv );
+    }
+    if(!(h->flags & B_AVAIL)) {
+        modify_pred(ff_top_modifier_l, &h->pred_mode_Y[4] );
+        modify_pred(ff_top_modifier_l, &h->pred_mode_Y[5] );
+        modify_pred(ff_top_modifier_c, pred_mode_uv );
+    }
+}
+
/*****************************************************************************
*
* motion compensation
int chroma_height,int delta,int list,uint8_t *dest_y,
uint8_t *dest_cb,uint8_t *dest_cr,int src_x_offset,
int src_y_offset,qpel_mc_func *qpix_op,
- h264_chroma_mc_func chroma_op,vector_t *mv){
+ h264_chroma_mc_func chroma_op,cavs_vector *mv){
MpegEncContext * const s = &h->s;
const int mx= mv->x + src_x_offset*8;
const int my= mv->y + src_y_offset*8;
uint8_t *dest_y,uint8_t *dest_cb,uint8_t *dest_cr,
int x_offset, int y_offset,qpel_mc_func *qpix_put,
h264_chroma_mc_func chroma_put,qpel_mc_func *qpix_avg,
- h264_chroma_mc_func chroma_avg, vector_t *mv){
+ h264_chroma_mc_func chroma_avg, cavs_vector *mv){
qpel_mc_func *qpix_op= qpix_put;
h264_chroma_mc_func chroma_op= chroma_put;
}
}
-void ff_cavs_inter(AVSContext *h, enum mb_t mb_type) {
+void ff_cavs_inter(AVSContext *h, enum cavs_mb mb_type) {
if(ff_cavs_partition_flags[mb_type] == 0){ // 16x16
mc_part_std(h, 1, 8, 0, h->cy, h->cu, h->cv, 0, 0,
h->s.dsp.put_cavs_qpel_pixels_tab[0],
*
****************************************************************************/
+/* Temporal scaling of a motion vector by distp * scale_den[ref].
+ * The "+ 256 + (x>>31)" terms implement round-to-nearest before the
+ * >>9 (divide by 512): x>>31 contributes -1 for negative x, assuming
+ * an arithmetic right shift of a 32-bit int — standard on FFmpeg's
+ * supported compilers, though implementation-defined in ISO C. */
-static inline void scale_mv(AVSContext *h, int *d_x, int *d_y, vector_t *src, int distp) {
+static inline void scale_mv(AVSContext *h, int *d_x, int *d_y, cavs_vector *src, int distp) {
    int den = h->scale_den[src->ref];
    *d_x = (src->x*distp*den + 256 + (src->x>>31)) >> 9;
    *d_y = (src->y*distp*den + 256 + (src->y>>31)) >> 9;
}
-static inline void mv_pred_median(AVSContext *h, vector_t *mvP, vector_t *mvA, vector_t *mvB, vector_t *mvC) {
+static inline void mv_pred_median(AVSContext *h, cavs_vector *mvP,
+ cavs_vector *mvA, cavs_vector *mvB, cavs_vector *mvC) {
int ax, ay, bx, by, cx, cy;
int len_ab, len_bc, len_ca, len_mid;
}
}
-void ff_cavs_mv(AVSContext *h, enum mv_loc_t nP, enum mv_loc_t nC,
- enum mv_pred_t mode, enum block_t size, int ref) {
- vector_t *mvP = &h->mv[nP];
- vector_t *mvA = &h->mv[nP-1];
- vector_t *mvB = &h->mv[nP-4];
- vector_t *mvC = &h->mv[nC];
- const vector_t *mvP2 = NULL;
+void ff_cavs_mv(AVSContext *h, enum cavs_mv_loc nP, enum cavs_mv_loc nC,
+ enum cavs_mv_pred mode, enum cavs_block size, int ref) {
+ cavs_vector *mvP = &h->mv[nP];
+ cavs_vector *mvA = &h->mv[nP-1];
+ cavs_vector *mvB = &h->mv[nP-4];
+ cavs_vector *mvC = &h->mv[nC];
+ const cavs_vector *mvP2 = NULL;
mvP->ref = ref;
mvP->dist = h->dist[mvP->ref];
set_mvs(mvP,size);
}
+/*****************************************************************************
+ *
+ * macroblock level
+ *
+ ****************************************************************************/
+
+/**
+ * Initialise the motion-vector and intra-prediction-mode predictors
+ * for the current macroblock.
+ *
+ * Copies the predictors of the MBs above (B and C) from the top line
+ * into the per-MB cache, then invalidates any predictor whose source
+ * neighbour (B, C or D) is unavailable and updates h->flags
+ * accordingly (C_AVAIL/D_AVAIL are derived from B availability and
+ * the MB position within the row).
+ */
+void ff_cavs_init_mb(AVSContext *h) {
+    int i;
+
+    /* copy predictors from top line (MB B and C) into cache */
+    for(i=0;i<3;i++) {
+        h->mv[MV_FWD_B2+i] = h->top_mv[0][h->mbx*2+i];
+        h->mv[MV_BWD_B2+i] = h->top_mv[1][h->mbx*2+i];
+    }
+    h->pred_mode_Y[1] = h->top_pred_Y[h->mbx*2+0];
+    h->pred_mode_Y[2] = h->top_pred_Y[h->mbx*2+1];
+    /* clear top predictors if MB B is not available */
+    if(!(h->flags & B_AVAIL)) {
+        h->mv[MV_FWD_B2] = ff_cavs_un_mv;
+        h->mv[MV_FWD_B3] = ff_cavs_un_mv;
+        h->mv[MV_BWD_B2] = ff_cavs_un_mv;
+        h->mv[MV_BWD_B3] = ff_cavs_un_mv;
+        h->pred_mode_Y[1] = h->pred_mode_Y[2] = NOT_AVAIL;
+        /* without B, neither C (top-right) nor D (top-left) can exist */
+        h->flags &= ~(C_AVAIL|D_AVAIL);
+    } else if(h->mbx) {
+        h->flags |= D_AVAIL;
+    }
+    if(h->mbx == h->mb_width-1) //MB C not available
+        h->flags &= ~C_AVAIL;
+    /* clear top-right predictors if MB C is not available */
+    if(!(h->flags & C_AVAIL)) {
+        h->mv[MV_FWD_C2] = ff_cavs_un_mv;
+        h->mv[MV_BWD_C2] = ff_cavs_un_mv;
+    }
+    /* clear top-left predictors if MB D is not available */
+    if(!(h->flags & D_AVAIL)) {
+        h->mv[MV_FWD_D3] = ff_cavs_un_mv;
+        h->mv[MV_BWD_D3] = ff_cavs_un_mv;
+    }
+}
+
+/**
+ * Save the current macroblock's predictors for later macroblocks and
+ * advance the macroblock address: moves the sample pointers one MB to
+ * the right, shifts the cached right-column MVs into the left-predictor
+ * slots, stores the bottom-row MVs into the top line, and handles the
+ * wrap to the next MB row (resetting flags, left predictors and sample
+ * pointers).
+ *
+ * @return 0 if the end of the frame is reached, 1 otherwise
+ */
+int ff_cavs_next_mb(AVSContext *h) {
+    int i;
+
+    /* the MB just decoded becomes neighbour A of the next one */
+    h->flags |= A_AVAIL;
+    h->cy += 16;
+    h->cu += 8;
+    h->cv += 8;
+    /* copy mvs as predictors to the left */
+    for(i=0;i<=20;i+=4)
+        h->mv[i] = h->mv[i+2];
+    /* copy bottom mvs from cache to top line */
+    h->top_mv[0][h->mbx*2+0] = h->mv[MV_FWD_X2];
+    h->top_mv[0][h->mbx*2+1] = h->mv[MV_FWD_X3];
+    h->top_mv[1][h->mbx*2+0] = h->mv[MV_BWD_X2];
+    h->top_mv[1][h->mbx*2+1] = h->mv[MV_BWD_X3];
+    /* next MB address */
+    h->mbidx++;
+    h->mbx++;
+    if(h->mbx == h->mb_width) { //new mb line
+        /* at a row start there is no left neighbour (A), but B and C
+           exist for every MB except those handled by ff_cavs_init_mb */
+        h->flags = B_AVAIL|C_AVAIL;
+        /* clear left pred_modes */
+        h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
+        /* clear left mv predictors */
+        for(i=0;i<=20;i+=4)
+            h->mv[i] = ff_cavs_un_mv;
+        h->mbx = 0;
+        h->mby++;
+        /* re-calculate sample pointers */
+        h->cy = h->picture.data[0] + h->mby*16*h->l_stride;
+        h->cu = h->picture.data[1] + h->mby*8*h->c_stride;
+        h->cv = h->picture.data[2] + h->mby*8*h->c_stride;
+        if(h->mby == h->mb_height) { //frame end
+            return 0;
+        }
+    }
+    return 1;
+}
+
/*****************************************************************************
*
* frame level
h->c_stride = h->picture.linesize[1];
h->luma_scan[2] = 8*h->l_stride;
h->luma_scan[3] = 8*h->l_stride+8;
- h->mbx = h->mby = 0;
+ h->mbx = h->mby = h->mbidx = 0;
h->flags = 0;
}
+/* Allocate the per-row predictor lines (QP, MVs, intra modes, border
+ * samples) plus the per-frame co-located MV/type arrays and the DCT
+ * coefficient block.
+ * NOTE(review): the av_malloc results are not checked here — presumably
+ * a failure is caught elsewhere before use; verify against the callers. */
void ff_cavs_init_top_lines(AVSContext *h) {
    /* alloc top line of predictors */
    h->top_qp       = av_malloc( h->mb_width);
-    h->top_mv[0]    = av_malloc((h->mb_width*2+1)*sizeof(vector_t));
-    h->top_mv[1]    = av_malloc((h->mb_width*2+1)*sizeof(vector_t));
+    h->top_mv[0]    = av_malloc((h->mb_width*2+1)*sizeof(cavs_vector));
+    h->top_mv[1]    = av_malloc((h->mb_width*2+1)*sizeof(cavs_vector));
    h->top_pred_Y   = av_malloc( h->mb_width*2*sizeof(*h->top_pred_Y));
    h->top_border_y = av_malloc((h->mb_width+1)*16);
    h->top_border_u = av_malloc((h->mb_width)*10);
    h->top_border_v = av_malloc((h->mb_width)*10);
    /* alloc space for co-located MVs and types */
-    h->col_mv        = av_malloc( h->mb_width*h->mb_height*4*sizeof(vector_t));
+    h->col_mv        = av_malloc( h->mb_width*h->mb_height*4*sizeof(cavs_vector));
    h->col_type_base = av_malloc(h->mb_width*h->mb_height);
    h->block         = av_mallocz(64*sizeof(DCTELEM));
}
-int ff_cavs_init(AVCodecContext *avctx) {
+av_cold int ff_cavs_init(AVCodecContext *avctx) {
AVSContext *h = avctx->priv_data;
MpegEncContext * const s = &h->s;
return 0;
}
-int ff_cavs_end(AVCodecContext *avctx) {
+av_cold int ff_cavs_end(AVCodecContext *avctx) {
AVSContext *h = avctx->priv_data;
av_free(h->top_qp);