crap. They have misread the MPEG Systems spec!
*/
static void put_frame(AVFormatContext *s, ASFStream *stream, int timestamp,
- uint8_t *buf, int payload_size)
+ const uint8_t *buf, int payload_size)
{
ASFContext *asf = s->priv_data;
int frag_pos, frag_len, frag_len1;
static int asf_write_packet(AVFormatContext *s, int stream_index,
- uint8_t *buf, int size, int timestamp)
+ const uint8_t *buf, int size, int64_t timestamp)
{
ASFContext *asf = s->priv_data;
ASFStream *stream;
}
static int au_write_packet(AVFormatContext *s, int stream_index_ptr,
- uint8_t *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
ByteIOContext *pb = &s->pb;
put_buffer(pb, buf, size);
}
static int audio_write_packet(AVFormatContext *s1, int stream_index,
- uint8_t *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
AudioData *s = s1->priv_data;
int len, ret;
#define LIBAVFORMAT_VERSION_INT 0x000408
#define LIBAVFORMAT_VERSION "0.4.8"
-#define LIBAVFORMAT_BUILD 4607
-#define LIBAVFORMAT_BUILD_STR "4607"
+#define LIBAVFORMAT_BUILD 4608
+#define LIBAVFORMAT_BUILD_STR "4608"
#define LIBAVFORMAT_IDENT "FFmpeg" LIBAVFORMAT_VERSION "b" LIBAVFORMAT_BUILD_STR
enum CodecID audio_codec; /* default audio codec */
enum CodecID video_codec; /* default video codec */
int (*write_header)(struct AVFormatContext *);
- /* XXX: change prototype for 64 bit pts */
int (*write_packet)(struct AVFormatContext *,
int stream_index,
- unsigned char *buf, int size, int force_pts);
+ const uint8_t *buf, int size, int64_t pts);
int (*write_trailer)(struct AVFormatContext *);
/* can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER */
int flags;
}
static int avi_write_packet(AVFormatContext *s, int stream_index,
- uint8_t *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
AVIContext *avi = s->priv_data;
ByteIOContext *pb = &s->pb;
#define DO8(buf) DO4(buf); DO4(buf);
#define DO16(buf) DO8(buf); DO8(buf);
-static uint32_t adler32(uint32_t adler, uint8_t *buf, unsigned int len)
+static uint32_t adler32(uint32_t adler, const uint8_t *buf, unsigned int len)
{
unsigned long s1 = adler & 0xffff;
unsigned long s2 = (adler >> 16) & 0xffff;
static int crc_write_packet(struct AVFormatContext *s,
int stream_index,
- unsigned char *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
CRCState *crc = s->priv_data;
crc->crcval = adler32(crc->crcval, buf, size);
}
int dv_write_packet(struct AVFormatContext *s,
- int stream_index,
- unsigned char *buf, int size, int force_pts)
+ int stream_index,
+ const uint8_t *buf, int size, int64_t pts)
{
DVMuxContext *c = s->priv_data;
}
}
-void dv_inject_audio(DVMuxContext *c, uint8_t* pcm, uint8_t* frame_ptr)
+void dv_inject_audio(DVMuxContext *c, const uint8_t* pcm, uint8_t* frame_ptr)
{
int i, j, d, of;
for (i = 0; i < c->sys->difseg_size; i++) {
}
}
-void dv_inject_video(DVMuxContext *c, uint8_t* video_data, uint8_t* frame_ptr)
+void dv_inject_video(DVMuxContext *c, const uint8_t* video_data, uint8_t* frame_ptr)
{
int i, j;
int ptr = 0;
}
/* FIXME: The following three functions could be underengineered ;-) */
-void dv_assemble_frame(DVMuxContext *c, uint8_t* video, uint8_t* audio, int asize)
+void dv_assemble_frame(DVMuxContext *c, const uint8_t* video, const uint8_t* audio, int asize)
{
uint8_t pcm[8192];
uint8_t* frame = &c->frame_buf[0];
/* FIXME: we have to have more sensible approach than this one */
if (fifo_size(&c->audio_data, c->audio_data.rptr) + asize >= AVCODEC_MAX_AUDIO_FRAME_SIZE)
fprintf(stderr, "Can't process DV frame #%d. Insufficient video data or severe sync problem.\n", c->frames);
- fifo_write(&c->audio_data, audio, asize, &c->audio_data.wptr);
+ fifo_write(&c->audio_data, (uint8_t *)audio, asize, &c->audio_data.wptr);
}
}
} DVMuxContext;
void dv_format_frame(DVMuxContext *, uint8_t*);
-void dv_inject_audio(DVMuxContext *, uint8_t*, uint8_t*);
-void dv_inject_video(DVMuxContext *, uint8_t*, uint8_t*);
+void dv_inject_audio(DVMuxContext *, const uint8_t*, uint8_t*);
+void dv_inject_video(DVMuxContext *, const uint8_t*, uint8_t*);
int dv_extract_audio(uint8_t*, uint8_t*, AVCodecContext*);
int dv_audio_frame_size(const DVprofile*, int);
-void dv_assemble_frame(DVMuxContext *, uint8_t*, uint8_t*, int);
+void dv_assemble_frame(DVMuxContext *, const uint8_t*, const uint8_t*, int);
int dv_core_init(DVMuxContext *, AVStream*[]);
void dv_core_delete(DVMuxContext *);
/* 'first' is true if first data of a frame */
static void ffm_write_data(AVFormatContext *s,
- uint8_t *buf, int size,
+ const uint8_t *buf, int size,
int64_t pts, int first)
{
FFMContext *ffm = s->priv_data;
}
static int ffm_write_packet(AVFormatContext *s, int stream_index,
- uint8_t *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t force_pts)
{
AVStream *st = s->streams[stream_index];
FFMStream *fst = st->priv_data;
}
static int flv_write_packet(AVFormatContext *s, int stream_index,
- uint8_t *buf, int size, int timestamp)
+ const uint8_t *buf, int size, int64_t timestamp)
{
ByteIOContext *pb = &s->pb;
AVCodecContext *enc = &s->streams[stream_index]->codec;
static int gif_image_write_image(ByteIOContext *pb,
int x1, int y1, int width, int height,
- uint8_t *buf, int linesize, int pix_fmt)
+ const uint8_t *buf, int linesize, int pix_fmt)
{
PutBitContext p;
uint8_t buffer[200]; /* 100 * 9 / 8 = 113 */
int i, left, w, v;
- uint8_t *ptr;
+ const uint8_t *ptr;
/* image block */
put_byte(pb, 0x2c);
}
static int gif_write_video(AVFormatContext *s,
- AVCodecContext *enc, uint8_t *buf, int size)
+ AVCodecContext *enc, const uint8_t *buf, int size)
{
ByteIOContext *pb = &s->pb;
GIFContext *gif = s->priv_data;
}
static int gif_write_packet(AVFormatContext *s, int stream_index,
- uint8_t *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
AVCodecContext *codec = &s->streams[stream_index]->codec;
if (codec->codec_type == CODEC_TYPE_AUDIO)
}
static int img_write_packet(AVFormatContext *s, int stream_index,
- uint8_t *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
VideoData *img = s->priv_data;
AVStream *st = s->streams[stream_index];
}
static int mov_write_packet(AVFormatContext *s, int stream_index,
- uint8_t *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
MOVContext *mov = s->priv_data;
ByteIOContext *pb = &s->pb;
}
static int mp3_write_packet(struct AVFormatContext *s, int stream_index,
- unsigned char *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
put_buffer(&s->pb, buf, size);
put_flush_packet(&s->pb);
}
static int mpeg_mux_write_packet(AVFormatContext *ctx, int stream_index,
- uint8_t *buf, int size, int pts)
+ const uint8_t *buf, int size, int64_t pts)
{
MpegMuxContext *s = ctx->priv_data;
AVStream *st = ctx->streams[stream_index];
/* write DVB SI sections */
-static uint32_t crc_table[256] = {
+static const uint32_t crc_table[256] = {
0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9, 0x130476dc, 0x17c56b6b,
0x1a864db2, 0x1e475005, 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, 0x4c11db70, 0x48d0c6c7,
}
static int mpegts_write_packet(AVFormatContext *s, int stream_index,
- uint8_t *buf, int size, int pts1)
+ const uint8_t *buf, int size, int64_t pts1)
{
AVStream *st = s->streams[stream_index];
MpegTSWriteStream *ts_st = st->priv_data;
}
static int mpjpeg_write_packet(AVFormatContext *s, int stream_index,
- uint8_t *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
uint8_t buf1[256];
}
static int single_jpeg_write_packet(AVFormatContext *s, int stream_index,
- uint8_t *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
put_buffer(&s->pb, buf, size);
put_flush_packet(&s->pb);
}
static int nut_write_packet(AVFormatContext *s, int stream_index,
- uint8_t *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
NUTContext *nut = s->priv_data;
ByteIOContext *bc = &s->pb;
put_packetheader(nut, bc, size+20);
put_v(bc, stream_index);
- put_s(bc, force_pts); /* lsb_timestamp */
+ put_s(bc, pts); /* lsb_timestamp */
update_packetheader(nut, bc, size);
put_buffer(bc, buf, size);
}
static int raw_write_packet(struct AVFormatContext *s, int stream_index,
- unsigned char *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
put_buffer(&s->pb, buf, size);
put_flush_packet(&s->pb);
static int null_write_packet(struct AVFormatContext *s,
int stream_index,
- unsigned char *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
return 0;
}
return 0;
}
-static int rm_write_audio(AVFormatContext *s, uint8_t *buf, int size)
+static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size)
{
uint8_t *buf1;
RMContext *rm = s->priv_data;
return 0;
}
-static int rm_write_video(AVFormatContext *s, uint8_t *buf, int size)
+static int rm_write_video(AVFormatContext *s, const uint8_t *buf, int size)
{
RMContext *rm = s->priv_data;
ByteIOContext *pb = &s->pb;
}
static int rm_write_packet(AVFormatContext *s, int stream_index,
- uint8_t *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
if (s->streams[stream_index]->codec.codec_type ==
CODEC_TYPE_AUDIO)
/* send an rtp packet. sequence number is incremented, but the caller
must update the timestamp itself */
-static void rtp_send_data(AVFormatContext *s1, uint8_t *buf1, int len)
+static void rtp_send_data(AVFormatContext *s1, const uint8_t *buf1, int len)
{
RTPContext *s = s1->priv_data;
/* send an integer number of samples and compute time stamp and fill
the rtp send buffer before sending. */
static void rtp_send_samples(AVFormatContext *s1,
- uint8_t *buf1, int size, int sample_size)
+ const uint8_t *buf1, int size, int sample_size)
{
RTPContext *s = s1->priv_data;
int len, max_packet_size, n;
/* NOTE: we suppose that exactly one frame is given as argument here */
/* XXX: test it */
static void rtp_send_mpegaudio(AVFormatContext *s1,
- uint8_t *buf1, int size)
+ const uint8_t *buf1, int size)
{
RTPContext *s = s1->priv_data;
AVStream *st = s1->streams[0];
/* NOTE: a single frame must be passed with sequence header if
needed. XXX: use slices. */
static void rtp_send_mpegvideo(AVFormatContext *s1,
- uint8_t *buf1, int size)
+ const uint8_t *buf1, int size)
{
RTPContext *s = s1->priv_data;
AVStream *st = s1->streams[0];
}
static void rtp_send_raw(AVFormatContext *s1,
- uint8_t *buf1, int size)
+ const uint8_t *buf1, int size)
{
RTPContext *s = s1->priv_data;
AVStream *st = s1->streams[0];
/* write an RTP packet. 'buf1' must contain a single specific frame. */
static int rtp_write_packet(AVFormatContext *s1, int stream_index,
- uint8_t *buf1, int size, int force_pts)
+ const uint8_t *buf1, int size, int64_t pts)
{
RTPContext *s = s1->priv_data;
AVStream *st = s1->streams[0];
if (s->first_packet || rtcp_bytes >= 28) {
/* compute NTP time */
/* XXX: 90 kHz timestamp hardcoded */
- ntp_time = ((int64_t)force_pts << 28) / 5625;
+ ntp_time = (pts << 28) / 5625;
rtcp_send_sr(s1, ntp_time);
s->last_octet_count = s->octet_count;
s->first_packet = 0;
}
static int swf_write_video(AVFormatContext *s,
- AVCodecContext *enc, uint8_t *buf, int size)
+ AVCodecContext *enc, const uint8_t *buf, int size)
{
ByteIOContext *pb = &s->pb;
static int tag_id = 0;
return 0;
}
-static int swf_write_audio(AVFormatContext *s, uint8_t *buf, int size)
+static int swf_write_audio(AVFormatContext *s, const uint8_t *buf, int size)
{
ByteIOContext *pb = &s->pb;
}
static int swf_write_packet(AVFormatContext *s, int stream_index,
- uint8_t *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
AVCodecContext *codec = &s->streams[stream_index]->codec;
if (codec->codec_type == CODEC_TYPE_AUDIO)
}
static int wav_write_packet(AVFormatContext *s, int stream_index_ptr,
- uint8_t *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
ByteIOContext *pb = &s->pb;
put_buffer(pb, buf, size);
}
static int yuv4_write_packet(AVFormatContext *s, int stream_index,
- uint8_t *buf, int size, int force_pts)
+ const uint8_t *buf, int size, int64_t pts)
{
AVStream *st = s->streams[stream_index];
ByteIOContext *pb = &s->pb;