ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include "config_components.h"
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/channel_layout.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/mem.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/fifo.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/time.h"
42 #include "libavutil/bprint.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/tx.h"
48 #include "libswresample/swresample.h"
49 
50 #include "libavfilter/avfilter.h"
51 #include "libavfilter/buffersink.h"
52 #include "libavfilter/buffersrc.h"
53 
54 #include <SDL.h>
55 #include <SDL_thread.h>
56 
57 #include "cmdutils.h"
58 #include "ffplay_renderer.h"
59 #include "opt_common.h"
60 
61 const char program_name[] = "ffplay";
62 const int program_birth_year = 2003;
63 
64 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
65 #define MIN_FRAMES 25
66 #define EXTERNAL_CLOCK_MIN_FRAMES 2
67 #define EXTERNAL_CLOCK_MAX_FRAMES 10
68 
69 /* Minimum SDL audio buffer size, in samples. */
70 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
71 /* Calculate the actual buffer size, keeping in mind that we should not cause too frequent audio callbacks */
72 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
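/* Taken together, the two constants above bound the SDL audio buffer: it is sized so
 * that the callback rate stays at or below SDL_AUDIO_MAX_CALLBACKS_PER_SEC while never
 * dropping under SDL_AUDIO_MIN_BUFFER_SIZE samples. Roughly, at 48000 Hz this gives
 * 48000 / 30 = 1600 samples, rounded up to a power of two (2048), i.e. about 23
 * callbacks per second; the exact computation happens later when the SDL audio device
 * is opened. */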
73 
74 /* Step size for volume control in dB */
75 #define SDL_VOLUME_STEP (0.75)
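/* A 0.75 dB step corresponds to a factor of about 10^(0.75/20) ~= 1.09 per key press,
 * i.e. the linear SDL volume changes by roughly 9% each time; update_volume() below
 * performs the dB <-> linear conversion against SDL_MIX_MAXVOLUME. */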
76 
77 /* no AV sync correction is done if below the minimum AV sync threshold */
78 #define AV_SYNC_THRESHOLD_MIN 0.04
79 /* AV sync correction is done if above the maximum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MAX 0.1
81 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
82 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
83 /* no AV correction is done if the error is too big */
84 #define AV_NOSYNC_THRESHOLD 10.0
85 
86 /* maximum audio speed change to get correct sync */
87 #define SAMPLE_CORRECTION_PERCENT_MAX 10
88 
89 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
90 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
91 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
92 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
93 
94 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
95 #define AUDIO_DIFF_AVG_NB 20
96 
97 /* poll for a possible required screen refresh at least this often; should be less than 1/fps */
98 #define REFRESH_RATE 0.01
99 
100 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
101 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
102 #define SAMPLE_ARRAY_SIZE (8 * 65536)
103 
104 #define CURSOR_HIDE_DELAY 1000000
105 
106 #define USE_ONEPASS_SUBTITLE_RENDER 1
107 
108 typedef struct MyAVPacketList {
109  AVPacket *pkt;
110  int serial;
111 } MyAVPacketList;
112 
113 typedef struct PacketQueue {
114  AVFifo *pkt_list;
115  int nb_packets;
116  int size;
117  int64_t duration;
118  int abort_request;
119  int serial;
120  SDL_mutex *mutex;
121  SDL_cond *cond;
122 } PacketQueue;
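/* The serial field is what makes seeking cheap: packet_queue_flush() increments
 * q->serial, and every queued packet remembers the serial that was current when it was
 * added. Readers (the decoder threads) compare the packet serial with the queue serial
 * and drop anything stale instead of waiting for old data to drain. The same serial is
 * carried over into decoded Frames and into the Clocks below so that obsolete
 * timestamps can be detected as well. */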
123 
124 #define VIDEO_PICTURE_QUEUE_SIZE 3
125 #define SUBPICTURE_QUEUE_SIZE 16
126 #define SAMPLE_QUEUE_SIZE 9
127 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
128 
129 typedef struct AudioParams {
130  int freq;
131  AVChannelLayout ch_layout;
132  enum AVSampleFormat fmt;
133  int frame_size;
134  int bytes_per_sec;
135 } AudioParams;
136 
137 typedef struct Clock {
138  double pts; /* clock base */
139  double pts_drift; /* clock base minus time at which we updated the clock */
140  double last_updated;
141  double speed;
142  int serial; /* clock is based on a packet with this serial */
143  int paused;
144  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
145 } Clock;
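/* The clock stores pts_drift = pts - last_updated rather than a start time, so the
 * current value can be read without remembering when it was set: for speed 1.0,
 * get_clock() simply returns pts_drift + now. For example, if pts 10.0 was set 0.5 s
 * ago, get_clock() yields 10.5. The extra (now - last_updated) * (1.0 - speed) term in
 * get_clock() rescales the elapsed time when the clock runs at a speed other than 1.0. */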
146 
147 typedef struct FrameData {
148  int64_t pkt_pos;
149 } FrameData;
150 
151 /* Common struct for handling all types of decoded data and allocated render buffers. */
152 typedef struct Frame {
153  AVFrame *frame;
154  AVSubtitle sub;
155  int serial;
156  double pts; /* presentation timestamp for the frame */
157  double duration; /* estimated duration of the frame */
158  int64_t pos; /* byte position of the frame in the input file */
159  int width;
160  int height;
161  int format;
162  AVRational sar;
163  int uploaded;
164  int flip_v;
165 } Frame;
166 
167 typedef struct FrameQueue {
168  Frame queue[FRAME_QUEUE_SIZE];
169  int rindex;
170  int windex;
171  int size;
172  int max_size;
173  int keep_last;
174  int rindex_shown;
175  SDL_mutex *mutex;
176  SDL_cond *cond;
177  PacketQueue *pktq;
178 } FrameQueue;
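/* FrameQueue is a small ring buffer protected by the mutex/cond pair: the decoder
 * writes at windex, the display side reads at rindex. When keep_last is set the most
 * recently shown frame is retained (rindex_shown becomes 1) so the current picture can
 * be redrawn at any time, e.g. after a window event; this is why the peek helpers below
 * index with (rindex + rindex_shown) and why frame_queue_nb_remaining() subtracts
 * rindex_shown from size. */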
179 
180 enum {
181  AV_SYNC_AUDIO_MASTER, /* default choice */
182  AV_SYNC_VIDEO_MASTER,
183  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
184 };
185 
186 typedef struct Decoder {
187  AVPacket *pkt;
188  PacketQueue *queue;
189  AVCodecContext *avctx;
190  int pkt_serial;
191  int finished;
192  int packet_pending;
193  SDL_cond *empty_queue_cond;
194  int64_t start_pts;
195  AVRational start_pts_tb;
196  int64_t next_pts;
197  AVRational next_pts_tb;
198  SDL_Thread *decoder_tid;
199 } Decoder;
200 
201 typedef struct VideoState {
202  SDL_Thread *read_tid;
206  int paused;
209  int seek_req;
215  int realtime;
216 
220 
224 
228 
230 
232 
233  double audio_clock;
235  double audio_diff_cum; /* used for AV difference average computation */
242  uint8_t *audio_buf;
243  uint8_t *audio_buf1;
244  unsigned int audio_buf_size; /* in bytes */
245  unsigned int audio_buf1_size;
246  int audio_buf_index; /* in bytes */
249  int muted;
256 
257  enum ShowMode {
258  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
259  } show_mode;
266  float *real_data;
268  int xpos;
270  SDL_Texture *vis_texture;
271  SDL_Texture *sub_texture;
272  SDL_Texture *vid_texture;
273 
277 
278  double frame_timer;
284  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
286  int eof;
287 
288  char *filename;
290  int step;
291 
293  AVFilterContext *in_video_filter; // the first filter in the video chain
294  AVFilterContext *out_video_filter; // the last filter in the video chain
295  AVFilterContext *in_audio_filter; // the first filter in the audio chain
296  AVFilterContext *out_audio_filter; // the last filter in the audio chain
297  AVFilterGraph *agraph; // audio filter graph
298 
300 
302 } VideoState;
303 
304 /* options specified by the user */
305 static const AVInputFormat *file_iformat;
306 static const char *input_filename;
307 static const char *window_title;
308 static int default_width = 640;
309 static int default_height = 480;
310 static int screen_width = 0;
311 static int screen_height = 0;
312 static int screen_left = SDL_WINDOWPOS_CENTERED;
313 static int screen_top = SDL_WINDOWPOS_CENTERED;
314 static int audio_disable;
315 static int video_disable;
316 static int subtitle_disable;
317 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
318 static int seek_by_bytes = -1;
319 static float seek_interval = 10;
320 static int display_disable;
321 static int borderless;
322 static int alwaysontop;
323 static int startup_volume = 100;
324 static int show_status = -1;
325 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
326 static int64_t start_time = AV_NOPTS_VALUE;
327 static int64_t duration = AV_NOPTS_VALUE;
328 static int fast = 0;
329 static int genpts = 0;
330 static int lowres = 0;
331 static int decoder_reorder_pts = -1;
332 static int autoexit;
333 static int exit_on_keydown;
334 static int exit_on_mousedown;
335 static int loop = 1;
336 static int framedrop = -1;
337 static int infinite_buffer = -1;
338 static enum ShowMode show_mode = SHOW_MODE_NONE;
339 static const char *audio_codec_name;
340 static const char *subtitle_codec_name;
341 static const char *video_codec_name;
342 double rdftspeed = 0.02;
343 static int64_t cursor_last_shown;
344 static int cursor_hidden = 0;
345 static const char **vfilters_list = NULL;
346 static int nb_vfilters = 0;
347 static char *afilters = NULL;
348 static int autorotate = 1;
349 static int find_stream_info = 1;
350 static int filter_nbthreads = 0;
351 static int enable_vulkan = 0;
352 static char *vulkan_params = NULL;
353 static const char *hwaccel = NULL;
354 
355 /* current context */
356 static int is_full_screen;
357 static int64_t audio_callback_time;
358 
359 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
360 
361 static SDL_Window *window;
362 static SDL_Renderer *renderer;
363 static SDL_RendererInfo renderer_info = {0};
364 static SDL_AudioDeviceID audio_dev;
365 
365 
366 static VkRenderer *vk_renderer;
367 
368 static const struct TextureFormatEntry {
369  enum AVPixelFormat format;
370  int texture_fmt;
371 } sdl_texture_format_map[] = {
372  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
373  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
374  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
375  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
376  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
377  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
378  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
379  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
380  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
381  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
382  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
383  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
384  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
385  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
386  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
387  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
388  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
389  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
390  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
391 };
392 
393 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
394 {
395  int ret = GROW_ARRAY(vfilters_list, nb_vfilters);
396  if (ret < 0)
397  return ret;
398 
399  vfilters_list[nb_vfilters - 1] = av_strdup(arg);
400  if (!vfilters_list[nb_vfilters - 1])
401  return AVERROR(ENOMEM);
402 
403  return 0;
404 }
405 
406 static inline
407 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
408  enum AVSampleFormat fmt2, int64_t channel_count2)
409 {
410  /* If channel count == 1, planar and non-planar formats are the same */
411  if (channel_count1 == 1 && channel_count2 == 1)
412  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
413  else
414  return channel_count1 != channel_count2 || fmt1 != fmt2;
415 }
416 
417 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
418 {
419  MyAVPacketList pkt1;
420  int ret;
421 
422  if (q->abort_request)
423  return -1;
424 
425 
426  pkt1.pkt = pkt;
427  pkt1.serial = q->serial;
428 
429  ret = av_fifo_write(q->pkt_list, &pkt1, 1);
430  if (ret < 0)
431  return ret;
432  q->nb_packets++;
433  q->size += pkt1.pkt->size + sizeof(pkt1);
434  q->duration += pkt1.pkt->duration;
435  /* XXX: should duplicate packet data in DV case */
436  SDL_CondSignal(q->cond);
437  return 0;
438 }
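/* Note that q->size accounts for pkt->size plus sizeof(MyAVPacketList) per entry, so
 * it approximates the memory actually held by the queue rather than just the payload
 * bytes; the read thread later compares the combined audio/video/subtitle queue sizes
 * against MAX_QUEUE_SIZE to decide when to stop reading ahead. */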
439 
440 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
441 {
442  AVPacket *pkt1;
443  int ret;
444 
445  pkt1 = av_packet_alloc();
446  if (!pkt1) {
447  av_packet_unref(pkt);
448  return -1;
449  }
450  av_packet_move_ref(pkt1, pkt);
451 
452  SDL_LockMutex(q->mutex);
453  ret = packet_queue_put_private(q, pkt1);
454  SDL_UnlockMutex(q->mutex);
455 
456  if (ret < 0)
457  av_packet_free(&pkt1);
458 
459  return ret;
460 }
461 
462 static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
463 {
464  pkt->stream_index = stream_index;
465  return packet_queue_put(q, pkt);
466 }
467 
468 /* packet queue handling */
469 static int packet_queue_init(PacketQueue *q)
470 {
471  memset(q, 0, sizeof(PacketQueue));
472  q->pkt_list = av_fifo_alloc2(1, sizeof(MyAVPacketList), AV_FIFO_FLAG_AUTO_GROW);
473  if (!q->pkt_list)
474  return AVERROR(ENOMEM);
475  q->mutex = SDL_CreateMutex();
476  if (!q->mutex) {
477  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
478  return AVERROR(ENOMEM);
479  }
480  q->cond = SDL_CreateCond();
481  if (!q->cond) {
482  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
483  return AVERROR(ENOMEM);
484  }
485  q->abort_request = 1;
486  return 0;
487 }
488 
489 static void packet_queue_flush(PacketQueue *q)
490 {
491  MyAVPacketList pkt1;
492 
493  SDL_LockMutex(q->mutex);
494  while (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0)
495  av_packet_free(&pkt1.pkt);
496  q->nb_packets = 0;
497  q->size = 0;
498  q->duration = 0;
499  q->serial++;
500  SDL_UnlockMutex(q->mutex);
501 }
502 
503 static void packet_queue_destroy(PacketQueue *q)
504 {
505  packet_queue_flush(q);
506  av_fifo_freep2(&q->pkt_list);
507  SDL_DestroyMutex(q->mutex);
508  SDL_DestroyCond(q->cond);
509 }
510 
511 static void packet_queue_abort(PacketQueue *q)
512 {
513  SDL_LockMutex(q->mutex);
514 
515  q->abort_request = 1;
516 
517  SDL_CondSignal(q->cond);
518 
519  SDL_UnlockMutex(q->mutex);
520 }
521 
522 static void packet_queue_start(PacketQueue *q)
523 {
524  SDL_LockMutex(q->mutex);
525  q->abort_request = 0;
526  q->serial++;
527  SDL_UnlockMutex(q->mutex);
528 }
529 
530 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
531 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
532 {
533  MyAVPacketList pkt1;
534  int ret;
535 
536  SDL_LockMutex(q->mutex);
537 
538  for (;;) {
539  if (q->abort_request) {
540  ret = -1;
541  break;
542  }
543 
544  if (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0) {
545  q->nb_packets--;
546  q->size -= pkt1.pkt->size + sizeof(pkt1);
547  q->duration -= pkt1.pkt->duration;
548  av_packet_move_ref(pkt, pkt1.pkt);
549  if (serial)
550  *serial = pkt1.serial;
551  av_packet_free(&pkt1.pkt);
552  ret = 1;
553  break;
554  } else if (!block) {
555  ret = 0;
556  break;
557  } else {
558  SDL_CondWait(q->cond, q->mutex);
559  }
560  }
561  SDL_UnlockMutex(q->mutex);
562  return ret;
563 }
564 
565 static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
566  memset(d, 0, sizeof(Decoder));
567  d->pkt = av_packet_alloc();
568  if (!d->pkt)
569  return AVERROR(ENOMEM);
570  d->avctx = avctx;
571  d->queue = queue;
572  d->empty_queue_cond = empty_queue_cond;
573  d->start_pts = AV_NOPTS_VALUE;
574  d->pkt_serial = -1;
575  return 0;
576 }
577 
578 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
579  int ret = AVERROR(EAGAIN);
580 
581  for (;;) {
582  if (d->queue->serial == d->pkt_serial) {
583  do {
584  if (d->queue->abort_request)
585  return -1;
586 
587  switch (d->avctx->codec_type) {
588  case AVMEDIA_TYPE_VIDEO:
589  ret = avcodec_receive_frame(d->avctx, frame);
590  if (ret >= 0) {
591  if (decoder_reorder_pts == -1) {
592  frame->pts = frame->best_effort_timestamp;
593  } else if (!decoder_reorder_pts) {
594  frame->pts = frame->pkt_dts;
595  }
596  }
597  break;
598  case AVMEDIA_TYPE_AUDIO:
599  ret = avcodec_receive_frame(d->avctx, frame);
600  if (ret >= 0) {
601  AVRational tb = (AVRational){1, frame->sample_rate};
602  if (frame->pts != AV_NOPTS_VALUE)
603  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
604  else if (d->next_pts != AV_NOPTS_VALUE)
605  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
606  if (frame->pts != AV_NOPTS_VALUE) {
607  d->next_pts = frame->pts + frame->nb_samples;
608  d->next_pts_tb = tb;
609  }
610  }
611  break;
612  }
613  if (ret == AVERROR_EOF) {
614  d->finished = d->pkt_serial;
615  avcodec_flush_buffers(d->avctx);
616  return 0;
617  }
618  if (ret >= 0)
619  return 1;
620  } while (ret != AVERROR(EAGAIN));
621  }
622 
623  do {
624  if (d->queue->nb_packets == 0)
625  SDL_CondSignal(d->empty_queue_cond);
626  if (d->packet_pending) {
627  d->packet_pending = 0;
628  } else {
629  int old_serial = d->pkt_serial;
630  if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
631  return -1;
632  if (old_serial != d->pkt_serial) {
633  avcodec_flush_buffers(d->avctx);
634  d->finished = 0;
635  d->next_pts = d->start_pts;
636  d->next_pts_tb = d->start_pts_tb;
637  }
638  }
639  if (d->queue->serial == d->pkt_serial)
640  break;
641  av_packet_unref(d->pkt);
642  } while (1);
643 
644  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
645  int got_frame = 0;
646  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
647  if (ret < 0) {
648  ret = AVERROR(EAGAIN);
649  } else {
650  if (got_frame && !d->pkt->data) {
651  d->packet_pending = 1;
652  }
653  ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
654  }
655  av_packet_unref(d->pkt);
656  } else {
657  if (d->pkt->buf && !d->pkt->opaque_ref) {
658  FrameData *fd;
659 
660  d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd));
661  if (!d->pkt->opaque_ref)
662  return AVERROR(ENOMEM);
663  fd = (FrameData*)d->pkt->opaque_ref->data;
664  fd->pkt_pos = d->pkt->pos;
665  }
666 
667  if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
668  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
669  d->packet_pending = 1;
670  } else {
671  av_packet_unref(d->pkt);
672  }
673  }
674  }
675 }
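/* decoder_decode_frame() is the single decode loop shared by the audio, video and
 * subtitle decoders. Per iteration it roughly does:
 *
 *     while avcodec_receive_frame() succeeds        -> return 1 (got a frame)
 *     fetch the next packet matching the current queue serial, dropping stale ones
 *     avcodec_send_packet()                         -> feed the decoder and loop
 *
 * A return of 0 means end of stream for the current serial (d->finished records the
 * serial so the caller can tell a real EOF from "no frame available yet"); -1 means
 * the queue was aborted. Subtitles bypass send/receive and use
 * avcodec_decode_subtitle2() instead. */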
676 
677 static void decoder_destroy(Decoder *d) {
678  av_packet_free(&d->pkt);
679  avcodec_free_context(&d->avctx);
680 }
681 
682 static void frame_queue_unref_item(Frame *vp)
683 {
684  av_frame_unref(vp->frame);
685  avsubtitle_free(&vp->sub);
686 }
687 
688 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
689 {
690  int i;
691  memset(f, 0, sizeof(FrameQueue));
692  if (!(f->mutex = SDL_CreateMutex())) {
693  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
694  return AVERROR(ENOMEM);
695  }
696  if (!(f->cond = SDL_CreateCond())) {
697  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
698  return AVERROR(ENOMEM);
699  }
700  f->pktq = pktq;
701  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
702  f->keep_last = !!keep_last;
703  for (i = 0; i < f->max_size; i++)
704  if (!(f->queue[i].frame = av_frame_alloc()))
705  return AVERROR(ENOMEM);
706  return 0;
707 }
708 
709 static void frame_queue_destroy(FrameQueue *f)
710 {
711  int i;
712  for (i = 0; i < f->max_size; i++) {
713  Frame *vp = &f->queue[i];
714  frame_queue_unref_item(vp);
715  av_frame_free(&vp->frame);
716  }
717  SDL_DestroyMutex(f->mutex);
718  SDL_DestroyCond(f->cond);
719 }
720 
721 static void frame_queue_signal(FrameQueue *f)
722 {
723  SDL_LockMutex(f->mutex);
724  SDL_CondSignal(f->cond);
725  SDL_UnlockMutex(f->mutex);
726 }
727 
728 static Frame *frame_queue_peek(FrameQueue *f)
729 {
730  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
731 }
732 
733 static Frame *frame_queue_peek_next(FrameQueue *f)
734 {
735  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
736 }
737 
738 static Frame *frame_queue_peek_last(FrameQueue *f)
739 {
740  return &f->queue[f->rindex];
741 }
742 
743 static Frame *frame_queue_peek_writable(FrameQueue *f)
744 {
745  /* wait until we have space to put a new frame */
746  SDL_LockMutex(f->mutex);
747  while (f->size >= f->max_size &&
748  !f->pktq->abort_request) {
749  SDL_CondWait(f->cond, f->mutex);
750  }
751  SDL_UnlockMutex(f->mutex);
752 
753  if (f->pktq->abort_request)
754  return NULL;
755 
756  return &f->queue[f->windex];
757 }
758 
759 static Frame *frame_queue_peek_readable(FrameQueue *f)
760 {
761  /* wait until we have a readable new frame */
762  SDL_LockMutex(f->mutex);
763  while (f->size - f->rindex_shown <= 0 &&
764  !f->pktq->abort_request) {
765  SDL_CondWait(f->cond, f->mutex);
766  }
767  SDL_UnlockMutex(f->mutex);
768 
769  if (f->pktq->abort_request)
770  return NULL;
771 
772  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
773 }
774 
775 static void frame_queue_push(FrameQueue *f)
776 {
777  if (++f->windex == f->max_size)
778  f->windex = 0;
779  SDL_LockMutex(f->mutex);
780  f->size++;
781  SDL_CondSignal(f->cond);
782  SDL_UnlockMutex(f->mutex);
783 }
784 
785 static void frame_queue_next(FrameQueue *f)
786 {
787  if (f->keep_last && !f->rindex_shown) {
788  f->rindex_shown = 1;
789  return;
790  }
791  frame_queue_unref_item(&f->queue[f->rindex]);
792  if (++f->rindex == f->max_size)
793  f->rindex = 0;
794  SDL_LockMutex(f->mutex);
795  f->size--;
796  SDL_CondSignal(f->cond);
797  SDL_UnlockMutex(f->mutex);
798 }
799 
800 /* return the number of undisplayed frames in the queue */
801 static int frame_queue_nb_remaining(FrameQueue *f)
802 {
803  return f->size - f->rindex_shown;
804 }
805 
806 /* return last shown position */
807 static int64_t frame_queue_last_pos(FrameQueue *f)
808 {
809  Frame *fp = &f->queue[f->rindex];
810  if (f->rindex_shown && fp->serial == f->pktq->serial)
811  return fp->pos;
812  else
813  return -1;
814 }
815 
816 static void decoder_abort(Decoder *d, FrameQueue *fq)
817 {
818  packet_queue_abort(d->queue);
819  frame_queue_signal(fq);
820  SDL_WaitThread(d->decoder_tid, NULL);
821  d->decoder_tid = NULL;
822  packet_queue_flush(d->queue);
823 }
824 
825 static inline void fill_rectangle(int x, int y, int w, int h)
826 {
827  SDL_Rect rect;
828  rect.x = x;
829  rect.y = y;
830  rect.w = w;
831  rect.h = h;
832  if (w && h)
833  SDL_RenderFillRect(renderer, &rect);
834 }
835 
836 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
837 {
838  Uint32 format;
839  int access, w, h;
840  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
841  void *pixels;
842  int pitch;
843  if (*texture)
844  SDL_DestroyTexture(*texture);
845  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
846  return -1;
847  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
848  return -1;
849  if (init_texture) {
850  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
851  return -1;
852  memset(pixels, 0, pitch * new_height);
853  SDL_UnlockTexture(*texture);
854  }
855  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
856  }
857  return 0;
858 }
859 
860 static void calculate_display_rect(SDL_Rect *rect,
861  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
862  int pic_width, int pic_height, AVRational pic_sar)
863 {
864  AVRational aspect_ratio = pic_sar;
865  int64_t width, height, x, y;
866 
867  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
868  aspect_ratio = av_make_q(1, 1);
869 
870  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
871 
872  /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
873  height = scr_height;
874  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
875  if (width > scr_width) {
876  width = scr_width;
877  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
878  }
879  x = (scr_width - width) / 2;
880  y = (scr_height - height) / 2;
881  rect->x = scr_xleft + x;
882  rect->y = scr_ytop + y;
883  rect->w = FFMAX((int)width, 1);
884  rect->h = FFMAX((int)height, 1);
885 }
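/* Example: a 1920x1080 picture with a 1:1 sample aspect ratio shown in a 1280x1024
 * area gives aspect_ratio = 16/9; starting from height = 1024 the width would be 1820,
 * which exceeds 1280, so the width is clamped to 1280 and the height becomes 720 (both
 * forced even), and the rectangle is centered at x = 0, y = 152. */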
886 
887 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
888 {
889  int i;
890  *sdl_blendmode = SDL_BLENDMODE_NONE;
891  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
892  if (format == AV_PIX_FMT_RGB32 ||
893  format == AV_PIX_FMT_RGB32_1 ||
894  format == AV_PIX_FMT_BGR32 ||
895  format == AV_PIX_FMT_BGR32_1)
896  *sdl_blendmode = SDL_BLENDMODE_BLEND;
897  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map); i++) {
898  if (format == sdl_texture_format_map[i].format) {
899  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
900  return;
901  }
902  }
903 }
904 
905 static int upload_texture(SDL_Texture **tex, AVFrame *frame)
906 {
907  int ret = 0;
908  Uint32 sdl_pix_fmt;
909  SDL_BlendMode sdl_blendmode;
910  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
911  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
912  return -1;
913  switch (sdl_pix_fmt) {
914  case SDL_PIXELFORMAT_IYUV:
915  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
916  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
917  frame->data[1], frame->linesize[1],
918  frame->data[2], frame->linesize[2]);
919  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
920  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
921  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
922  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
923  } else {
924  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
925  return -1;
926  }
927  break;
928  default:
929  if (frame->linesize[0] < 0) {
930  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
931  } else {
932  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
933  }
934  break;
935  }
936  return ret;
937 }
938 
943 };
944 
948 };
949 
950 static void set_sdl_yuv_conversion_mode(AVFrame *frame)
951 {
952 #if SDL_VERSION_ATLEAST(2,0,8)
953  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
954  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
955  if (frame->color_range == AVCOL_RANGE_JPEG)
956  mode = SDL_YUV_CONVERSION_JPEG;
957  else if (frame->colorspace == AVCOL_SPC_BT709)
958  mode = SDL_YUV_CONVERSION_BT709;
959  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M)
960  mode = SDL_YUV_CONVERSION_BT601;
961  }
962  SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */
963 #endif
964 }
965 
966 static void video_image_display(VideoState *is)
967 {
968  Frame *vp;
969  Frame *sp = NULL;
970  SDL_Rect rect;
971 
972  vp = frame_queue_peek_last(&is->pictq);
973  if (vk_renderer) {
974  vk_renderer_display(vk_renderer, vp->frame);
975  return;
976  }
977 
978  if (is->subtitle_st) {
979  if (frame_queue_nb_remaining(&is->subpq) > 0) {
980  sp = frame_queue_peek(&is->subpq);
981 
982  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
983  if (!sp->uploaded) {
984  uint8_t* pixels[4];
985  int pitch[4];
986  int i;
987  if (!sp->width || !sp->height) {
988  sp->width = vp->width;
989  sp->height = vp->height;
990  }
991  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
992  return;
993 
994  for (i = 0; i < sp->sub.num_rects; i++) {
995  AVSubtitleRect *sub_rect = sp->sub.rects[i];
996 
997  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
998  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
999  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
1000  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
1001 
1002  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1003  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1004  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1005  0, NULL, NULL, NULL);
1006  if (!is->sub_convert_ctx) {
1007  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1008  return;
1009  }
1010  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1011  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1012  0, sub_rect->h, pixels, pitch);
1013  SDL_UnlockTexture(is->sub_texture);
1014  }
1015  }
1016  sp->uploaded = 1;
1017  }
1018  } else
1019  sp = NULL;
1020  }
1021  }
1022 
1023  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1024  set_sdl_yuv_conversion_mode(vp->frame);
1025 
1026  if (!vp->uploaded) {
1027  if (upload_texture(&is->vid_texture, vp->frame) < 0) {
1028  set_sdl_yuv_conversion_mode(NULL);
1029  return;
1030  }
1031  vp->uploaded = 1;
1032  vp->flip_v = vp->frame->linesize[0] < 0;
1033  }
1034 
1035  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1036  set_sdl_yuv_conversion_mode(NULL);
1037  if (sp) {
1038 #if USE_ONEPASS_SUBTITLE_RENDER
1039  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1040 #else
1041  int i;
1042  double xratio = (double)rect.w / (double)sp->width;
1043  double yratio = (double)rect.h / (double)sp->height;
1044  for (i = 0; i < sp->sub.num_rects; i++) {
1045  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1046  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1047  .y = rect.y + sub_rect->y * yratio,
1048  .w = sub_rect->w * xratio,
1049  .h = sub_rect->h * yratio};
1050  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1051  }
1052 #endif
1053  }
1054 }
1055 
1056 static inline int compute_mod(int a, int b)
1057 {
1058  return a < 0 ? a%b + b : a%b;
1059 }
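/* compute_mod() is a mathematical modulo whose result always lies in [0, b): for
 * instance compute_mod(-3, SAMPLE_ARRAY_SIZE) wraps around to SAMPLE_ARRAY_SIZE - 3,
 * where the plain C % operator would yield a negative remainder. It is used below to
 * walk the circular sample_array backwards from the current write position. */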
1060 
1061 static void video_audio_display(VideoState *s)
1062 {
1063  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1064  int ch, channels, h, h2;
1065  int64_t time_diff;
1066  int rdft_bits, nb_freq;
1067 
1068  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1069  ;
1070  nb_freq = 1 << (rdft_bits - 1);
1071 
1072  /* compute display index: center on currently output samples */
1073  channels = s->audio_tgt.ch_layout.nb_channels;
1074  nb_display_channels = channels;
1075  if (!s->paused) {
1076  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1077  n = 2 * channels;
1078  delay = s->audio_write_buf_size;
1079  delay /= n;
1080 
1081  /* to be more precise, we take into account the time spent since
1082  the last buffer computation */
1083  if (audio_callback_time) {
1084  time_diff = av_gettime_relative() - audio_callback_time;
1085  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1086  }
1087 
1088  delay += 2 * data_used;
1089  if (delay < data_used)
1090  delay = data_used;
1091 
1092  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1093  if (s->show_mode == SHOW_MODE_WAVES) {
1094  h = INT_MIN;
1095  for (i = 0; i < 1000; i += channels) {
1096  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1097  int a = s->sample_array[idx];
1098  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1099  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1100  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1101  int score = a - d;
1102  if (h < score && (b ^ c) < 0) {
1103  h = score;
1104  i_start = idx;
1105  }
1106  }
1107  }
1108 
1109  s->last_i_start = i_start;
1110  } else {
1111  i_start = s->last_i_start;
1112  }
1113 
1114  if (s->show_mode == SHOW_MODE_WAVES) {
1115  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1116 
1117  /* total height for one channel */
1118  h = s->height / nb_display_channels;
1119  /* graph height / 2 */
1120  h2 = (h * 9) / 20;
1121  for (ch = 0; ch < nb_display_channels; ch++) {
1122  i = i_start + ch;
1123  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1124  for (x = 0; x < s->width; x++) {
1125  y = (s->sample_array[i] * h2) >> 15;
1126  if (y < 0) {
1127  y = -y;
1128  ys = y1 - y;
1129  } else {
1130  ys = y1;
1131  }
1132  fill_rectangle(s->xleft + x, ys, 1, y);
1133  i += channels;
1134  if (i >= SAMPLE_ARRAY_SIZE)
1135  i -= SAMPLE_ARRAY_SIZE;
1136  }
1137  }
1138 
1139  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1140 
1141  for (ch = 1; ch < nb_display_channels; ch++) {
1142  y = s->ytop + ch * h;
1143  fill_rectangle(s->xleft, y, s->width, 1);
1144  }
1145  } else {
1146  int err = 0;
1147  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1148  return;
1149 
1150  if (s->xpos >= s->width)
1151  s->xpos = 0;
1152  nb_display_channels= FFMIN(nb_display_channels, 2);
1153  if (rdft_bits != s->rdft_bits) {
1154  const float rdft_scale = 1.0;
1155  av_tx_uninit(&s->rdft);
1156  av_freep(&s->real_data);
1157  av_freep(&s->rdft_data);
1158  s->rdft_bits = rdft_bits;
1159  s->real_data = av_malloc_array(nb_freq, 4 *sizeof(*s->real_data));
1160  s->rdft_data = av_malloc_array(nb_freq + 1, 2 *sizeof(*s->rdft_data));
1161  err = av_tx_init(&s->rdft, &s->rdft_fn, AV_TX_FLOAT_RDFT,
1162  0, 1 << rdft_bits, &rdft_scale, 0);
1163  }
1164  if (err < 0 || !s->rdft_data) {
1165  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1166  s->show_mode = SHOW_MODE_WAVES;
1167  } else {
1168  float *data_in[2];
1169  AVComplexFloat *data[2];
1170  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1171  uint32_t *pixels;
1172  int pitch;
1173  for (ch = 0; ch < nb_display_channels; ch++) {
1174  data_in[ch] = s->real_data + 2 * nb_freq * ch;
1175  data[ch] = s->rdft_data + nb_freq * ch;
1176  i = i_start + ch;
1177  for (x = 0; x < 2 * nb_freq; x++) {
1178  double w = (x-nb_freq) * (1.0 / nb_freq);
1179  data_in[ch][x] = s->sample_array[i] * (1.0 - w * w);
1180  i += channels;
1181  if (i >= SAMPLE_ARRAY_SIZE)
1182  i -= SAMPLE_ARRAY_SIZE;
1183  }
1184  s->rdft_fn(s->rdft, data[ch], data_in[ch], sizeof(float));
1185  data[ch][0].im = data[ch][nb_freq].re;
1186  data[ch][nb_freq].re = 0;
1187  }
1188  /* Least efficient way to do this; we should of course access it
1189  * directly, but it is more than fast enough. */
1190  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1191  pitch >>= 2;
1192  pixels += pitch * s->height;
1193  for (y = 0; y < s->height; y++) {
1194  double w = 1 / sqrt(nb_freq);
1195  int a = sqrt(w * sqrt(data[0][y].re * data[0][y].re + data[0][y].im * data[0][y].im));
1196  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][y].re, data[1][y].im))
1197  : a;
1198  a = FFMIN(a, 255);
1199  b = FFMIN(b, 255);
1200  pixels -= pitch;
1201  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1202  }
1203  SDL_UnlockTexture(s->vis_texture);
1204  }
1205  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1206  }
1207  if (!s->paused)
1208  s->xpos++;
1209  }
1210 }
1211 
1212 static void stream_component_close(VideoState *is, int stream_index)
1213 {
1214  AVFormatContext *ic = is->ic;
1215  AVCodecParameters *codecpar;
1216 
1217  if (stream_index < 0 || stream_index >= ic->nb_streams)
1218  return;
1219  codecpar = ic->streams[stream_index]->codecpar;
1220 
1221  switch (codecpar->codec_type) {
1222  case AVMEDIA_TYPE_AUDIO:
1223  decoder_abort(&is->auddec, &is->sampq);
1224  SDL_CloseAudioDevice(audio_dev);
1225  decoder_destroy(&is->auddec);
1226  swr_free(&is->swr_ctx);
1227  av_freep(&is->audio_buf1);
1228  is->audio_buf1_size = 0;
1229  is->audio_buf = NULL;
1230 
1231  if (is->rdft) {
1232  av_tx_uninit(&is->rdft);
1233  av_freep(&is->real_data);
1234  av_freep(&is->rdft_data);
1235  is->rdft = NULL;
1236  is->rdft_bits = 0;
1237  }
1238  break;
1239  case AVMEDIA_TYPE_VIDEO:
1240  decoder_abort(&is->viddec, &is->pictq);
1241  decoder_destroy(&is->viddec);
1242  break;
1243  case AVMEDIA_TYPE_SUBTITLE:
1244  decoder_abort(&is->subdec, &is->subpq);
1245  decoder_destroy(&is->subdec);
1246  break;
1247  default:
1248  break;
1249  }
1250 
1251  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1252  switch (codecpar->codec_type) {
1253  case AVMEDIA_TYPE_AUDIO:
1254  is->audio_st = NULL;
1255  is->audio_stream = -1;
1256  break;
1257  case AVMEDIA_TYPE_VIDEO:
1258  is->video_st = NULL;
1259  is->video_stream = -1;
1260  break;
1261  case AVMEDIA_TYPE_SUBTITLE:
1262  is->subtitle_st = NULL;
1263  is->subtitle_stream = -1;
1264  break;
1265  default:
1266  break;
1267  }
1268 }
1269 
1270 static void stream_close(VideoState *is)
1271 {
1272  /* XXX: use a special url_shutdown call to abort parse cleanly */
1273  is->abort_request = 1;
1274  SDL_WaitThread(is->read_tid, NULL);
1275 
1276  /* close each stream */
1277  if (is->audio_stream >= 0)
1278  stream_component_close(is, is->audio_stream);
1279  if (is->video_stream >= 0)
1280  stream_component_close(is, is->video_stream);
1281  if (is->subtitle_stream >= 0)
1282  stream_component_close(is, is->subtitle_stream);
1283 
1284  avformat_close_input(&is->ic);
1285 
1286  packet_queue_destroy(&is->videoq);
1287  packet_queue_destroy(&is->audioq);
1288  packet_queue_destroy(&is->subtitleq);
1289 
1290  /* free all pictures */
1291  frame_queue_destroy(&is->pictq);
1292  frame_queue_destroy(&is->sampq);
1293  frame_queue_destroy(&is->subpq);
1294  SDL_DestroyCond(is->continue_read_thread);
1295  sws_freeContext(is->sub_convert_ctx);
1296  av_free(is->filename);
1297  if (is->vis_texture)
1298  SDL_DestroyTexture(is->vis_texture);
1299  if (is->vid_texture)
1300  SDL_DestroyTexture(is->vid_texture);
1301  if (is->sub_texture)
1302  SDL_DestroyTexture(is->sub_texture);
1303  av_free(is);
1304 }
1305 
1306 static void do_exit(VideoState *is)
1307 {
1308  if (is) {
1309  stream_close(is);
1310  }
1311  if (renderer)
1312  SDL_DestroyRenderer(renderer);
1313  if (vk_renderer)
1314  vk_renderer_destroy(vk_renderer);
1315  if (window)
1316  SDL_DestroyWindow(window);
1317  uninit_opts();
1318  for (int i = 0; i < nb_vfilters; i++)
1319  av_freep(&vfilters_list[i]);
1320  av_freep(&vfilters_list);
1321  av_freep(&video_codec_name);
1322  av_freep(&audio_codec_name);
1323  av_freep(&subtitle_codec_name);
1324  av_freep(&input_filename);
1325  avformat_network_deinit();
1326  if (show_status)
1327  printf("\n");
1328  SDL_Quit();
1329  av_log(NULL, AV_LOG_QUIET, "%s", "");
1330  exit(0);
1331 }
1332 
1333 static void sigterm_handler(int sig)
1334 {
1335  exit(123);
1336 }
1337 
1338 static void set_default_window_size(int width, int height, AVRational sar)
1339 {
1340  SDL_Rect rect;
1341  int max_width = screen_width ? screen_width : INT_MAX;
1342  int max_height = screen_height ? screen_height : INT_MAX;
1343  if (max_width == INT_MAX && max_height == INT_MAX)
1344  max_height = height;
1345  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1346  default_width = rect.w;
1347  default_height = rect.h;
1348 }
1349 
1350 static int video_open(VideoState *is)
1351 {
1352  int w,h;
1353 
1354  w = screen_width ? screen_width : default_width;
1355  h = screen_height ? screen_height : default_height;
1356 
1357  if (!window_title)
1358  window_title = input_filename;
1359  SDL_SetWindowTitle(window, window_title);
1360 
1361  SDL_SetWindowSize(window, w, h);
1362  SDL_SetWindowPosition(window, screen_left, screen_top);
1363  if (is_full_screen)
1364  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1365  SDL_ShowWindow(window);
1366 
1367  is->width = w;
1368  is->height = h;
1369 
1370  return 0;
1371 }
1372 
1373 /* display the current picture, if any */
1374 static void video_display(VideoState *is)
1375 {
1376  if (!is->width)
1377  video_open(is);
1378 
1379  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1380  SDL_RenderClear(renderer);
1381  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1382  video_audio_display(is);
1383  else if (is->video_st)
1384  video_image_display(is);
1385  SDL_RenderPresent(renderer);
1386 }
1387 
1388 static double get_clock(Clock *c)
1389 {
1390  if (*c->queue_serial != c->serial)
1391  return NAN;
1392  if (c->paused) {
1393  return c->pts;
1394  } else {
1395  double time = av_gettime_relative() / 1000000.0;
1396  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1397  }
1398 }
1399 
1400 static void set_clock_at(Clock *c, double pts, int serial, double time)
1401 {
1402  c->pts = pts;
1403  c->last_updated = time;
1404  c->pts_drift = c->pts - time;
1405  c->serial = serial;
1406 }
1407 
1408 static void set_clock(Clock *c, double pts, int serial)
1409 {
1410  double time = av_gettime_relative() / 1000000.0;
1411  set_clock_at(c, pts, serial, time);
1412 }
1413 
1414 static void set_clock_speed(Clock *c, double speed)
1415 {
1416  set_clock(c, get_clock(c), c->serial);
1417  c->speed = speed;
1418 }
1419 
1420 static void init_clock(Clock *c, int *queue_serial)
1421 {
1422  c->speed = 1.0;
1423  c->paused = 0;
1424  c->queue_serial = queue_serial;
1425  set_clock(c, NAN, -1);
1426 }
1427 
1428 static void sync_clock_to_slave(Clock *c, Clock *slave)
1429 {
1430  double clock = get_clock(c);
1431  double slave_clock = get_clock(slave);
1432  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1433  set_clock(c, slave_clock, slave->serial);
1434 }
1435 
1436 static int get_master_sync_type(VideoState *is) {
1437  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1438  if (is->video_st)
1439  return AV_SYNC_VIDEO_MASTER;
1440  else
1441  return AV_SYNC_AUDIO_MASTER;
1442  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1443  if (is->audio_st)
1444  return AV_SYNC_AUDIO_MASTER;
1445  else
1446  return AV_SYNC_EXTERNAL_CLOCK;
1447  } else {
1448  return AV_SYNC_EXTERNAL_CLOCK;
1449  }
1450 }
1451 
1452 /* get the current master clock value */
1453 static double get_master_clock(VideoState *is)
1454 {
1455  double val;
1456 
1457  switch (get_master_sync_type(is)) {
1458  case AV_SYNC_VIDEO_MASTER:
1459  val = get_clock(&is->vidclk);
1460  break;
1461  case AV_SYNC_AUDIO_MASTER:
1462  val = get_clock(&is->audclk);
1463  break;
1464  default:
1465  val = get_clock(&is->extclk);
1466  break;
1467  }
1468  return val;
1469 }
1470 
1471 static void check_external_clock_speed(VideoState *is) {
1472  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1473  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1474  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1475  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1476  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1477  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1478  } else {
1479  double speed = is->extclk.speed;
1480  if (speed != 1.0)
1481  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1482  }
1483 }
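/* In other words, when a realtime stream is slaved to the external clock, the clock is
 * slowed by EXTERNAL_CLOCK_SPEED_STEP while the packet queues are nearly empty, sped up
 * while they are overly full, and otherwise nudged back towards 1.0. With the constants
 * above the speed stays within [0.900, 1.010], so the correction remains small. */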
1484 
1485 /* seek in the stream */
1486 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
1487 {
1488  if (!is->seek_req) {
1489  is->seek_pos = pos;
1490  is->seek_rel = rel;
1491  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1492  if (by_bytes)
1493  is->seek_flags |= AVSEEK_FLAG_BYTE;
1494  is->seek_req = 1;
1495  SDL_CondSignal(is->continue_read_thread);
1496  }
1497 }
1498 
1499 /* pause or resume the video */
1500 static void stream_toggle_pause(VideoState *is)
1501 {
1502  if (is->paused) {
1503  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1504  if (is->read_pause_return != AVERROR(ENOSYS)) {
1505  is->vidclk.paused = 0;
1506  }
1507  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1508  }
1509  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1510  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1511 }
1512 
1513 static void toggle_pause(VideoState *is)
1514 {
1515  stream_toggle_pause(is);
1516  is->step = 0;
1517 }
1518 
1519 static void toggle_mute(VideoState *is)
1520 {
1521  is->muted = !is->muted;
1522 }
1523 
1524 static void update_volume(VideoState *is, int sign, double step)
1525 {
1526  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1527  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1528  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1529 }
1530 
1531 static void step_to_next_frame(VideoState *is)
1532 {
1533  /* if the stream is paused, unpause it, then step */
1534  if (is->paused)
1535  stream_toggle_pause(is);
1536  is->step = 1;
1537 }
1538 
1539 static double compute_target_delay(double delay, VideoState *is)
1540 {
1541  double sync_threshold, diff = 0;
1542 
1543  /* update delay to follow master synchronisation source */
1544  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1545  /* if video is slave, we try to correct big delays by
1546  duplicating or deleting a frame */
1547  diff = get_clock(&is->vidclk) - get_master_clock(is);
1548 
1549  /* skip or repeat the frame; we take the delay into account
1550  when computing the threshold. It is still unclear whether
1551  this is the best heuristic. */
1552  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1553  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1554  if (diff <= -sync_threshold)
1555  delay = FFMAX(0, delay + diff);
1556  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1557  delay = delay + diff;
1558  else if (diff >= sync_threshold)
1559  delay = 2 * delay;
1560  }
1561  }
1562 
1563  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1564  delay, -diff);
1565 
1566  return delay;
1567 }
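/* Worked example (video slaved to the audio clock): with a nominal frame delay of
 * 0.040 s the sync threshold clamps to 0.040. If the video clock is 0.1 s ahead of the
 * master (diff >= threshold), the delay is doubled to 0.080 s and the current frame
 * stays on screen longer; if it is 0.1 s behind (diff <= -threshold), the delay
 * collapses to 0 so playback catches up. Differences larger than max_frame_duration
 * are treated as timestamp discontinuities and left uncorrected. */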
1568 
1569 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1570  if (vp->serial == nextvp->serial) {
1571  double duration = nextvp->pts - vp->pts;
1572  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1573  return vp->duration;
1574  else
1575  return duration;
1576  } else {
1577  return 0.0;
1578  }
1579 }
1580 
1581 static void update_video_pts(VideoState *is, double pts, int serial)
1582 {
1583  /* update current video pts */
1584  set_clock(&is->vidclk, pts, serial);
1585  sync_clock_to_slave(&is->extclk, &is->vidclk);
1586 }
1587 
1588 /* called to display each frame */
1589 static void video_refresh(void *opaque, double *remaining_time)
1590 {
1591  VideoState *is = opaque;
1592  double time;
1593 
1594  Frame *sp, *sp2;
1595 
1596  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1597  check_external_clock_speed(is);
1598 
1599  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1600  time = av_gettime_relative() / 1000000.0;
1601  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1602  video_display(is);
1603  is->last_vis_time = time;
1604  }
1605  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1606  }
1607 
1608  if (is->video_st) {
1609 retry:
1610  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1611  // nothing to do, no picture to display in the queue
1612  } else {
1613  double last_duration, duration, delay;
1614  Frame *vp, *lastvp;
1615 
1616  /* dequeue the picture */
1617  lastvp = frame_queue_peek_last(&is->pictq);
1618  vp = frame_queue_peek(&is->pictq);
1619 
1620  if (vp->serial != is->videoq.serial) {
1621  frame_queue_next(&is->pictq);
1622  goto retry;
1623  }
1624 
1625  if (lastvp->serial != vp->serial)
1626  is->frame_timer = av_gettime_relative() / 1000000.0;
1627 
1628  if (is->paused)
1629  goto display;
1630 
1631  /* compute nominal last_duration */
1632  last_duration = vp_duration(is, lastvp, vp);
1633  delay = compute_target_delay(last_duration, is);
1634 
1635  time= av_gettime_relative()/1000000.0;
1636  if (time < is->frame_timer + delay) {
1637  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1638  goto display;
1639  }
1640 
1641  is->frame_timer += delay;
1642  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1643  is->frame_timer = time;
1644 
1645  SDL_LockMutex(is->pictq.mutex);
1646  if (!isnan(vp->pts))
1647  update_video_pts(is, vp->pts, vp->serial);
1648  SDL_UnlockMutex(is->pictq.mutex);
1649 
1650  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1651  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1652  duration = vp_duration(is, vp, nextvp);
1653  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1654  is->frame_drops_late++;
1655  frame_queue_next(&is->pictq);
1656  goto retry;
1657  }
1658  }
1659 
1660  if (is->subtitle_st) {
1661  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1662  sp = frame_queue_peek(&is->subpq);
1663 
1664  if (frame_queue_nb_remaining(&is->subpq) > 1)
1665  sp2 = frame_queue_peek_next(&is->subpq);
1666  else
1667  sp2 = NULL;
1668 
1669  if (sp->serial != is->subtitleq.serial
1670  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1671  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1672  {
1673  if (sp->uploaded) {
1674  int i;
1675  for (i = 0; i < sp->sub.num_rects; i++) {
1676  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1677  uint8_t *pixels;
1678  int pitch, j;
1679 
1680  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1681  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1682  memset(pixels, 0, sub_rect->w << 2);
1683  SDL_UnlockTexture(is->sub_texture);
1684  }
1685  }
1686  }
1687  frame_queue_next(&is->subpq);
1688  } else {
1689  break;
1690  }
1691  }
1692  }
1693 
1694  frame_queue_next(&is->pictq);
1695  is->force_refresh = 1;
1696 
1697  if (is->step && !is->paused)
1698  stream_toggle_pause(is);
1699  }
1700 display:
1701  /* display picture */
1702  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1703  video_display(is);
1704  }
1705  is->force_refresh = 0;
1706  if (show_status) {
1707  AVBPrint buf;
1708  static int64_t last_time;
1709  int64_t cur_time;
1710  int aqsize, vqsize, sqsize;
1711  double av_diff;
1712 
1713  cur_time = av_gettime_relative();
1714  if (!last_time || (cur_time - last_time) >= 30000) {
1715  aqsize = 0;
1716  vqsize = 0;
1717  sqsize = 0;
1718  if (is->audio_st)
1719  aqsize = is->audioq.size;
1720  if (is->video_st)
1721  vqsize = is->videoq.size;
1722  if (is->subtitle_st)
1723  sqsize = is->subtitleq.size;
1724  av_diff = 0;
1725  if (is->audio_st && is->video_st)
1726  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1727  else if (is->video_st)
1728  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1729  else if (is->audio_st)
1730  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1731 
1732  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1733  av_bprintf(&buf,
1734  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB \r",
1735  get_master_clock(is),
1736  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1737  av_diff,
1738  is->frame_drops_early + is->frame_drops_late,
1739  aqsize / 1024,
1740  vqsize / 1024,
1741  sqsize);
1742 
1743  if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
1744  fprintf(stderr, "%s", buf.str);
1745  else
1746  av_log(NULL, AV_LOG_INFO, "%s", buf.str);
1747 
1748  fflush(stderr);
1749  av_bprint_finalize(&buf, NULL);
1750 
1751  last_time = cur_time;
1752  }
1753  }
1754 }
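/* The status line assembled above (printed at most every 30 ms) looks roughly like:
 *
 *     12.34 A-V: -0.012 fd=   3 aq=  120KB vq= 2048KB sq=    0B
 *
 * i.e. the master clock, the audio-video clock difference (M-V or M-A when only one
 * stream is present), the number of frames dropped early plus late, and the current
 * audio, video and subtitle packet queue sizes. */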
1755 
1756 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1757 {
1758  Frame *vp;
1759 
1760 #if defined(DEBUG_SYNC)
1761  printf("frame_type=%c pts=%0.3f\n",
1762  av_get_picture_type_char(src_frame->pict_type), pts);
1763 #endif
1764 
1765  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1766  return -1;
1767 
1768  vp->sar = src_frame->sample_aspect_ratio;
1769  vp->uploaded = 0;
1770 
1771  vp->width = src_frame->width;
1772  vp->height = src_frame->height;
1773  vp->format = src_frame->format;
1774 
1775  vp->pts = pts;
1776  vp->duration = duration;
1777  vp->pos = pos;
1778  vp->serial = serial;
1779 
1780  set_default_window_size(vp->width, vp->height, vp->sar);
1781 
1782  av_frame_move_ref(vp->frame, src_frame);
1783  frame_queue_push(&is->pictq);
1784  return 0;
1785 }
1786 
1787 static int get_video_frame(VideoState *is, AVFrame *frame)
1788 {
1789  int got_picture;
1790 
1791  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1792  return -1;
1793 
1794  if (got_picture) {
1795  double dpts = NAN;
1796 
1797  if (frame->pts != AV_NOPTS_VALUE)
1798  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1799 
1800  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1801 
1801 
1802  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1803  if (frame->pts != AV_NOPTS_VALUE) {
1804  double diff = dpts - get_master_clock(is);
1805  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1806  diff - is->frame_last_filter_delay < 0 &&
1807  is->viddec.pkt_serial == is->vidclk.serial &&
1808  is->videoq.nb_packets) {
1809  is->frame_drops_early++;
1810  av_frame_unref(frame);
1811  got_picture = 0;
1812  }
1813  }
1814  }
1815  }
1816 
1817  return got_picture;
1818 }
1819 
1820 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1821  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1822 {
1823  int ret, i;
1824  int nb_filters = graph->nb_filters;
1825  AVFilterInOut *outputs = NULL, *inputs = NULL;
1826 
1827  if (filtergraph) {
1828  outputs = avfilter_inout_alloc();
1829  inputs = avfilter_inout_alloc();
1830  if (!outputs || !inputs) {
1831  ret = AVERROR(ENOMEM);
1832  goto fail;
1833  }
1834 
1835  outputs->name = av_strdup("in");
1836  outputs->filter_ctx = source_ctx;
1837  outputs->pad_idx = 0;
1838  outputs->next = NULL;
1839 
1840  inputs->name = av_strdup("out");
1841  inputs->filter_ctx = sink_ctx;
1842  inputs->pad_idx = 0;
1843  inputs->next = NULL;
1844 
1845  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1846  goto fail;
1847  } else {
1848  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1849  goto fail;
1850  }
1851 
1852  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1853  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1854  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1855 
1856  ret = avfilter_graph_config(graph, NULL);
1857 fail:
1858  avfilter_inout_free(&outputs);
1859  avfilter_inout_free(&inputs);
1860  return ret;
1861 }
1862 
1863 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1864 {
1865  enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
1866  char sws_flags_str[512] = "";
1867  int ret;
1868  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1869  AVCodecParameters *codecpar = is->video_st->codecpar;
1870  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1871  const AVDictionaryEntry *e = NULL;
1872  int nb_pix_fmts = 0;
1873  int i, j;
1874  AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
1875 
1876  if (!par)
1877  return AVERROR(ENOMEM);
1878 
1879  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1880  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map); j++) {
1881  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1882  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1883  break;
1884  }
1885  }
1886  }
1887 
1888  while ((e = av_dict_iterate(sws_dict, e))) {
1889  if (!strcmp(e->key, "sws_flags")) {
1890  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1891  } else
1892  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1893  }
1894  if (strlen(sws_flags_str))
1895  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1896 
1897  graph->scale_sws_opts = av_strdup(sws_flags_str);
1898 
1899 
1900  filt_src = avfilter_graph_alloc_filter(graph, avfilter_get_by_name("buffer"),
1901  "ffplay_buffer");
1902  if (!filt_src) {
1903  ret = AVERROR(ENOMEM);
1904  goto fail;
1905  }
1906 
1907  par->format = frame->format;
1908  par->time_base = is->video_st->time_base;
1909  par->width = frame->width;
1910  par->height = frame->height;
1911  par->sample_aspect_ratio = codecpar->sample_aspect_ratio;
1912  par->color_space = frame->colorspace;
1913  par->color_range = frame->color_range;
1914  par->alpha_mode = frame->alpha_mode;
1915  par->frame_rate = fr;
1916  par->hw_frames_ctx = frame->hw_frames_ctx;
1917  ret = av_buffersrc_parameters_set(filt_src, par);
1918  if (ret < 0)
1919  goto fail;
1920 
1921  ret = avfilter_init_dict(filt_src, NULL);
1922  if (ret < 0)
1923  goto fail;
1924 
1925  filt_out = avfilter_graph_alloc_filter(graph, avfilter_get_by_name("buffersink"),
1926  "ffplay_buffersink");
1927  if (!filt_out) {
1928  ret = AVERROR(ENOMEM);
1929  goto fail;
1930  }
1931 
1932  if ((ret = av_opt_set_array(filt_out, "pixel_formats", AV_OPT_SEARCH_CHILDREN,
1933  0, nb_pix_fmts, AV_OPT_TYPE_PIXEL_FMT, pix_fmts)) < 0)
1934  goto fail;
1935  if (!vk_renderer &&
1936  (ret = av_opt_set_array(filt_out, "colorspaces", AV_OPT_SEARCH_CHILDREN,
1939  goto fail;
1940 
1941  if ((ret = av_opt_set_array(filt_out, "alphamodes", AV_OPT_SEARCH_CHILDREN,
1944  goto fail;
1945 
1946  ret = avfilter_init_dict(filt_out, NULL);
1947  if (ret < 0)
1948  goto fail;
1949 
1950  last_filter = filt_out;
1951 
1952 /* Note: this macro adds a filter before the lastly added filter, so the
1953  * processing order of the filters is in reverse */
1954 #define INSERT_FILT(name, arg) do { \
1955  AVFilterContext *filt_ctx; \
1956  \
1957  ret = avfilter_graph_create_filter(&filt_ctx, \
1958  avfilter_get_by_name(name), \
1959  "ffplay_" name, arg, NULL, graph); \
1960  if (ret < 0) \
1961  goto fail; \
1962  \
1963  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1964  if (ret < 0) \
1965  goto fail; \
1966  \
1967  last_filter = filt_ctx; \
1968 } while (0)
1969 
1970  if (autorotate) {
1971  double theta = 0.0;
1972  int32_t *displaymatrix = NULL;
1973  AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
1974  if (sd)
1975  displaymatrix = (int32_t *)sd->data;
1976  if (!displaymatrix) {
1977  const AVPacketSideData *psd = av_packet_side_data_get(is->video_st->codecpar->coded_side_data,
1978  is->video_st->codecpar->nb_coded_side_data,
1979  AV_PKT_DATA_DISPLAYMATRIX);
1980  if (psd)
1981  displaymatrix = (int32_t *)psd->data;
1982  }
1983  theta = get_rotation(displaymatrix);
1984 
1985  if (fabs(theta - 90) < 1.0) {
1986  INSERT_FILT("transpose", displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1987  } else if (fabs(theta - 180) < 1.0) {
1988  if (displaymatrix[0] < 0)
1989  INSERT_FILT("hflip", NULL);
1990  if (displaymatrix[4] < 0)
1991  INSERT_FILT("vflip", NULL);
1992  } else if (fabs(theta - 270) < 1.0) {
1993  INSERT_FILT("transpose", displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1994  } else if (fabs(theta) > 1.0) {
1995  char rotate_buf[64];
1996  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1997  INSERT_FILT("rotate", rotate_buf);
1998  } else {
1999  if (displaymatrix && displaymatrix[4] < 0)
2000  INSERT_FILT("vflip", NULL);
2001  }
2002  }
2003 
2004  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
2005  goto fail;
2006 
2007  is->in_video_filter = filt_src;
2008  is->out_video_filter = filt_out;
2009 
2010 fail:
2011  av_freep(&par);
2012  return ret;
2013 }
2014 
2015 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
2016 {
2017  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
2018  char aresample_swr_opts[512] = "";
2019  const AVDictionaryEntry *e = NULL;
2020  AVBPrint bp;
2021  char asrc_args[256];
2022  int ret;
2023 
2024  avfilter_graph_free(&is->agraph);
2025  if (!(is->agraph = avfilter_graph_alloc()))
2026  return AVERROR(ENOMEM);
2027  is->agraph->nb_threads = filter_nbthreads;
2028 
2029  av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
2030 
2031  while ((e = av_dict_iterate(swr_opts, e)))
2032  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
2033  if (strlen(aresample_swr_opts))
2034  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
2035  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
2036 
2037  av_channel_layout_describe_bprint(&is->audio_filter_src.ch_layout, &bp);
2038 
2039  ret = snprintf(asrc_args, sizeof(asrc_args),
2040  "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s",
2041  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
2042  1, is->audio_filter_src.freq, bp.str);
2043 
2044  ret = avfilter_graph_create_filter(&filt_asrc,
2045  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
2046  asrc_args, NULL, is->agraph);
2047  if (ret < 0)
2048  goto end;
2049 
2050  filt_asink = avfilter_graph_alloc_filter(is->agraph, avfilter_get_by_name("abuffersink"),
2051  "ffplay_abuffersink");
2052  if (!filt_asink) {
2053  ret = AVERROR(ENOMEM);
2054  goto end;
2055  }
2056 
2057  if ((ret = av_opt_set(filt_asink, "sample_formats", "s16", AV_OPT_SEARCH_CHILDREN)) < 0)
2058  goto end;
2059 
2060  if (force_output_format) {
2061  if ((ret = av_opt_set_array(filt_asink, "channel_layouts", AV_OPT_SEARCH_CHILDREN,
2062  0, 1, AV_OPT_TYPE_CHLAYOUT, &is->audio_tgt.ch_layout)) < 0)
2063  goto end;
2064  if ((ret = av_opt_set_array(filt_asink, "samplerates", AV_OPT_SEARCH_CHILDREN,
2065  0, 1, AV_OPT_TYPE_INT, &is->audio_tgt.freq)) < 0)
2066  goto end;
2067  }
2068 
2069  ret = avfilter_init_dict(filt_asink, NULL);
2070  if (ret < 0)
2071  goto end;
2072 
2073  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2074  goto end;
2075 
2076  is->in_audio_filter = filt_asrc;
2077  is->out_audio_filter = filt_asink;
2078 
2079 end:
2080  if (ret < 0)
2081  avfilter_graph_free(&is->agraph);
2082  av_bprint_finalize(&bp, NULL);
2083 
2084  return ret;
2085 }
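/* Illustration (assumed typical input, not taken from a real run): for a
 * 48 kHz stereo AV_SAMPLE_FMT_FLTP source, the abuffer argument string built
 * above would read
 *
 *     sample_rate=48000:sample_fmt=fltp:time_base=1/48000:channel_layout=stereo
 *
 * i.e. the source time base is 1/sample_rate, so audio timestamps are counted
 * in samples.
 */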
2086 
2087 static int audio_thread(void *arg)
2088 {
2089  VideoState *is = arg;
2090  AVFrame *frame = av_frame_alloc();
2091  Frame *af;
2092  int last_serial = -1;
2093  int reconfigure;
2094  int got_frame = 0;
2095  AVRational tb;
2096  int ret = 0;
2097 
2098  if (!frame)
2099  return AVERROR(ENOMEM);
2100 
2101  do {
2102  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2103  goto the_end;
2104 
2105  if (got_frame) {
2106  tb = (AVRational){1, frame->sample_rate};
2107 
2108  reconfigure =
2109  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
2110  frame->format, frame->ch_layout.nb_channels) ||
2111  av_channel_layout_compare(&is->audio_filter_src.ch_layout, &frame->ch_layout) ||
2112  is->audio_filter_src.freq != frame->sample_rate ||
2113  is->auddec.pkt_serial != last_serial;
2114 
2115  if (reconfigure) {
2116  char buf1[1024], buf2[1024];
2117  av_channel_layout_describe(&is->audio_filter_src.ch_layout, buf1, sizeof(buf1));
2118  av_channel_layout_describe(&frame->ch_layout, buf2, sizeof(buf2));
2119  av_log(NULL, AV_LOG_DEBUG,
2120  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2121  is->audio_filter_src.freq, is->audio_filter_src.ch_layout.nb_channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2122  frame->sample_rate, frame->ch_layout.nb_channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2123 
2124  is->audio_filter_src.fmt = frame->format;
2125  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &frame->ch_layout);
2126  if (ret < 0)
2127  goto the_end;
2128  is->audio_filter_src.freq = frame->sample_rate;
2129  last_serial = is->auddec.pkt_serial;
2130 
2131  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2132  goto the_end;
2133  }
2134 
2135  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2136  goto the_end;
2137 
2138  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2139  FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2140  tb = av_buffersink_get_time_base(is->out_audio_filter);
2141  if (!(af = frame_queue_peek_writable(&is->sampq)))
2142  goto the_end;
2143 
2144  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2145  af->pos = fd ? fd->pkt_pos : -1;
2146  af->serial = is->auddec.pkt_serial;
2147  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2148 
2149  av_frame_move_ref(af->frame, frame);
2150  frame_queue_push(&is->sampq);
2151 
2152  if (is->audioq.serial != is->auddec.pkt_serial)
2153  break;
2154  }
2155  if (ret == AVERROR_EOF)
2156  is->auddec.finished = is->auddec.pkt_serial;
2157  }
2158  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2159  the_end:
2160  avfilter_graph_free(&is->agraph);
2161  av_frame_free(&frame);
2162  return ret;
2163 }
2164 
2165 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2166 {
2167  packet_queue_start(d->queue);
2168  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2169  if (!d->decoder_tid) {
2170  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2171  return AVERROR(ENOMEM);
2172  }
2173  return 0;
2174 }
2175 
2176 static int video_thread(void *arg)
2177 {
2178  VideoState *is = arg;
2179  AVFrame *frame = av_frame_alloc();
2180  double pts;
2181  double duration;
2182  int ret;
2183  AVRational tb = is->video_st->time_base;
2184  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2185 
2186  AVFilterGraph *graph = NULL;
2187  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2188  int last_w = 0;
2189  int last_h = 0;
2190  enum AVPixelFormat last_format = -2;
2191  int last_serial = -1;
2192  int last_vfilter_idx = 0;
2193 
2194  if (!frame)
2195  return AVERROR(ENOMEM);
2196 
2197  for (;;) {
2198  ret = get_video_frame(is, frame);
2199  if (ret < 0)
2200  goto the_end;
2201  if (!ret)
2202  continue;
2203 
2204  if ( last_w != frame->width
2205  || last_h != frame->height
2206  || last_format != frame->format
2207  || last_serial != is->viddec.pkt_serial
2208  || last_vfilter_idx != is->vfilter_idx) {
2209  av_log(NULL, AV_LOG_DEBUG,
2210  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2211  last_w, last_h,
2212  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2213  frame->width, frame->height,
2214  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2215  avfilter_graph_free(&graph);
2216  graph = avfilter_graph_alloc();
2217  if (!graph) {
2218  ret = AVERROR(ENOMEM);
2219  goto the_end;
2220  }
2221  graph->nb_threads = filter_nbthreads;
2222  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2223  SDL_Event event;
2224  event.type = FF_QUIT_EVENT;
2225  event.user.data1 = is;
2226  SDL_PushEvent(&event);
2227  goto the_end;
2228  }
2229  filt_in = is->in_video_filter;
2230  filt_out = is->out_video_filter;
2231  last_w = frame->width;
2232  last_h = frame->height;
2233  last_format = frame->format;
2234  last_serial = is->viddec.pkt_serial;
2235  last_vfilter_idx = is->vfilter_idx;
2236  frame_rate = av_buffersink_get_frame_rate(filt_out);
2237  }
2238 
2239  ret = av_buffersrc_add_frame(filt_in, frame);
2240  if (ret < 0)
2241  goto the_end;
2242 
2243  while (ret >= 0) {
2244  FrameData *fd;
2245 
2246  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2247 
2248  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2249  if (ret < 0) {
2250  if (ret == AVERROR_EOF)
2251  is->viddec.finished = is->viddec.pkt_serial;
2252  ret = 0;
2253  break;
2254  }
2255 
2256  fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2257 
2258  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2259  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2260  is->frame_last_filter_delay = 0;
2261  tb = av_buffersink_get_time_base(filt_out);
2262  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2263  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2264  ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial);
2265  av_frame_unref(frame);
2266  if (is->videoq.serial != is->viddec.pkt_serial)
2267  break;
2268  }
2269 
2270  if (ret < 0)
2271  goto the_end;
2272  }
2273  the_end:
2274  avfilter_graph_free(&graph);
2275  av_frame_free(&frame);
2276  return 0;
2277 }
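/* Illustration (assumed numbers): the queued frame duration above is the
 * inverse of the sink frame rate, e.g. for 29.97 fps
 *
 *     duration = av_q2d((AVRational){1001, 30000})  ~= 0.0334 s
 *
 * and it falls back to 0 when the frame rate is unknown.
 */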
2278 
2279 static int subtitle_thread(void *arg)
2280 {
2281  VideoState *is = arg;
2282  Frame *sp;
2283  int got_subtitle;
2284  double pts;
2285 
2286  for (;;) {
2287  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2288  return 0;
2289 
2290  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2291  break;
2292 
2293  pts = 0;
2294 
2295  if (got_subtitle && sp->sub.format == 0) {
2296  if (sp->sub.pts != AV_NOPTS_VALUE)
2297  pts = sp->sub.pts / (double)AV_TIME_BASE;
2298  sp->pts = pts;
2299  sp->serial = is->subdec.pkt_serial;
2300  sp->width = is->subdec.avctx->width;
2301  sp->height = is->subdec.avctx->height;
2302  sp->uploaded = 0;
2303 
2304  /* now we can update the picture count */
2305  frame_queue_push(&is->subpq);
2306  } else if (got_subtitle) {
2307  avsubtitle_free(&sp->sub);
2308  }
2309  }
2310  return 0;
2311 }
2312 
2313 /* copy samples into the ring buffer used by the waveform/RDFT display */
2314 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2315 {
2316  int size, len;
2317 
2318  size = samples_size / sizeof(short);
2319  while (size > 0) {
2320  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2321  if (len > size)
2322  len = size;
2323  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2324  samples += len;
2325  is->sample_array_index += len;
2326  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2327  is->sample_array_index = 0;
2328  size -= len;
2329  }
2330 }
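/* Illustration (assumed numbers): sample_array acts as a ring buffer of
 * SAMPLE_ARRAY_SIZE int16_t values.  With the write index 100 samples from the
 * end and 300 samples arriving, the first 100 fill the tail, the index wraps
 * to 0, and the remaining 200 are written at the head, ready for the
 * waveform/RDFT display.
 */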
2331 
2332 /* return the wanted number of samples to get better sync if sync_type is video
2333  * or external master clock */
2334 static int synchronize_audio(VideoState *is, int nb_samples)
2335 {
2336  int wanted_nb_samples = nb_samples;
2337 
2338  /* if not master, then we try to remove or add samples to correct the clock */
2339  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2340  double diff, avg_diff;
2341  int min_nb_samples, max_nb_samples;
2342 
2343  diff = get_clock(&is->audclk) - get_master_clock(is);
2344 
2345  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2346  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2347  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2348  /* not enough measures to have a correct estimate */
2349  is->audio_diff_avg_count++;
2350  } else {
2351  /* estimate the A-V difference */
2352  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2353 
2354  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2355  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2356  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2357  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2358  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2359  }
2360  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2361  diff, avg_diff, wanted_nb_samples - nb_samples,
2362  is->audio_clock, is->audio_diff_threshold);
2363  }
2364  } else {
2365  /* too big difference : may be initial PTS errors, so
2366  reset A-V filter */
2367  is->audio_diff_avg_count = 0;
2368  is->audio_diff_cum = 0;
2369  }
2370  }
2371 
2372  return wanted_nb_samples;
2373 }
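/* Worked example (illustrative numbers only): audio_diff_avg_coef is
 * exp(log(0.01) / AUDIO_DIFF_AVG_NB) ~= 0.794, so the estimate above is an
 * exponential moving average, avg_diff = audio_diff_cum * (1 - 0.794).
 * For a steady +0.05 s audio lead at 48 kHz with 1024-sample frames, once
 * avg_diff exceeds audio_diff_threshold the request becomes
 *
 *     wanted_nb_samples = 1024 + (int)(0.05 * 48000) = 3424
 *
 * which av_clip() then limits to 1024 * 110 / 100 = 1126 samples
 * (SAMPLE_CORRECTION_PERCENT_MAX = 10), so the correction is spread over
 * several frames.
 */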
2374 
2375 /**
2376  * Decode one audio frame and return its uncompressed size.
2377  *
2378  * The processed audio frame is decoded, converted if required, and
2379  * stored in is->audio_buf, with size in bytes given by the return
2380  * value.
2381  */
2382 static int audio_decode_frame(VideoState *is)
2383 {
2384  int data_size, resampled_data_size;
2385  av_unused double audio_clock0;
2386  int wanted_nb_samples;
2387  Frame *af;
2388 
2389  if (is->paused)
2390  return -1;
2391 
2392  do {
2393 #if defined(_WIN32)
2394  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2395  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2396  return -1;
2397  av_usleep (1000);
2398  }
2399 #endif
2400  if (!(af = frame_queue_peek_readable(&is->sampq)))
2401  return -1;
2402  frame_queue_next(&is->sampq);
2403  } while (af->serial != is->audioq.serial);
2404 
2405  data_size = av_samples_get_buffer_size(NULL, af->frame->ch_layout.nb_channels,
2406  af->frame->nb_samples,
2407  af->frame->format, 1);
2408 
2409  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2410 
2411  if (af->frame->format != is->audio_src.fmt ||
2412  av_channel_layout_compare(&af->frame->ch_layout, &is->audio_src.ch_layout) ||
2413  af->frame->sample_rate != is->audio_src.freq ||
2414  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2415  int ret;
2416  swr_free(&is->swr_ctx);
2417  ret = swr_alloc_set_opts2(&is->swr_ctx,
2418  &is->audio_tgt.ch_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2419  &af->frame->ch_layout, af->frame->format, af->frame->sample_rate,
2420  0, NULL);
2421  if (ret < 0 || swr_init(is->swr_ctx) < 0) {
2422  av_log(NULL, AV_LOG_ERROR,
2423  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2424  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->ch_layout.nb_channels,
2425  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.ch_layout.nb_channels);
2426  swr_free(&is->swr_ctx);
2427  return -1;
2428  }
2429  if (av_channel_layout_copy(&is->audio_src.ch_layout, &af->frame->ch_layout) < 0)
2430  return -1;
2431  is->audio_src.freq = af->frame->sample_rate;
2432  is->audio_src.fmt = af->frame->format;
2433  }
2434 
2435  if (is->swr_ctx) {
2436  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2437  uint8_t **out = &is->audio_buf1;
2438  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2439  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.ch_layout.nb_channels, out_count, is->audio_tgt.fmt, 0);
2440  int len2;
2441  if (out_size < 0) {
2442  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2443  return -1;
2444  }
2445  if (wanted_nb_samples != af->frame->nb_samples) {
2446  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2447  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2448  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2449  return -1;
2450  }
2451  }
2452  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2453  if (!is->audio_buf1)
2454  return AVERROR(ENOMEM);
2455  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2456  if (len2 < 0) {
2457  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2458  return -1;
2459  }
2460  if (len2 == out_count) {
2461  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2462  if (swr_init(is->swr_ctx) < 0)
2463  swr_free(&is->swr_ctx);
2464  }
2465  is->audio_buf = is->audio_buf1;
2466  resampled_data_size = len2 * is->audio_tgt.ch_layout.nb_channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2467  } else {
2468  is->audio_buf = af->frame->data[0];
2469  resampled_data_size = data_size;
2470  }
2471 
2472  audio_clock0 = is->audio_clock;
2473  /* update the audio clock with the pts */
2474  if (!isnan(af->pts))
2475  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2476  else
2477  is->audio_clock = NAN;
2478  is->audio_clock_serial = af->serial;
2479 #ifdef DEBUG
2480  {
2481  static double last_clock;
2482  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2483  is->audio_clock - last_clock,
2484  is->audio_clock, audio_clock0);
2485  last_clock = is->audio_clock;
2486  }
2487 #endif
2488  return resampled_data_size;
2489 }
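/* Illustration (assumed numbers): when resampling, out_count adds 256 samples
 * of headroom for swr_convert(), e.g. converting a 1024-sample 44.1 kHz frame
 * for a 48 kHz device gives
 *
 *     out_count = 1024 * 48000 / 44100 + 256 = 1370
 *
 * and the returned size is len2 * channels * bytes-per-sample of the target
 * format; without a resampler the frame's data[0] is returned as-is.
 */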
2490 
2491 /* prepare a new audio buffer */
2492 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2493 {
2494  VideoState *is = opaque;
2495  int audio_size, len1;
2496 
2497  audio_callback_time = av_gettime_relative();
2498 
2499  while (len > 0) {
2500  if (is->audio_buf_index >= is->audio_buf_size) {
2501  audio_size = audio_decode_frame(is);
2502  if (audio_size < 0) {
2503  /* if error, just output silence */
2504  is->audio_buf = NULL;
2505  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2506  } else {
2507  if (is->show_mode != SHOW_MODE_VIDEO)
2508  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2509  is->audio_buf_size = audio_size;
2510  }
2511  is->audio_buf_index = 0;
2512  }
2513  len1 = is->audio_buf_size - is->audio_buf_index;
2514  if (len1 > len)
2515  len1 = len;
2516  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2517  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2518  else {
2519  memset(stream, 0, len1);
2520  if (!is->muted && is->audio_buf)
2521  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2522  }
2523  len -= len1;
2524  stream += len1;
2525  is->audio_buf_index += len1;
2526  }
2527  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2528  /* Let's assume the audio driver that is used by SDL has two periods. */
2529  if (!isnan(is->audio_clock)) {
2530  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2531  sync_clock_to_slave(&is->extclk, &is->audclk);
2532  }
2533 }
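/* Worked example (illustrative numbers): the set_clock_at() call above
 * subtracts the audio still queued downstream.  For a 48 kHz stereo S16 target
 * (bytes_per_sec = 192000) with audio_hw_buf_size = 8192 and
 * audio_write_buf_size = 4096, the estimated latency is
 *
 *     (2 * 8192 + 4096) / 192000.0 ~= 0.107 s
 *
 * so the published audio clock lags audio_clock by about that much, matching
 * the "two periods" assumption above.
 */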
2534 
2535 static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2536 {
2537  SDL_AudioSpec wanted_spec, spec;
2538  const char *env;
2539  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2540  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2541  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2542  int wanted_nb_channels = wanted_channel_layout->nb_channels;
2543 
2544  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2545  if (env) {
2546  wanted_nb_channels = atoi(env);
2547  av_channel_layout_uninit(wanted_channel_layout);
2548  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2549  }
2550  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2551  av_channel_layout_uninit(wanted_channel_layout);
2552  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2553  }
2554  wanted_nb_channels = wanted_channel_layout->nb_channels;
2555  wanted_spec.channels = wanted_nb_channels;
2556  wanted_spec.freq = wanted_sample_rate;
2557  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2558  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2559  return -1;
2560  }
2561  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2562  next_sample_rate_idx--;
2563  wanted_spec.format = AUDIO_S16SYS;
2564  wanted_spec.silence = 0;
2565  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2566  wanted_spec.callback = sdl_audio_callback;
2567  wanted_spec.userdata = opaque;
2568  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2569  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2570  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2571  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2572  if (!wanted_spec.channels) {
2573  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2574  wanted_spec.channels = wanted_nb_channels;
2575  if (!wanted_spec.freq) {
2577  "No more combinations to try, audio open failed\n");
2578  return -1;
2579  }
2580  }
2581  av_channel_layout_default(wanted_channel_layout, wanted_spec.channels);
2582  }
2583  if (spec.format != AUDIO_S16SYS) {
2585  "SDL advised audio format %d is not supported!\n", spec.format);
2586  return -1;
2587  }
2588  if (spec.channels != wanted_spec.channels) {
2589  av_channel_layout_uninit(wanted_channel_layout);
2590  av_channel_layout_default(wanted_channel_layout, spec.channels);
2591  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2593  "SDL advised channel count %d is not supported!\n", spec.channels);
2594  return -1;
2595  }
2596  }
2597 
2598  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2599  audio_hw_params->freq = spec.freq;
2600  if (av_channel_layout_copy(&audio_hw_params->ch_layout, wanted_channel_layout) < 0)
2601  return -1;
2602  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, 1, audio_hw_params->fmt, 1);
2603  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2604  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2605  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2606  return -1;
2607  }
2608  return spec.size;
2609 }
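/* Illustration (assumed numbers): the wanted_spec.samples computation rounds
 * freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC up to a power of two, e.g. at
 * 44100 Hz
 *
 *     2 << av_log2(44100 / 30) = 2 << 10 = 2048 samples
 *
 * (never below SDL_AUDIO_MIN_BUFFER_SIZE), which keeps the callback rate at or
 * below roughly 30 per second.
 */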
2610 
2611 static int create_hwaccel(AVBufferRef **device_ctx)
2612 {
2613  enum AVHWDeviceType type;
2614  int ret;
2615  AVBufferRef *vk_dev;
2616 
2617  *device_ctx = NULL;
2618 
2619  if (!hwaccel)
2620  return 0;
2621 
2622  type = av_hwdevice_find_type_by_name(hwaccel);
2623  if (type == AV_HWDEVICE_TYPE_NONE)
2624  return AVERROR(ENOTSUP);
2625 
2626  if (!vk_renderer) {
2627  av_log(NULL, AV_LOG_ERROR, "Vulkan renderer is not available\n");
2628  return AVERROR(ENOTSUP);
2629  }
2630 
2631  ret = vk_renderer_get_hw_dev(vk_renderer, &vk_dev);
2632  if (ret < 0)
2633  return ret;
2634 
2635  ret = av_hwdevice_ctx_create_derived(device_ctx, type, vk_dev, 0);
2636  if (!ret)
2637  return 0;
2638 
2639  if (ret != AVERROR(ENOSYS))
2640  return ret;
2641 
2642  av_log(NULL, AV_LOG_WARNING, "Derive %s from vulkan not supported.\n", hwaccel);
2643  ret = av_hwdevice_ctx_create(device_ctx, type, NULL, NULL, 0);
2644  return ret;
2645 }
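/* Illustration: with "-hwaccel vaapi" the code above first tries to derive the
 * requested device type from the Vulkan renderer's device and, if that
 * derivation is not implemented (AVERROR(ENOSYS)), falls back to creating a
 * standalone device with av_hwdevice_ctx_create(); any other error is
 * returned as-is.
 */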
2646 
2647 /* open a given stream. Return 0 if OK */
2648 static int stream_component_open(VideoState *is, int stream_index)
2649 {
2650  AVFormatContext *ic = is->ic;
2651  AVCodecContext *avctx;
2652  const AVCodec *codec;
2653  const char *forced_codec_name = NULL;
2654  AVDictionary *opts = NULL;
2655  int sample_rate;
2656  AVChannelLayout ch_layout = { 0 };
2657  int ret = 0;
2658  int stream_lowres = lowres;
2659 
2660  if (stream_index < 0 || stream_index >= ic->nb_streams)
2661  return -1;
2662 
2663  avctx = avcodec_alloc_context3(NULL);
2664  if (!avctx)
2665  return AVERROR(ENOMEM);
2666 
2667  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2668  if (ret < 0)
2669  goto fail;
2670  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2671 
2672  codec = avcodec_find_decoder(avctx->codec_id);
2673 
2674  switch(avctx->codec_type){
2675  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2676  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2677  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2678  }
2679  if (forced_codec_name)
2680  codec = avcodec_find_decoder_by_name(forced_codec_name);
2681  if (!codec) {
2682  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2683  "No codec could be found with name '%s'\n", forced_codec_name);
2684  else av_log(NULL, AV_LOG_WARNING,
2685  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2686  ret = AVERROR(EINVAL);
2687  goto fail;
2688  }
2689 
2690  avctx->codec_id = codec->id;
2691  if (stream_lowres > codec->max_lowres) {
2692  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2693  codec->max_lowres);
2694  stream_lowres = codec->max_lowres;
2695  }
2696  avctx->lowres = stream_lowres;
2697 
2698  if (fast)
2699  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2700 
2701  ret = filter_codec_opts(codec_opts, avctx->codec_id, ic,
2702  ic->streams[stream_index], codec, &opts, NULL);
2703  if (ret < 0)
2704  goto fail;
2705 
2706  if (!av_dict_get(opts, "threads", NULL, 0))
2707  av_dict_set(&opts, "threads", "auto", 0);
2708  if (stream_lowres)
2709  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2710 
2711  av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);
2712 
2713  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2714  ret = create_hwaccel(&avctx->hw_device_ctx);
2715  if (ret < 0)
2716  goto fail;
2717  }
2718 
2719  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2720  goto fail;
2721  }
2722  ret = check_avoptions(opts);
2723  if (ret < 0)
2724  goto fail;
2725 
2726  is->eof = 0;
2727  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2728  switch (avctx->codec_type) {
2729  case AVMEDIA_TYPE_AUDIO:
2730  {
2731  AVFilterContext *sink;
2732 
2733  is->audio_filter_src.freq = avctx->sample_rate;
2734  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &avctx->ch_layout);
2735  if (ret < 0)
2736  goto fail;
2737  is->audio_filter_src.fmt = avctx->sample_fmt;
2738  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2739  goto fail;
2740  sink = is->out_audio_filter;
2741  sample_rate = av_buffersink_get_sample_rate(sink);
2742  ret = av_buffersink_get_ch_layout(sink, &ch_layout);
2743  if (ret < 0)
2744  goto fail;
2745  }
2746 
2747  /* prepare audio output */
2748  if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
2749  goto fail;
2750  is->audio_hw_buf_size = ret;
2751  is->audio_src = is->audio_tgt;
2752  is->audio_buf_size = 0;
2753  is->audio_buf_index = 0;
2754 
2755  /* init averaging filter */
2756  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2757  is->audio_diff_avg_count = 0;
2758  /* since we do not have a precise enough audio FIFO fullness measure,
2759  we correct audio sync only if the error is larger than this threshold */
2760  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2761 
2762  is->audio_stream = stream_index;
2763  is->audio_st = ic->streams[stream_index];
2764 
2765  if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
2766  goto fail;
2767  if (is->ic->iformat->flags & AVFMT_NOTIMESTAMPS) {
2768  is->auddec.start_pts = is->audio_st->start_time;
2769  is->auddec.start_pts_tb = is->audio_st->time_base;
2770  }
2771  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2772  goto out;
2773  SDL_PauseAudioDevice(audio_dev, 0);
2774  break;
2775  case AVMEDIA_TYPE_VIDEO:
2776  is->video_stream = stream_index;
2777  is->video_st = ic->streams[stream_index];
2778 
2779  if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
2780  goto fail;
2781  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2782  goto out;
2783  is->queue_attachments_req = 1;
2784  break;
2785  case AVMEDIA_TYPE_SUBTITLE:
2786  is->subtitle_stream = stream_index;
2787  is->subtitle_st = ic->streams[stream_index];
2788 
2789  if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
2790  goto fail;
2791  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2792  goto out;
2793  break;
2794  default:
2795  break;
2796  }
2797  goto out;
2798 
2799 fail:
2800  avcodec_free_context(&avctx);
2801 out:
2802  av_channel_layout_uninit(&ch_layout);
2803  av_dict_free(&opts);
2804 
2805  return ret;
2806 }
2807 
2808 static int decode_interrupt_cb(void *ctx)
2809 {
2810  VideoState *is = ctx;
2811  return is->abort_request;
2812 }
2813 
2814 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2815  return stream_id < 0 ||
2816  queue->abort_request ||
2817  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2818  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2819 }
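/* Illustration: a queue counts as "full enough" once it holds more than
 * MIN_FRAMES (25) packets and, when packet durations are known, more than one
 * second of buffered data; streams that are just an attached picture always
 * qualify.  Together with the MAX_QUEUE_SIZE byte cap this is what throttles
 * read_thread() below.
 */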
2820 
2821 static int is_realtime(AVFormatContext *s)
2822 {
2823  if( !strcmp(s->iformat->name, "rtp")
2824  || !strcmp(s->iformat->name, "rtsp")
2825  || !strcmp(s->iformat->name, "sdp")
2826  )
2827  return 1;
2828 
2829  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2830  || !strncmp(s->url, "udp:", 4)
2831  )
2832  )
2833  return 1;
2834  return 0;
2835 }
2836 
2837 /* this thread gets the stream from the disk or the network */
2838 static int read_thread(void *arg)
2839 {
2840  VideoState *is = arg;
2841  AVFormatContext *ic = NULL;
2842  int err, i, ret;
2843  int st_index[AVMEDIA_TYPE_NB];
2844  AVPacket *pkt = NULL;
2845  int64_t stream_start_time;
2846  int pkt_in_play_range = 0;
2847  const AVDictionaryEntry *t;
2848  SDL_mutex *wait_mutex = SDL_CreateMutex();
2849  int scan_all_pmts_set = 0;
2850  int64_t pkt_ts;
2851 
2852  if (!wait_mutex) {
2853  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2854  ret = AVERROR(ENOMEM);
2855  goto fail;
2856  }
2857 
2858  memset(st_index, -1, sizeof(st_index));
2859  is->eof = 0;
2860 
2861  pkt = av_packet_alloc();
2862  if (!pkt) {
2863  av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
2864  ret = AVERROR(ENOMEM);
2865  goto fail;
2866  }
2867  ic = avformat_alloc_context();
2868  if (!ic) {
2869  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2870  ret = AVERROR(ENOMEM);
2871  goto fail;
2872  }
2873  ic->interrupt_callback.callback = decode_interrupt_cb;
2874  ic->interrupt_callback.opaque = is;
2875  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2876  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2877  scan_all_pmts_set = 1;
2878  }
2879  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2880  if (err < 0) {
2881  print_error(is->filename, err);
2882  ret = -1;
2883  goto fail;
2884  }
2885  if (scan_all_pmts_set)
2886  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2887  remove_avoptions(&format_opts, codec_opts);
2888 
2889  ret = check_avoptions(format_opts);
2890  if (ret < 0)
2891  goto fail;
2892  is->ic = ic;
2893 
2894  if (genpts)
2895  ic->flags |= AVFMT_FLAG_GENPTS;
2896 
2897  if (find_stream_info) {
2898  AVDictionary **opts;
2899  int orig_nb_streams = ic->nb_streams;
2900 
2901  err = setup_find_stream_info_opts(ic, codec_opts, &opts);
2902  if (err < 0) {
2903  av_log(NULL, AV_LOG_ERROR,
2904  "Error setting up avformat_find_stream_info() options\n");
2905  ret = err;
2906  goto fail;
2907  }
2908 
2909  err = avformat_find_stream_info(ic, opts);
2910 
2911  for (i = 0; i < orig_nb_streams; i++)
2912  av_dict_free(&opts[i]);
2913  av_freep(&opts);
2914 
2915  if (err < 0) {
2916  av_log(NULL, AV_LOG_WARNING,
2917  "%s: could not find codec parameters\n", is->filename);
2918  ret = -1;
2919  goto fail;
2920  }
2921  }
2922 
2923  if (ic->pb)
2924  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2925 
2926  if (seek_by_bytes < 0)
2927  seek_by_bytes = !(ic->iformat->flags & AVFMT_NO_BYTE_SEEK) &&
2928  !!(ic->iformat->flags & AVFMT_TS_DISCONT) &&
2929  strcmp("ogg", ic->iformat->name);
2930 
2931  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2932 
2933  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2934  window_title = av_asprintf("%s - %s", t->value, input_filename);
2935 
2936  /* if seeking requested, we execute it */
2937  if (start_time != AV_NOPTS_VALUE) {
2938  int64_t timestamp;
2939 
2940  timestamp = start_time;
2941  /* add the stream start time */
2942  if (ic->start_time != AV_NOPTS_VALUE)
2943  timestamp += ic->start_time;
2944  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2945  if (ret < 0) {
2946  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2947  is->filename, (double)timestamp / AV_TIME_BASE);
2948  }
2949  }
2950 
2951  is->realtime = is_realtime(ic);
2952 
2953  if (show_status)
2954  av_dump_format(ic, 0, is->filename, 0);
2955 
2956  for (i = 0; i < ic->nb_streams; i++) {
2957  AVStream *st = ic->streams[i];
2958  enum AVMediaType type = st->codecpar->codec_type;
2959  st->discard = AVDISCARD_ALL;
2960  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2961  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2962  st_index[type] = i;
2963  }
2964  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2965  if (wanted_stream_spec[i] && st_index[i] == -1) {
2966  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2967  st_index[i] = INT_MAX;
2968  }
2969  }
2970 
2971  if (!video_disable)
2972  st_index[AVMEDIA_TYPE_VIDEO] =
2973  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2974  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2975  if (!audio_disable)
2976  st_index[AVMEDIA_TYPE_AUDIO] =
2977  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2978  st_index[AVMEDIA_TYPE_AUDIO],
2979  st_index[AVMEDIA_TYPE_VIDEO],
2980  NULL, 0);
2981  if (!video_disable && !subtitle_disable)
2982  st_index[AVMEDIA_TYPE_SUBTITLE] =
2983  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2984  st_index[AVMEDIA_TYPE_SUBTITLE],
2985  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2986  st_index[AVMEDIA_TYPE_AUDIO] :
2987  st_index[AVMEDIA_TYPE_VIDEO]),
2988  NULL, 0);
2989 
2990  is->show_mode = show_mode;
2991  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2992  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2993  AVCodecParameters *codecpar = st->codecpar;
2994  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2995  if (codecpar->width)
2996  set_default_window_size(codecpar->width, codecpar->height, sar);
2997  }
2998 
2999  /* open the streams */
3000  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
3001  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
3002  }
3003 
3004  ret = -1;
3005  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
3006  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
3007  }
3008  if (is->show_mode == SHOW_MODE_NONE)
3009  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
3010 
3011  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
3012  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
3013  }
3014 
3015  if (is->video_stream < 0 && is->audio_stream < 0) {
3016  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
3017  is->filename);
3018  ret = -1;
3019  goto fail;
3020  }
3021 
3022  if (infinite_buffer < 0 && is->realtime)
3023  infinite_buffer = 1;
3024 
3025  for (;;) {
3026  if (is->abort_request)
3027  break;
3028  if (is->paused != is->last_paused) {
3029  is->last_paused = is->paused;
3030  if (is->paused)
3031  is->read_pause_return = av_read_pause(ic);
3032  else
3033  av_read_play(ic);
3034  }
3035 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
3036  if (is->paused &&
3037  (!strcmp(ic->iformat->name, "rtsp") ||
3038  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
3039  /* wait 10 ms to avoid trying to get another packet */
3040  /* XXX: horrible */
3041  SDL_Delay(10);
3042  continue;
3043  }
3044 #endif
3045  if (is->seek_req) {
3046  int64_t seek_target = is->seek_pos;
3047  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
3048  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
3049 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
3050 // of the seek_pos/seek_rel variables
3051 
3052  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
3053  if (ret < 0) {
3054  av_log(NULL, AV_LOG_ERROR,
3055  "%s: error while seeking\n", is->ic->url);
3056  } else {
3057  if (is->audio_stream >= 0)
3058  packet_queue_flush(&is->audioq);
3059  if (is->subtitle_stream >= 0)
3060  packet_queue_flush(&is->subtitleq);
3061  if (is->video_stream >= 0)
3062  packet_queue_flush(&is->videoq);
3063  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3064  set_clock(&is->extclk, NAN, 0);
3065  } else {
3066  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3067  }
3068  }
3069  is->seek_req = 0;
3070  is->queue_attachments_req = 1;
3071  is->eof = 0;
3072  if (is->paused)
3073  step_to_next_frame(is);
3074  }
3075  if (is->queue_attachments_req) {
3076  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3077  if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
3078  goto fail;
3079  packet_queue_put(&is->videoq, pkt);
3080  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3081  }
3082  is->queue_attachments_req = 0;
3083  }
3084 
3085  /* if the queues are full, no need to read more */
3086  if (infinite_buffer<1 &&
3087  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3088  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
3089  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
3090  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
3091  /* wait 10 ms */
3092  SDL_LockMutex(wait_mutex);
3093  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3094  SDL_UnlockMutex(wait_mutex);
3095  continue;
3096  }
3097  if (!is->paused &&
3098  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3099  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3100  if (loop != 1 && (!loop || --loop)) {
3101  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3102  } else if (autoexit) {
3103  ret = AVERROR_EOF;
3104  goto fail;
3105  }
3106  }
3107  ret = av_read_frame(ic, pkt);
3108  if (ret < 0) {
3109  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3110  if (is->video_stream >= 0)
3111  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3112  if (is->audio_stream >= 0)
3113  packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
3114  if (is->subtitle_stream >= 0)
3115  packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
3116  is->eof = 1;
3117  }
3118  if (ic->pb && ic->pb->error) {
3119  if (autoexit)
3120  goto fail;
3121  else
3122  break;
3123  }
3124  SDL_LockMutex(wait_mutex);
3125  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3126  SDL_UnlockMutex(wait_mutex);
3127  continue;
3128  } else {
3129  is->eof = 0;
3130  }
3131  /* check if packet is in play range specified by user, then queue, otherwise discard */
3132  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3133  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3134  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3135  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3136  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3137  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3138  <= ((double)duration / 1000000);
3139  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3140  packet_queue_put(&is->audioq, pkt);
3141  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3142  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3143  packet_queue_put(&is->videoq, pkt);
3144  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3145  packet_queue_put(&is->subtitleq, pkt);
3146  } else {
3147  av_packet_unref(pkt);
3148  }
3149  }
3150 
3151  ret = 0;
3152  fail:
3153  if (ic && !is->ic)
3154  avformat_close_input(&ic);
3155 
3156  av_packet_free(&pkt);
3157  if (ret != 0) {
3158  SDL_Event event;
3159 
3160  event.type = FF_QUIT_EVENT;
3161  event.user.data1 = is;
3162  SDL_PushEvent(&event);
3163  }
3164  SDL_DestroyMutex(wait_mutex);
3165  return 0;
3166 }
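/* Illustration (assumed numbers): the pkt_in_play_range test above rescales
 * the packet timestamp to seconds before comparing it with -ss/-t, e.g. a
 * packet with pts 900000 in a 1/90000 time base from a stream starting at 0,
 * played with start_time = 5 s and duration = 10 s, gives
 *
 *     900000 * (1/90000) - 5 = 5 <= 10
 *
 * so the packet is queued.
 */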
3167 
3168 static VideoState *stream_open(const char *filename,
3169  const AVInputFormat *iformat)
3170 {
3171  VideoState *is;
3172 
3173  is = av_mallocz(sizeof(VideoState));
3174  if (!is)
3175  return NULL;
3176  is->last_video_stream = is->video_stream = -1;
3177  is->last_audio_stream = is->audio_stream = -1;
3178  is->last_subtitle_stream = is->subtitle_stream = -1;
3179  is->filename = av_strdup(filename);
3180  if (!is->filename)
3181  goto fail;
3182  is->iformat = iformat;
3183  is->ytop = 0;
3184  is->xleft = 0;
3185 
3186  /* start video display */
3187  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3188  goto fail;
3189  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3190  goto fail;
3191  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3192  goto fail;
3193 
3194  if (packet_queue_init(&is->videoq) < 0 ||
3195  packet_queue_init(&is->audioq) < 0 ||
3196  packet_queue_init(&is->subtitleq) < 0)
3197  goto fail;
3198 
3199  if (!(is->continue_read_thread = SDL_CreateCond())) {
3200  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3201  goto fail;
3202  }
3203 
3204  init_clock(&is->vidclk, &is->videoq.serial);
3205  init_clock(&is->audclk, &is->audioq.serial);
3206  init_clock(&is->extclk, &is->extclk.serial);
3207  is->audio_clock_serial = -1;
3208  if (startup_volume < 0)
3209  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3210  if (startup_volume > 100)
3211  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3212  startup_volume = av_clip(startup_volume, 0, 100);
3213  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3214  is->audio_volume = startup_volume;
3215  is->muted = 0;
3216  is->av_sync_type = av_sync_type;
3217  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3218  if (!is->read_tid) {
3219  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3220 fail:
3221  stream_close(is);
3222  return NULL;
3223  }
3224  return is;
3225 }
3226 
3227 static void stream_cycle_channel(VideoState *is, int codec_type)
3228 {
3229  AVFormatContext *ic = is->ic;
3230  int start_index, stream_index;
3231  int old_index;
3232  AVStream *st;
3233  AVProgram *p = NULL;
3234  int nb_streams = is->ic->nb_streams;
3235 
3236  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3237  start_index = is->last_video_stream;
3238  old_index = is->video_stream;
3239  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3240  start_index = is->last_audio_stream;
3241  old_index = is->audio_stream;
3242  } else {
3243  start_index = is->last_subtitle_stream;
3244  old_index = is->subtitle_stream;
3245  }
3246  stream_index = start_index;
3247 
3248  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3249  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3250  if (p) {
3251  nb_streams = p->nb_stream_indexes;
3252  for (start_index = 0; start_index < nb_streams; start_index++)
3253  if (p->stream_index[start_index] == stream_index)
3254  break;
3255  if (start_index == nb_streams)
3256  start_index = -1;
3257  stream_index = start_index;
3258  }
3259  }
3260 
3261  for (;;) {
3262  if (++stream_index >= nb_streams)
3263  {
3264  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3265  {
3266  stream_index = -1;
3267  is->last_subtitle_stream = -1;
3268  goto the_end;
3269  }
3270  if (start_index == -1)
3271  return;
3272  stream_index = 0;
3273  }
3274  if (stream_index == start_index)
3275  return;
3276  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3277  if (st->codecpar->codec_type == codec_type) {
3278  /* check that parameters are OK */
3279  switch (codec_type) {
3280  case AVMEDIA_TYPE_AUDIO:
3281  if (st->codecpar->sample_rate != 0 &&
3282  st->codecpar->ch_layout.nb_channels != 0)
3283  goto the_end;
3284  break;
3285  case AVMEDIA_TYPE_VIDEO:
3286  case AVMEDIA_TYPE_SUBTITLE:
3287  goto the_end;
3288  default:
3289  break;
3290  }
3291  }
3292  }
3293  the_end:
3294  if (p && stream_index != -1)
3295  stream_index = p->stream_index[stream_index];
3296  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3297  av_get_media_type_string(codec_type),
3298  old_index,
3299  stream_index);
3300 
3301  stream_component_close(is, old_index);
3302  stream_component_open(is, stream_index);
3303 }
3304 
3305 
3306 static void toggle_full_screen(VideoState *is)
3307 {
3308  is_full_screen = !is_full_screen;
3309  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3310 }
3311 
3312 static void toggle_audio_display(VideoState *is)
3313 {
3314  int next = is->show_mode;
3315  do {
3316  next = (next + 1) % SHOW_MODE_NB;
3317  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3318  if (is->show_mode != next) {
3319  is->force_refresh = 1;
3320  is->show_mode = next;
3321  }
3322 }
3323 
3324 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3325  double remaining_time = 0.0;
3326  SDL_PumpEvents();
3327  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3328  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3329  SDL_ShowCursor(0);
3330  cursor_hidden = 1;
3331  }
3332  if (remaining_time > 0.0)
3333  av_usleep((int64_t)(remaining_time * 1000000.0));
3334  remaining_time = REFRESH_RATE;
3335  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3336  video_refresh(is, &remaining_time);
3337  SDL_PumpEvents();
3338  }
3339 }
3340 
3341 static void seek_chapter(VideoState *is, int incr)
3342 {
3343  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3344  int i;
3345 
3346  if (!is->ic->nb_chapters)
3347  return;
3348 
3349  /* find the current chapter */
3350  for (i = 0; i < is->ic->nb_chapters; i++) {
3351  AVChapter *ch = is->ic->chapters[i];
3352  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3353  i--;
3354  break;
3355  }
3356  }
3357 
3358  i += incr;
3359  i = FFMAX(i, 0);
3360  if (i >= is->ic->nb_chapters)
3361  return;
3362 
3363  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3364  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3365  AV_TIME_BASE_Q), 0, 0);
3366 }
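/* Illustration (assumed numbers): av_compare_ts() lets the chapter starts,
 * stored in each chapter's own time_base, be compared against the master
 * clock in AV_TIME_BASE units; e.g. a chapter starting at 90000 in a 1/1000
 * time base is at 90 s, so a playback position of 60 s still belongs to the
 * previous chapter and seeking forward one chapter jumps to the 90 s mark.
 */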
3367 
3368 /* handle an event sent by the GUI */
3369 static void event_loop(VideoState *cur_stream)
3370 {
3371  SDL_Event event;
3372  double incr, pos, frac;
3373 
3374  for (;;) {
3375  double x;
3376  refresh_loop_wait_event(cur_stream, &event);
3377  switch (event.type) {
3378  case SDL_KEYDOWN:
3379  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3380  do_exit(cur_stream);
3381  break;
3382  }
3383  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3384  if (!cur_stream->width)
3385  continue;
3386  switch (event.key.keysym.sym) {
3387  case SDLK_f:
3388  toggle_full_screen(cur_stream);
3389  cur_stream->force_refresh = 1;
3390  break;
3391  case SDLK_p:
3392  case SDLK_SPACE:
3393  toggle_pause(cur_stream);
3394  break;
3395  case SDLK_m:
3396  toggle_mute(cur_stream);
3397  break;
3398  case SDLK_KP_MULTIPLY:
3399  case SDLK_0:
3400  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3401  break;
3402  case SDLK_KP_DIVIDE:
3403  case SDLK_9:
3404  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3405  break;
3406  case SDLK_s: // S: Step to next frame
3407  step_to_next_frame(cur_stream);
3408  break;
3409  case SDLK_a:
3410  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3411  break;
3412  case SDLK_v:
3413  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3414  break;
3415  case SDLK_c:
3416  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3417  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3418  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3419  break;
3420  case SDLK_t:
3421  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3422  break;
3423  case SDLK_w:
3424  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3425  if (++cur_stream->vfilter_idx >= nb_vfilters)
3426  cur_stream->vfilter_idx = 0;
3427  } else {
3428  cur_stream->vfilter_idx = 0;
3429  toggle_audio_display(cur_stream);
3430  }
3431  break;
3432  case SDLK_PAGEUP:
3433  if (cur_stream->ic->nb_chapters <= 1) {
3434  incr = 600.0;
3435  goto do_seek;
3436  }
3437  seek_chapter(cur_stream, 1);
3438  break;
3439  case SDLK_PAGEDOWN:
3440  if (cur_stream->ic->nb_chapters <= 1) {
3441  incr = -600.0;
3442  goto do_seek;
3443  }
3444  seek_chapter(cur_stream, -1);
3445  break;
3446  case SDLK_LEFT:
3447  incr = seek_interval ? -seek_interval : -10.0;
3448  goto do_seek;
3449  case SDLK_RIGHT:
3450  incr = seek_interval ? seek_interval : 10.0;
3451  goto do_seek;
3452  case SDLK_UP:
3453  incr = 60.0;
3454  goto do_seek;
3455  case SDLK_DOWN:
3456  incr = -60.0;
3457  do_seek:
3458  if (seek_by_bytes) {
3459  pos = -1;
3460  if (pos < 0 && cur_stream->video_stream >= 0)
3461  pos = frame_queue_last_pos(&cur_stream->pictq);
3462  if (pos < 0 && cur_stream->audio_stream >= 0)
3463  pos = frame_queue_last_pos(&cur_stream->sampq);
3464  if (pos < 0)
3465  pos = avio_tell(cur_stream->ic->pb);
3466  if (cur_stream->ic->bit_rate)
3467  incr *= cur_stream->ic->bit_rate / 8.0;
3468  else
3469  incr *= 180000.0;
3470  pos += incr;
3471  stream_seek(cur_stream, pos, incr, 1);
3472  } else {
3473  pos = get_master_clock(cur_stream);
3474  if (isnan(pos))
3475  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3476  pos += incr;
3477  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3478  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3479  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3480  }
3481  break;
3482  default:
3483  break;
3484  }
3485  break;
3486  case SDL_MOUSEBUTTONDOWN:
3487  if (exit_on_mousedown) {
3488  do_exit(cur_stream);
3489  break;
3490  }
3491  if (event.button.button == SDL_BUTTON_LEFT) {
3492  static int64_t last_mouse_left_click = 0;
3493  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3494  toggle_full_screen(cur_stream);
3495  cur_stream->force_refresh = 1;
3496  last_mouse_left_click = 0;
3497  } else {
3498  last_mouse_left_click = av_gettime_relative();
3499  }
3500  }
3501  case SDL_MOUSEMOTION:
3502  if (cursor_hidden) {
3503  SDL_ShowCursor(1);
3504  cursor_hidden = 0;
3505  }
3506  cursor_last_shown = av_gettime_relative();
3507  if (event.type == SDL_MOUSEBUTTONDOWN) {
3508  if (event.button.button != SDL_BUTTON_RIGHT)
3509  break;
3510  x = event.button.x;
3511  } else {
3512  if (!(event.motion.state & SDL_BUTTON_RMASK))
3513  break;
3514  x = event.motion.x;
3515  }
3516  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3517  uint64_t size = avio_size(cur_stream->ic->pb);
3518  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3519  } else {
3520  int64_t ts;
3521  int ns, hh, mm, ss;
3522  int tns, thh, tmm, tss;
3523  tns = cur_stream->ic->duration / 1000000LL;
3524  thh = tns / 3600;
3525  tmm = (tns % 3600) / 60;
3526  tss = (tns % 60);
3527  frac = x / cur_stream->width;
3528  ns = frac * tns;
3529  hh = ns / 3600;
3530  mm = (ns % 3600) / 60;
3531  ss = (ns % 60);
3532  av_log(NULL, AV_LOG_INFO,
3533  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3534  hh, mm, ss, thh, tmm, tss);
3535  ts = frac * cur_stream->ic->duration;
3536  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3537  ts += cur_stream->ic->start_time;
3538  stream_seek(cur_stream, ts, 0, 0);
3539  }
3540  break;
3541  case SDL_WINDOWEVENT:
3542  switch (event.window.event) {
3543  case SDL_WINDOWEVENT_SIZE_CHANGED:
3544  screen_width = cur_stream->width = event.window.data1;
3545  screen_height = cur_stream->height = event.window.data2;
3546  if (cur_stream->vis_texture) {
3547  SDL_DestroyTexture(cur_stream->vis_texture);
3548  cur_stream->vis_texture = NULL;
3549  }
3550  if (vk_renderer)
3551  vk_renderer_resize(vk_renderer, cur_stream->width, cur_stream->height);
3552  case SDL_WINDOWEVENT_EXPOSED:
3553  cur_stream->force_refresh = 1;
3554  }
3555  break;
3556  case SDL_QUIT:
3557  case FF_QUIT_EVENT:
3558  do_exit(cur_stream);
3559  break;
3560  default:
3561  break;
3562  }
3563  }
3564 }
3565 
3566 static int opt_width(void *optctx, const char *opt, const char *arg)
3567 {
3568  double num;
3569  int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3570  if (ret < 0)
3571  return ret;
3572 
3573  screen_width = num;
3574  return 0;
3575 }
3576 
3577 static int opt_height(void *optctx, const char *opt, const char *arg)
3578 {
3579  double num;
3580  int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3581  if (ret < 0)
3582  return ret;
3583 
3584  screen_height = num;
3585  return 0;
3586 }
3587 
3588 static int opt_format(void *optctx, const char *opt, const char *arg)
3589 {
3590  file_iformat = av_find_input_format(arg);
3591  if (!file_iformat) {
3592  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3593  return AVERROR(EINVAL);
3594  }
3595  return 0;
3596 }
3597 
3598 static int opt_sync(void *optctx, const char *opt, const char *arg)
3599 {
3600  if (!strcmp(arg, "audio"))
3601  av_sync_type = AV_SYNC_AUDIO_MASTER;
3602  else if (!strcmp(arg, "video"))
3603  av_sync_type = AV_SYNC_VIDEO_MASTER;
3604  else if (!strcmp(arg, "ext"))
3605  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3606  else {
3607  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3608  exit(1);
3609  }
3610  return 0;
3611 }
3612 
3613 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3614 {
3615  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3616  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3617  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT : SHOW_MODE_NONE;
3618 
3619  if (show_mode == SHOW_MODE_NONE) {
3620  double num;
3621  int ret = parse_number(opt, arg, OPT_TYPE_INT, 0, SHOW_MODE_NB-1, &num);
3622  if (ret < 0)
3623  return ret;
3624  show_mode = num;
3625  }
3626  return 0;
3627 }
3628 
3629 static int opt_input_file(void *optctx, const char *filename)
3630 {
3631  if (input_filename) {
3632  av_log(NULL, AV_LOG_FATAL,
3633  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3634  filename, input_filename);
3635  return AVERROR(EINVAL);
3636  }
3637  if (!strcmp(filename, "-"))
3638  filename = "fd:";
3639  input_filename = av_strdup(filename);
3640  if (!input_filename)
3641  return AVERROR(ENOMEM);
3642 
3643  return 0;
3644 }
3645 
3646 static int opt_codec(void *optctx, const char *opt, const char *arg)
3647 {
3648  const char *spec = strchr(opt, ':');
3649  const char **name;
3650  if (!spec) {
3651  av_log(NULL, AV_LOG_ERROR,
3652  "No media specifier was specified in '%s' in option '%s'\n",
3653  arg, opt);
3654  return AVERROR(EINVAL);
3655  }
3656  spec++;
3657 
3658  switch (spec[0]) {
3659  case 'a' : name = &audio_codec_name; break;
3660  case 's' : name = &subtitle_codec_name; break;
3661  case 'v' : name = &video_codec_name; break;
3662  default:
3663  av_log(NULL, AV_LOG_ERROR,
3664  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3665  return AVERROR(EINVAL);
3666  }
3667 
3668  av_freep(name);
3669  *name = av_strdup(arg);
3670  return *name ? 0 : AVERROR(ENOMEM);
3671 }
3672 
3673 static int dummy;
3674 
3675 static const OptionDef options[] = {
3676  CMDUTILS_COMMON_OPTIONS
3677  { "x", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3678  { "y", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3679  { "fs", OPT_TYPE_BOOL, 0, { &is_full_screen }, "force full screen" },
3680  { "an", OPT_TYPE_BOOL, 0, { &audio_disable }, "disable audio" },
3681  { "vn", OPT_TYPE_BOOL, 0, { &video_disable }, "disable video" },
3682  { "sn", OPT_TYPE_BOOL, 0, { &subtitle_disable }, "disable subtitling" },
3683  { "ast", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3684  { "vst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3685  { "sst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3686  { "ss", OPT_TYPE_TIME, 0, { &start_time }, "seek to a given position in seconds", "pos" },
3687  { "t", OPT_TYPE_TIME, 0, { &duration }, "play \"duration\" seconds of audio/video", "duration" },
3688  { "bytes", OPT_TYPE_INT, 0, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3689  { "seek_interval", OPT_TYPE_FLOAT, 0, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3690  { "nodisp", OPT_TYPE_BOOL, 0, { &display_disable }, "disable graphical display" },
3691  { "noborder", OPT_TYPE_BOOL, 0, { &borderless }, "borderless window" },
3692  { "alwaysontop", OPT_TYPE_BOOL, 0, { &alwaysontop }, "window always on top" },
3693  { "volume", OPT_TYPE_INT, 0, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3694  { "f", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3695  { "stats", OPT_TYPE_BOOL, OPT_EXPERT, { &show_status }, "show status", "" },
3696  { "fast", OPT_TYPE_BOOL, OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3697  { "genpts", OPT_TYPE_BOOL, OPT_EXPERT, { &genpts }, "generate pts", "" },
3698  { "drp", OPT_TYPE_INT, OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3699  { "lowres", OPT_TYPE_INT, OPT_EXPERT, { &lowres }, "", "" },
3700  { "sync", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3701  { "autoexit", OPT_TYPE_BOOL, OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3702  { "exitonkeydown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3703  { "exitonmousedown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3704  { "loop", OPT_TYPE_INT, OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3705  { "framedrop", OPT_TYPE_BOOL, OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3706  { "infbuf", OPT_TYPE_BOOL, OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3707  { "window_title", OPT_TYPE_STRING, 0, { &window_title }, "set window title", "window title" },
3708  { "left", OPT_TYPE_INT, OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3709  { "top", OPT_TYPE_INT, OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3710  { "vf", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3711  { "af", OPT_TYPE_STRING, 0, { &afilters }, "set audio filters", "filter_graph" },
3712  { "rdftspeed", OPT_TYPE_INT, OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3713  { "showmode", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3714  { "i", OPT_TYPE_BOOL, 0, { &dummy}, "read specified file", "input_file"},
3715  { "codec", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3716  { "acodec", OPT_TYPE_STRING, OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3717  { "scodec", OPT_TYPE_STRING, OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3718  { "vcodec", OPT_TYPE_STRING, OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3719  { "autorotate", OPT_TYPE_BOOL, 0, { &autorotate }, "automatically rotate video", "" },
3720  { "find_stream_info", OPT_TYPE_BOOL, OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3721  "read and decode the streams to fill missing information with heuristics" },
3722  { "filter_threads", OPT_TYPE_INT, OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3723  { "enable_vulkan", OPT_TYPE_BOOL, 0, { &enable_vulkan }, "enable vulkan renderer" },
3724  { "vulkan_params", OPT_TYPE_STRING, OPT_EXPERT, { &vulkan_params }, "vulkan configuration using a list of key=value pairs separated by ':'" },
3725  { "hwaccel", OPT_TYPE_STRING, OPT_EXPERT, { &hwaccel }, "use HW accelerated decoding" },
3726  { NULL, },
3727 };
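The options[] table above is ffplay's entire command-line surface: each entry either points parse_options() at a global variable (for example "volume" writes straight into startup_volume) or, for OPT_TYPE_FUNC entries, names a handler such as opt_sync or opt_show_mode. The following standalone sketch only illustrates that two-style dispatch; the struct layout, the "mini_option"/"apply" names and the demo handler are simplified, hypothetical stand-ins, not the real OptionDef/parse_options() code from cmdutils.

/* Standalone sketch (not ffplay/cmdutils code): a simplified dispatch over a
 * table shaped like options[] above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef enum { MINI_OPT_INT, MINI_OPT_FUNC } mini_opt_type;

typedef struct {
    const char   *name;
    mini_opt_type type;
    union {
        int *dst_int;                                      /* "volume"-style entry */
        int (*func_arg)(const char *opt, const char *arg); /* "sync"-style entry */
    } u;
} mini_option;

static int demo_volume = 100;

static int demo_opt_sync(const char *opt, const char *arg)
{
    printf("handler for -%s got \"%s\"\n", opt, arg);
    return 0;
}

static const mini_option demo_table[] = {
    { "volume", MINI_OPT_INT,  { .dst_int  = &demo_volume  } },
    { "sync",   MINI_OPT_FUNC, { .func_arg = demo_opt_sync } },
    { NULL },
};

/* Apply one "-name value" pair the way a parse_options()-style loop would. */
static int apply(const char *name, const char *value)
{
    for (const mini_option *o = demo_table; o->name; o++) {
        if (strcmp(o->name, name))
            continue;
        if (o->type == MINI_OPT_INT) {
            *o->u.dst_int = atoi(value);
            return 0;
        }
        return o->u.func_arg(name, value);
    }
    return -1; /* unknown option */
}

int main(void)
{
    apply("volume", "50");
    apply("sync", "audio");
    printf("demo_volume = %d\n", demo_volume);
    return 0;
}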
3728 
3729 static void show_usage(void)
3730 {
3731  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3732  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3733  av_log(NULL, AV_LOG_INFO, "\n");
3734 }
3735 
3736 void show_help_default(const char *opt, const char *arg)
3737 {
3738  av_log_set_callback(log_callback_help);
3739  show_usage();
3740  show_help_options(options, "Main options:", 0, OPT_EXPERT);
3741  show_help_options(options, "Advanced options:", OPT_EXPERT, 0);
3742  printf("\n");
3743  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3744  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3745  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3746  printf("\nWhile playing:\n"
3747  "q, ESC quit\n"
3748  "f toggle full screen\n"
3749  "p, SPC pause\n"
3750  "m toggle mute\n"
3751  "9, 0 decrease and increase volume respectively\n"
3752  "/, * decrease and increase volume respectively\n"
3753  "a cycle audio channel in the current program\n"
3754  "v cycle video channel\n"
3755  "t cycle subtitle channel in the current program\n"
3756  "c cycle program\n"
3757  "w cycle video filters or show modes\n"
3758  "s activate frame-step mode\n"
3759  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3760  "down/up seek backward/forward 1 minute\n"
3761  "page down/page up seek backward/forward 10 minutes\n"
3762  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3763  "left double-click toggle full screen\n"
3764  );
3765 }
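The two show_help_options() calls above split the same options[] table into a basic and an expert listing purely through the req_flags/rej_flags pair (0/OPT_EXPERT, then OPT_EXPERT/0). A hedged sketch of that predicate, assuming the usual "all required bits set, none of the rejected bits set" rule rather than quoting the actual cmdutils.c body:

/* Sketch only, not the cmdutils.c implementation: an option is listed when
 * its flags contain every bit of req_flags and no bit of rej_flags. */
static int listed(int opt_flags, int req_flags, int rej_flags)
{
    return (opt_flags & req_flags) == req_flags && !(opt_flags & rej_flags);
}
/* "Main options:"     -> listed(flags, 0, OPT_EXPERT)  : everything not expert
 * "Advanced options:" -> listed(flags, OPT_EXPERT, 0)  : only expert entries  */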
3766 
3767 /* Called from the main */
3768 int main(int argc, char **argv)
3769 {
3770  int flags, ret;
3771  VideoState *is;
3772 
3773  init_dynload();
3774 
3775  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3776  parse_loglevel(argc, argv, options);
3777 
3778  /* register all codecs, demux and protocols */
3779 #if CONFIG_AVDEVICE
3780  avdevice_register_all();
3781 #endif
3782  avformat_network_init();
3783
3784  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3785  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3786 
3787  show_banner(argc, argv, options);
3788 
3789  ret = parse_options(NULL, argc, argv, options, opt_input_file);
3790  if (ret < 0)
3791  exit(ret == AVERROR_EXIT ? 0 : 1);
3792 
3793  if (!input_filename) {
3794  show_usage();
3795  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3796  av_log(NULL, AV_LOG_FATAL,
3797  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3798  exit(1);
3799  }
3800 
3801  if (display_disable) {
3802  video_disable = 1;
3803  }
3804  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3805  if (audio_disable)
3806  flags &= ~SDL_INIT_AUDIO;
3807  else {
3808  /* Try to work around an occasional ALSA buffer underflow issue when the
3809  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3810  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3811  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3812  }
3813  if (display_disable)
3814  flags &= ~SDL_INIT_VIDEO;
3815  if (SDL_Init (flags)) {
3816  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3817  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3818  exit(1);
3819  }
3820 
3821  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3822  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3823 
3824  if (!display_disable) {
3825  int flags = SDL_WINDOW_HIDDEN;
3826  if (alwaysontop)
3827 #if SDL_VERSION_ATLEAST(2,0,5)
3828  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3829 #else
3830  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3831 #endif
3832  if (borderless)
3833  flags |= SDL_WINDOW_BORDERLESS;
3834  else
3835  flags |= SDL_WINDOW_RESIZABLE;
3836 
3837 #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
3838  SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
3839 #endif
3840  if (hwaccel && !enable_vulkan) {
3841  av_log(NULL, AV_LOG_INFO, "Enable vulkan renderer to support hwaccel %s\n", hwaccel);
3842  enable_vulkan = 1;
3843  }
3844  if (enable_vulkan) {
3845  vk_renderer = vk_get_renderer();
3846  if (vk_renderer) {
3847 #if SDL_VERSION_ATLEAST(2, 0, 6)
3848  flags |= SDL_WINDOW_VULKAN;
3849 #endif
3850  } else {
3851  av_log(NULL, AV_LOG_WARNING, "Doesn't support vulkan renderer, fallback to SDL renderer\n");
3852  enable_vulkan = 0;
3853  }
3854  }
3855  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3856  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3857  if (!window) {
3858  av_log(NULL, AV_LOG_FATAL, "Failed to create window: %s", SDL_GetError());
3859  do_exit(NULL);
3860  }
3861 
3862  if (vk_renderer) {
3863  AVDictionary *dict = NULL;
3864 
3865  if (vulkan_params) {
3866  int ret = av_dict_parse_string(&dict, vulkan_params, "=", ":", 0);
3867  if (ret < 0) {
3868  av_log(NULL, AV_LOG_FATAL, "Failed to parse, %s\n", vulkan_params);
3869  do_exit(NULL);
3870  }
3871  }
3872  ret = vk_renderer_create(vk_renderer, window, dict);
3873  av_dict_free(&dict);
3874  if (ret < 0) {
3875  av_log(NULL, AV_LOG_FATAL, "Failed to create vulkan renderer, %s\n", av_err2str(ret));
3876  do_exit(NULL);
3877  }
3878  } else {
3879  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3880  if (!renderer) {
3881  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3882  renderer = SDL_CreateRenderer(window, -1, 0);
3883  }
3884  if (renderer) {
3885  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3886  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3887  }
3888  if (!renderer || !renderer_info.num_texture_formats) {
3889  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3890  do_exit(NULL);
3891  }
3892  }
3893  }
3894 
3895  is = stream_open(input_filename, file_iformat);
3896  if (!is) {
3897  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3898  do_exit(NULL);
3899  }
3900 
3901  event_loop(is);
3902 
3903  /* never returns */
3904 
3905  return 0;
3906 }
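For reference, the -vulkan_params handling in main() above hands the option string to av_dict_parse_string() with "=" and ":" as separators and then passes the resulting dictionary to the Vulkan renderer. A minimal standalone sketch of that parsing step, using hypothetical key names:

/* Standalone sketch: how a string such as "debug=1:instance_layers=foo"
 * (hypothetical keys) becomes an AVDictionary, using the same "=" / ":"
 * separators as the -vulkan_params handling above. */
#include <stdio.h>
#include "libavutil/dict.h"

int main(void)
{
    AVDictionary *dict = NULL;
    const AVDictionaryEntry *e = NULL;

    if (av_dict_parse_string(&dict, "debug=1:instance_layers=foo", "=", ":", 0) < 0)
        return 1;
    while ((e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX)))
        printf("%s -> %s\n", e->key, e->value);

    av_dict_free(&dict);
    return 0;
}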
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:605
flags
const SwsFlags flags[]
Definition: swscale.c:61
AVSubtitle
Definition: avcodec.h:2082
rect::w
int w
Definition: f_ebur128.c:78
do_exit
static void do_exit(VideoState *is)
Definition: ffplay.c:1306
AVBufferSrcParameters::color_space
enum AVColorSpace color_space
Video only, the YUV colorspace and range.
Definition: buffersrc.h:121
VideoState::seek_rel
int64_t seek_rel
Definition: ffplay.c:212
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:105
RGB
Definition: cms.c:66
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:433
show_help_options
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:107
AVCodec
AVCodec.
Definition: codec.h:172
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:203
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
VideoState::video_st
AVStream * video_st
Definition: ffplay.c:282
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:370
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
Frame::width
int width
Definition: ffplay.c:159
AVFMT_NO_BYTE_SEEK
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:486
av_clip
#define av_clip
Definition: common.h:100
VideoState::rdft
AVTXContext * rdft
Definition: ffplay.c:263
AudioParams::fmt
enum AVSampleFormat fmt
Definition: ffplay.c:132
av_sync_type
static int av_sync_type
Definition: ffplay.c:325
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
printf
__device__ int printf(const char *,...)
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2087
opt.h
renderer_info
static SDL_RendererInfo renderer_info
Definition: ffplay.c:363
FrameData::pkt_pos
int64_t pkt_pos
Definition: ffplay.c:148
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1483
AVALPHA_MODE_STRAIGHT
@ AVALPHA_MODE_STRAIGHT
Alpha channel is independent of color values.
Definition: pixfmt.h:803
frame_queue_nb_remaining
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:801
VideoState::agraph
AVFilterGraph * agraph
Definition: ffplay.c:297
configure_audio_filters
static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
Definition: ffplay.c:2015
opt_add_vfilter
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:393
frame_queue_next
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:785
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
Decoder::finished
int finished
Definition: ffplay.c:191
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:615
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
FrameData
Definition: ffmpeg.h:681
check_avoptions
int check_avoptions(AVDictionary *m)
Definition: cmdutils.c:1535
frame_queue_last_pos
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:807
av_find_best_stream
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, const AVCodec **decoder_ret, int flags)
Definition: avformat.c:376
out
FILE * out
Definition: movenc.c:55
VideoState::rdft_fn
av_tx_fn rdft_fn
Definition: ffplay.c:264
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1024
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
video_thread
static int video_thread(void *arg)
Definition: ffplay.c:2176
VideoState::av_sync_type
int av_sync_type
Definition: ffplay.c:231
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:659
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
set_default_window_size
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1338
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:513
AV_NOSYNC_THRESHOLD
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:84
AVFormatContext::nb_chapters
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1364
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: codec_par.h:47
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:192
FrameQueue::keep_last
int keep_last
Definition: ffplay.c:173
VideoState::audio_hw_buf_size
int audio_hw_buf_size
Definition: ffplay.c:241
decoder_decode_frame
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:578
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:670
FrameQueue::cond
SDL_cond * cond
Definition: ffplay.c:176
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:155
AVStream::discard
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:815
av_find_program_from_stream
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: avformat.c:325
display_disable
static int display_disable
Definition: ffplay.c:320
screen_width
static int screen_width
Definition: ffplay.c:310
ffplay_renderer.h
DEBUG
#define DEBUG
Definition: vf_framerate.c:29
sws_dict
AVDictionary * sws_dict
Definition: cmdutils.c:56
swr_set_compensation
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:904
SAMPLE_ARRAY_SIZE
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:102
AVTXContext
Definition: tx_priv.h:235
rect
Definition: f_ebur128.c:78
update_volume
static void update_volume(VideoState *is, int sign, double step)
Definition: ffplay.c:1524
AVFMT_NOTIMESTAMPS
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:478
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:263
VideoState::auddec
Decoder auddec
Definition: ffplay.c:225
int64_t
long long int64_t
Definition: coverity.c:34
screen_left
static int screen_left
Definition: ffplay.c:312
AudioParams::frame_size
int frame_size
Definition: ffplay.c:133
AVSubtitleRect
Definition: avcodec.h:2055
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2086
Decoder::next_pts
int64_t next_pts
Definition: ffplay.c:196
decoder_start
static int decoder_start(Decoder *d, int(*fn)(void *), const char *thread_name, void *arg)
Definition: ffplay.c:2165
rect::y
int y
Definition: f_ebur128.c:78
FrameQueue::size
int size
Definition: ffplay.c:171
avformat_get_class
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:190
sws_freeContext
void sws_freeContext(SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2244
av_unused
#define av_unused
Definition: attributes.h:131
normalize.log
log
Definition: normalize.py:21
Frame::sar
AVRational sar
Definition: ffplay.c:162
out_size
int out_size
Definition: movenc.c:56
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
VideoState::vis_texture
SDL_Texture * vis_texture
Definition: ffplay.c:270
queue_picture
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1756
mode
Definition: swscale.c:56
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
AudioParams
Definition: ffplay.c:129
VideoState::subtitle_st
AVStream * subtitle_st
Definition: ffplay.c:275
VideoState::audio_filter_src
struct AudioParams audio_filter_src
Definition: ffplay.c:251
pixdesc.h
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1332
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
VideoState::frame_last_filter_delay
double frame_last_filter_delay
Definition: ffplay.c:280
AVFrame::width
int width
Definition: frame.h:499
VideoState::xleft
int xleft
Definition: ffplay.c:289
AVPacketSideData
This structure stores auxiliary information for decoding, presenting, or otherwise processing the cod...
Definition: packet.h:409
Frame::pts
double pts
Definition: ffplay.c:156
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:767
OPT_INPUT
#define OPT_INPUT
Definition: cmdutils.h:233
frame_queue_init
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:688
subtitle_codec_name
static const char * subtitle_codec_name
Definition: ffplay.c:340
parse_number
int parse_number(const char *context, const char *numstr, enum OptionType type, double min, double max, double *dst)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:84
AV_HWDEVICE_TYPE_NONE
@ AV_HWDEVICE_TYPE_NONE
Definition: hwcontext.h:28
AVPacket::data
uint8_t * data
Definition: packet.h:558
AV_SYNC_VIDEO_MASTER
@ AV_SYNC_VIDEO_MASTER
Definition: ffplay.c:182
EXTERNAL_CLOCK_MIN_FRAMES
#define EXTERNAL_CLOCK_MIN_FRAMES
Definition: ffplay.c:66
b
#define b
Definition: input.c:42
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:190
vk_renderer_create
int vk_renderer_create(VkRenderer *renderer, SDL_Window *window, AVDictionary *opt)
Definition: ffplay_renderer.c:850
AVChapter::start
int64_t start
Definition: avformat.h:1226
Clock
Definition: ffplay.c:137
data
const char data[16]
Definition: mxf.c:149
SAMPLE_QUEUE_SIZE
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:126
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:61
Decoder::queue
PacketQueue * queue
Definition: ffplay.c:188
format_opts
AVDictionary * format_opts
Definition: cmdutils.c:58
AVSEEK_FLAG_BYTE
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2375
AVIOContext::error
int error
contains the error code or 0 if no error happened
Definition: avio.h:239
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:512
VideoState::audio_diff_avg_coef
double audio_diff_avg_coef
Definition: ffplay.c:236
av_hwdevice_find_type_by_name
enum AVHWDeviceType av_hwdevice_find_type_by_name(const char *name)
Look up an AVHWDeviceType by name.
Definition: hwcontext.c:110
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
CURSOR_HIDE_DELAY
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:104
SDL_VOLUME_STEP
#define SDL_VOLUME_STEP
Definition: ffplay.c:75
AVComplexFloat
Definition: tx.h:27
show_help_children
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:140
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:102
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
autorotate
static int autorotate
Definition: ffplay.c:348
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:576
TextureFormatEntry::texture_fmt
int texture_fmt
Definition: ffplay.c:370
video_disable
static int video_disable
Definition: ffplay.c:315
Frame::uploaded
int uploaded
Definition: ffplay.c:163
mathematics.h
AVDictionary
Definition: dict.c:32
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:324
compute_target_delay
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1539
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Frame
Definition: ffplay.c:152
opt_input_file
static int opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3629
stream_close
static void stream_close(VideoState *is)
Definition: ffplay.c:1270
av_read_frame
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: demux.c:1534
SDL_AUDIO_MAX_CALLBACKS_PER_SEC
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:72
vk_renderer_destroy
void vk_renderer_destroy(VkRenderer *renderer)
Definition: ffplay_renderer.c:871
VideoState::paused
int paused
Definition: ffplay.c:206
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:329
init_clock
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1420
codec_type
enum AVMediaType codec_type
Definition: rtp.c:37
AVBufferSrcParameters::height
int height
Definition: buffersrc.h:87
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:326
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
get_master_clock
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1453
VideoState::width
int width
Definition: ffplay.c:289
file_iformat
static const AVInputFormat * file_iformat
Definition: ffplay.c:305
dummy
static int dummy
Definition: ffplay.c:3673
FF_QUIT_EVENT
#define FF_QUIT_EVENT
Definition: ffplay.c:359
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:75
PacketQueue
Definition: ffplay.c:113
av_tx_init
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
Definition: tx.c:903
subtitle_thread
static int subtitle_thread(void *arg)
Definition: ffplay.c:2279
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:599
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
VideoState::last_subtitle_stream
int last_subtitle_stream
Definition: ffplay.c:299
VideoState::SHOW_MODE_NONE
@ SHOW_MODE_NONE
Definition: ffplay.c:258
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:119
OptionDef
Definition: cmdutils.h:191
audio_decode_frame
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2382
subtitle_disable
static int subtitle_disable
Definition: ffplay.c:316
VideoState::pictq
FrameQueue pictq
Definition: ffplay.c:221
genpts
static int genpts
Definition: ffplay.c:329
VideoState::swr_ctx
struct SwrContext * swr_ctx
Definition: ffplay.c:253
opt_sync
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3598
step_to_next_frame
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1531
upload_texture
static int upload_texture(SDL_Texture **tex, AVFrame *frame)
Definition: ffplay.c:905
VideoState::sampq
FrameQueue sampq
Definition: ffplay.c:223
TextureFormatEntry::format
enum AVPixelFormat format
Definition: ffplay.c:369
FrameQueue::rindex
int rindex
Definition: ffplay.c:169
video_display
static void video_display(VideoState *is)
Definition: ffplay.c:1374
AVCodec::max_lowres
uint8_t max_lowres
maximum value for lowres supported by the decoder
Definition: codec.h:192
avformat_close_input
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: demux.c:367
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:696
AVFormatContext::interrupt_callback
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1534
SDL_AUDIO_MIN_BUFFER_SIZE
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:70
startup_volume
static int startup_volume
Definition: ffplay.c:323
window
static SDL_Window * window
Definition: ffplay.c:361
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
A convenience wrapper that allocates and initializes a filter in a single step.
Definition: avfiltergraph.c:140
AV_SYNC_EXTERNAL_CLOCK
@ AV_SYNC_EXTERNAL_CLOCK
Definition: ffplay.c:183
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:167
fifo.h
toggle_full_screen
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3306
Clock::queue_serial
int * queue_serial
Definition: ffplay.c:144
VideoState::extclk
Clock extclk
Definition: ffplay.c:219
VideoState::seek_flags
int seek_flags
Definition: ffplay.c:210
alwaysontop
static int alwaysontop
Definition: ffplay.c:322
VideoState::audio_st
AVStream * audio_st
Definition: ffplay.c:239
packet_queue_init
static int packet_queue_init(PacketQueue *q)
Definition: ffplay.c:469
AVPacket::opaque_ref
AVBufferRef * opaque_ref
AVBufferRef for free use by the API user.
Definition: packet.h:594
AUDIO_DIFF_AVG_NB
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:95
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:1039
fail
#define fail()
Definition: checkasm.h:200
AVBufferSrcParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only, the sample (pixel) aspect ratio.
Definition: buffersrc.h:92
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:85
FrameQueue
Definition: ffplay.c:167
packet_queue_put
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:440
samplefmt.h
AVSubtitleRect::x
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2056
sws_getCachedContext
SwsContext * sws_getCachedContext(SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2336
VideoState::video_stream
int video_stream
Definition: ffplay.c:281
autoexit
static int autoexit
Definition: ffplay.c:332
avio_tell
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:494
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVChapter
Definition: avformat.h:1223
video_image_display
static void video_image_display(VideoState *is)
Definition: ffplay.c:966
val
static double val(void *priv, double ch)
Definition: aeval.c:77
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:770
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
opt_show_mode
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3613
Decoder::empty_queue_cond
SDL_cond * empty_queue_cond
Definition: ffplay.c:193
pts
static int64_t pts
Definition: transcode_aac.c:644
set_clock_speed
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1414
VideoState::audio_diff_threshold
double audio_diff_threshold
Definition: ffplay.c:237
OPT_TYPE_FLOAT
@ OPT_TYPE_FLOAT
Definition: cmdutils.h:86
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:835
VideoState::audio_diff_cum
double audio_diff_cum
Definition: ffplay.c:235
VideoState::last_video_stream
int last_video_stream
Definition: ffplay.c:299
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
fast
static int fast
Definition: ffplay.c:328
loop
static int loop
Definition: ffplay.c:335
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:557
VideoState::rdft_bits
int rdft_bits
Definition: ffplay.c:265
swr_convert
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *const *out_arg, int out_count, const uint8_t *const *in_arg, int in_count)
Convert audio.
Definition: swresample.c:719
opt_height
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3577
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: avcodec.c:412
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
sdl_texture_format_map
static const struct TextureFormatEntry sdl_texture_format_map[]
fn
Definition: ops_tmpl_float.c:116
AVFormatContext::bit_rate
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1406
is_full_screen
static int is_full_screen
Definition: ffplay.c:356
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, const AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:909
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
set_sdl_yuv_conversion_mode
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
Definition: ffplay.c:950
lrint
#define lrint
Definition: tablegen.h:53
Frame::flip_v
int flip_v
Definition: ffplay.c:164
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:236
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
AVFormatContext::metadata
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1496
vk_get_renderer
VkRenderer * vk_get_renderer(void)
Definition: ffplay_renderer.c:843
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
AVInputFormat
Definition: avformat.h:544
audio_thread
static int audio_thread(void *arg)
Definition: ffplay.c:2087
set_clock
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1408
av_dump_format
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:848
VideoState
Definition: ffplay.c:201
frame_queue_peek_next
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:733
sdl_audio_callback
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2492
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
sync_clock_to_slave
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1428
av_tx_fn
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Definition: tx.h:151
swr_init
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:140
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
Definition: buffersink.c:350
avformat_open_input
int avformat_open_input(AVFormatContext **ps, const char *url, const AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: demux.c:221
frame_queue_signal
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:721
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:60
av_channel_layout_describe
int av_channel_layout_describe(const AVChannelLayout *channel_layout, char *buf, size_t buf_size)
Get a human-readable string describing the channel layout properties.
Definition: channel_layout.c:653
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
VideoState::ShowMode
ShowMode
Definition: ffplay.c:257
Decoder::avctx
AVCodecContext * avctx
Definition: ffplay.c:189
s
#define s(width, name)
Definition: cbs_vp9.c:198
show_help_default
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3736
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
default_height
static int default_height
Definition: ffplay.c:309
AVFormatContext::flags
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1415
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:144
AVInputFormat::name
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:549
AVFormatContext::iformat
const struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1276
AV_PIX_FMT_0BGR32
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:516
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:697
AVDictionaryEntry::key
char * key
Definition: dict.h:91
Clock::last_updated
double last_updated
Definition: ffplay.c:140
PacketQueue::duration
int64_t duration
Definition: ffplay.c:117
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:201
AVSubtitleRect::y
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2057
AVCodecParameters::width
int width
Video only.
Definition: codec_par.h:134
video_stream
static AVStream * video_stream
Definition: demux_decode.c:42
calculate_display_rect
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:860
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
screen_height
static int screen_height
Definition: ffplay.c:311
EXTERNAL_CLOCK_SPEED_STEP
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:92
Decoder::pkt_serial
int pkt_serial
Definition: ffplay.c:190
configure_video_filters
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
Definition: ffplay.c:1863
avcodec_receive_frame
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder or encoder (when the AV_CODEC_FLAG_RECON_FRAME flag is used...
Definition: avcodec.c:708
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
AVHWDeviceType
AVHWDeviceType
Definition: hwcontext.h:27
AVMEDIA_TYPE_NB
@ AVMEDIA_TYPE_NB
Definition: avutil.h:205
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:298
av_read_play
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: demux_utils.c:173
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
opt_codec
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3646
AVPacketSideData::data
uint8_t * data
Definition: packet.h:410
Clock::pts_drift
double pts_drift
Definition: ffplay.c:139
VideoState::videoq
PacketQueue videoq
Definition: ffplay.c:283
ctx
AVFormatContext * ctx
Definition: movenc.c:49
av_guess_sample_aspect_ratio
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio.
Definition: avformat.c:660
channels
channels
Definition: aptx.h:31
limits.h
REFRESH_RATE
#define REFRESH_RATE
Definition: ffplay.c:98
FrameQueue::rindex_shown
int rindex_shown
Definition: ffplay.c:174
nb_streams
static int nb_streams
Definition: ffprobe.c:340
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
vk_renderer_get_hw_dev
int vk_renderer_get_hw_dev(VkRenderer *renderer, AVBufferRef **dev)
Definition: ffplay_renderer.c:856
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2088
VideoState::force_refresh
int force_refresh
Definition: ffplay.c:205
get_clock
static double get_clock(Clock *c)
Definition: ffplay.c:1388
av_opt_set_array
int av_opt_set_array(void *obj, const char *name, int search_flags, unsigned int start_elem, unsigned int nb_elems, enum AVOptionType val_type, const void *val)
Add, replace, or remove elements for an array option.
Definition: opt.c:2283
vk_renderer_display
int vk_renderer_display(VkRenderer *renderer, AVFrame *frame)
Definition: ffplay_renderer.c:861
screen_top
static int screen_top
Definition: ffplay.c:313
VideoState::audio_diff_avg_count
int audio_diff_avg_count
Definition: ffplay.c:238
EXTERNAL_CLOCK_SPEED_MIN
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:90
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
renderer
static SDL_Renderer * renderer
Definition: ffplay.c:362
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
SwrContext
The libswresample context.
Definition: swresample_internal.h:95
vp_duration
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1569
NAN
#define NAN
Definition: mathematics.h:115
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:514
VideoState::step
int step
Definition: ffplay.c:290
synchronize_audio
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2334
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
window_title
static const char * window_title
Definition: ffplay.c:307
Clock::speed
double speed
Definition: ffplay.c:141
VideoState::SHOW_MODE_VIDEO
@ SHOW_MODE_VIDEO
Definition: ffplay.c:258
AVBufferSrcParameters::alpha_mode
enum AVAlphaMode alpha_mode
Video only, the alpha mode.
Definition: buffersrc.h:130
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
AVFormatContext
Format I/O context.
Definition: avformat.h:1264
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:470
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:647
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:75
opts
AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:767
AVPacket::buf
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:541
avcodec_parameters_to_context
int avcodec_parameters_to_context(AVCodecContext *codec, const struct AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
AVSubtitleRect::w
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:2058
seek_chapter
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3341
VkRenderer
Definition: ffplay_renderer.c:49
get_master_sync_type
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1436
avcodec_get_class
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:184
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
frame_queue_destroy
static void frame_queue_destroy(FrameQueue *f)
Definition: ffplay.c:709
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1414
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:783
NULL
#define NULL
Definition: coverity.c:32
avcodec_find_decoder_by_name
const AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: allcodecs.c:1055
FrameQueue::max_size
int max_size
Definition: ffplay.c:172
format
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image format
Definition: swscale-v2.txt:14
AV_DICT_MULTIKEY
#define AV_DICT_MULTIKEY
Allow to store several equal keys in the dictionary.
Definition: dict.h:84
OPT_EXPERT
#define OPT_EXPERT
Definition: cmdutils.h:207
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:74
Decoder
Definition: ffmpeg.h:446
AudioParams::freq
int freq
Definition: ffplay.c:130
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:342
AudioParams::ch_layout
AVChannelLayout ch_layout
Definition: ffplay.c:131
audio_open
static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
Definition: ffplay.c:2535
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:591
filter_codec_opts
int filter_codec_opts(const AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, const AVCodec *codec, AVDictionary **dst, AVDictionary **opts_used)
Filter out options for given codec.
Definition: cmdutils.c:1353
stream_cycle_channel
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3227
VideoState::frame_drops_late
int frame_drops_late
Definition: ffplay.c:255
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:337
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1306
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:480
frame_queue_unref_item
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:682
FrameQueue::queue
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:168
VideoState::last_i_start
int last_i_start
Definition: ffplay.c:262
Decoder::packet_pending
int packet_pending
Definition: ffplay.c:192
cursor_last_shown
static int64_t cursor_last_shown
Definition: ffplay.c:343
options
Definition: swscale.c:43
frame_queue_peek
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:728
avfilter_inout_alloc
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:71
Frame::duration
double duration
Definition: ffplay.c:157
lowres
static int lowres
Definition: ffplay.c:330
double
double
Definition: af_crystalizer.c:132
AV_DICT_DONT_OVERWRITE
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:81
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
Definition: pixfmt.h:93
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:145
time.h
Frame::pos
int64_t pos
Definition: ffplay.c:158
VideoState::frame_last_returned_time
double frame_last_returned_time
Definition: ffplay.c:279
set_clock_at
static void set_clock_at(Clock *c, double pts, int serial, double time)
Definition: ffplay.c:1400
toggle_pause
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1513
TextureFormatEntry
Definition: ffplay.c:368
AVFilterGraph
Definition: avfilter.h:589
AV_SYNC_AUDIO_MASTER
@ AV_SYNC_AUDIO_MASTER
Definition: ffplay.c:181
stream_component_open
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2648
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:244
AV_OPT_TYPE_CHLAYOUT
@ AV_OPT_TYPE_CHLAYOUT
Underlying C type is AVChannelLayout.
Definition: opt.h:331
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: packet.c:441
AVCodecParameters::ch_layout
AVChannelLayout ch_layout
Audio only.
Definition: codec_par.h:180
VideoState::rdft_data
AVComplexFloat * rdft_data
Definition: ffplay.c:267
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: packet.c:490
AV_PIX_FMT_NE
#define AV_PIX_FMT_NE(be, le)
Definition: pixfmt.h:508
exp
int8_t exp
Definition: eval.c:73
VideoState::seek_req
int seek_req
Definition: ffplay.c:209
VideoState::SHOW_MODE_WAVES
@ SHOW_MODE_WAVES
Definition: ffplay.c:258
VideoState::audio_clock
double audio_clock
Definition: ffplay.c:233
VideoState::read_pause_return
int read_pause_return
Definition: ffplay.c:213
event_loop
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3369
swresample.h
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
VideoState::ytop
int ytop
Definition: ffplay.c:289
AVCodecParameters::sample_rate
int sample_rate
Audio only.
Definition: codec_par.h:184
av_packet_side_data_get
const AVPacketSideData * av_packet_side_data_get(const AVPacketSideData *sd, int nb_sd, enum AVPacketSideDataType type)
Get side information from a side data array.
Definition: packet.c:660
avcodec_find_decoder
const AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: allcodecs.c:1027
VideoState::sample_array
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:260
AVFormatContext::nb_streams
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1320
exit_on_mousedown
static int exit_on_mousedown
Definition: ffplay.c:334
AVBufferSrcParameters::frame_rate
AVRational frame_rate
Video only, the frame rate of the input video.
Definition: buffersrc.h:100
AVAlphaMode
AVAlphaMode
Correlation between the alpha channel and color values.
Definition: pixfmt.h:800
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:594
VideoState::iformat
const AVInputFormat * iformat
Definition: ffplay.c:203
Decoder::next_pts_tb
AVRational next_pts_tb
Definition: ffplay.c:197
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1697
VideoState::audioq
PacketQueue audioq
Definition: ffplay.c:240
codec_opts
AVDictionary * codec_opts
Definition: cmdutils.c:58
audio_callback_time
static int64_t audio_callback_time
Definition: ffplay.c:357
AV_OPT_FLAG_FILTERING_PARAM
#define AV_OPT_FLAG_FILTERING_PARAM
A generic parameter which can be set by the user for filtering.
Definition: opt.h:381
avformat_find_stream_info
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: demux.c:2512
Frame::format
int format
Definition: ffplay.c:161
INSERT_FILT
#define INSERT_FILT(name, arg)
f
f
Definition: af_crystalizer.c:122
swr_alloc_set_opts2
int swr_alloc_set_opts2(struct SwrContext **ps, const AVChannelLayout *out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, const AVChannelLayout *in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:40
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:495
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:519
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
OPT_TYPE_INT
@ OPT_TYPE_INT
Definition: cmdutils.h:84
AVMediaType
AVMediaType
Definition: avutil.h:198
av_log_set_callback
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:490
AVPacket::size
int size
Definition: packet.h:559
VideoState::in_audio_filter
AVFilterContext * in_audio_filter
Definition: ffplay.c:295
AVFifo
Definition: fifo.c:35
avformat_match_stream_specifier
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
Definition: avformat.c:614
VideoState::audio_write_buf_size
int audio_write_buf_size
Definition: ffplay.c:247
avformat_alloc_context
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:162
AVDISCARD_DEFAULT
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: defs.h:227
height
#define height
Definition: dsp.h:89
FrameQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:175
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:319
frame_queue_peek_writable
static Frame * frame_queue_peek_writable(FrameQueue *f)
Definition: ffplay.c:743
OPT_AUDIO
#define OPT_AUDIO
Definition: cmdutils.h:209
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:107
Frame::sub
AVSubtitle sub
Definition: ffplay.c:154
VideoState::last_audio_stream
int last_audio_stream
Definition: ffplay.c:299
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
vfilters_list
static const char ** vfilters_list
Definition: ffplay.c:345
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:590
create_hwaccel
static int create_hwaccel(AVBufferRef **device_ctx)
Definition: ffplay.c:2611
decoder_init
static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
Definition: ffplay.c:565
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
sdl_supported_color_spaces
static enum AVColorSpace sdl_supported_color_spaces[]
Definition: ffplay.c:939
start_time
static int64_t start_time
Definition: ffplay.c:326
audio_stream
static AVStream * audio_stream
Definition: demux_decode.c:42
VideoState::SHOW_MODE_NB
@ SHOW_MODE_NB
Definition: ffplay.c:258
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1031
Frame::serial
int serial
Definition: ffplay.c:155
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.
Definition: avcodec.h:542
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:368
uninit_opts
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:62
size
int size
Definition: twinvq_data.h:10344
VideoState::xpos
int xpos
Definition: ffplay.c:268
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
read_thread
static int read_thread(void *arg)
Definition: ffplay.c:2838
AV_PIX_FMT_BGR555
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:532
avformat_seek_file
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: seek.c:664
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
Clock::paused
int paused
Definition: ffplay.c:143
rect::h
int h
Definition: f_ebur128.c:78
VideoState::sub_texture
SDL_Texture * sub_texture
Definition: ffplay.c:271
swr_free
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:121
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
VideoState::vid_texture
SDL_Texture * vid_texture
Definition: ffplay.c:272
OPT_TYPE_INT64
@ OPT_TYPE_INT64
Definition: cmdutils.h:85
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:514
VideoState::sample_array_index
int sample_array_index
Definition: ffplay.c:261
wanted_stream_spec
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
Definition: ffplay.c:317
infinite_buffer
static int infinite_buffer
Definition: ffplay.c:337
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2085
VideoState::max_frame_duration
double max_frame_duration
Definition: ffplay.c:284
avdevice.h
show_banner
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: opt_common.c:235
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
packet_queue_destroy
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:503
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:557
AVBufferSrcParameters::time_base
AVRational time_base
The timebase to be used for the timestamps on the input frames.
Definition: buffersrc.h:82
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:511
VideoState::frame_drops_early
int frame_drops_early
Definition: ffplay.c:254
update_video_pts
static void update_video_pts(VideoState *is, double pts, int serial)
Definition: ffplay.c:1581
a
The reader does not expect b to be semantically negative here, and if the code is changed, maybe by adding a division or other operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, SUINT a
Definition: undefined.txt:41
toggle_mute
static void toggle_mute(VideoState *is)
Definition: ffplay.c:1519
decoder_abort
static void decoder_abort(Decoder *d, FrameQueue *fq)
Definition: ffplay.c:816
video_refresh
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1589
AV_CHANNEL_ORDER_NATIVE
@ AV_CHANNEL_ORDER_NATIVE
The native channel order, i.e.
Definition: channel_layout.h:125
seek_interval
static float seek_interval
Definition: ffplay.c:319
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:64
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:233
rect::x
int x
Definition: f_ebur128.c:78
VideoState::seek_pos
int64_t seek_pos
Definition: ffplay.c:211
OPT_TYPE_FUNC
@ OPT_TYPE_FUNC
Definition: cmdutils.h:81
frame_queue_push
static void frame_queue_push(FrameQueue *f)
Definition: ffplay.c:775
audio_dev
static SDL_AudioDeviceID audio_dev
Definition: ffplay.c:364
OPT_TYPE_BOOL
@ OPT_TYPE_BOOL
Definition: cmdutils.h:82
av_tx_uninit
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
Definition: tx.c:295
sigterm_handler
static void sigterm_handler(int sig)
Definition: ffplay.c:1333
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:809
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
packet_queue_abort
static void packet_queue_abort(PacketQueue *q)
Definition: ffplay.c:511
video_codec_name
static const char * video_codec_name
Definition: ffplay.c:341
buffersink.h
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
iformat
static const AVInputFormat * iformat
Definition: ffprobe.c:332
packet_queue_flush
static void packet_queue_flush(PacketQueue *q)
Definition: ffplay.c:489
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:839
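A small sketch of the channel-layout helpers referenced in this index (av_channel_layout_default, av_channel_layout_copy, av_channel_layout_uninit); the function name make_default_layout is hypothetical.

#include <libavutil/channel_layout.h>

/* Build the default (native-order) layout for nb_channels channels
 * and deep-copy it into the caller's struct. */
static int make_default_layout(int nb_channels, AVChannelLayout *dst)
{
    AVChannelLayout layout = { 0 };
    int ret;
    av_channel_layout_default(&layout, nb_channels); /* e.g. 2 -> stereo, 6 -> 5.1 */
    ret = av_channel_layout_copy(dst, &layout);
    av_channel_layout_uninit(&layout);
    return ret;
}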
packet_queue_get
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:531
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:40
MAX_QUEUE_SIZE
#define MAX_QUEUE_SIZE
Definition: ffplay.c:64
avcodec_get_name
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:406
MIN_FRAMES
#define MIN_FRAMES
Definition: ffplay.c:65
nb_vfilters
static int nb_vfilters
Definition: ffplay.c:346
VideoState::queue_attachments_req
int queue_attachments_req
Definition: ffplay.c:208
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:704
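A sketch of the standard send/receive decoding loop that avcodec_send_packet() belongs to; this is the generic libavcodec pattern, not ffplay's own decoder_decode_frame(), and decode_packet is an illustrative name.

#include <libavcodec/avcodec.h>

/* Feed one packet (NULL flushes the decoder) and drain all frames it yields. */
static int decode_packet(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(avctx, pkt);
    if (ret < 0)
        return ret;
    for (;;) {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;               /* needs more input / fully drained */
        if (ret < 0)
            return ret;
        /* ... use the frame ... */
        av_frame_unref(frame);
    }
}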
FrameQueue::windex
int windex
Definition: ffplay.c:170
VideoState::filename
char * filename
Definition: ffplay.c:288
VideoState::muted
int muted
Definition: ffplay.c:249
Decoder::start_pts
int64_t start_pts
Definition: ffplay.c:194
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:507
bprint.h
Clock::pts
double pts
Definition: ffplay.c:138
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:121
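A sketch showing how av_buffersrc_parameters_alloc() and av_buffersrc_parameters_set() are typically paired before a buffer source is initialized; set_src_params and the subset of fields filled in are illustrative only.

#include <libavfilter/buffersrc.h>
#include <libavutil/mem.h>

static int set_src_params(AVFilterContext *src, int w, int h, AVRational tb)
{
    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
    int ret;
    if (!par)
        return AVERROR(ENOMEM);
    par->width     = w;   /* display dimensions of the input frames */
    par->height    = h;
    par->time_base = tb;  /* timebase of the timestamps on input frames */
    ret = av_buffersrc_parameters_set(src, par);
    av_freep(&par);
    return ret;
}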
av_hwdevice_ctx_create_derived
int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ref_ptr, enum AVHWDeviceType type, AVBufferRef *src_ref, int flags)
Create a new device of the specified type from an existing device.
Definition: hwcontext.c:718
VIDEO_PICTURE_QUEUE_SIZE
#define VIDEO_PICTURE_QUEUE_SIZE
Definition: ffplay.c:124
PacketQueue::serial
int serial
Definition: ffplay.c:119
AVSubtitle::format
uint16_t format
Definition: avcodec.h:2083
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:551
VideoState::show_mode
enum VideoState::ShowMode show_mode
AVBufferSrcParameters::width
int width
Video only, the display dimensions of the input frames.
Definition: buffersrc.h:87
VideoState::audio_src
struct AudioParams audio_src
Definition: ffplay.c:250
program_birth_year
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffplay.c:62
VideoState::audio_buf1
uint8_t * audio_buf1
Definition: ffplay.c:243
OPT_TYPE_TIME
@ OPT_TYPE_TIME
Definition: cmdutils.h:88
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:108
avfilter_graph_parse_ptr
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:920
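A sketch of the usual avfilter_graph_parse_ptr() call shape, splicing a textual filter description between an existing source and sink; it only loosely mirrors the pattern of configure_filtergraph(), and parse_between is a hypothetical helper.

#include <libavfilter/avfilter.h>
#include <libavutil/mem.h>

static int parse_between(AVFilterGraph *graph, const char *filters,
                         AVFilterContext *src, AVFilterContext *sink)
{
    AVFilterInOut *in  = avfilter_inout_alloc();
    AVFilterInOut *out = avfilter_inout_alloc();
    int ret = AVERROR(ENOMEM);

    if (!in || !out)
        goto end;
    out->name = av_strdup("in");  out->filter_ctx = src;  out->pad_idx = 0; out->next = NULL;
    in->name  = av_strdup("out"); in->filter_ctx  = sink; in->pad_idx = 0; in->next  = NULL;
    ret = avfilter_graph_parse_ptr(graph, filters, &in, &out, NULL);
end:
    avfilter_inout_free(&in);
    avfilter_inout_free(&out);
    return ret;
}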
setup_find_stream_info_opts
int setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *local_codec_opts, AVDictionary ***dst)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:1421
swr_opts
AVDictionary * swr_opts
Definition: cmdutils.c:57
compute_mod
static int compute_mod(int a, int b)
Definition: ffplay.c:1056
Decoder::start_pts_tb
AVRational start_pts_tb
Definition: ffplay.c:195
AVCodecParameters::height
int height
Definition: codec_par.h:135
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:253
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:488
Decoder::pkt
AVPacket * pkt
Definition: ffplay.c:187
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
video_open
static int video_open(VideoState *is)
Definition: ffplay.c:1350
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:690
get_sdl_pix_fmt_and_blendmode
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
Definition: ffplay.c:887
show_status
static int show_status
Definition: ffplay.c:324
opt_format
static int opt_format(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3588
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
parse_options
int parse_options(void *optctx, int argc, char **argv, const OptionDef *options, int(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:420
AV_PIX_FMT_RGB555
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:527
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:523
vk_renderer_resize
int vk_renderer_resize(VkRenderer *renderer, int width, int height)
Definition: ffplay_renderer.c:866
borderless
static int borderless
Definition: ffplay.c:321
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
update_sample_display
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2314
MyAVPacketList
Definition: ffplay.c:108
OPT_FUNC_ARG
#define OPT_FUNC_ARG
Definition: cmdutils.h:201
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:58
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
AVProgram
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1188
AVCodecContext::hw_device_ctx
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:1475
av_read_pause
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
Definition: demux_utils.c:182
len
int len
Definition: vorbis_enc_data.h:426
Frame::frame
AVFrame * frame
Definition: ffplay.c:153
AV_PIX_FMT_BGR565
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:531
av_samples_get_buffer_size
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:121
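A sketch of sizing an interleaved audio buffer with av_samples_get_buffer_size(); alloc_s16_buffer is an illustrative helper, and packed signed 16-bit samples are assumed.

#include <stdint.h>
#include <libavutil/samplefmt.h>
#include <libavutil/mem.h>

static uint8_t *alloc_s16_buffer(int nb_channels, int nb_samples, int *out_size)
{
    int size = av_samples_get_buffer_size(NULL, nb_channels, nb_samples,
                                          AV_SAMPLE_FMT_S16, 1 /* no alignment */);
    if (size < 0)
        return NULL;
    *out_size = size;
    return av_malloc(size);  /* caller releases it with av_freep() */
}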
AVFilterPad::name
const char * name
Pad name.
Definition: filters.h:45
vk_renderer
static VkRenderer * vk_renderer
Definition: ffplay.c:366
PacketQueue::nb_packets
int nb_packets
Definition: ffplay.c:115
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
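A sketch of the a*b/c rescaling that av_rescale() performs without intermediate overflow, here converting a stream timestamp into AV_TIME_BASE (microsecond) units; ts_to_usec is an illustrative name.

#include <stdint.h>
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

static int64_t ts_to_usec(int64_t pts, AVRational stream_tb)
{
    /* pts * num / den seconds, expressed in AV_TIME_BASE units */
    return av_rescale(pts, stream_tb.num * (int64_t)AV_TIME_BASE, stream_tb.den);
}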
sdl_supported_alpha_modes
static enum AVAlphaMode sdl_supported_alpha_modes[]
Definition: ffplay.c:945
FRAME_QUEUE_SIZE
#define FRAME_QUEUE_SIZE
Definition: ffplay.c:127
frame_queue_peek_readable
static Frame * frame_queue_peek_readable(FrameQueue *f)
Definition: ffplay.c:759
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
AVIOInterruptCB::opaque
void * opaque
Definition: avio.h:61
AV_PIX_FMT_RGB565
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:526
parse_loglevel
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:556
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
Clock::serial
int serial
Definition: ffplay.c:142
VideoState::height
int height
Definition: ffplay.c:289
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
AVStream::disposition
int disposition
Stream disposition - a combination of AV_DISPOSITION_* flags.
Definition: avformat.h:813
AVFMT_FLAG_GENPTS
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1416
VideoState::subpq
FrameQueue subpq
Definition: ffplay.c:222
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
seek_by_bytes
static int seek_by_bytes
Definition: ffplay.c:318
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:744
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
EXTERNAL_CLOCK_MAX_FRAMES
#define EXTERNAL_CLOCK_MAX_FRAMES
Definition: ffplay.c:67
av_guess_frame_rate
AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: avformat.c:683
AVSubtitleRect::h
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:2059
stream_open
static VideoState * stream_open(const char *filename, const AVInputFormat *iformat)
Definition: ffplay.c:3168
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVALPHA_MODE_UNSPECIFIED
@ AVALPHA_MODE_UNSPECIFIED
Unknown alpha handling, or no alpha channel.
Definition: pixfmt.h:801
avcodec_flush_buffers
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: avcodec.c:380
frame
These buffered frames must be flushed immediately if a new input produces new output. If the input is not enough to produce output, the filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input, so any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly, until at least one frame has been pushed. Return, or at least make progress towards producing, a frame.
Definition: filter_design.txt:265
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:515
VideoState::vfilter_idx
int vfilter_idx
Definition: ffplay.c:292
filter_nbthreads
static int filter_nbthreads
Definition: ffplay.c:350
log_callback_help
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:70
cursor_hidden
static int cursor_hidden
Definition: ffplay.c:344
VideoState::SHOW_MODE_RDFT
@ SHOW_MODE_RDFT
Definition: ffplay.c:258
av_hwdevice_ctx_create
int av_hwdevice_ctx_create(AVBufferRef **pdevice_ref, enum AVHWDeviceType type, const char *device, AVDictionary *opts, int flags)
Open a device of the specified type and create an AVHWDeviceContext for it.
Definition: hwcontext.c:615
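A sketch loosely following the shape of create_hwaccel(): open a hardware device by type name and hand the reference to a decoder. attach_hw_device is a hypothetical helper and no device-specific options are passed.

#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>

static int attach_hw_device(AVCodecContext *avctx, const char *type_name)
{
    enum AVHWDeviceType type = av_hwdevice_find_type_by_name(type_name); /* e.g. "vaapi", "cuda" */
    AVBufferRef *device = NULL;
    int ret;

    if (type == AV_HWDEVICE_TYPE_NONE)
        return AVERROR(ENOSYS);
    ret = av_hwdevice_ctx_create(&device, type, NULL, NULL, 0);
    if (ret < 0)
        return ret;
    avctx->hw_device_ctx = device;  /* freed later by avcodec_free_context() */
    return 0;
}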
find_stream_info
static int find_stream_info
Definition: ffplay.c:349
packet_queue_put_private
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:417
hwaccel
static const char * hwaccel
Definition: ffplay.c:353
pos
unsigned int pos
Definition: spdifenc.c:414
VideoState::audio_buf_index
int audio_buf_index
Definition: ffplay.c:246
avformat.h
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:122
VideoState::out_video_filter
AVFilterContext * out_video_filter
Definition: ffplay.c:294
dict.h
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
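A sketch of the auto-growing FIFO-of-structs pattern that PacketQueue's pkt_list uses; the Item struct and fifo_roundtrip are illustrative only.

#include <libavutil/error.h>
#include <libavutil/fifo.h>

typedef struct Item { void *ptr; int serial; } Item;

static int fifo_roundtrip(void)
{
    AVFifo *f = av_fifo_alloc2(8, sizeof(Item), AV_FIFO_FLAG_AUTO_GROW);
    Item in = { NULL, 1 }, out;
    int ret;

    if (!f)
        return AVERROR(ENOMEM);
    ret = av_fifo_write(f, &in, 1);   /* counts are in elements, not bytes */
    if (ret >= 0)
        ret = av_fifo_read(f, &out, 1);
    av_fifo_freep2(&f);
    return ret;
}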
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:524
VideoState::last_paused
int last_paused
Definition: ffplay.c:207
AV_LOG_SKIP_REPEATED
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:400
remove_avoptions
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: cmdutils.c:1526
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:88
CMDUTILS_COMMON_OPTIONS
#define CMDUTILS_COMMON_OPTIONS
Definition: opt_common.h:199
AV_DICT_MATCH_CASE
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:74
rdftspeed
double rdftspeed
Definition: ffplay.c:342
AV_TX_FLOAT_RDFT
@ AV_TX_FLOAT_RDFT
Real to complex and complex to real DFTs.
Definition: tx.h:90
MyAVPacketList::serial
int serial
Definition: ffplay.c:110
opt_width
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3566
enable_vulkan
static int enable_vulkan
Definition: ffplay.c:351
main
int main(int argc, char **argv)
Definition: ffplay.c:3768
avformat_network_deinit
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:569
show_usage
static void show_usage(void)
Definition: ffplay.c:3729
AVBufferSrcParameters::color_range
enum AVColorRange color_range
Definition: buffersrc.h:122
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVFrame::height
int height
Definition: frame.h:499
PacketQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:120
packet_queue_start
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:522
VideoState::vidclk
Clock vidclk
Definition: ffplay.c:218
audio_codec_name
static const char * audio_codec_name
Definition: ffplay.c:339
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AV_SYNC_FRAMEDUP_THRESHOLD
#define AV_SYNC_FRAMEDUP_THRESHOLD
Definition: ffplay.c:82
show_mode
static enum ShowMode show_mode
Definition: ffplay.c:338
PacketQueue::cond
SDL_cond * cond
Definition: ffplay.c:121
AVBufferSrcParameters::format
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format,...
Definition: buffersrc.h:78
PacketQueue::size
int size
Definition: ffplay.c:116
sws_scale
int attribute_align_arg sws_scale(SwsContext *sws, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:1502
opt_common.h
AVInputFormat::flags
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:563
avfilter_init_dict
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
Initialize a filter with the supplied dictionary of options.
Definition: avfilter.c:918
AVRational::den
int den
Denominator.
Definition: rational.h:60
VideoState::in_video_filter
AVFilterContext * in_video_filter
Definition: ffplay.c:293
VideoState::subtitle_stream
int subtitle_stream
Definition: ffplay.c:274
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
avfilter.h
VideoState::abort_request
int abort_request
Definition: ffplay.c:204
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:825
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:442
VideoState::audio_buf1_size
unsigned int audio_buf1_size
Definition: ffplay.c:245
VideoState::eof
int eof
Definition: ffplay.c:286
av_dict_parse_string
int av_dict_parse_string(AVDictionary **pm, const char *str, const char *key_val_sep, const char *pairs_sep, int flags)
Parse the key/value pairs list and add the parsed entries to a dictionary.
Definition: dict.c:210
AV_SYNC_THRESHOLD_MAX
#define AV_SYNC_THRESHOLD_MAX
Definition: ffplay.c:80
decoder_destroy
static void decoder_destroy(Decoder *d)
Definition: ffplay.c:677
av_get_packed_sample_fmt
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:77
VideoState::read_tid
SDL_Thread * read_tid
Definition: ffplay.c:202
VideoState::audio_volume
int audio_volume
Definition: ffplay.c:248
VideoState::subdec
Decoder subdec
Definition: ffplay.c:227
AVIOContext::eof_reached
int eof_reached
true if it was unable to read due to error or EOF
Definition: avio.h:238
stream_has_enough_packets
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
Definition: ffplay.c:2814
samples
The word “frame” indicates either a video frame or a group of audio samples.
Definition: filter_design.txt:8
VideoState::out_audio_filter
AVFilterContext * out_audio_filter
Definition: ffplay.c:296
av_find_input_format
const AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:146
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
AV_OPT_TYPE_PIXEL_FMT
@ AV_OPT_TYPE_PIXEL_FMT
Underlying C type is enum AVPixelFormat.
Definition: opt.h:307
AVFormatContext::duration
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1399
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVPacket::stream_index
int stream_index
Definition: packet.h:560
GROW_ARRAY
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:532
SUBPICTURE_QUEUE_SIZE
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:125
AVFilterContext
An instance of a filter.
Definition: avfilter.h:274
input_filename
static const char * input_filename
Definition: ffplay.c:306
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:449
stream_toggle_pause
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1500
VideoState::continue_read_thread
SDL_cond * continue_read_thread
Definition: ffplay.c:301
vulkan_params
static char * vulkan_params
Definition: ffplay.c:352
av_dict_set_int
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set() that converts the value to a string and stores it.
Definition: dict.c:177
toggle_audio_display
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3312
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:439
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
A generic parameter which can be set by the user for demuxing or decoding.
Definition: opt.h:356
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
AVFMT_TS_DISCONT
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:480
VideoState::real_data
float * real_data
Definition: ffplay.c:266
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
get_video_frame
static int get_video_frame(VideoState *is, AVFrame *frame)
Definition: ffplay.c:1787
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
default_width
static int default_width
Definition: ffplay.c:308
configure_filtergraph
static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph, AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
Definition: ffplay.c:1820
AVIOInterruptCB::callback
int(* callback)(void *)
Definition: avio.h:60
VideoState::realtime
int realtime
Definition: ffplay.c:215
VideoState::sub_convert_ctx
struct SwsContext * sub_convert_ctx
Definition: ffplay.c:285
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVDictionaryEntry
Definition: dict.h:90
OPT_TYPE_STRING
@ OPT_TYPE_STRING
Definition: cmdutils.h:83
AVPacket
This structure stores compressed data.
Definition: packet.h:535
audio_disable
static int audio_disable
Definition: ffplay.c:314
refresh_loop_wait_event
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3324
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
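A sketch of the grow-only scratch-buffer reuse pattern behind audio_buf1/audio_buf1_size; get_scratch and the static variables are illustrative.

#include <stdint.h>
#include <libavutil/mem.h>

static uint8_t *scratch      = NULL;
static unsigned scratch_size = 0;

static uint8_t *get_scratch(size_t needed)
{
    /* Reallocates only when 'needed' exceeds the current allocation;
     * on failure the buffer is freed and scratch_size reset to 0. */
    av_fast_malloc(&scratch, &scratch_size, needed);
    return scratch;
}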
stream_component_close
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:1212
VideoState::subtitleq
PacketQueue subtitleq
Definition: ffplay.c:276
cmdutils.h
AVPacket::pos
int64_t pos
byte position in stream, -1 if unknown
Definition: packet.h:578
cmp_audio_fmts
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:407
Decoder::decoder_tid
SDL_Thread * decoder_tid
Definition: ffplay.c:198
int32_t
int32_t
Definition: audioconvert.c:56
framedrop
static int framedrop
Definition: ffplay.c:336
VideoState::audio_stream
int audio_stream
Definition: ffplay.c:229
VideoState::audio_buf_size
unsigned int audio_buf_size
Definition: ffplay.c:244
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:472
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVFormatContext::start_time
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1389
PacketQueue::abort_request
int abort_request
Definition: ffplay.c:118
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
VideoState::ic
AVFormatContext * ic
Definition: ffplay.c:214
VideoState::viddec
Decoder viddec
Definition: ffplay.c:226
ns
#define ns(max_value, name, subs,...)
Definition: cbs_av1.c:640
h
h
Definition: vp9dsp_template.c:2070
AVERROR_EXIT
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:58
AVDictionaryEntry::value
char * value
Definition: dict.h:92
AVStream::start_time
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:793
VideoState::audio_buf
uint8_t * audio_buf
Definition: ffplay.c:242
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:592
avstring.h
frame_queue_peek_last
static Frame * frame_queue_peek_last(FrameQueue *f)
Definition: ffplay.c:738
VideoState::last_vis_time
double last_vis_time
Definition: ffplay.c:269
width
#define width
Definition: dsp.h:89
stream_seek
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
Definition: ffplay.c:1486
decoder_reorder_pts
static int decoder_reorder_pts
Definition: ffplay.c:331
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:692
VideoState::audio_tgt
struct AudioParams audio_tgt
Definition: ffplay.c:252
afilters
static char * afilters
Definition: ffplay.c:347
AVChapter::time_base
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1225
SwsContext
Main external API structure.
Definition: swscale.h:189
VideoState::audclk
Clock audclk
Definition: ffplay.c:217
avfilter_get_class
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1660
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:746
print_error
static void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.h:468
FrameQueue::pktq
PacketQueue * pktq
Definition: ffplay.c:177
short
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this, just let it be. vf: default, minimum, maximum, flags; the name is the option name (keep it simple and lowercase); descriptions are short.
Definition: writing_filters.txt:89
snprintf
#define snprintf
Definition: snprintf.h:34
video_audio_display
static void video_audio_display(VideoState *s)
Definition: ffplay.c:1061
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
AV_SYNC_THRESHOLD_MIN
#define AV_SYNC_THRESHOLD_MIN
Definition: ffplay.c:78
buffersrc.h
AudioParams::bytes_per_sec
int bytes_per_sec
Definition: ffplay.c:134
check_external_clock_speed
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1471
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:42
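A sketch of the standard av_dict_iterate() loop, the iteration style used when reporting leftover options; dump_dict is an illustrative name.

#include <libavutil/dict.h>
#include <libavutil/log.h>

static void dump_dict(const AVDictionary *d)
{
    const AVDictionaryEntry *e = NULL;
    while ((e = av_dict_iterate(d, e)))
        av_log(NULL, AV_LOG_INFO, "%s=%s\n", e->key, e->value);
}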
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2084
SAMPLE_CORRECTION_PERCENT_MAX
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:87
EXTERNAL_CLOCK_SPEED_MAX
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:91
packet_queue_put_nullpacket
static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
Definition: ffplay.c:462
duration
static int64_t duration
Definition: ffplay.c:327
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
MyAVPacketList::pkt
AVPacket * pkt
Definition: ffplay.c:109
swscale.h
is_realtime
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2821
av_x_if_null
static void * av_x_if_null(const void *p, const void *x)
Return the default pointer x in case p is NULL.
Definition: avutil.h:311
PacketQueue::pkt_list
AVFifo * pkt_list
Definition: ffplay.c:114
Frame::height
int height
Definition: ffplay.c:160
decode_interrupt_cb
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2808
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3367
VideoState::frame_timer
double frame_timer
Definition: ffplay.c:278
tx.h
VideoState::audio_clock_serial
int audio_clock_serial
Definition: ffplay.c:234
avdevice_register_all
FF_VISIBILITY_POP_HIDDEN av_cold void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:67
avio_feof
int avio_feof(AVIOContext *s)
Similar to feof() but also returns nonzero on read errors.
Definition: aviobuf.c:349
realloc_texture
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
Definition: ffplay.c:836
AV_PIX_FMT_RGB444
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:528
exit_on_keydown
static int exit_on_keydown
Definition: ffplay.c:333