openscenegraph-ffmpeg3.patch
Description: Replace deprecated FFmpeg APIs in the ffmpeg plugin so that it
 builds against FFmpeg 3.x:
  - PIX_FMT_* enums and the PixelFormat type become AV_PIX_FMT_* / AVPixelFormat,
  - avcodec_alloc_frame() becomes av_frame_alloc(),
  - the get_buffer()/release_buffer() callback pair becomes a single
    get_buffer2() callback backed by reference-counted AVBufferRef buffers.
Author: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
Last-Update: 2015-11-02
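
The central change below replaces the get_buffer()/release_buffer() callback
pair, which FFmpeg 3 no longer provides, with a single get_buffer2() callback
in which per-frame data is tied to the frame through a reference-counted
AVBufferRef instead of being freed in a separate release callback. The
following self-contained sketch shows that pattern in isolation; the names
get_buffer2_with_pts, free_pts_buffer and s_current_pts are illustrative
placeholders, not identifiers from OpenSceneGraph, and error handling is kept
to a minimum.

// Self-contained sketch of the get_buffer2()/AVBufferRef pattern used by this
// patch (assumed names: get_buffer2_with_pts, free_pts_buffer, s_current_pts).
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/buffer.h>
#include <libavutil/mem.h>
}

namespace {

// Stand-in for the decoder state holding the PTS of the packet being decoded.
int64_t s_current_pts = 0;

// Runs when the extra AVBufferRef attached to the frame is released: drop the
// wrapped data buffer and free the heap-allocated PTS.
void free_pts_buffer(void *opaque, uint8_t *data)
{
    AVBufferRef *wrapped = static_cast<AVBufferRef *>(opaque);
    av_buffer_unref(&wrapped);
    av_free(data);
}

// Replacement for the removed get_buffer()/release_buffer() pair: let FFmpeg
// allocate the frame, then tag it with the current PTS.
int get_buffer2_with_pts(AVCodecContext *ctx, AVFrame *frame, int flags)
{
    const int result = avcodec_default_get_buffer2(ctx, frame, flags);
    if (result < 0)
        return result;

    int64_t *p_pts = static_cast<int64_t *>(av_malloc(sizeof(int64_t)));
    if (!p_pts)
        return AVERROR(ENOMEM);
    *p_pts = s_current_pts;
    frame->opaque = p_pts;

    // Wrap the PTS in a new AVBufferRef that also keeps the original buf[0]
    // alive; free_pts_buffer() cleans both up when the frame is unreferenced,
    // so no separate release_buffer() callback is needed any more.
    AVBufferRef *pts_ref = av_buffer_create(reinterpret_cast<uint8_t *>(p_pts),
                                            sizeof(int64_t), free_pts_buffer,
                                            frame->buf[0], 0);
    if (!pts_ref) {
        av_free(p_pts);
        return AVERROR(ENOMEM);
    }
    frame->buf[0] = pts_ref;
    return result;
}

} // namespace

// Installed on the codec context instead of the old callback pair, e.g.:
//     context->get_buffer2 = get_buffer2_with_pts;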

--- openscenegraph-3.2.1.orig/OpenSceneGraph/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp
+++ openscenegraph-3.2.1/OpenSceneGraph/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp
@@ -71,7 +71,7 @@ void FFmpegDecoderVideo::open(AVStream *
     findAspectRatio();
 
     // Find out whether we support Alpha channel
-    m_alpha_channel = (m_context->pix_fmt == PIX_FMT_YUVA420P);
+    m_alpha_channel = (m_context->pix_fmt == AV_PIX_FMT_YUVA420P);
 
     // Find out the framerate
     m_frame_rate = av_q2d(stream->avg_frame_rate);
@@ -91,20 +91,19 @@ void FFmpegDecoderVideo::open(AVStream *
         throw std::runtime_error("avcodec_open() failed");
 
     // Allocate video frame
-    m_frame.reset(avcodec_alloc_frame());
+    m_frame.reset(av_frame_alloc());
 
     // Allocate converted RGB frame
-    m_frame_rgba.reset(avcodec_alloc_frame());
-    m_buffer_rgba[0].resize(avpicture_get_size(PIX_FMT_RGB24, width(), height()));
+    m_frame_rgba.reset(av_frame_alloc());
+    m_buffer_rgba[0].resize(avpicture_get_size(AV_PIX_FMT_RGB24, width(), height()));
     m_buffer_rgba[1].resize(m_buffer_rgba[0].size());
 
     // Assign appropriate parts of the buffer to image planes in m_frame_rgba
-    avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[0])[0], PIX_FMT_RGB24, width(), height());
+    avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[0])[0], AV_PIX_FMT_RGB24, width(), height());
 
     // Override get_buffer()/release_buffer() from codec context in order to retrieve the PTS of each frame.
     m_context->opaque = this;
-    m_context->get_buffer = getBuffer;
-    m_context->release_buffer = releaseBuffer;
+    m_context->get_buffer2 = getBuffer;
 }
 
 
@@ -263,8 +262,8 @@ int FFmpegDecoderVideo::convert(AVPictur
 #ifdef USE_SWSCALE
     if (m_swscale_ctx==0)
     {
-        m_swscale_ctx = sws_getContext(src_width, src_height, (PixelFormat) src_pix_fmt,
-                                      src_width, src_height, (PixelFormat) dst_pix_fmt,
+        m_swscale_ctx = sws_getContext(src_width, src_height, (AVPixelFormat) src_pix_fmt,
+                                      src_width, src_height, (AVPixelFormat) dst_pix_fmt,
                                       /*SWS_BILINEAR*/ SWS_BICUBIC, NULL, NULL, NULL);
     }
 
@@ -311,14 +310,14 @@ void FFmpegDecoderVideo::publishFrame(co
     AVPicture * const dst = (AVPicture *) m_frame_rgba.get();
 
     // Assign appropriate parts of the buffer to image planes in m_frame_rgba
-    avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[m_writeBuffer])[0], PIX_FMT_RGB24, width(), height());
+    avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[m_writeBuffer])[0], AV_PIX_FMT_RGB24, width(), height());
 
     // Convert YUVA420p (i.e. YUV420p plus alpha channel) using our own routine
 
-    if (m_context->pix_fmt == PIX_FMT_YUVA420P)
+    if (m_context->pix_fmt == AV_PIX_FMT_YUVA420P)
         yuva420pToRgba(dst, src, width(), height());
     else
-        convert(dst, PIX_FMT_RGB24, src, m_context->pix_fmt, width(), height());
+        convert(dst, AV_PIX_FMT_RGB24, src, m_context->pix_fmt, width(), height());
 
     // Wait 'delay' seconds before publishing the picture.
     int i_delay = static_cast<int>(delay * 1000000 + 0.5);
@@ -345,7 +344,7 @@ void FFmpegDecoderVideo::publishFrame(co
 
 void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, AVPicture * const src, int width, int height)
 {
-    convert(dst, PIX_FMT_RGB24, src, m_context->pix_fmt, width, height);
+    convert(dst, AV_PIX_FMT_RGB24, src, m_context->pix_fmt, width, height);
 
     const size_t bpp = 4;
 
@@ -363,31 +362,28 @@ void FFmpegDecoderVideo::yuva420pToRgba(
     }
 }
 
-
-
-int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture)
+int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture, int flags)
 {
+    AVBufferRef *ref;
     const FFmpegDecoderVideo * const this_ = reinterpret_cast<const FFmpegDecoderVideo*>(context->opaque);
 
-    const int result = avcodec_default_get_buffer(context, picture);
+    const int result = avcodec_default_get_buffer2(context, picture, flags);
     int64_t * p_pts = reinterpret_cast<int64_t*>( av_malloc(sizeof(int64_t)) );
 
     *p_pts = this_->m_packet_pts;
     picture->opaque = p_pts;
 
+    ref = av_buffer_create((uint8_t *)picture->opaque, sizeof(int64_t), FFmpegDecoderVideo::freeBuffer, picture->buf[0], flags);
+    picture->buf[0] = ref;
+
     return result;
 }
 
-
-
-void FFmpegDecoderVideo::releaseBuffer(AVCodecContext * const context, AVFrame * const picture)
+void FFmpegDecoderVideo::freeBuffer(void *opaque, uint8_t *data)
 {
-    if (picture != 0)
-        av_freep(&picture->opaque);
-
-    avcodec_default_release_buffer(context, picture);
+    AVBufferRef *ref = (AVBufferRef *)opaque;
+    av_buffer_unref(&ref);
+    av_free(data);
 }
 
-
-
 } // namespace osgFFmpeg
--- openscenegraph-3.2.1.orig/OpenSceneGraph/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp
+++ openscenegraph-3.2.1/OpenSceneGraph/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp
@@ -94,8 +94,8 @@ private:
                 int src_pix_fmt, int src_width, int src_height);
 
 
-    static int getBuffer(AVCodecContext * context, AVFrame * picture);
-    static void releaseBuffer(AVCodecContext * context, AVFrame * picture);
+    static int getBuffer(AVCodecContext * context, AVFrame * picture, int flags);
+    static void freeBuffer(void * opaque, uint8_t *data);
 
     PacketQueue &           m_packets;
     FFmpegClocks &          m_clocks;
--- openscenegraph-3.2.1.orig/OpenSceneGraph/src/osgPlugins/ffmpeg/FFmpegParameters.cpp
+++ openscenegraph-3.2.1/OpenSceneGraph/src/osgPlugins/ffmpeg/FFmpegParameters.cpp
@@ -19,7 +19,7 @@ extern "C"
     #include <libavutil/pixdesc.h>
 }
 
-inline PixelFormat osg_av_get_pix_fmt(const char *name) { return av_get_pix_fmt(name); }
+inline AVPixelFormat osg_av_get_pix_fmt(const char *name) { return av_get_pix_fmt(name); }
 
 
 namespace osgFFmpeg {
--- src/OpenSceneGraph-3.4.0/src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp.orig	2016-02-18 21:25:39.627923629 +0000
+++ src/OpenSceneGraph-3.4.0/src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp	2016-02-18 21:26:17.071140100 +0000
@@ -227,8 +227,7 @@
         if (avcodec_open2(m_context, p_codec, NULL) < 0)
             throw std::runtime_error("avcodec_open() failed");
 
-        m_context->get_buffer = avcodec_default_get_buffer;
-        m_context->release_buffer = avcodec_default_release_buffer;
+        m_context->get_buffer2 = avcodec_default_get_buffer2;
 
     }
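
Note on the FFmpegDecoderAudio.cpp hunk above: with the reference-counted
frame API there is no release counterpart to install; buffers obtained through
avcodec_default_get_buffer2() are freed automatically when the frame is
unreferenced, which is why the old two-callback assignment collapses to a
single get_buffer2 assignment.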