2 * This file is part of FFmpeg.
4 * FFmpeg is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
9 * FFmpeg is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with FFmpeg; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 #include <stdatomic.h>
23 #include "refstruct.h"
29 #include "mem_internal.h"
/* Consistency checking is tied to the project's generic assertion level:
 * in checked builds every refstruct allocation carries a cookie that is
 * verified whenever a user pointer is mapped back to its header. */
#ifndef REFSTRUCT_CHECKED
#ifndef ASSERT_LEVEL
#define ASSERT_LEVEL 0
#endif
#define REFSTRUCT_CHECKED (ASSERT_LEVEL >= 1)
#endif

#if REFSTRUCT_CHECKED
#define ff_assert(cond) av_assert0(cond)
#else
#define ff_assert(cond) ((void)0)
#endif

/* 64-bit marker stored in checked builds; AV_NE() selects the operand so
 * that the in-memory byte sequence is "RefStruc" on either endianness. */
#define REFSTRUCT_COOKIE AV_NE((uint64_t)MKBETAG('R', 'e', 'f', 'S') << 32 | MKBETAG('t', 'r', 'u', 'c'), \
                               MKTAG('R', 'e', 'f', 'S') | (uint64_t)MKTAG('t', 'r', 'u', 'c') << 32)

/* Offset of the user data from the start of the allocation: the RefCount
 * header size rounded up so the user data is maximally aligned.
 * NOTE(review): the original conditional separating these two variants was
 * lost in extraction; _Alignof(max_align_t) requires C11 — confirm the
 * exact guard against upstream. */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
#define REFCOUNT_OFFSET FFALIGN(sizeof(RefCount), FFMAX(ALIGN_64, _Alignof(max_align_t)))
#else
#define REFCOUNT_OFFSET FFALIGN(sizeof(RefCount), ALIGN_64)
#endif
54 typedef struct RefCount
{
56 * An uintptr_t is big enough to hold the address of every reference,
57 * so no overflow can happen when incrementing the refcount as long as
58 * the user does not throw away references.
60 atomic_uintptr_t refcount
;
61 AVRefStructOpaque opaque
;
62 void (*free_cb
)(AVRefStructOpaque opaque
, void *obj
);
63 void (*free
)(void *ref
);
70 static RefCount
*get_refcount(void *obj
)
72 RefCount
*ref
= (RefCount
*)((char*)obj
- REFCOUNT_OFFSET
);
73 ff_assert(ref
->cookie
== REFSTRUCT_COOKIE
);
77 static const RefCount
*cget_refcount(const void *obj
)
79 const RefCount
*ref
= (const RefCount
*)((const char*)obj
- REFCOUNT_OFFSET
);
80 ff_assert(ref
->cookie
== REFSTRUCT_COOKIE
);
84 static void *get_userdata(void *buf
)
86 return (char*)buf
+ REFCOUNT_OFFSET
;
89 static void refcount_init(RefCount
*ref
, AVRefStructOpaque opaque
,
90 void (*free_cb
)(AVRefStructOpaque opaque
, void *obj
))
92 atomic_init(&ref
->refcount
, 1);
94 ref
->free_cb
= free_cb
;
98 ref
->cookie
= REFSTRUCT_COOKIE
;
102 void *av_refstruct_alloc_ext_c(size_t size
, unsigned flags
, AVRefStructOpaque opaque
,
103 void (*free_cb
)(AVRefStructOpaque opaque
, void *obj
))
107 if (size
> SIZE_MAX
- REFCOUNT_OFFSET
)
109 buf
= av_malloc(size
+ REFCOUNT_OFFSET
);
112 refcount_init(buf
, opaque
, free_cb
);
113 obj
= get_userdata(buf
);
114 if (!(flags
& AV_REFSTRUCT_FLAG_NO_ZEROING
))
115 memset(obj
, 0, size
);
120 void av_refstruct_unref(void *objp
)
125 memcpy(&obj
, objp
, sizeof(obj
));
128 memcpy(objp
, &(void *){ NULL
}, sizeof(obj
));
130 ref
= get_refcount(obj
);
131 if (atomic_fetch_sub_explicit(&ref
->refcount
, 1, memory_order_acq_rel
) == 1) {
133 ref
->free_cb(ref
->opaque
, obj
);
140 void *av_refstruct_ref(void *obj
)
142 RefCount
*ref
= get_refcount(obj
);
144 atomic_fetch_add_explicit(&ref
->refcount
, 1, memory_order_relaxed
);
149 const void *av_refstruct_ref_c(const void *obj
)
151 /* Casting const away here is fine, as it is only supposed
152 * to apply to the user's data and not our bookkeeping data. */
153 RefCount
*ref
= get_refcount((void*)obj
);
155 atomic_fetch_add_explicit(&ref
->refcount
, 1, memory_order_relaxed
);
/**
 * Make *dstp reference the same object as src: unref the old destination
 * and store a new reference to src (or NULL if src is NULL).
 * A self-replace (src already referenced by *dstp) is a no-op.
 */
void av_refstruct_replace(void *dstp, const void *src)
{
    const void *dst;
    memcpy(&dst, dstp, sizeof(dst));

    if (src == dst)
        return;
    av_refstruct_unref(dstp);
    if (src) {
        dst = av_refstruct_ref_c(src);
        memcpy(dstp, &dst, sizeof(dst));
    }
}
174 int av_refstruct_exclusive(const void *obj
)
176 const RefCount
*ref
= cget_refcount(obj
);
177 /* Casting const away here is safe, because it is a load.
178 * It is necessary because atomic_load_explicit() does not
179 * accept const atomics in C11 (see also N1807). */
180 return atomic_load_explicit((atomic_uintptr_t
*)&ref
->refcount
, memory_order_acquire
) == 1;
183 struct AVRefStructPool
{
185 AVRefStructOpaque opaque
;
186 int (*init_cb
)(AVRefStructOpaque opaque
, void *obj
);
187 void (*reset_cb
)(AVRefStructOpaque opaque
, void *obj
);
188 void (*free_entry_cb
)(AVRefStructOpaque opaque
, void *obj
);
189 void (*free_cb
)(AVRefStructOpaque opaque
);
192 unsigned entry_flags
;
195 /** The number of outstanding entries not in available_entries. */
196 atomic_uintptr_t refcount
;
198 * This is a linked list of available entries;
199 * the RefCount's opaque pointer is used as next pointer
200 * for available entries.
201 * While the entries are in use, the opaque is a pointer
202 * to the corresponding AVRefStructPool.
204 RefCount
*available_entries
;
208 static void pool_free(AVRefStructPool
*pool
)
210 ff_mutex_destroy(&pool
->mutex
);
212 pool
->free_cb(pool
->opaque
);
213 av_free(get_refcount(pool
));
216 static void pool_free_entry(AVRefStructPool
*pool
, RefCount
*ref
)
218 if (pool
->free_entry_cb
)
219 pool
->free_entry_cb(pool
->opaque
, get_userdata(ref
));
223 static void pool_return_entry(void *ref_
)
225 RefCount
*ref
= ref_
;
226 AVRefStructPool
*pool
= ref
->opaque
.nc
;
228 ff_mutex_lock(&pool
->mutex
);
229 if (!pool
->uninited
) {
230 ref
->opaque
.nc
= pool
->available_entries
;
231 pool
->available_entries
= ref
;
234 ff_mutex_unlock(&pool
->mutex
);
237 pool_free_entry(pool
, ref
);
239 if (atomic_fetch_sub_explicit(&pool
->refcount
, 1, memory_order_acq_rel
) == 1)
243 static void pool_reset_entry(AVRefStructOpaque opaque
, void *entry
)
245 AVRefStructPool
*pool
= opaque
.nc
;
247 pool
->reset_cb(pool
->opaque
, entry
);
250 static int refstruct_pool_get_ext(void *datap
, AVRefStructPool
*pool
)
254 memcpy(datap
, &(void *){ NULL
}, sizeof(void*));
256 ff_mutex_lock(&pool
->mutex
);
257 ff_assert(!pool
->uninited
);
258 if (pool
->available_entries
) {
259 RefCount
*ref
= pool
->available_entries
;
260 ret
= get_userdata(ref
);
261 pool
->available_entries
= ref
->opaque
.nc
;
262 ref
->opaque
.nc
= pool
;
263 atomic_init(&ref
->refcount
, 1);
265 ff_mutex_unlock(&pool
->mutex
);
269 ret
= av_refstruct_alloc_ext(pool
->size
, pool
->entry_flags
, pool
,
270 pool
->reset_cb
? pool_reset_entry
: NULL
);
272 return AVERROR(ENOMEM
);
273 ref
= get_refcount(ret
);
274 ref
->free
= pool_return_entry
;
276 int err
= pool
->init_cb(pool
->opaque
, ret
);
278 if (pool
->pool_flags
& AV_REFSTRUCT_POOL_FLAG_RESET_ON_INIT_ERROR
)
279 pool
->reset_cb(pool
->opaque
, ret
);
280 if (pool
->pool_flags
& AV_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR
)
281 pool
->free_entry_cb(pool
->opaque
, ret
);
287 atomic_fetch_add_explicit(&pool
->refcount
, 1, memory_order_relaxed
);
289 if (pool
->pool_flags
& AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME
)
290 memset(ret
, 0, pool
->size
);
292 memcpy(datap
, &ret
, sizeof(ret
));
297 void *av_refstruct_pool_get(AVRefStructPool
*pool
)
300 refstruct_pool_get_ext(&ret
, pool
);
305 * Hint: The content of pool_unref() and refstruct_pool_uninit()
306 * could currently be merged; they are only separate functions
307 * in case we would ever introduce weak references.
309 static void pool_unref(void *ref
)
311 AVRefStructPool
*pool
= get_userdata(ref
);
312 if (atomic_fetch_sub_explicit(&pool
->refcount
, 1, memory_order_acq_rel
) == 1)
316 static void refstruct_pool_uninit(AVRefStructOpaque unused
, void *obj
)
318 AVRefStructPool
*pool
= obj
;
321 ff_mutex_lock(&pool
->mutex
);
322 ff_assert(!pool
->uninited
);
324 entry
= pool
->available_entries
;
325 pool
->available_entries
= NULL
;
326 ff_mutex_unlock(&pool
->mutex
);
329 void *next
= entry
->opaque
.nc
;
330 pool_free_entry(pool
, entry
);
335 AVRefStructPool
*av_refstruct_pool_alloc(size_t size
, unsigned flags
)
337 return av_refstruct_pool_alloc_ext(size
, flags
, NULL
, NULL
, NULL
, NULL
, NULL
);
340 AVRefStructPool
*av_refstruct_pool_alloc_ext_c(size_t size
, unsigned flags
,
341 AVRefStructOpaque opaque
,
342 int (*init_cb
)(AVRefStructOpaque opaque
, void *obj
),
343 void (*reset_cb
)(AVRefStructOpaque opaque
, void *obj
),
344 void (*free_entry_cb
)(AVRefStructOpaque opaque
, void *obj
),
345 void (*free_cb
)(AVRefStructOpaque opaque
))
347 AVRefStructPool
*pool
= av_refstruct_alloc_ext(sizeof(*pool
), 0, NULL
,
348 refstruct_pool_uninit
);
353 get_refcount(pool
)->free
= pool_unref
;
356 pool
->opaque
= opaque
;
357 pool
->init_cb
= init_cb
;
358 pool
->reset_cb
= reset_cb
;
359 pool
->free_entry_cb
= free_entry_cb
;
360 pool
->free_cb
= free_cb
;
361 #define COMMON_FLAGS AV_REFSTRUCT_POOL_FLAG_NO_ZEROING
362 pool
->entry_flags
= flags
& COMMON_FLAGS
;
363 // Filter out nonsense combinations to avoid checks later.
365 flags
&= ~AV_REFSTRUCT_POOL_FLAG_RESET_ON_INIT_ERROR
;
366 if (!pool
->free_entry_cb
)
367 flags
&= ~AV_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR
;
368 pool
->pool_flags
= flags
;
370 if (flags
& AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME
) {
371 // We will zero the buffer before every use, so zeroing
372 // upon allocating the buffer is unnecessary.
373 pool
->entry_flags
|= AV_REFSTRUCT_FLAG_NO_ZEROING
;
376 atomic_init(&pool
->refcount
, 1);
378 err
= ff_mutex_init(&pool
->mutex
, NULL
);
380 // Don't call av_refstruct_uninit() on pool, as it hasn't been properly
381 // set up and is just a POD right now.
382 av_free(get_refcount(pool
));