Commit 4122a5b9 authored by jp9000

(API Change) Rename 'source_frame' + related

For the sake of naming consistency with the rest of obs.h, prefix this
structure and associated functions with obs_.

Renamed structures:
- struct source_frame (now obs_source_frame)

Renamed functions:
- source_frame_init (now obs_source_frame_init)
- source_frame_free (now obs_source_frame_free)
- source_frame_create (now obs_source_frame_create)
- source_frame_destroy (now obs_source_frame_destroy)

Affected functions:
- obs_source_output_video
- obs_source_get_frame
- obs_source_release_frame
Parent 7b402245
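Note: the change is mechanical for callers of the async video API. The sketch below is not part of this commit; the push_frame helper, the BGRA buffer, and the chosen format are illustrative assumptions — only struct obs_source_frame and obs_source_output_video come from the renamed API.

/* Illustrative sketch only: hypothetical helper pushing one BGRA frame
 * through the renamed async video API. */
static void push_frame(obs_source_t source, uint8_t *bgra_pixels,
                uint32_t width, uint32_t height, uint64_t timestamp)
{
        struct obs_source_frame frame = {   /* was: struct source_frame */
                .data      = {[0] = bgra_pixels},
                .linesize  = {[0] = width * 4},
                .width     = width,
                .height    = height,
                .format    = VIDEO_FORMAT_BGRA,
                .timestamp = timestamp,
        };

        obs_source_output_video(source, &frame);
}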
@@ -139,7 +139,7 @@ struct obs_core_video {
 	bool textures_output[NUM_TEXTURES];
 	bool textures_copied[NUM_TEXTURES];
 	bool textures_converted[NUM_TEXTURES];
-	struct source_frame convert_frames[NUM_TEXTURES];
+	struct obs_source_frame convert_frames[NUM_TEXTURES];
 	effect_t default_effect;
 	effect_t solid_effect;
 	effect_t conversion_effect;
@@ -350,7 +350,7 @@ struct obs_source {
 	float async_color_range_max[3];
 	int async_plane_offset[2];
 	bool async_flip;
-	DARRAY(struct source_frame*) video_frames;
+	DARRAY(struct obs_source_frame*) video_frames;
 	pthread_mutex_t video_mutex;
 	uint32_t async_width;
 	uint32_t async_height;
......
@@ -194,8 +194,8 @@ fail:
 	return NULL;
 }
-void source_frame_init(struct source_frame *frame, enum video_format format,
-		uint32_t width, uint32_t height)
+void obs_source_frame_init(struct obs_source_frame *frame,
+		enum video_format format, uint32_t width, uint32_t height)
 {
 	struct video_frame vid_frame;
@@ -238,7 +238,7 @@ void obs_source_destroy(struct obs_source *source)
 		obs_source_release(source->filters.array[i]);
 	for (i = 0; i < source->video_frames.num; i++)
-		source_frame_destroy(source->video_frames.array[i]);
+		obs_source_frame_destroy(source->video_frames.array[i]);
 	gs_entercontext(obs->video.graphics);
 	texrender_destroy(source->async_convert_texrender);
@@ -699,7 +699,7 @@ static inline enum convert_type get_convert_type(enum video_format format)
 }
 static inline bool set_packed422_sizes(struct obs_source *source,
-		struct source_frame *frame)
+		struct obs_source_frame *frame)
 {
 	source->async_convert_height = frame->height;
 	source->async_convert_width = frame->width / 2;
@@ -708,7 +708,7 @@ static inline bool set_packed422_sizes(struct obs_source *source,
 }
 static inline bool set_planar420_sizes(struct obs_source *source,
-		struct source_frame *frame)
+		struct obs_source_frame *frame)
 {
 	uint32_t size = frame->width * frame->height;
 	size += size/2;
@@ -723,7 +723,7 @@ static inline bool set_planar420_sizes(struct obs_source *source,
 }
 static inline bool init_gpu_conversion(struct obs_source *source,
-		struct source_frame *frame)
+		struct obs_source_frame *frame)
 {
 	switch (get_convert_type(frame->format)) {
 	case CONVERT_422_Y:
@@ -758,7 +758,7 @@ static inline enum gs_color_format convert_video_format(
 }
 static inline bool set_async_texture_size(struct obs_source *source,
-		struct source_frame *frame)
+		struct obs_source_frame *frame)
 {
 	enum convert_type prev, cur;
 	prev = get_convert_type(source->async_format);
@@ -804,7 +804,8 @@ static inline bool set_async_texture_size(struct obs_source *source,
 	return true;
 }
-static void upload_raw_frame(texture_t tex, const struct source_frame *frame)
+static void upload_raw_frame(texture_t tex,
+		const struct obs_source_frame *frame)
 {
 	switch (get_convert_type(frame->format)) {
 	case CONVERT_422_U:
@@ -864,7 +865,7 @@ static inline void set_eparam(effect_t effect, const char *name, float val)
 }
 static bool update_async_texrender(struct obs_source *source,
-		const struct source_frame *frame)
+		const struct obs_source_frame *frame)
 {
 	texture_t tex = source->async_texture;
 	texrender_t texrender = source->async_convert_texrender;
@@ -922,7 +923,7 @@ static bool update_async_texrender(struct obs_source *source,
 }
 static bool update_async_texture(struct obs_source *source,
-		const struct source_frame *frame)
+		const struct obs_source_frame *frame)
 {
 	texture_t tex = source->async_texture;
 	texrender_t texrender = source->async_convert_texrender;
@@ -1036,7 +1037,7 @@ static void obs_source_draw_async_texture(struct obs_source *source)
 static void obs_source_render_async_video(obs_source_t source)
 {
-	struct source_frame *frame = obs_source_get_frame(source);
+	struct obs_source_frame *frame = obs_source_get_frame(source);
 	if (frame) {
 		if (!set_async_texture_size(source, frame))
 			return;
@@ -1237,8 +1238,8 @@ obs_data_t obs_source_getsettings(obs_source_t source)
 	return source->context.settings;
 }
-static inline struct source_frame *filter_async_video(obs_source_t source,
-		struct source_frame *in)
+static inline struct obs_source_frame *filter_async_video(obs_source_t source,
+		struct obs_source_frame *in)
 {
 	size_t i;
 	for (i = source->filters.num; i > 0; i--) {
@@ -1255,8 +1256,8 @@ static inline struct source_frame *filter_async_video(obs_source_t source,
 	return in;
 }
-static inline void copy_frame_data_line(struct source_frame *dst,
-		const struct source_frame *src, uint32_t plane, uint32_t y)
+static inline void copy_frame_data_line(struct obs_source_frame *dst,
+		const struct obs_source_frame *src, uint32_t plane, uint32_t y)
 {
 	uint32_t pos_src = y * src->linesize[plane];
 	uint32_t pos_dst = y * dst->linesize[plane];
@@ -1266,8 +1267,9 @@ static inline void copy_frame_data_line(struct source_frame *dst,
 	memcpy(dst->data[plane] + pos_dst, src->data[plane] + pos_src, bytes);
 }
-static inline void copy_frame_data_plane(struct source_frame *dst,
-		const struct source_frame *src, uint32_t plane, uint32_t lines)
+static inline void copy_frame_data_plane(struct obs_source_frame *dst,
+		const struct obs_source_frame *src,
+		uint32_t plane, uint32_t lines)
 {
 	if (dst->linesize[plane] != src->linesize[plane])
 		for (uint32_t y = 0; y < lines; y++)
@@ -1277,8 +1279,8 @@ static inline void copy_frame_data_plane(struct source_frame *dst,
 				dst->linesize[plane] * lines);
 }
-static void copy_frame_data(struct source_frame *dst,
-		const struct source_frame *src)
+static void copy_frame_data(struct obs_source_frame *dst,
+		const struct obs_source_frame *src)
 {
 	dst->flip = src->flip;
 	dst->full_range = src->full_range;
@@ -1313,11 +1315,12 @@ static void copy_frame_data(struct source_frame *dst,
 	}
 }
-static inline struct source_frame *cache_video(const struct source_frame *frame)
+static inline struct obs_source_frame *cache_video(
+		const struct obs_source_frame *frame)
 {
 	/* TODO: use an actual cache */
-	struct source_frame *new_frame = source_frame_create(frame->format,
-			frame->width, frame->height);
+	struct obs_source_frame *new_frame = obs_source_frame_create(
+			frame->format, frame->width, frame->height);
 	copy_frame_data(new_frame, frame);
 	return new_frame;
@@ -1332,12 +1335,12 @@ static inline void cycle_frames(struct obs_source *source)
 }
 void obs_source_output_video(obs_source_t source,
-		const struct source_frame *frame)
+		const struct obs_source_frame *frame)
 {
 	if (!source || !frame)
 		return;
-	struct source_frame *output = cache_video(frame);
+	struct obs_source_frame *output = cache_video(frame);
 	pthread_mutex_lock(&source->filter_mutex);
 	output = filter_async_video(source, output);
@@ -1502,8 +1505,8 @@ static inline bool frame_out_of_bounds(obs_source_t source, uint64_t ts)
 static bool ready_async_frame(obs_source_t source, uint64_t sys_time)
 {
-	struct source_frame *next_frame = source->video_frames.array[0];
-	struct source_frame *frame = NULL;
+	struct obs_source_frame *next_frame = source->video_frames.array[0];
+	struct obs_source_frame *frame = NULL;
 	uint64_t sys_offset = sys_time - source->last_sys_timestamp;
 	uint64_t frame_time = next_frame->timestamp;
 	uint64_t frame_offset = 0;
@@ -1518,7 +1521,7 @@ static bool ready_async_frame(obs_source_t source, uint64_t sys_time)
 	}
 	while (frame_offset <= sys_offset) {
-		source_frame_destroy(frame);
+		obs_source_frame_destroy(frame);
 		if (source->video_frames.num == 1)
 			return true;
@@ -1538,16 +1541,16 @@ static bool ready_async_frame(obs_source_t source, uint64_t sys_time)
 		frame_offset = frame_time - source->last_frame_ts;
 	}
-	source_frame_destroy(frame);
+	obs_source_frame_destroy(frame);
 	return frame != NULL;
 }
-static inline struct source_frame *get_closest_frame(obs_source_t source,
+static inline struct obs_source_frame *get_closest_frame(obs_source_t source,
 		uint64_t sys_time)
 {
 	if (ready_async_frame(source, sys_time)) {
-		struct source_frame *frame = source->video_frames.array[0];
+		struct obs_source_frame *frame = source->video_frames.array[0];
 		da_erase(source->video_frames, 0);
 		return frame;
 	}
@@ -1561,9 +1564,9 @@ static inline struct source_frame *get_closest_frame(obs_source_t source,
  * the frame with the closest timing to ensure sync.  Also ensures that timing
  * with audio is synchronized.
  */
-struct source_frame *obs_source_get_frame(obs_source_t source)
+struct obs_source_frame *obs_source_get_frame(obs_source_t source)
 {
-	struct source_frame *frame = NULL;
+	struct obs_source_frame *frame = NULL;
 	uint64_t sys_time;
 	if (!source)
@@ -1602,10 +1605,11 @@ unlock:
 	return frame;
 }
-void obs_source_release_frame(obs_source_t source, struct source_frame *frame)
+void obs_source_release_frame(obs_source_t source,
+		struct obs_source_frame *frame)
 {
 	if (source && frame) {
-		source_frame_destroy(frame);
+		obs_source_frame_destroy(frame);
 		obs_source_release(source);
 	}
 }
......
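Note: the render path above pulls frames with obs_source_get_frame and returns them with obs_source_release_frame. A hedged sketch of that consumer pattern (the actual drawing step is omitted; the helper name is hypothetical):

/* Sketch of the consumer pattern used by the render path: take the
 * closest-timed frame, use it, then hand it back to be destroyed. */
static void consume_one_frame(obs_source_t source)
{
        struct obs_source_frame *frame = obs_source_get_frame(source);
        if (!frame)
                return;

        /* ... read frame->data[plane] / frame->linesize[plane] here ... */

        obs_source_release_frame(source, frame);
}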
@@ -234,8 +234,8 @@ struct obs_source_info {
 	 * @return         New video frame data.  This can defer video data to
 	 *                 be drawn later if time is needed for processing
 	 */
-	struct source_frame *(*filter_video)(void *data,
-			const struct source_frame *frame);
+	struct obs_source_frame *(*filter_video)(void *data,
+			const struct obs_source_frame *frame);
 	/**
 	 * Called to filter raw audio data.
......
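Note: a filter implementing the renamed callback keeps the same shape. The no-op example below is hypothetical and only illustrates the new type names; ownership semantics are unchanged by this commit and not shown here.

/* Hypothetical pass-through video filter callback matching the renamed
 * signature; it returns the input frame unchanged. */
static struct obs_source_frame *noop_filter_video(void *data,
                const struct obs_source_frame *frame)
{
        (void)data;
        return (struct obs_source_frame *)frame;
}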
@@ -305,7 +305,8 @@ static inline uint32_t make_aligned_linesize_offset(uint32_t offset,
 static void fix_gpu_converted_alignment(struct obs_core_video *video,
 		struct video_data *frame, int cur_texture)
 {
-	struct source_frame *new_frame = &video->convert_frames[cur_texture];
+	struct obs_source_frame *new_frame =
+		&video->convert_frames[cur_texture];
 	uint32_t src_linesize = frame->linesize[0];
 	uint32_t dst_linesize = video->output_width * 4;
 	uint32_t src_pos = 0;
@@ -353,7 +354,8 @@ static bool convert_frame(struct obs_core_video *video,
 		struct video_data *frame,
 		const struct video_output_info *info, int cur_texture)
 {
-	struct source_frame *new_frame = &video->convert_frames[cur_texture];
+	struct obs_source_frame *new_frame =
+		&video->convert_frames[cur_texture];
 	if (info->format == VIDEO_FORMAT_I420) {
 		compress_uyvx_to_i420(
......
@@ -187,9 +187,9 @@ static bool obs_init_textures(struct obs_video_info *ovi)
 			return false;
 		if (yuv)
-			source_frame_init(&video->convert_frames[i],
+			obs_source_frame_init(&video->convert_frames[i],
 					ovi->output_format,
 					ovi->output_width, ovi->output_height);
 	}
 	return true;
@@ -349,7 +349,7 @@ static void obs_free_video(void)
 		texture_destroy(video->render_textures[i]);
 		texture_destroy(video->convert_textures[i]);
 		texture_destroy(video->output_textures[i]);
-		source_frame_free(&video->convert_frames[i]);
+		obs_source_frame_free(&video->convert_frames[i]);
 		video->copy_surfaces[i] = NULL;
 		video->render_textures[i] = NULL;
......
@@ -182,7 +182,7 @@ struct source_audio {
  * If a YUV format is specified, it will be automatically upsampled and
  * converted to RGB via shader on the graphics processor.
  */
-struct source_frame {
+struct obs_source_frame {
 	uint8_t  *data[MAX_AV_PLANES];
 	uint32_t linesize[MAX_AV_PLANES];
 	uint32_t width;
@@ -708,18 +708,18 @@ EXPORT void obs_source_load(obs_source_t source);
 /** Outputs asynchronous video data */
 EXPORT void obs_source_output_video(obs_source_t source,
-		const struct source_frame *frame);
+		const struct obs_source_frame *frame);
 /** Outputs audio data (always asynchronous) */
 EXPORT void obs_source_output_audio(obs_source_t source,
 		const struct source_audio *audio);
 /** Gets the current async video frame */
-EXPORT struct source_frame *obs_source_get_frame(obs_source_t source);
+EXPORT struct obs_source_frame *obs_source_get_frame(obs_source_t source);
 /** Releases the current async video frame */
 EXPORT void obs_source_release_frame(obs_source_t source,
-		struct source_frame *frame);
+		struct obs_source_frame *frame);
 /** Default RGB filter handler for generic effect filters */
 EXPORT void obs_source_process_filter(obs_source_t filter, effect_t effect,
@@ -1127,28 +1127,28 @@ EXPORT const char *obs_service_get_password(obs_service_t service);
 /* ------------------------------------------------------------------------- */
 /* Source frame allocation functions */
-EXPORT void source_frame_init(struct source_frame *frame,
+EXPORT void obs_source_frame_init(struct obs_source_frame *frame,
 		enum video_format format, uint32_t width, uint32_t height);
-static inline void source_frame_free(struct source_frame *frame)
+static inline void obs_source_frame_free(struct obs_source_frame *frame)
 {
 	if (frame) {
 		bfree(frame->data[0]);
-		memset(frame, 0, sizeof(struct source_frame));
+		memset(frame, 0, sizeof(*frame));
 	}
 }
-static inline struct source_frame *source_frame_create(
+static inline struct obs_source_frame *obs_source_frame_create(
 		enum video_format format, uint32_t width, uint32_t height)
 {
-	struct source_frame *frame;
-	frame = (struct source_frame*)bzalloc(sizeof(struct source_frame));
-	source_frame_init(frame, format, width, height);
+	struct obs_source_frame *frame;
+	frame = (struct obs_source_frame*)bzalloc(sizeof(*frame));
+	obs_source_frame_init(frame, format, width, height);
 	return frame;
 }
-static inline void source_frame_destroy(struct source_frame *frame)
+static inline void obs_source_frame_destroy(struct obs_source_frame *frame)
 {
 	if (frame) {
 		bfree(frame->data[0]);
......
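Note: the four renamed helpers pair up as create/destroy for heap-allocated frames and init/free for frames embedded in another structure (as with video->convert_frames above). A usage sketch, assuming an arbitrary 640x480 I420 frame:

/* Usage sketch for the renamed allocation helpers; format and
 * dimensions are illustrative. */
static void frame_helper_examples(void)
{
        /* heap-allocated frame: create + destroy */
        struct obs_source_frame *copy =
                obs_source_frame_create(VIDEO_FORMAT_I420, 640, 480);
        /* ... fill copy->data / copy->linesize ... */
        obs_source_frame_destroy(copy);

        /* embedded frame: init + free */
        struct obs_source_frame conv;
        obs_source_frame_init(&conv, VIDEO_FORMAT_I420, 640, 480);
        /* ... */
        obs_source_frame_free(&conv);
}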
@@ -276,17 +276,17 @@ static void v4l2_destroy_mmap(struct v4l2_data *data)
  * Prepare the output frame structure for obs and compute plane offsets
  *
  * Basically all data apart from memory pointers and the timestamp is known
- * before the capture starts. This function prepares the source_frame struct
- * with all the data that is already known.
+ * before the capture starts. This function prepares the obs_source_frame
+ * struct with all the data that is already known.
  *
  * v4l2 uses a continuous memory segment for all planes so we simply compute
  * offsets to add to the start address in order to give obs the correct data
  * pointers for the individual planes.
  */
 static void v4l2_prep_obs_frame(struct v4l2_data *data,
-		struct source_frame *frame, size_t *plane_offsets)
+		struct obs_source_frame *frame, size_t *plane_offsets)
 {
-	memset(frame, 0, sizeof(struct source_frame));
+	memset(frame, 0, sizeof(struct obs_source_frame));
 	memset(plane_offsets, 0, sizeof(size_t) * MAX_AV_PLANES);
 	frame->width = data->width;
@@ -333,7 +333,7 @@ static void *v4l2_thread(void *vptr)
 	uint8_t *start;
 	struct timeval tv;
 	struct v4l2_buffer buf;
-	struct source_frame out;
+	struct obs_source_frame out;
 	size_t plane_offsets[MAX_AV_PLANES];
 	if (v4l2_start_capture(data) < 0)
......
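Note: the plane-offset idea described in the comment above can be illustrated for one planar case. The sketch below is not the plugin's code (v4l2_prep_obs_frame handles several formats and also fills linesize, format, and range); it only shows how offsets into the continuous v4l2 buffer could be derived for I420:

/* Illustration only: plane offsets for an I420 frame laid out contiguously.
 * frame->data[i] would then be start + plane_offsets[i]. */
static void example_i420_plane_offsets(uint32_t width, uint32_t height,
                size_t *plane_offsets)
{
        plane_offsets[0] = 0;                      /* Y plane             */
        plane_offsets[1] = width * height;         /* U plane (1/4 size)  */
        plane_offsets[2] = width * height * 5 / 4; /* V plane (1/4 size)  */
}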
@@ -71,7 +71,7 @@ struct av_capture {
 	obs_source_t source;
-	struct source_frame frame;
+	struct obs_source_frame frame;
 };
 static inline enum video_format format_from_subtype(FourCharCode subtype)
@@ -124,7 +124,7 @@ static inline enum video_colorspace get_colorspace(CMFormatDescriptionRef desc)
 }
 static inline bool update_colorspace(struct av_capture *capture,
-		struct source_frame *frame, CMFormatDescriptionRef desc,
+		struct obs_source_frame *frame, CMFormatDescriptionRef desc,
 		bool full_range)
 {
 	enum video_colorspace colorspace = get_colorspace(desc);
@@ -151,7 +151,7 @@ static inline bool update_colorspace(struct av_capture *capture,
 }
 static inline bool update_frame(struct av_capture *capture,
-		struct source_frame *frame, CMSampleBufferRef sample_buffer)
+		struct obs_source_frame *frame, CMSampleBufferRef sample_buffer)
 {
 	CMFormatDescriptionRef desc =
 		CMSampleBufferGetFormatDescription(sample_buffer);
@@ -225,7 +225,7 @@ static inline bool update_frame(struct av_capture *capture,
 	if (count < 1 || !capture)
 		return;
-	struct source_frame *frame = &capture->frame;
+	struct obs_source_frame *frame = &capture->frame;
 	CMTime target_pts =
 		CMSampleBufferGetOutputPresentationTimeStamp(sampleBuffer);
......
@@ -60,7 +60,7 @@ struct DShowInput {
 	VideoConfig videoConfig;
 	AudioConfig audioConfig;
-	source_frame frame;
+	obs_source_frame frame;
 	inline DShowInput(obs_source_t source_)
 		: source (source_),
......
@@ -53,7 +53,7 @@ static void *video_thread(void *data)
 	uint32_t pixels[20*20];
 	uint64_t cur_time = os_gettime_ns();
-	struct source_frame frame = {
+	struct obs_source_frame frame = {
 		.data = {[0] = (uint8_t*)pixels},
 		.linesize = {[0] = 20*4},
 		.width = 20,
......