
[VC++] Saving the Screen as a Video

수동애비 2022. 9. 8. 10:17

A separately created thread copies a bitmap from the target window's DC, encodes it with ffmpeg, and writes it to a file.

const double frameInterval = 1000.0 / 30;
int calcInterval = 0;
BYTE *pImageData = NULL;
HDC hdcMemory = NULL;
BITMAPINFO *pDibBitmapInfo = &(m_DibBitmapInfo);
HBITMAP hDibBitmap = NULL, hOldBitmap = NULL;
LONGLONG nFrameCnt = 0;
LONGLONG nStartTs = 0;
DWORD dwSleep = 0;

int startX = m_TargetRect.left;
int startY = m_TargetRect.top;

int width = m_TargetRect.Width();
int height = m_TargetRect.Height();
    
nStartTs = GetTickCount64();

while (m_bIsRecording == TRUE) {
	HDC hdcScreen = ::GetDC(m_hTargetWnd);
		
	if (hdcScreen != NULL) {
		if (pImageData == NULL) {
			hDibBitmap = ::CreateDIBSection(hdcScreen, pDibBitmapInfo, DIB_RGB_COLORS, (void **)&pImageData, 0, 0);
			hdcMemory = ::CreateCompatibleDC(hdcScreen);
			hOldBitmap = (HBITMAP)::SelectObject(hdcMemory, hDibBitmap);
		}

		::BitBlt(hdcMemory, 0, 0, width, height, hdcScreen, startX, startY, SRCCOPY);
		::ReleaseDC(m_hTargetWnd, hdcScreen);

		m_videoEncoder.Write(pImageData);	

		// Adjust the sleep time to keep the frame rate (FPS) on target.
		calcInterval = (int)((GetTickCount64() - nStartTs) - (nFrameCnt * frameInterval));
	
		dwSleep = (DWORD)(max(0, frameInterval - calcInterval));

		Sleep(dwSleep);

		nFrameCnt++;
	}		
	else {
		Sleep(1);
	}		
}
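
The loop above assumes that m_DibBitmapInfo has already been filled in before the thread starts, and once recording stops the GDI objects created inside the loop (hdcMemory, hDibBitmap) should be released with SelectObject/DeleteDC/DeleteObject. A minimal sketch of that initialization, assuming a 32-bpp top-down DIB matching the capture rectangle (the member names follow the snippet above; the concrete format is an assumption, not part of the original code):

// Assumed initialization (not shown in the original post): a 32-bpp, top-down DIB
// the same size as the capture rectangle, so pImageData rows run top to bottom.
ZeroMemory(&m_DibBitmapInfo, sizeof(m_DibBitmapInfo));
m_DibBitmapInfo.bmiHeader.biSize        = sizeof(BITMAPINFOHEADER);
m_DibBitmapInfo.bmiHeader.biWidth       = m_TargetRect.Width();
m_DibBitmapInfo.bmiHeader.biHeight      = -m_TargetRect.Height(); // negative height = top-down rows
m_DibBitmapInfo.bmiHeader.biPlanes      = 1;
m_DibBitmapInfo.bmiHeader.biBitCount    = 32;                     // BGRA layout from GDI
m_DibBitmapInfo.bmiHeader.biCompression = BI_RGB;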


VideoEncoder class implementation

CVideoEncoder::~CVideoEncoder()
{
	Close();
}


BOOL CVideoEncoder::Open(const char *filename, const Params &params)
{
	Close();

	do
	{
		avformat_alloc_output_context2(&m_Context.format_context, nullptr, nullptr, filename);
		if (!m_Context.format_context)
		{
			std::cout << "could not allocate output format" << std::endl;
			break;
		}

		m_Context.codec = avcodec_find_encoder(AV_CODEC_ID_H264);
		if (!m_Context.codec)
		{
			std::cout << "could not find encoder" << std::endl;
			break;
		}

		m_Context.stream = avformat_new_stream(m_Context.format_context, nullptr);
		if (!m_Context.stream)
		{
			std::cout << "could not create stream" << std::endl;
			break;
		}
		m_Context.stream->id = (int)(m_Context.format_context->nb_streams - 1);

		m_Context.codec_context = avcodec_alloc_context3(m_Context.codec);

		if (!m_Context.codec_context)
		{
			std::cout << "could not allocate mContext codec context" << std::endl;
			break;
		}

		m_Context.codec_context->codec_id = m_Context.format_context->oformat->video_codec;
		m_Context.codec_context->bit_rate = params.bitrate;
		m_Context.codec_context->width = static_cast<int>(params.width);
		m_Context.codec_context->height = static_cast<int>(params.height);
		m_Context.stream->time_base = av_d2q(1.0 / params.fps, 120);
		m_Context.codec_context->time_base = m_Context.stream->time_base;
		m_Context.codec_context->pix_fmt = params.dst_format;
		m_Context.codec_context->gop_size = (int)params.fps;
		m_Context.codec_context->max_b_frames = 2;

		if (m_Context.format_context->oformat->flags & AVFMT_GLOBALHEADER)
			m_Context.codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

		int ret = 0;
		if (params.preset)
		{
			ret = av_opt_set(m_Context.codec_context->priv_data, "preset", params.preset, 0);
			if (ret != 0)
			{
				std::cout << "could not set preset: " << params.preset << std::endl;
				break;
			}
		}

		{
			ret = av_opt_set_int(m_Context.codec_context->priv_data, "crf", params.crf, 0);
			if (ret != 0)
			{
				std::cout << "could not set crf: " << params.crf << std::endl;
				break;
			}
		}

		ret = avcodec_open2(m_Context.codec_context, m_Context.codec, nullptr);
		if (ret != 0)
		{
			std::cout << "could not open codec: " << ret << std::endl;
			break;
		}

		m_Context.frame = av_frame_alloc();
		if (!m_Context.frame)
		{
			std::cout << "could not allocate mContext frame" << std::endl;
			break;
		}
		m_Context.frame->format = m_Context.codec_context->pix_fmt;
		m_Context.frame->width = m_Context.codec_context->width;
		m_Context.frame->height = m_Context.codec_context->height;

		ret = av_frame_get_buffer(m_Context.frame, 32);
		if (ret < 0)
		{
			std::cout << "could not allocate the mContext frame data" << std::endl;
			break;
		}

		ret = avcodec_parameters_from_context(m_Context.stream->codecpar, m_Context.codec_context);
		if (ret < 0)
		{
			std::cout << "could not copy the stream parameters" << std::endl;
			break;
		}

		m_Context.sws_context = sws_getContext(
			m_Context.codec_context->width, m_Context.codec_context->height, params.src_format,   // src
			m_Context.codec_context->width, m_Context.codec_context->height, params.dst_format, // dst
			SWS_BICUBIC, nullptr, nullptr, nullptr
		);

		if (!m_Context.sws_context)
		{
			std::cout << "could not initialize the conversion context" << std::endl;
			break;
		}

		av_dump_format(m_Context.format_context, 0, filename, 1);

		ret = avio_open(&m_Context.format_context->pb, filename, AVIO_FLAG_WRITE);
		if (ret != 0)
		{
			std::cout << "could not open " << filename << std::endl;
			break;
		}

		ret = avformat_write_header(m_Context.format_context, nullptr);
		if (ret < 0)
		{
			std::cout << "could not write" << std::endl;
			ret = avio_close(m_Context.format_context->pb);
			if (ret != 0)
				std::cout << "failed to close file" << std::endl;
			break;
		}

		m_Context.frame_index = 0;
		m_isOpen = TRUE;

		m_Context.pFrameBuffer = (BYTE*)malloc(params.width * params.height * (params.bpp / 8));
		m_Context.nFrameBpp = params.bpp;

		return TRUE;
	} while (FALSE);

	Close();

	return FALSE;
}
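
For reference, a minimal sketch of how the encoder might be opened to match the capture loop above. The Params field names (width, height, fps, bitrate, crf, preset, bpp, src_format, dst_format) are taken from how Open() reads them; the concrete values and the output filename are assumptions:

// Hypothetical usage: 32-bpp BGRA capture encoded to H.264 at 30 fps.
CVideoEncoder::Params params = {};
params.width      = m_TargetRect.Width();
params.height     = m_TargetRect.Height();
params.fps        = 30.0;
params.bitrate    = 4 * 1000 * 1000;     // 4 Mbps
params.crf        = 23;                  // x264 default quality
params.preset     = "veryfast";          // x264 speed/size trade-off
params.bpp        = 32;                  // matches the 32-bpp DIB section
params.src_format = AV_PIX_FMT_BGRA;     // GDI 32-bpp DIB pixel layout
params.dst_format = AV_PIX_FMT_YUV420P;  // what libx264 expects

if (!m_videoEncoder.Open("capture.mp4", params))
	return FALSE;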

void CVideoEncoder::Close()
{
	if (m_isOpen)
	{
		avcodec_send_frame(m_Context.codec_context, nullptr);

		FlushPackets();

		av_write_trailer(m_Context.format_context);

		auto ret = avio_close(m_Context.format_context->pb);
		if (ret != 0)
			std::cout << "failed to close file" << std::endl;
	}

	if (m_Context.sws_context)
		sws_freeContext(m_Context.sws_context);

	if (m_Context.frame)
		av_frame_free(&m_Context.frame);

	if (m_Context.codec_context)
		avcodec_free_context(&m_Context.codec_context);

	if (m_Context.format_context)
		avformat_free_context(m_Context.format_context);

	if (m_Context.pFrameBuffer != NULL) {
		free(m_Context.pFrameBuffer);
	}

	m_Context = {};
	m_isOpen = FALSE;
}


BOOL CVideoEncoder::WriteUseFrameBuffer()
{
	if (m_Context.pFrameBuffer != NULL) {
		return Write(m_Context.pFrameBuffer);
	}

	return FALSE;
}


BOOL CVideoEncoder::Write(const unsigned char *data)
{
	if (!m_isOpen)
		return FALSE;

	auto ret = av_frame_make_writable(m_Context.frame);
	if (ret < 0)
	{
		std::cout << "frame not writable" << std::endl;
		return FALSE;
	}

	const int in_linesize[1] = { (int)(m_Context.codec_context->width * (m_Context.nFrameBpp >> 3)) };

	sws_scale(m_Context.sws_context,
		&data, in_linesize, 0, m_Context.codec_context->height,  // src
		m_Context.frame->data, m_Context.frame->linesize // dst
	);

	m_Context.frame->pts = m_Context.frame_index++;

	ret = avcodec_send_frame(m_Context.codec_context, m_Context.frame);
	
	if (ret < 0){
		std::cout << "error sending a frame for encoding" << std::endl;
		return FALSE;
	}

	return FlushPackets();
}
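
Because the codec time base was set to 1/fps in Open(), assigning pts = frame_index gives each frame a duration of exactly one frame interval; at 30 fps, for example, frame 90 lands at the 3-second mark once av_packet_rescale_ts() converts it to the stream's time base in FlushPackets().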

BOOL CVideoEncoder::IsOpen() 
{
	return m_isOpen;
}


BOOL CVideoEncoder::FlushPackets()
{
	int ret;
	do
	{
		AVPacket packet = { 0 };

		ret = avcodec_receive_packet(m_Context.codec_context, &packet);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
			break;

		if (ret < 0)
		{
			std::cout << "error encoding a frame: " << ret << std::endl;
			return FALSE;
		}

		av_packet_rescale_ts(&packet, m_Context.codec_context->time_base, m_Context.stream->time_base);
		packet.stream_index = m_Context.stream->index;

		ret = av_interleaved_write_frame(m_Context.format_context, &packet);
		av_packet_unref(&packet);

		if (ret < 0)
		{
			std::cout << "error while writing output packet: " << ret << std::endl;
			return FALSE;
		}
	} while (ret >= 0);

	return TRUE;
}
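
To stop recording cleanly, the capture loop's flag is cleared first and the encoder is closed only after the thread has exited, so Close() can drain the delayed frames and write the trailer. A sketch under the same member-name assumptions as the capture loop; the class name CScreenRecorder and the m_hThread handle are hypothetical:

// Hypothetical stop sequence; m_hThread is an assumed member holding the capture thread handle.
void CScreenRecorder::Stop()
{
	m_bIsRecording = FALSE;                      // let the capture loop fall through
	::WaitForSingleObject(m_hThread, INFINITE);  // wait until the last Write() returns
	::CloseHandle(m_hThread);
	m_hThread = NULL;

	m_videoEncoder.Close();                      // drains the encoder and writes the trailer
}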