Source code: Qt + FFmpeg screen and audio recording

NanaRecorder

The earlier screen-recording project, ScreenCapture, had audio/video synchronization problems, so it was rewritten as a second version: NanaRecorder.
It records the desktop and the system audio (speaker output).

Recording pipeline

- Main thread: the UI thread; it calls the Recorder interface.
- Capture threads: after a frame is captured -> format conversion / resampling -> write it into a FIFO.
- Encode thread: loop reading frames from the FIFO -> encode -> write to the output file (a minimal sketch of this handoff follows the list).
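The capture/encode handoff is a classic bounded producer/consumer queue. Below is a minimal, self-contained sketch of that pattern; the Frame type and class name are placeholders, not code from the project. In the actual implementation the queue is an AVFifoBuffer / AVAudioFifo guarded by the m_cvVBufNotFull / m_cvVBufNotEmpty condition variables (and their audio counterparts).

#include <condition_variable>
#include <mutex>
#include <queue>
#include <utility>

struct Frame { /* converted video frame or resampled audio samples */ };

class FrameFifo {
public:
    explicit FrameFifo(size_t capacity) : m_capacity(capacity) {}

    void push(Frame f) {                      // called by the capture thread
        std::unique_lock<std::mutex> lk(m_mtx);
        m_notFull.wait(lk, [this] { return m_queue.size() < m_capacity; });
        m_queue.push(std::move(f));
        m_notEmpty.notify_one();
    }

    Frame pop() {                             // called by the encode thread
        std::unique_lock<std::mutex> lk(m_mtx);
        m_notEmpty.wait(lk, [this] { return !m_queue.empty(); });
        Frame f = std::move(m_queue.front());
        m_queue.pop();
        m_notFull.notify_one();
        return f;
    }

private:
    size_t m_capacity;
    std::queue<Frame> m_queue;
    std::mutex m_mtx;
    std::condition_variable m_notFull, m_notEmpty;
};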

Dependencies

- VS2019
- Qt5.12.9 
- FFmpeg4.1

UI


Below is the code of the first version: ScreenCapture

  • Screen recording supports: start, pause, stop (a short sketch of the pause mechanism follows this list).
  • Implemented with Qt + C++ wrapping the FFmpeg API, without using deprecated FFmpeg APIs.
  • Main thread: the Qt GUI thread; a recording UI can be attached later.
  • MuxThreadProc: the muxing thread. It starts the audio/video capture threads, opens the input/output streams, then reads frames from the fifoBuffer, encodes them, and produces video in various formats.
  • ScreenRecordThreadProc: the video capture thread. It reads frames from the input stream, scales them, and writes them into the fifoBuffer.
  • SoundRecordThreadProc: the audio capture thread. It reads samples from the input stream, resamples them, and writes them into the fifoBuffer.
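The pause/resume behavior relies on a condition variable that both capture threads wait on while recording is paused. A minimal sketch of that mechanism (standalone placeholder names, not the project's class):

#include <condition_variable>
#include <mutex>

enum class RecordState { NotStarted, Started, Paused, Stopped };

struct PauseGate {
    RecordState state = RecordState::NotStarted;
    std::mutex mtx;
    std::condition_variable cvNotPause;

    void waitIfPaused() {                 // called at the top of each capture loop
        std::unique_lock<std::mutex> lk(mtx);
        cvNotPause.wait(lk, [this] { return state != RecordState::Paused; });
    }

    void resume() {                       // called from Start() when the state was Paused
        { std::lock_guard<std::mutex> lk(mtx); state = RecordState::Started; }
        cvNotPause.notify_all();
    }
};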

ScreenRecordImpl.h

#pragma once
#include <Windows.h>
#include <atomic>
#include <QObject>
#include <QString>
#include <QMutex>
#include <condition_variable>
#include <mutex>
#include <QVariant>

#ifdef __cplusplus
extern "C"
{
#endif
struct AVFormatContext;
struct AVCodecContext;
struct AVCodec;
struct AVFifoBuffer;
struct AVAudioFifo;
struct AVFrame;
struct SwsContext;
struct SwrContext;
#ifdef __cplusplus
};
#endif

class ScreenRecordImpl : public QObject
{
    Q_OBJECT

private:
    enum RecordState
    {
        NotStarted,
        Started,
        Paused,
        Stopped,
        Unknown,
    };

public:
    ScreenRecordImpl(QObject *parent = Q_NULLPTR);
    void Init(const QVariantMap& map);

private slots:
    void Start();
    void Pause();
    void Stop();

private:
    // Read audio/video frames from the FIFO buffers, encode, mux and write the output file
    void MuxThreadProc();
    // Read frames from the video input stream and write them into the video FIFO
    void ScreenRecordThreadProc();
    // Read samples from the audio input stream and write them into the audio FIFO
    void SoundRecordThreadProc();
    int OpenVideo();
    int OpenAudio();
    int OpenOutput();
    QString GetSpeakerDeviceName();
    // Get the microphone device name
    QString GetMicrophoneDeviceName();
    AVFrame* AllocAudioFrame(AVCodecContext* c, int nbSamples);
    void InitVideoBuffer();
    void InitAudioBuffer();
    void FlushVideoDecoder();
    void FlushAudioDecoder();
    //void FlushVideoEncoder();
    //void FlushAudioEncoder();
    void FlushEncoders();
    void Release();

private:
    QString             m_filePath;
    int                 m_width;
    int                 m_height;
    int                 m_fps;
    int                 m_audioBitrate;
    int                 m_vIndex;      // input video stream index
    int                 m_aIndex;      // input audio stream index
    int                 m_vOutIndex;   // output video stream index
    int                 m_aOutIndex;   // output audio stream index
    AVFormatContext     *m_vFmtCtx;
    AVFormatContext     *m_aFmtCtx;
    AVFormatContext     *m_oFmtCtx;
    AVCodecContext      *m_vDecodeCtx;
    AVCodecContext      *m_aDecodeCtx;
    AVCodecContext      *m_vEncodeCtx;
    AVCodecContext      *m_aEncodeCtx;
    SwsContext          *m_swsCtx;
    SwrContext          *m_swrCtx;
    AVFifoBuffer        *m_vFifoBuf;
    AVAudioFifo         *m_aFifoBuf;
    AVFrame             *m_vOutFrame;
    uint8_t             *m_vOutFrameBuf;
    int                 m_vOutFrameSize;
    int                 m_nbSamples;
    RecordState         m_state;
    std::condition_variable m_cvNotPause;   // the two capture threads block on this while paused
    std::mutex              m_mtxPause;
    std::condition_variable m_cvVBufNotFull;
    std::condition_variable m_cvVBufNotEmpty;
    std::mutex              m_mtxVBuf;
    std::condition_variable m_cvABufNotFull;
    std::condition_variable m_cvABufNotEmpty;
    std::mutex              m_mtxABuf;
    int64_t                 m_vCurPts;
    int64_t                 m_aCurPts;
};

ScreenRecordImpl.cpp

#ifdef   __cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/imgutils.h"
#include "libswresample/swresample.h"
#include <libavutil/avassert.h>
#ifdef __cplusplus
};
#endif

#include "ScreenRecordImpl.h"
#include <QDebug>
#include <QAudioDeviceInfo>
#include <thread>
#include <fstream>
#include <dshow.h>

using namespace std;

int g_vCollectFrameCnt = 0;    // number of captured video frames
int g_vEncodeFrameCnt = 0;     // number of encoded video frames
int g_aCollectFrameCnt = 0;    // number of captured audio frames
int g_aEncodeFrameCnt = 0;     // number of encoded audio frames

ScreenRecordImpl::ScreenRecordImpl(QObject *parent) :
    QObject(parent)
    , m_fps(30)
    , m_vIndex(-1), m_aIndex(-1)
    , m_vFmtCtx(nullptr), m_aFmtCtx(nullptr), m_oFmtCtx(nullptr)
    , m_vDecodeCtx(nullptr), m_aDecodeCtx(nullptr)
    , m_vEncodeCtx(nullptr), m_aEncodeCtx(nullptr)
    , m_vFifoBuf(nullptr), m_aFifoBuf(nullptr)
    , m_swsCtx(nullptr), m_swrCtx(nullptr)
    , m_state(RecordState::NotStarted)
    , m_vCurPts(0), m_aCurPts(0)
{
}

void ScreenRecordImpl::Init(const QVariantMap& map)
{
    m_filePath = map["filePath"].toString();
    m_width = map["width"].toInt();
    m_height = map["height"].toInt();
    m_fps = map["fps"].toInt();
    m_audioBitrate = map["audioBitrate"].toInt();
}

void ScreenRecordImpl::Start()
{
    if (m_state == RecordState::NotStarted)
    {
        qDebug() << "start record";
        m_state = RecordState::Started;
        std::thread muxThread(&ScreenRecordImpl::MuxThreadProc, this);
        muxThread.detach();
    }
    else if (m_state == RecordState::Paused)
    {
        qDebug() << "continue record";
        m_state = RecordState::Started;
        m_cvNotPause.notify_one();
    }
}

void ScreenRecordImpl::Pause()
{
    qDebug() << "pause record";
    m_state = RecordState::Paused;
}

void ScreenRecordImpl::Stop()
{
    qDebug() << "stop record";
    RecordState state = m_state;
    m_state = RecordState::Stopped;
    if (state == RecordState::Paused)
        m_cvNotPause.notify_one();
}

int ScreenRecordImpl::OpenVideo()
{
    int ret = -1;
    AVInputFormat *ifmt = av_find_input_format("gdigrab");
    AVDictionary *options = nullptr;
    AVCodec *decoder = nullptr;
    av_dict_set(&options, "framerate", QString::number(m_fps).toStdString().c_str(), NULL);
    if (avformat_open_input(&m_vFmtCtx, "desktop", ifmt, &options) != 0)
    {
        qDebug() << "Can not open video input stream";
        return -1;
    }
    if (avformat_find_stream_info(m_vFmtCtx, nullptr) < 0)
    {
        printf("Couldn't find stream information.\n");
        return -1;
    }
    for (int i = 0; i < m_vFmtCtx->nb_streams; ++i)
    {
        AVStream *stream = m_vFmtCtx->streams[i];
        if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            decoder = avcodec_find_decoder(stream->codecpar->codec_id);
            if (decoder == nullptr)
            {
                printf("Codec not found.\n");
                return -1;
            }
            // Copy the parameters from the video stream into the decoder context
            m_vDecodeCtx = avcodec_alloc_context3(decoder);
            if ((ret = avcodec_parameters_to_context(m_vDecodeCtx, stream->codecpar)) < 0)
            {
                qDebug() << "Video avcodec_parameters_to_context failed, error code: " << ret;
                return -1;
            }
            m_vIndex = i;
            break;
        }
    }
    if (avcodec_open2(m_vDecodeCtx, decoder, nullptr) < 0)
    {
        printf("Could not open codec.\n");
        return -1;
    }
    m_swsCtx = sws_getContext(m_vDecodeCtx->width, m_vDecodeCtx->height, m_vDecodeCtx->pix_fmt,
        m_width, m_height, AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);
    return 0;
}

static char *dup_wchar_to_utf8(wchar_t *w)
{
    char *s = NULL;
    int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
    s = (char *)av_malloc(l);
    if (s)
        WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
    return s;
}

static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt)
{
    const enum AVSampleFormat *p = codec->sample_fmts;
    while (*p != AV_SAMPLE_FMT_NONE) {
        if (*p == sample_fmt)
            return 1;
        p++;
    }
    return 0;
}

int ScreenRecordImpl::OpenAudio()
{
    int ret = -1;
    AVCodec *decoder = nullptr;
    qDebug() << GetMicrophoneDeviceName();
    AVInputFormat *ifmt = av_find_input_format("dshow");
    QString audioDeviceName = "audio=" + GetMicrophoneDeviceName();

    if (avformat_open_input(&m_aFmtCtx, audioDeviceName.toStdString().c_str(), ifmt, nullptr) < 0)
    {
        qDebug() << "Can not open audio input stream";
        return -1;
    }
    if (avformat_find_stream_info(m_aFmtCtx, nullptr) < 0)
        return -1;
    for (int i = 0; i < m_aFmtCtx->nb_streams; ++i)
    {
        AVStream *stream = m_aFmtCtx->streams[i];
        if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            decoder = avcodec_find_decoder(stream->codecpar->codec_id);
            if (decoder == nullptr)
            {
                printf("Codec not found.\n");
                return -1;
            }
            // Copy the parameters from the audio stream into the decoder context
            m_aDecodeCtx = avcodec_alloc_context3(decoder);
            if ((ret = avcodec_parameters_to_context(m_aDecodeCtx, stream->codecpar)) < 0)
            {
                qDebug() << "Audio avcodec_parameters_to_context failed, error code: " << ret;
                return -1;
            }
            m_aIndex = i;
            break;
        }
    }
    if (0 > avcodec_open2(m_aDecodeCtx, decoder, NULL))
    {
        printf("can not find or open audio decoder!\n");
        return -1;
    }
    return 0;
}

int ScreenRecordImpl::OpenOutput()
{
    int ret = -1;
    AVStream *vStream = nullptr, *aStream = nullptr;
    const char *outFileName = "test.mp4";
    ret = avformat_alloc_output_context2(&m_oFmtCtx, nullptr, nullptr, outFileName);
    if (ret < 0)
    {
        qDebug() << "avformat_alloc_output_context2 failed";
        return -1;
    }

    if (m_vFmtCtx->streams[m_vIndex]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
    {
        vStream = avformat_new_stream(m_oFmtCtx, nullptr);
        if (!vStream)
        {
            printf("can not new stream for output!\n");
            return -1;
        }
        // The first stream created on an AVFormatContext has index 0, the second has index 1
        m_vOutIndex = vStream->index;
        vStream->time_base = AVRational{ 1, m_fps };

        m_vEncodeCtx = avcodec_alloc_context3(NULL);
        if (nullptr == m_vEncodeCtx)
        {
            qDebug() << "avcodec_alloc_context3 failed";
            return -1;
        }
        m_vEncodeCtx->width = m_width;
        m_vEncodeCtx->height = m_height;
        m_vEncodeCtx->codec_type = AVMEDIA_TYPE_VIDEO;
        m_vEncodeCtx->time_base.num = 1;
        m_vEncodeCtx->time_base.den = m_fps;
        m_vEncodeCtx->pix_fmt = AV_PIX_FMT_YUV420P;
        m_vEncodeCtx->codec_id = AV_CODEC_ID_H264;
        m_vEncodeCtx->bit_rate = 800 * 1000;
        m_vEncodeCtx->rc_max_rate = 800 * 1000;
        m_vEncodeCtx->rc_buffer_size = 500 * 1000;
        // GOP size: the larger gop_size is, the smaller the file
        m_vEncodeCtx->gop_size = 30;
        m_vEncodeCtx->max_b_frames = 3;
        // H.264-related parameters; avcodec_open2 fails if they are not set
        m_vEncodeCtx->qmin = 10;        //2
        m_vEncodeCtx->qmax = 31;        //31
        m_vEncodeCtx->max_qdiff = 4;
        m_vEncodeCtx->me_range = 16;    //0
        m_vEncodeCtx->max_qdiff = 4;    //3
        m_vEncodeCtx->qcompress = 0.6;  //0.5

        // Find the video encoder
        AVCodec *encoder;
        encoder = avcodec_find_encoder(m_vEncodeCtx->codec_id);
        if (!encoder)
        {
            qDebug() << "Can not find the encoder, id: " << m_vEncodeCtx->codec_id;
            return -1;
        }
        m_vEncodeCtx->codec_tag = 0;
        // Needed so that SPS/PPS are written correctly
        m_vEncodeCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
        // Open the video encoder
        ret = avcodec_open2(m_vEncodeCtx, encoder, nullptr);
        if (ret < 0)
        {
            qDebug() << "Can not open encoder id: " << encoder->id << "error code: " << ret;
            return -1;
        }
        // Copy the parameters from the encoder context into the output stream
        ret = avcodec_parameters_from_context(vStream->codecpar, m_vEncodeCtx);
        if (ret < 0)
        {
            qDebug() << "Output avcodec_parameters_from_context, error code:" << ret;
            return -1;
        }
    }

    if (m_aFmtCtx->streams[m_aIndex]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
    {
        aStream = avformat_new_stream(m_oFmtCtx, NULL);
        if (!aStream)
        {
            printf("can not new audio stream for output!\n");
            return -1;
        }
        m_aOutIndex = aStream->index;

        AVCodec *encoder = avcodec_find_encoder(m_oFmtCtx->oformat->audio_codec);
        if (!encoder)
        {
            qDebug() << "Can not find audio encoder, id: " << m_oFmtCtx->oformat->audio_codec;
            return -1;
        }
        m_aEncodeCtx = avcodec_alloc_context3(encoder);
        if (nullptr == m_aEncodeCtx)
        {
            qDebug() << "audio avcodec_alloc_context3 failed";
            return -1;
        }
        m_aEncodeCtx->sample_fmt = encoder->sample_fmts ? encoder->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        m_aEncodeCtx->bit_rate = m_audioBitrate;
        m_aEncodeCtx->sample_rate = 44100;
        if (encoder->supported_samplerates)
        {
            m_aEncodeCtx->sample_rate = encoder->supported_samplerates[0];
            for (int i = 0; encoder->supported_samplerates[i]; ++i)
            {
                if (encoder->supported_samplerates[i] == 44100)
                    m_aEncodeCtx->sample_rate = 44100;
            }
        }
        m_aEncodeCtx->channel_layout = AV_CH_LAYOUT_STEREO;
        if (encoder->channel_layouts)
        {
            m_aEncodeCtx->channel_layout = encoder->channel_layouts[0];
            for (int i = 0; encoder->channel_layouts[i]; ++i)
            {
                if (encoder->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                    m_aEncodeCtx->channel_layout = AV_CH_LAYOUT_STEREO;
            }
        }
        m_aEncodeCtx->channels = av_get_channel_layout_nb_channels(m_aEncodeCtx->channel_layout);
        aStream->time_base = AVRational{ 1, m_aEncodeCtx->sample_rate };
        m_aEncodeCtx->codec_tag = 0;
        m_aEncodeCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
        if (!check_sample_fmt(encoder, m_aEncodeCtx->sample_fmt))
        {
            qDebug() << "Encoder does not support sample format " << av_get_sample_fmt_name(m_aEncodeCtx->sample_fmt);
            return -1;
        }
        // Open the audio encoder; frame_size is set after this call
        ret = avcodec_open2(m_aEncodeCtx, encoder, 0);
        if (ret < 0)
        {
            qDebug() << "Can not open the audio encoder, id: " << encoder->id << "error code: " << ret;
            return -1;
        }
        // Copy the parameters from the encoder context into the audio output stream
        ret = avcodec_parameters_from_context(aStream->codecpar, m_aEncodeCtx);
        if (ret < 0)
        {
            qDebug() << "Output audio avcodec_parameters_from_context, error code:" << ret;
            return -1;
        }

        m_swrCtx = swr_alloc();
        if (!m_swrCtx)
        {
            qDebug() << "swr_alloc failed";
            return -1;
        }
        av_opt_set_int(m_swrCtx, "in_channel_count", m_aDecodeCtx->channels, 0);        //2
        av_opt_set_int(m_swrCtx, "in_sample_rate", m_aDecodeCtx->sample_rate, 0);       //44100
        av_opt_set_sample_fmt(m_swrCtx, "in_sample_fmt", m_aDecodeCtx->sample_fmt, 0);  //AV_SAMPLE_FMT_S16
        av_opt_set_int(m_swrCtx, "out_channel_count", m_aEncodeCtx->channels, 0);       //2
        av_opt_set_int(m_swrCtx, "out_sample_rate", m_aEncodeCtx->sample_rate, 0);      //44100
        av_opt_set_sample_fmt(m_swrCtx, "out_sample_fmt", m_aEncodeCtx->sample_fmt, 0); //AV_SAMPLE_FMT_FLTP
        if ((ret = swr_init(m_swrCtx)) < 0)
        {
            qDebug() << "swr_init failed";
            return -1;
        }
    }

    // Open the output file
    if (!(m_oFmtCtx->oformat->flags & AVFMT_NOFILE))
    {
        if (avio_open(&m_oFmtCtx->pb, outFileName, AVIO_FLAG_WRITE) < 0)
        {
            printf("can not open output file handle!\n");
            return -1;
        }
    }
    // Write the file header
    if (avformat_write_header(m_oFmtCtx, nullptr) < 0)
    {
        printf("can not write the header of the output file!\n");
        return -1;
    }
    return 0;
}

QString ScreenRecordImpl::GetSpeakerDeviceName()
{
    char sName[256] = { 0 };
    QString speaker = "";
    bool bRet = false;
    ::CoInitialize(NULL);
    ICreateDevEnum* pCreateDevEnum;
    // Enumerate all speaker (audio renderer) devices
    HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER,
        IID_ICreateDevEnum, (void**)&pCreateDevEnum);
    IEnumMoniker* pEm;
    hr = pCreateDevEnum->CreateClassEnumerator(CLSID_AudioRendererCategory, &pEm, 0);
    if (hr != NOERROR)
    {
        ::CoUninitialize();
        return "";
    }
    pEm->Reset();
    ULONG cFetched;
    IMoniker *pM;
    while (hr = pEm->Next(1, &pM, &cFetched), hr == S_OK)
    {
        IPropertyBag* pBag = NULL;
        hr = pM->BindToStorage(0, 0, IID_IPropertyBag, (void**)&pBag);
        if (SUCCEEDED(hr))
        {
            VARIANT var;
            var.vt = VT_BSTR;
            // Other properties such as the description are also available
            hr = pBag->Read(L"FriendlyName", &var, NULL);
            if (hr == NOERROR)
            {
                // Get the device name
                WideCharToMultiByte(CP_ACP, 0, var.bstrVal, -1, sName, 256, "", NULL);
                speaker = QString::fromLocal8Bit(sName);
                SysFreeString(var.bstrVal);
            }
            pBag->Release();
        }
        pM->Release();
        bRet = true;
    }
    pCreateDevEnum = NULL;
    pEm = NULL;
    ::CoUninitialize();
    return speaker;
}

QString ScreenRecordImpl::GetMicrophoneDeviceName()
{
    char sName[256] = { 0 };
    QString capture = "";
    bool bRet = false;
    ::CoInitialize(NULL);
    ICreateDevEnum* pCreateDevEnum;
    // Enumerate all audio capture devices
    HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER,
        IID_ICreateDevEnum, (void**)&pCreateDevEnum);
    IEnumMoniker* pEm;
    hr = pCreateDevEnum->CreateClassEnumerator(CLSID_AudioInputDeviceCategory, &pEm, 0);
    if (hr != NOERROR)
    {
        ::CoUninitialize();
        return "";
    }
    pEm->Reset();
    ULONG cFetched;
    IMoniker *pM;
    while (hr = pEm->Next(1, &pM, &cFetched), hr == S_OK)
    {
        IPropertyBag* pBag = NULL;
        hr = pM->BindToStorage(0, 0, IID_IPropertyBag, (void**)&pBag);
        if (SUCCEEDED(hr))
        {
            VARIANT var;
            var.vt = VT_BSTR;
            // Other properties such as the description are also available
            hr = pBag->Read(L"FriendlyName", &var, NULL);
            if (hr == NOERROR)
            {
                // Get the device name
                WideCharToMultiByte(CP_ACP, 0, var.bstrVal, -1, sName, 256, "", NULL);
                capture = QString::fromLocal8Bit(sName);
                SysFreeString(var.bstrVal);
            }
            pBag->Release();
        }
        pM->Release();
        bRet = true;
    }
    pCreateDevEnum = NULL;
    pEm = NULL;
    ::CoUninitialize();
    return capture;
}

AVFrame* ScreenRecordImpl::AllocAudioFrame(AVCodecContext* c, int nbSamples)
{
    AVFrame *frame = av_frame_alloc();
    int ret;
    frame->format = c->sample_fmt;
    frame->channel_layout = c->channel_layout ? c->channel_layout : AV_CH_LAYOUT_STEREO;
    frame->sample_rate = c->sample_rate;
    frame->nb_samples = nbSamples;
    if (nbSamples)
    {
        ret = av_frame_get_buffer(frame, 0);
        if (ret < 0)
        {
            qDebug() << "av_frame_get_buffer failed";
            return nullptr;
        }
    }
    return frame;
}

void ScreenRecordImpl::InitVideoBuffer()
{
    m_vOutFrameSize = av_image_get_buffer_size(m_vEncodeCtx->pix_fmt, m_width, m_height, 1);
    m_vOutFrameBuf = (uint8_t *)av_malloc(m_vOutFrameSize);
    m_vOutFrame = av_frame_alloc();
    // Point the AVFrame data pointers at the buffer first; the data is written into it later
    av_image_fill_arrays(m_vOutFrame->data, m_vOutFrame->linesize, m_vOutFrameBuf,
        m_vEncodeCtx->pix_fmt, m_width, m_height, 1);
    // Allocate a FIFO large enough for 30 frames
    if (!(m_vFifoBuf = av_fifo_alloc_array(30, m_vOutFrameSize)))
    {
        qDebug() << "av_fifo_alloc_array failed";
        return;
    }
}

void ScreenRecordImpl::InitAudioBuffer()
{
    m_nbSamples = m_aEncodeCtx->frame_size;
    if (!m_nbSamples)
    {
        qDebug() << "m_nbSamples==0";
        m_nbSamples = 1024;
    }
    m_aFifoBuf = av_audio_fifo_alloc(m_aEncodeCtx->sample_fmt, m_aEncodeCtx->channels, 30 * m_nbSamples);
    if (!m_aFifoBuf)
    {
        qDebug() << "av_audio_fifo_alloc failed";
        return;
    }
}

void ScreenRecordImpl::FlushVideoDecoder()
{
    int ret = -1;
    int y_size = m_width * m_height;
    AVFrame *oldFrame = av_frame_alloc();
    AVFrame *newFrame = av_frame_alloc();
    // newFrame needs a buffer for sws_scale to write into (same layout as in ScreenRecordThreadProc)
    int newFrameBufSize = av_image_get_buffer_size(m_vEncodeCtx->pix_fmt, m_width, m_height, 1);
    uint8_t *newFrameBuf = (uint8_t*)av_malloc(newFrameBufSize);
    av_image_fill_arrays(newFrame->data, newFrame->linesize, newFrameBuf,
        m_vEncodeCtx->pix_fmt, m_width, m_height, 1);

    ret = avcodec_send_packet(m_vDecodeCtx, nullptr);
    if (ret != 0)
    {
        qDebug() << "flush video avcodec_send_packet failed, ret: " << ret;
        return;
    }
    while (ret >= 0)
    {
        ret = avcodec_receive_frame(m_vDecodeCtx, oldFrame);
        if (ret < 0)
        {
            if (ret == AVERROR(EAGAIN))
            {
                qDebug() << "flush EAGAIN avcodec_receive_frame";
                ret = 1;
                continue;
            }
            else if (ret == AVERROR_EOF)
            {
                qDebug() << "flush video decoder finished";
                break;
            }
            qDebug() << "flush video avcodec_receive_frame error, ret: " << ret;
            return;
        }
        ++g_vCollectFrameCnt;
        sws_scale(m_swsCtx, (const uint8_t* const*)oldFrame->data, oldFrame->linesize, 0,
            m_vEncodeCtx->height, newFrame->data, newFrame->linesize);
        {
            unique_lock<mutex> lk(m_mtxVBuf);
            m_cvVBufNotFull.wait(lk, [this] { return av_fifo_space(m_vFifoBuf) >= m_vOutFrameSize; });
        }
        av_fifo_generic_write(m_vFifoBuf, newFrame->data[0], y_size, NULL);
        av_fifo_generic_write(m_vFifoBuf, newFrame->data[1], y_size / 4, NULL);
        av_fifo_generic_write(m_vFifoBuf, newFrame->data[2], y_size / 4, NULL);
        m_cvVBufNotEmpty.notify_one();
    }
    av_free(newFrameBuf);
    av_frame_free(&oldFrame);
    av_frame_free(&newFrame);
    qDebug() << "video collect frame count: " << g_vCollectFrameCnt;
}

//void ScreenRecordImpl::FlushVideoEncoder()
//{
//  int ret = -1;
//  AVPacket pkt = { 0 };
//  av_init_packet(&pkt);
//  ret = avcodec_send_frame(m_vEncodeCtx, nullptr);
//  qDebug() << "avcodec_send_frame ret:" << ret;
//  while (ret >= 0)
//  {
//      ret = avcodec_receive_packet(m_vEncodeCtx, &pkt);
//      if (ret < 0)
//      {
//          av_packet_unref(&pkt);
//          if (ret == AVERROR(EAGAIN))
//          {
//              qDebug() << "flush EAGAIN avcodec_receive_packet";
//              ret = 1;
//              continue;
//          }
//          else if (ret == AVERROR_EOF)
//          {
//              qDebug() << "flush video encoder finished";
//              break;
//          }
//          qDebug() << "flush video avcodec_receive_packet failed, ret: " << ret;
//          return;
//      }
//      pkt.stream_index = m_vOutIndex;
//      av_packet_rescale_ts(&pkt, m_vEncodeCtx->time_base, m_oFmtCtx->streams[m_vOutIndex]->time_base);
//
//      ret = av_interleaved_write_frame(m_oFmtCtx, &pkt);
//      if (ret == 0)
//          qDebug() << "flush Write video packet id: " << ++g_vEncodeFrameCnt;
//      else
//          qDebug() << "video av_interleaved_write_frame failed, ret:" << ret;
//      av_free_packet(&pkt);
//  }
//}

void ScreenRecordImpl::FlushAudioDecoder()
{
    int ret = -1;
    AVPacket pkt = { 0 };
    av_init_packet(&pkt);
    int dstNbSamples, maxDstNbSamples;
    AVFrame *rawFrame = av_frame_alloc();
    AVFrame *newFrame = AllocAudioFrame(m_aEncodeCtx, m_nbSamples);
    maxDstNbSamples = dstNbSamples = av_rescale_rnd(m_nbSamples,
        m_aEncodeCtx->sample_rate, m_aDecodeCtx->sample_rate, AV_ROUND_UP);

    ret = avcodec_send_packet(m_aDecodeCtx, nullptr);
    if (ret != 0)
    {
        qDebug() << "flush audio avcodec_send_packet failed, ret: " << ret;
        return;
    }
    while (ret >= 0)
    {
        ret = avcodec_receive_frame(m_aDecodeCtx, rawFrame);
        if (ret < 0)
        {
            if (ret == AVERROR(EAGAIN))
            {
                qDebug() << "flush audio EAGAIN avcodec_receive_frame";
                ret = 1;
                continue;
            }
            else if (ret == AVERROR_EOF)
            {
                qDebug() << "flush audio decoder finished";
                break;
            }
            qDebug() << "flush audio avcodec_receive_frame error, ret: " << ret;
            return;
        }
        ++g_aCollectFrameCnt;
        dstNbSamples = av_rescale_rnd(swr_get_delay(m_swrCtx, m_aDecodeCtx->sample_rate) + rawFrame->nb_samples,
            m_aEncodeCtx->sample_rate, m_aDecodeCtx->sample_rate, AV_ROUND_UP);
        if (dstNbSamples > maxDstNbSamples)
        {
            qDebug() << "flush audio newFrame realloc";
            av_freep(&newFrame->data[0]);
            ret = av_samples_alloc(newFrame->data, newFrame->linesize, m_aEncodeCtx->channels,
                dstNbSamples, m_aEncodeCtx->sample_fmt, 1);
            if (ret < 0)
            {
                qDebug() << "flush av_samples_alloc failed";
                return;
            }
            maxDstNbSamples = dstNbSamples;
            m_aEncodeCtx->frame_size = dstNbSamples;
            m_nbSamples = newFrame->nb_samples;
        }
        newFrame->nb_samples = swr_convert(m_swrCtx, newFrame->data, dstNbSamples,
            (const uint8_t **)rawFrame->data, rawFrame->nb_samples);
        if (newFrame->nb_samples < 0)
        {
            qDebug() << "flush swr_convert failed";
            return;
        }
        {
            unique_lock<mutex> lk(m_mtxABuf);
            m_cvABufNotFull.wait(lk, [newFrame, this] { return av_audio_fifo_space(m_aFifoBuf) >= newFrame->nb_samples; });
        }
        if (av_audio_fifo_write(m_aFifoBuf, (void **)newFrame->data, newFrame->nb_samples) < newFrame->nb_samples)
        {
            qDebug() << "av_audio_fifo_write failed";
            return;
        }
        m_cvABufNotEmpty.notify_one();
    }
    qDebug() << "audio collect frame count: " << g_aCollectFrameCnt;
}

//void ScreenRecordImpl::FlushAudioEncoder()
//{
//}

void ScreenRecordImpl::FlushEncoders()
{
    int ret = -1;
    bool vBeginFlush = false;
    bool aBeginFlush = false;
    m_vCurPts = m_aCurPts = 0;
    int nFlush = 2;
    while (1)
    {
        AVPacket pkt = { 0 };
        av_init_packet(&pkt);
        if (av_compare_ts(m_vCurPts, m_oFmtCtx->streams[m_vOutIndex]->time_base,
            m_aCurPts, m_oFmtCtx->streams[m_aOutIndex]->time_base) <= 0)
        {
            if (!vBeginFlush)
            {
                vBeginFlush = true;
                ret = avcodec_send_frame(m_vEncodeCtx, nullptr);
                if (ret != 0)
                {
                    qDebug() << "flush video avcodec_send_frame failed, ret: " << ret;
                    return;
                }
            }
            ret = avcodec_receive_packet(m_vEncodeCtx, &pkt);
            if (ret < 0)
            {
                av_packet_unref(&pkt);
                if (ret == AVERROR(EAGAIN))
                {
                    qDebug() << "flush video EAGAIN avcodec_receive_packet";
                    ret = 1;
                    continue;
                }
                else if (ret == AVERROR_EOF)
                {
                    qDebug() << "flush video encoder finished";
                    //break;
                    if (!(--nFlush))
                        break;
                    m_vCurPts = INT_MAX;
                    continue;
                }
                qDebug() << "flush video avcodec_receive_packet failed, ret: " << ret;
                return;
            }
            pkt.stream_index = m_vOutIndex;
            // Rescale pts from the encoder time base to the muxer time base
            av_packet_rescale_ts(&pkt, m_vEncodeCtx->time_base, m_oFmtCtx->streams[m_vOutIndex]->time_base);
            m_vCurPts = pkt.pts;
            qDebug() << "m_vCurPts: " << m_vCurPts;

            ret = av_interleaved_write_frame(m_oFmtCtx, &pkt);
            if (ret == 0)
                qDebug() << "flush Write video packet id: " << ++g_vEncodeFrameCnt;
            else
                qDebug() << "flush video av_interleaved_write_frame failed, ret:" << ret;
            av_free_packet(&pkt);
        }
        else
        {
            if (!aBeginFlush)
            {
                aBeginFlush = true;
                ret = avcodec_send_frame(m_aEncodeCtx, nullptr);
                if (ret != 0)
                {
                    qDebug() << "flush audio avcodec_send_frame failed, ret: " << ret;
                    return;
                }
            }
            ret = avcodec_receive_packet(m_aEncodeCtx, &pkt);
            if (ret < 0)
            {
                av_packet_unref(&pkt);
                if (ret == AVERROR(EAGAIN))
                {
                    qDebug() << "flush EAGAIN avcodec_receive_packet";
                    ret = 1;
                    continue;
                }
                else if (ret == AVERROR_EOF)
                {
                    qDebug() << "flush audio encoder finished";
                    /*break;*/
                    if (!(--nFlush))
                        break;
                    m_aCurPts = INT_MAX;
                    continue;
                }
                qDebug() << "flush audio avcodec_receive_packet failed, ret: " << ret;
                return;
            }
            pkt.stream_index = m_aOutIndex;
            // Rescale pts from the encoder time base to the muxer time base
            av_packet_rescale_ts(&pkt, m_aEncodeCtx->time_base, m_oFmtCtx->streams[m_aOutIndex]->time_base);
            m_aCurPts = pkt.pts;
            qDebug() << "m_aCurPts: " << m_aCurPts;

            ret = av_interleaved_write_frame(m_oFmtCtx, &pkt);
            if (ret == 0)
                qDebug() << "flush write audio packet id: " << ++g_aEncodeFrameCnt;
            else
                qDebug() << "flush audio av_interleaved_write_frame failed, ret: " << ret;
            av_free_packet(&pkt);
        }
    }
}

void ScreenRecordImpl::Release()
{
    if (m_vOutFrame)
    {
        av_frame_free(&m_vOutFrame);
        m_vOutFrame = nullptr;
    }
    if (m_vOutFrameBuf)
    {
        av_free(m_vOutFrameBuf);
        m_vOutFrameBuf = nullptr;
    }
    if (m_oFmtCtx)
    {
        avio_close(m_oFmtCtx->pb);
        avformat_free_context(m_oFmtCtx);
        m_oFmtCtx = nullptr;
    }
    //if (m_vDecodeCtx)
    //{
    //    // FIXME: why does this crash here?
    //    avcodec_free_context(&m_vDecodeCtx);
    //    m_vDecodeCtx = nullptr;
    //}
    if (m_aDecodeCtx)
    {
        avcodec_free_context(&m_aDecodeCtx);
        m_aDecodeCtx = nullptr;
    }
    if (m_vEncodeCtx)
    {
        avcodec_free_context(&m_vEncodeCtx);
        m_vEncodeCtx = nullptr;
    }
    if (m_aEncodeCtx)
    {
        avcodec_free_context(&m_aEncodeCtx);
        m_aEncodeCtx = nullptr;
    }
    if (m_vFifoBuf)
    {
        av_fifo_freep(&m_vFifoBuf);
        m_vFifoBuf = nullptr;
    }
    if (m_aFifoBuf)
    {
        av_audio_fifo_free(m_aFifoBuf);
        m_aFifoBuf = nullptr;
    }
    if (m_vFmtCtx)
    {
        avformat_close_input(&m_vFmtCtx);
        m_vFmtCtx = nullptr;
    }
    if (m_aFmtCtx)
    {
        avformat_close_input(&m_aFmtCtx);
        m_aFmtCtx = nullptr;
    }
}

void ScreenRecordImpl::MuxThreadProc()
{
    int ret = -1;
    bool done = false;
    int vFrameIndex = 0, aFrameIndex = 0;

    av_register_all();
    avdevice_register_all();
    avcodec_register_all();

    if (OpenVideo() < 0)
        return;
    if (OpenAudio() < 0)
        return;
    if (OpenOutput() < 0)
        return;
    InitVideoBuffer();
    InitAudioBuffer();

    // Start the audio and video capture threads
    std::thread screenRecord(&ScreenRecordImpl::ScreenRecordThreadProc, this);
    std::thread soundRecord(&ScreenRecordImpl::SoundRecordThreadProc, this);
    screenRecord.detach();
    soundRecord.detach();

    while (1)
    {
        if (m_state == RecordState::Stopped && !done)
            done = true;
        if (done)
        {
            unique_lock<mutex> vBufLock(m_mtxVBuf, std::defer_lock);
            unique_lock<mutex> aBufLock(m_mtxABuf, std::defer_lock);
            std::lock(vBufLock, aBufLock);
            if (av_fifo_size(m_vFifoBuf) < m_vOutFrameSize &&
                av_audio_fifo_size(m_aFifoBuf) < m_nbSamples)
            {
                qDebug() << "both video and audio fifo buf are empty, break";
                break;
            }
        }
        if (av_compare_ts(m_vCurPts, m_oFmtCtx->streams[m_vOutIndex]->time_base,
            m_aCurPts, m_oFmtCtx->streams[m_aOutIndex]->time_base) <= 0)
        /*if (av_compare_ts(vCurPts, m_vEncodeCtx->time_base,
            aCurPts, m_aEncodeCtx->time_base) <= 0)*/
        {
            if (done)
            {
                lock_guard<mutex> lk(m_mtxVBuf);
                if (av_fifo_size(m_vFifoBuf) < m_vOutFrameSize)
                {
                    qDebug() << "video write done";
                    //break;
                    //m_vCurPts = 0x7ffffffffffffffe; // near the maximum signed int64_t
                    m_vCurPts = INT_MAX;
                    continue;
                }
            }
            else
            {
                unique_lock<mutex> lk(m_mtxVBuf);
                m_cvVBufNotEmpty.wait(lk, [this] { return av_fifo_size(m_vFifoBuf) >= m_vOutFrameSize; });
            }
            av_fifo_generic_read(m_vFifoBuf, m_vOutFrameBuf, m_vOutFrameSize, NULL);
            m_cvVBufNotFull.notify_one();

            // Set the video frame parameters
            //m_vOutFrame->pts = vFrameIndex * ((m_oFmtCtx->streams[m_vOutIndex]->time_base.den / m_oFmtCtx->streams[m_vOutIndex]->time_base.num) / m_fps);
            m_vOutFrame->pts = vFrameIndex++;
            m_vOutFrame->format = m_vEncodeCtx->pix_fmt;
            m_vOutFrame->width = m_vEncodeCtx->width;
            m_vOutFrame->height = m_vEncodeCtx->height;

            AVPacket pkt = { 0 };
            av_init_packet(&pkt);
            ret = avcodec_send_frame(m_vEncodeCtx, m_vOutFrame);
            if (ret != 0)
            {
                qDebug() << "video avcodec_send_frame failed, ret: " << ret;
                av_packet_unref(&pkt);
                continue;
            }
            ret = avcodec_receive_packet(m_vEncodeCtx, &pkt);
            if (ret != 0)
            {
                qDebug() << "video avcodec_receive_packet failed, ret: " << ret;
                av_packet_unref(&pkt);
                continue;
            }
            pkt.stream_index = m_vOutIndex;
            // Rescale pts from the encoder time base to the muxer time base
            av_packet_rescale_ts(&pkt, m_vEncodeCtx->time_base, m_oFmtCtx->streams[m_vOutIndex]->time_base);
            m_vCurPts = pkt.pts;
            qDebug() << "m_vCurPts: " << m_vCurPts;

            ret = av_interleaved_write_frame(m_oFmtCtx, &pkt);
            if (ret == 0)
                qDebug() << "Write video packet id: " << ++g_vEncodeFrameCnt;
            else
                qDebug() << "video av_interleaved_write_frame failed, ret:" << ret;
            av_free_packet(&pkt);
        }
        else
        {
            if (done)
            {
                lock_guard<mutex> lk(m_mtxABuf);
                if (av_audio_fifo_size(m_aFifoBuf) < m_nbSamples)
                {
                    qDebug() << "audio write done";
                    //m_aCurPts = 0x7fffffffffffffff;
                    m_aCurPts = INT_MAX;
                    continue;
                }
            }
            else
            {
                unique_lock<mutex> lk(m_mtxABuf);
                m_cvABufNotEmpty.wait(lk, [this] { return av_audio_fifo_size(m_aFifoBuf) >= m_nbSamples; });
            }
            int ret = -1;
            AVFrame *aFrame = av_frame_alloc();
            aFrame->nb_samples = m_nbSamples;
            aFrame->channel_layout = m_aEncodeCtx->channel_layout;
            aFrame->format = m_aEncodeCtx->sample_fmt;
            aFrame->sample_rate = m_aEncodeCtx->sample_rate;
            aFrame->pts = m_nbSamples * aFrameIndex++;
            // Allocate the data buffers
            ret = av_frame_get_buffer(aFrame, 0);
            av_audio_fifo_read(m_aFifoBuf, (void **)aFrame->data, m_nbSamples);
            m_cvABufNotFull.notify_one();

            AVPacket pkt = { 0 };
            av_init_packet(&pkt);
            ret = avcodec_send_frame(m_aEncodeCtx, aFrame);
            if (ret != 0)
            {
                qDebug() << "audio avcodec_send_frame failed, ret: " << ret;
                av_frame_free(&aFrame);
                av_packet_unref(&pkt);
                continue;
            }
            ret = avcodec_receive_packet(m_aEncodeCtx, &pkt);
            if (ret != 0)
            {
                qDebug() << "audio avcodec_receive_packet failed, ret: " << ret;
                av_frame_free(&aFrame);
                av_packet_unref(&pkt);
                continue;
            }
            pkt.stream_index = m_aOutIndex;
            // Rescale pts from the encoder time base to the muxer time base
            av_packet_rescale_ts(&pkt, m_aEncodeCtx->time_base, m_oFmtCtx->streams[m_aOutIndex]->time_base);
            m_aCurPts = pkt.pts;
            qDebug() << "aCurPts: " << m_aCurPts;

            ret = av_interleaved_write_frame(m_oFmtCtx, &pkt);
            if (ret == 0)
                qDebug() << "Write audio packet id: " << ++g_aEncodeFrameCnt;
            else
                qDebug() << "audio av_interleaved_write_frame failed, ret: " << ret;
            av_frame_free(&aFrame);
            av_free_packet(&pkt);
        }
    }
    FlushEncoders();
    av_write_trailer(m_oFmtCtx);
    Release();
    qDebug() << "parent thread exit";
}

void ScreenRecordImpl::ScreenRecordThreadProc()
{
    int ret = -1;
    AVPacket pkt = { 0 };
    av_init_packet(&pkt);
    int y_size = m_width * m_height;
    AVFrame *oldFrame = av_frame_alloc();
    AVFrame *newFrame = av_frame_alloc();

    int newFrameBufSize = av_image_get_buffer_size(m_vEncodeCtx->pix_fmt, m_width, m_height, 1);
    uint8_t *newFrameBuf = (uint8_t*)av_malloc(newFrameBufSize);
    av_image_fill_arrays(newFrame->data, newFrame->linesize, newFrameBuf,
        m_vEncodeCtx->pix_fmt, m_width, m_height, 1);

    while (m_state != RecordState::Stopped)
    {
        if (m_state == RecordState::Paused)
        {
            unique_lock<mutex> lk(m_mtxPause);
            m_cvNotPause.wait(lk, [this] { return m_state != RecordState::Paused; });
        }
        if (av_read_frame(m_vFmtCtx, &pkt) < 0)
        {
            qDebug() << "video av_read_frame < 0";
            continue;
        }
        if (pkt.stream_index != m_vIndex)
        {
            qDebug() << "not a video packet from video input";
            av_packet_unref(&pkt);
            continue;   // skip packets that do not belong to the video stream
        }
        ret = avcodec_send_packet(m_vDecodeCtx, &pkt);
        if (ret != 0)
        {
            qDebug() << "video avcodec_send_packet failed, ret:" << ret;
            av_packet_unref(&pkt);
            continue;
        }
        ret = avcodec_receive_frame(m_vDecodeCtx, oldFrame);
        if (ret != 0)
        {
            qDebug() << "video avcodec_receive_frame failed, ret:" << ret;
            av_packet_unref(&pkt);
            continue;
        }
        ++g_vCollectFrameCnt;
        sws_scale(m_swsCtx, (const uint8_t* const*)oldFrame->data, oldFrame->linesize, 0,
            m_vEncodeCtx->height, newFrame->data, newFrame->linesize);
        {
            unique_lock<mutex> lk(m_mtxVBuf);
            m_cvVBufNotFull.wait(lk, [this] { return av_fifo_space(m_vFifoBuf) >= m_vOutFrameSize; });
        }
        av_fifo_generic_write(m_vFifoBuf, newFrame->data[0], y_size, NULL);
        av_fifo_generic_write(m_vFifoBuf, newFrame->data[1], y_size / 4, NULL);
        av_fifo_generic_write(m_vFifoBuf, newFrame->data[2], y_size / 4, NULL);
        m_cvVBufNotEmpty.notify_one();
        av_packet_unref(&pkt);
    }
    FlushVideoDecoder();
    av_free(newFrameBuf);
    av_frame_free(&oldFrame);
    av_frame_free(&newFrame);
    qDebug() << "screen record thread exit";
}

void ScreenRecordImpl::SoundRecordThreadProc()
{
    int ret = -1;
    AVPacket pkt = { 0 };
    av_init_packet(&pkt);
    int nbSamples = m_nbSamples;
    int dstNbSamples, maxDstNbSamples;
    AVFrame *rawFrame = av_frame_alloc();
    AVFrame *newFrame = AllocAudioFrame(m_aEncodeCtx, nbSamples);
    maxDstNbSamples = dstNbSamples = av_rescale_rnd(nbSamples,
        m_aEncodeCtx->sample_rate, m_aDecodeCtx->sample_rate, AV_ROUND_UP);

    while (m_state != RecordState::Stopped)
    {
        if (m_state == RecordState::Paused)
        {
            unique_lock<mutex> lk(m_mtxPause);
            m_cvNotPause.wait(lk, [this] { return m_state != RecordState::Paused; });
        }
        if (av_read_frame(m_aFmtCtx, &pkt) < 0)
        {
            qDebug() << "audio av_read_frame < 0";
            continue;
        }
        if (pkt.stream_index != m_aIndex)
        {
            qDebug() << "not an audio packet";
            av_packet_unref(&pkt);
            continue;
        }
        ret = avcodec_send_packet(m_aDecodeCtx, &pkt);
        if (ret != 0)
        {
            qDebug() << "audio avcodec_send_packet failed, ret: " << ret;
            av_packet_unref(&pkt);
            continue;
        }
        ret = avcodec_receive_frame(m_aDecodeCtx, rawFrame);
        if (ret != 0)
        {
            qDebug() << "audio avcodec_receive_frame failed, ret: " << ret;
            av_packet_unref(&pkt);
            continue;
        }
        ++g_aCollectFrameCnt;
        dstNbSamples = av_rescale_rnd(swr_get_delay(m_swrCtx, m_aDecodeCtx->sample_rate) + rawFrame->nb_samples,
            m_aEncodeCtx->sample_rate, m_aDecodeCtx->sample_rate, AV_ROUND_UP);
        if (dstNbSamples > maxDstNbSamples)
        {
            qDebug() << "audio newFrame realloc";
            av_freep(&newFrame->data[0]);
            // nb_samples * nb_channels * bytes_per_sample
            ret = av_samples_alloc(newFrame->data, newFrame->linesize, m_aEncodeCtx->channels,
                dstNbSamples, m_aEncodeCtx->sample_fmt, 1);
            if (ret < 0)
            {
                qDebug() << "av_samples_alloc failed";
                return;
            }
            maxDstNbSamples = dstNbSamples;
            m_aEncodeCtx->frame_size = dstNbSamples;
            m_nbSamples = newFrame->nb_samples;  //1024
            /*
             * m_nbSamples = dstNbSamples;       //22050
             * If this is changed to m_nbSamples = dstNbSamples, av_audio_fifo_write misbehaves; not sure why.
             * It feels like it should be 22050, otherwise the encode thread encodes too few samples per frame,
             * yet the audio produced with 1024 seems fine?
             * Should the audio FIFO be reallocated based on the captured nb_samples?
             */
        }
        newFrame->nb_samples = swr_convert(m_swrCtx, newFrame->data, dstNbSamples,
            (const uint8_t **)rawFrame->data, rawFrame->nb_samples);
        if (newFrame->nb_samples < 0)
        {
            qDebug() << "swr_convert error";
            return;
        }
        {
            unique_lock<mutex> lk(m_mtxABuf);
            m_cvABufNotFull.wait(lk, [newFrame, this] { return av_audio_fifo_space(m_aFifoBuf) >= newFrame->nb_samples; });
        }
        if (av_audio_fifo_write(m_aFifoBuf, (void **)newFrame->data, newFrame->nb_samples) < newFrame->nb_samples)
        {
            qDebug() << "av_audio_fifo_write failed";
            return;
        }
        m_cvABufNotEmpty.notify_one();
    }
    FlushAudioDecoder();
    av_frame_free(&rawFrame);
    av_frame_free(&newFrame);
    qDebug() << "sound record thread exit";
}

ScreenRecordTest.h

#pragma once
#include <QObject>
#include <QVariant>

class ScreenRecord : public QObject
{
    Q_OBJECT

public:
    ScreenRecord(QObject *parent = Q_NULLPTR);

private:
    QVariantMap m_args;
};

ScreenRecordTest.cpp

#include "ScreenRecordTest.h"
#include "ScreenRecordImpl.h"
#include <QTimer>

ScreenRecord::ScreenRecord(QObject *parent) :
    QObject(parent)
{
    ScreenRecordImpl *sr = new ScreenRecordImpl(this);
    QVariantMap args;
    args["filePath"] = "test.mp4";
    //args["width"] = 1920;
    //args["height"] = 1080;
    args["width"] = 1440;
    args["height"] = 900;
    args["fps"] = 30;
    args["audioBitrate"] = 128000;
    sr->Init(args);
    QTimer::singleShot(1000, sr, SLOT(Start()));
    //QTimer::singleShot(5000, sr, SLOT(Pause()));
    QTimer::singleShot(11000, sr, SLOT(Stop()));
}
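In this test driver the Start/Stop slots are fired from QTimer::singleShot. In a real UI they would instead be connected to buttons. A minimal sketch of that wiring (the button objects and the free function are hypothetical, not part of the original code; the string-based SIGNAL/SLOT connect is used because the slots are declared private):

#include <QObject>
#include <QPushButton>
#include "ScreenRecordImpl.h"

// Hypothetical wiring of ScreenRecordImpl's slots to UI buttons.
void wireRecorderButtons(ScreenRecordImpl *recorder,
                         QPushButton *startBtn,
                         QPushButton *pauseBtn,
                         QPushButton *stopBtn)
{
    QObject::connect(startBtn, SIGNAL(clicked()), recorder, SLOT(Start()));
    QObject::connect(pauseBtn, SIGNAL(clicked()), recorder, SLOT(Pause()));
    QObject::connect(stopBtn,  SIGNAL(clicked()), recorder, SLOT(Stop()));
}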
