diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Private/AgoraBlueprintPlugin/AgoraBPuRtcEngine.cpp b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Private/AgoraBlueprintPlugin/AgoraBPuRtcEngine.cpp index 427d6165..0eee32bf 100644 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Private/AgoraBlueprintPlugin/AgoraBPuRtcEngine.cpp +++ b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Private/AgoraBlueprintPlugin/AgoraBPuRtcEngine.cpp @@ -2439,7 +2439,7 @@ int UAgoraBPuRtcEngine::StartScreenCaptureByWindowId(int64 windowId, const FRect #if defined(_WIN32) || (defined(__APPLE__) && TARGET_OS_MAC && !TARGET_OS_IPHONE) agora::rtc::Rectangle rectangle = regionRect.CreateAgoraData(); agora::rtc::ScreenCaptureParameters screenCaptureParameters = captureParams.CreateAgoraData(); - auto ret = AgoraUERtcEngine::Get()->startScreenCaptureByWindowId(UABT::ToView(windowId), rectangle, screenCaptureParameters); + auto ret = AgoraUERtcEngine::Get()->startScreenCaptureByWindowId(windowId, rectangle, screenCaptureParameters); regionRect.FreeAgoraData(rectangle); captureParams.FreeAgoraData(screenCaptureParameters); diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Private/AgoraCppPlugin/AgoraUERTCEngine.cpp b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Private/AgoraCppPlugin/AgoraUERTCEngine.cpp index 045aeec7..3335f650 100644 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Private/AgoraCppPlugin/AgoraUERTCEngine.cpp +++ b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Private/AgoraCppPlugin/AgoraUERTCEngine.cpp @@ -33,7 +33,6 @@ namespace agora { AgoraUERtcEngine* AgoraUERtcEngine::Instance = nullptr; std::mutex AgoraUERtcEngine::MutexLock; - AgoraAppType AgoraUERtcEngine::RtcEngineAppType = AgoraAppType::kAppTypeUnreal; AgoraUERtcEngine* AgoraUERtcEngine::Get() @@ -525,6 +524,14 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } + int AgoraUERtcEngine::setFilterEffectOptions(bool enabled, const 
FilterEffectOptions& options, agora::media::MEDIA_SOURCE_TYPE type /*= agora::media::PRIMARY_CAMERA_SOURCE*/) + { + if (RtcEngine != nullptr) { + return RtcEngine->setFilterEffectOptions(enabled, options, type); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + int AgoraUERtcEngine::setLowlightEnhanceOptions(bool enabled, agora::rtc::LowlightEnhanceOptions const& options, agora::media::MEDIA_SOURCE_TYPE type) { if (RtcEngine != nullptr) { return RtcEngine->setLowlightEnhanceOptions(enabled, options, type); @@ -663,56 +670,55 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - - int AgoraUERtcEngine::setSubscribeAudioBlocklist(uid_t* uidList, int uidNumber) - { + int AgoraUERtcEngine::muteRemoteVideoStream(agora::rtc::uid_t uid, bool mute) { if (RtcEngine != nullptr) { - return RtcEngine->setSubscribeAudioBlocklist(uidList, uidNumber); + return RtcEngine->muteRemoteVideoStream(uid, mute); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::setSubscribeAudioAllowlist(uid_t* uidList, int uidNumber) - { + int AgoraUERtcEngine::setRemoteVideoStreamType(agora::rtc::uid_t uid, agora::rtc::VIDEO_STREAM_TYPE streamType) { if (RtcEngine != nullptr) { - return RtcEngine->setSubscribeAudioAllowlist(uidList, uidNumber); + return RtcEngine->setRemoteVideoStreamType(uid, streamType); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::setSubscribeVideoBlocklist(uid_t* uidList, int uidNumber) - { + int AgoraUERtcEngine::setRemoteVideoSubscriptionOptions(agora::rtc::uid_t uid, agora::rtc::VideoSubscriptionOptions const& options) { if (RtcEngine != nullptr) { - return RtcEngine->setSubscribeVideoBlocklist(uidList, uidNumber); + return RtcEngine->setRemoteVideoSubscriptionOptions(uid, options); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::setSubscribeVideoAllowlist(uid_t* uidList, int uidNumber) + int AgoraUERtcEngine::setSubscribeAudioBlocklist(uid_t* uidList, int uidNumber) { if (RtcEngine != nullptr) { - 
return RtcEngine->setSubscribeVideoAllowlist(uidList, uidNumber); + return RtcEngine->setSubscribeAudioBlocklist(uidList, uidNumber); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::muteRemoteVideoStream(agora::rtc::uid_t uid, bool mute) { + int AgoraUERtcEngine::setSubscribeAudioAllowlist(uid_t* uidList, int uidNumber) + { if (RtcEngine != nullptr) { - return RtcEngine->muteRemoteVideoStream(uid, mute); + return RtcEngine->setSubscribeAudioAllowlist(uidList, uidNumber); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::setRemoteVideoStreamType(agora::rtc::uid_t uid, agora::rtc::VIDEO_STREAM_TYPE streamType) { + int AgoraUERtcEngine::setSubscribeVideoBlocklist(uid_t* uidList, int uidNumber) + { if (RtcEngine != nullptr) { - return RtcEngine->setRemoteVideoStreamType(uid, streamType); + return RtcEngine->setSubscribeVideoBlocklist(uidList, uidNumber); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::setRemoteVideoSubscriptionOptions(agora::rtc::uid_t uid, agora::rtc::VideoSubscriptionOptions const& options) { + int AgoraUERtcEngine::setSubscribeVideoAllowlist(uid_t* uidList, int uidNumber) + { if (RtcEngine != nullptr) { - return RtcEngine->setRemoteVideoSubscriptionOptions(uid, options); + return RtcEngine->setSubscribeVideoAllowlist(uidList, uidNumber); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } @@ -1248,6 +1254,22 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } + int AgoraUERtcEngine::setLocalRenderTargetFps(VIDEO_SOURCE_TYPE sourceType, int targetFps) + { + if (RtcEngine != nullptr) { + return RtcEngine->setLocalRenderTargetFps(sourceType, targetFps); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + int AgoraUERtcEngine::setRemoteRenderTargetFps(int targetFps) + { + if (RtcEngine != nullptr) { + return RtcEngine->setRemoteRenderTargetFps(targetFps); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + int AgoraUERtcEngine::setLocalRenderMode(media::base::RENDER_MODE_TYPE 
renderMode) { if (RtcEngine != nullptr) { return RtcEngine->setLocalRenderMode(renderMode); @@ -1301,22 +1323,6 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::setEarMonitoringAudioFrameParameters(int sampleRate, int channel, RAW_AUDIO_FRAME_OP_MODE_TYPE mode, int samplesPerCall) - { - if (RtcEngine != nullptr) { - return RtcEngine->setEarMonitoringAudioFrameParameters(sampleRate, channel, mode, samplesPerCall); - } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } - - int AgoraUERtcEngine::registerExtension(const char* provider, const char* extension, agora::media::MEDIA_SOURCE_TYPE type /*= agora::media::UNKNOWN_MEDIA_SOURCE*/) - { - if (RtcEngine != nullptr) { - return RtcEngine->registerExtension(provider, extension, type); - } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } - int AgoraUERtcEngine::enableCustomAudioLocalPlayback(track_id_t trackId, bool enabled) { @@ -1347,6 +1353,14 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } + int AgoraUERtcEngine::setEarMonitoringAudioFrameParameters(int sampleRate, int channel, RAW_AUDIO_FRAME_OP_MODE_TYPE mode, int samplesPerCall) + { + if (RtcEngine != nullptr) { + return RtcEngine->setEarMonitoringAudioFrameParameters(sampleRate, channel, mode, samplesPerCall); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + int AgoraUERtcEngine::setPlaybackAudioFrameBeforeMixingParameters(int sampleRate, int channel) { if (RtcEngine != nullptr) { return RtcEngine->setPlaybackAudioFrameBeforeMixingParameters(sampleRate, channel); @@ -1434,6 +1448,30 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } + int AgoraUERtcEngine::enableExtension(const char* provider, const char* extension, const ExtensionInfo& extensionInfo, bool enable /*= true*/) + { + if (RtcEngine != nullptr) { + return RtcEngine->enableExtension(provider, extension, extensionInfo, enable); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + int AgoraUERtcEngine::setExtensionProperty(const char* 
provider, const char* extension, const ExtensionInfo& extensionInfo, const char* key, const char* value) + { + if (RtcEngine != nullptr) { + return RtcEngine->setExtensionProperty(provider, extension, extensionInfo, key, value); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + int AgoraUERtcEngine::getExtensionProperty(const char* provider, const char* extension, const ExtensionInfo& extensionInfo, const char* key, char* value, int buf_len) + { + if (RtcEngine != nullptr) { + return RtcEngine->getExtensionProperty(provider, extension, extensionInfo, key, value, buf_len); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + int AgoraUERtcEngine::enableLoopbackRecording(bool enabled, char const* deviceName) { if (RtcEngine != nullptr) { return RtcEngine->enableLoopbackRecording(enabled, deviceName); @@ -1483,23 +1521,21 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::enableExtension(char const* provider, char const* extension, bool enable, agora::media::MEDIA_SOURCE_TYPE type) { + int AgoraUERtcEngine::registerExtension(const char* provider, const char* extension, agora::media::MEDIA_SOURCE_TYPE type /*= agora::media::UNKNOWN_MEDIA_SOURCE*/) + { if (RtcEngine != nullptr) { - return RtcEngine->enableExtension(provider, extension, enable, type); + return RtcEngine->registerExtension(provider, extension, type); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::enableExtension(const char* provider, const char* extension, const ExtensionInfo& extensionInfo, bool enable /*= true*/) - { + int AgoraUERtcEngine::enableExtension(char const* provider, char const* extension, bool enable, agora::media::MEDIA_SOURCE_TYPE type) { if (RtcEngine != nullptr) { - return RtcEngine->enableExtension(provider, extension, extensionInfo, enable); + return RtcEngine->enableExtension(provider, extension, enable, type); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - - int AgoraUERtcEngine::setExtensionProperty(const char* 
provider, const char* extension, const char* key, const char* value, agora::media::MEDIA_SOURCE_TYPE type /*= agora::media::UNKNOWN_MEDIA_SOURCE*/) { if (RtcEngine != nullptr) { @@ -1508,24 +1544,6 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - - int AgoraUERtcEngine::setExtensionProperty(const char* provider, const char* extension, const ExtensionInfo& extensionInfo, const char* key, const char* value) - { - if (RtcEngine != nullptr) { - return RtcEngine->setExtensionProperty(provider, extension, extensionInfo, key, value); - } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } - - int AgoraUERtcEngine::getExtensionProperty(const char* provider, const char* extension, const ExtensionInfo& extensionInfo, const char* key, char* value, int buf_len) - { - if (RtcEngine != nullptr) { - return RtcEngine->getExtensionProperty(provider, extension, extensionInfo, key, value, buf_len); - } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } - - int AgoraUERtcEngine::getExtensionProperty(const char* provider, const char* extension, const char* key, char* value, int buf_len, agora::media::MEDIA_SOURCE_TYPE type /*= agora::media::UNKNOWN_MEDIA_SOURCE*/) { if (RtcEngine != nullptr) { @@ -1779,7 +1797,7 @@ namespace agora { } #endif #if defined(_WIN32) || (defined(__APPLE__) && !TARGET_OS_IPHONE && TARGET_OS_MAC) - int AgoraUERtcEngine::startScreenCaptureByDisplayId(uint32_t displayId, Rectangle const& regionRect, ScreenCaptureParameters const& captureParams) { + int AgoraUERtcEngine::startScreenCaptureByDisplayId(int64_t displayId, Rectangle const& regionRect, ScreenCaptureParameters const& captureParams) { if (RtcEngine != nullptr) { return RtcEngine->startScreenCaptureByDisplayId(displayId, regionRect, captureParams); } @@ -1804,7 +1822,7 @@ namespace agora { } #endif #if defined(_WIN32) || (defined(__APPLE__) && TARGET_OS_MAC && !TARGET_OS_IPHONE) - int AgoraUERtcEngine::startScreenCaptureByWindowId(agora::view_t windowId, agora::rtc::Rectangle const& regionRect, 
agora::rtc::ScreenCaptureParameters const& captureParams) { + int AgoraUERtcEngine::startScreenCaptureByWindowId(int64_t windowId, agora::rtc::Rectangle const& regionRect, agora::rtc::ScreenCaptureParameters const& captureParams) { if (RtcEngine != nullptr) { return RtcEngine->startScreenCaptureByWindowId(windowId, regionRect, captureParams); } @@ -1865,6 +1883,16 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } +#if defined(__ANDROID__) + int AgoraUERtcEngine::setExternalMediaProjection(void* mediaProjection) + { + if (RtcEngine != nullptr) { + return RtcEngine->setExternalMediaProjection(mediaProjection); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } +#endif + + #endif #if defined(_WIN32) || defined(__APPLE__) || defined(__ANDROID__) @@ -1877,8 +1905,6 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - - int AgoraUERtcEngine::stopScreenCapture() { if (RtcEngine != nullptr) { return RtcEngine->stopScreenCapture(); @@ -1886,23 +1912,9 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::startScreenCapture(VIDEO_SOURCE_TYPE sourceType, const ScreenCaptureConfiguration& config) - { - if (RtcEngine != nullptr) { - return RtcEngine->startScreenCapture(sourceType, config); - } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } - - int AgoraUERtcEngine::stopScreenCapture(VIDEO_SOURCE_TYPE sourceType) - { - if (RtcEngine != nullptr) { - return RtcEngine->stopScreenCapture(sourceType); - } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } #endif + int AgoraUERtcEngine::getCallId(agora::util::AString& callId) { if (RtcEngine != nullptr) { return RtcEngine->getCallId(callId); @@ -1945,23 +1957,23 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::stopRtmpStream(char const* url) { + int AgoraUERtcEngine::startLocalVideoTranscoder(agora::rtc::LocalTranscoderConfiguration const& config) { if (RtcEngine != nullptr) { - return RtcEngine->stopRtmpStream(url); + return 
RtcEngine->startLocalVideoTranscoder(config); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::startLocalVideoTranscoder(agora::rtc::LocalTranscoderConfiguration const& config) { + int AgoraUERtcEngine::updateLocalTranscoderConfiguration(agora::rtc::LocalTranscoderConfiguration const& config) { if (RtcEngine != nullptr) { - return RtcEngine->startLocalVideoTranscoder(config); + return RtcEngine->updateLocalTranscoderConfiguration(config); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::updateLocalTranscoderConfiguration(agora::rtc::LocalTranscoderConfiguration const& config) { + int AgoraUERtcEngine::stopRtmpStream(char const* url) { if (RtcEngine != nullptr) { - return RtcEngine->updateLocalTranscoderConfiguration(config); + return RtcEngine->stopRtmpStream(url); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } @@ -1974,6 +1986,33 @@ namespace agora { } + int AgoraUERtcEngine::startLocalAudioMixer(const LocalAudioMixerConfiguration& config) + { + if (RtcEngine != nullptr) { + return RtcEngine->startLocalAudioMixer(config); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + + int AgoraUERtcEngine::updateLocalAudioMixerConfiguration(const LocalAudioMixerConfiguration& config) + { + if (RtcEngine != nullptr) { + return RtcEngine->updateLocalAudioMixerConfiguration(config); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + + int AgoraUERtcEngine::stopLocalAudioMixer() + { + if (RtcEngine != nullptr) { + return RtcEngine->stopLocalAudioMixer(); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + int AgoraUERtcEngine::startCameraCapture(VIDEO_SOURCE_TYPE sourceType, const CameraCapturerConfiguration& config) { if (RtcEngine != nullptr) { @@ -2005,6 +2044,21 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } + int AgoraUERtcEngine::startScreenCapture(VIDEO_SOURCE_TYPE sourceType, const ScreenCaptureConfiguration& config) + { + if (RtcEngine != nullptr) { + return RtcEngine->startScreenCapture(sourceType, 
config); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + int AgoraUERtcEngine::stopScreenCapture(VIDEO_SOURCE_TYPE sourceType) + { + if (RtcEngine != nullptr) { + return RtcEngine->stopScreenCapture(sourceType); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } agora::rtc::CONNECTION_STATE_TYPE AgoraUERtcEngine::getConnectionState() { if (RtcEngine != nullptr) { @@ -2113,151 +2167,68 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::setAdvancedAudioOptions(AdvancedAudioOptions& options, int sourceType /*= 0*/) - { + + int AgoraUERtcEngine::sendCustomReportMessage(char const* id, char const* category, char const* event, char const* label, int value) { if (RtcEngine != nullptr) { - return RtcEngine->setAdvancedAudioOptions(options, sourceType); + return RtcEngine->sendCustomReportMessage(id, category, event, label, value); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int64_t AgoraUERtcEngine::getCurrentMonotonicTimeInMs() - { + int AgoraUERtcEngine::registerMediaMetadataObserver(agora::rtc::IMetadataObserver* observer, IMetadataObserver::METADATA_TYPE type) { if (RtcEngine != nullptr) { - return RtcEngine->getCurrentMonotonicTimeInMs(); + return RtcEngine->registerMediaMetadataObserver(observer, type); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::enableWirelessAccelerate(bool enabled) { + + int AgoraUERtcEngine::unregisterMediaMetadataObserver(agora::rtc::IMetadataObserver* observer, IMetadataObserver::METADATA_TYPE type) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->enableWirelessAccelerate(enabled); + return RtcEngine->unregisterMediaMetadataObserver(observer, type); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::getNetworkType() - { + int AgoraUERtcEngine::startAudioFrameDump(char const* channel_id, agora::rtc::uid_t user_id, char const* location, char const* uuid, char const* passwd, long duration_ms, bool auto_upload) { if (RtcEngine != nullptr) { - 
return RtcEngine->getNetworkType(); + return RtcEngine->startAudioFrameDump(channel_id, user_id, location, uuid, passwd, duration_ms, auto_upload); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - - int AgoraUERtcEngine::setParameters(const char* parameters) - { + int AgoraUERtcEngine::stopAudioFrameDump(char const* channel_id, agora::rtc::uid_t user_id, char const* location) { if (RtcEngine != nullptr) { - return RtcEngine->setParameters(parameters); + return RtcEngine->stopAudioFrameDump(channel_id, user_id, location); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::startMediaRenderingTracing() + int AgoraUERtcEngine::setAINSMode(bool enabled, AUDIO_AINS_MODE mode) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->startMediaRenderingTracing(); + return RtcEngine->setAINSMode(enabled, mode); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - - int AgoraUERtcEngine::enableInstantMediaRendering() - { + int AgoraUERtcEngine::registerLocalUserAccount(char const* appId, char const* userAccount) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->enableInstantMediaRendering(); + return RtcEngine->registerLocalUserAccount(appId, userAccount); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - - uint64_t AgoraUERtcEngine::getNtpWallTimeInMs() - { + int AgoraUERtcEngine::joinChannelWithUserAccount(char const* token, char const* channelId, char const* userAccount) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->getNtpWallTimeInMs(); + return RtcEngine->joinChannelWithUserAccount(token, channelId, userAccount); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - - bool AgoraUERtcEngine::isFeatureAvailableOnDevice(FeatureType type) - { + int AgoraUERtcEngine::joinChannelWithUserAccount(char const* token, char const* channelId, char const* userAccount, agora::rtc::ChannelMediaOptions const& options) { if (RtcEngine != nullptr) { - return RtcEngine->isFeatureAvailableOnDevice(type); - } - return true; - } - - - int 
AgoraUERtcEngine::sendAudioMetadata(const char* metadata, size_t length) - { - if (RtcEngine != nullptr) { - return RtcEngine->sendAudioMetadata(metadata, length); - } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } - - int AgoraUERtcEngine::sendCustomReportMessage(char const* id, char const* category, char const* event, char const* label, int value) { - if (RtcEngine != nullptr) { - return RtcEngine->sendCustomReportMessage(id, category, event, label, value); - } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } - - int AgoraUERtcEngine::registerMediaMetadataObserver(agora::rtc::IMetadataObserver* observer, IMetadataObserver::METADATA_TYPE type) { - if (RtcEngine != nullptr) { - return RtcEngine->registerMediaMetadataObserver(observer, type); - } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } - - int AgoraUERtcEngine::unregisterMediaMetadataObserver(agora::rtc::IMetadataObserver* observer, IMetadataObserver::METADATA_TYPE type) { - if (RtcEngine != nullptr) { - return RtcEngine->unregisterMediaMetadataObserver(observer, type); - } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } - - int AgoraUERtcEngine::startAudioFrameDump(char const* channel_id, agora::rtc::uid_t user_id, char const* location, char const* uuid, char const* passwd, long duration_ms, bool auto_upload) { - if (RtcEngine != nullptr) { - return RtcEngine->startAudioFrameDump(channel_id, user_id, location, uuid, passwd, duration_ms, auto_upload); - } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } - - int AgoraUERtcEngine::stopAudioFrameDump(char const* channel_id, agora::rtc::uid_t user_id, char const* location) { - if (RtcEngine != nullptr) { - return RtcEngine->stopAudioFrameDump(channel_id, user_id, location); - } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } - - - int AgoraUERtcEngine::setAINSMode(bool enabled, AUDIO_AINS_MODE mode) - { - if (RtcEngine != nullptr) { - return RtcEngine->setAINSMode(enabled, mode); - } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } - - int 
AgoraUERtcEngine::registerLocalUserAccount(char const* appId, char const* userAccount) { - if (RtcEngine != nullptr) { - return RtcEngine->registerLocalUserAccount(appId, userAccount); - } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } - - int AgoraUERtcEngine::joinChannelWithUserAccount(char const* token, char const* channelId, char const* userAccount) { - if (RtcEngine != nullptr) { - return RtcEngine->joinChannelWithUserAccount(token, channelId, userAccount); - } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } - - int AgoraUERtcEngine::joinChannelWithUserAccount(char const* token, char const* channelId, char const* userAccount, agora::rtc::ChannelMediaOptions const& options) { - if (RtcEngine != nullptr) { - return RtcEngine->joinChannelWithUserAccount(token, channelId, userAccount, options); + return RtcEngine->joinChannelWithUserAccount(token, channelId, userAccount, options); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } @@ -2376,6 +2347,14 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } + int AgoraUERtcEngine::takeSnapshot(uid_t uid, const media::SnapshotConfig& config) + { + if (RtcEngine != nullptr) { + return RtcEngine->takeSnapshot(uid, config); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + int AgoraUERtcEngine::enableContentInspect(bool enabled, media::ContentInspectConfig const& config) { if (RtcEngine != nullptr) { return RtcEngine->enableContentInspect(enabled, config); @@ -2414,6 +2393,13 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } + int AgoraUERtcEngine::setAdvancedAudioOptions(AdvancedAudioOptions& options, int sourceType /*= 0*/) + { + if (RtcEngine != nullptr) { + return RtcEngine->setAdvancedAudioOptions(options, sourceType); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } int AgoraUERtcEngine::setAVSyncSource(char const* channelId, agora::rtc::uid_t uid) { if (RtcEngine != nullptr) { @@ -2429,231 +2415,230 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int 
AgoraUERtcEngine::joinChannelEx(char const* token, agora::rtc::RtcConnection const& connection, agora::rtc::ChannelMediaOptions const& options, agora::rtc::IRtcEngineEventHandler* eventHandler) { - if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->joinChannelEx(token, connection, options, eventHandler); - } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } - int AgoraUERtcEngine::leaveChannelEx(agora::rtc::RtcConnection const& connection) { + int64_t AgoraUERtcEngine::getCurrentMonotonicTimeInMs() + { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->leaveChannelEx(connection); + return RtcEngine->getCurrentMonotonicTimeInMs(); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::leaveChannelEx(const RtcConnection& connection, const LeaveChannelOptions& options) - { + int AgoraUERtcEngine::enableWirelessAccelerate(bool enabled) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->leaveChannelEx(connection, options); + return ((IRtcEngineEx*)RtcEngine)->enableWirelessAccelerate(enabled); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::muteLocalAudioStreamEx(bool mute, const RtcConnection& connection) + int AgoraUERtcEngine::getNetworkType() { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->muteLocalAudioStreamEx(mute, connection); + return RtcEngine->getNetworkType(); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::muteLocalVideoStreamEx(bool mute, const RtcConnection& connection) + + int AgoraUERtcEngine::setParameters(const char* parameters) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->muteLocalVideoStreamEx(mute, connection); + return RtcEngine->setParameters(parameters); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::muteAllRemoteAudioStreamsEx(bool mute, const RtcConnection& connection) + + int AgoraUERtcEngine::startMediaRenderingTracing() { if (RtcEngine != nullptr) { - return 
((IRtcEngineEx*)RtcEngine)->muteAllRemoteAudioStreamsEx(mute, connection); + return ((IRtcEngineEx*)RtcEngine)->startMediaRenderingTracing(); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::muteAllRemoteVideoStreamsEx(bool mute, const RtcConnection& connection) + + int AgoraUERtcEngine::enableInstantMediaRendering() { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->muteAllRemoteVideoStreamsEx(mute, connection); + return ((IRtcEngineEx*)RtcEngine)->enableInstantMediaRendering(); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::setSubscribeAudioBlocklistEx(uid_t* uidList, int uidNumber, const RtcConnection& connection) + + uint64_t AgoraUERtcEngine::getNtpWallTimeInMs() { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->setSubscribeAudioBlocklistEx(uidList, uidNumber, connection); + return ((IRtcEngineEx*)RtcEngine)->getNtpWallTimeInMs(); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::setSubscribeAudioAllowlistEx(uid_t* uidList, int uidNumber, const RtcConnection& connection) + + bool AgoraUERtcEngine::isFeatureAvailableOnDevice(FeatureType type) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->setSubscribeAudioAllowlistEx(uidList, uidNumber, connection); + return RtcEngine->isFeatureAvailableOnDevice(type); } - return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + return true; } - int AgoraUERtcEngine::setSubscribeVideoBlocklistEx(uid_t* uidList, int uidNumber, const RtcConnection& connection) + + int AgoraUERtcEngine::sendAudioMetadata(const char* metadata, size_t length) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->setSubscribeVideoBlocklistEx(uidList, uidNumber, connection); + return RtcEngine->sendAudioMetadata(metadata, length); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::setSubscribeVideoAllowlistEx(uid_t* uidList, int uidNumber, const RtcConnection& connection) + int 
AgoraUERtcEngine::queryHDRCapability(VIDEO_MODULE_TYPE videoModule, HDR_CAPABILITY& capability) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->setSubscribeVideoAllowlistEx(uidList, uidNumber, connection); + return RtcEngine->queryHDRCapability(videoModule, capability); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::adjustUserPlaybackSignalVolumeEx(uid_t uid, int volume, const RtcConnection& connection) - { + + + // IRtcEngineEx + int AgoraUERtcEngine::joinChannelEx(char const* token, agora::rtc::RtcConnection const& connection, agora::rtc::ChannelMediaOptions const& options, agora::rtc::IRtcEngineEventHandler* eventHandler) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->adjustUserPlaybackSignalVolumeEx(uid, volume, connection); + return ((IRtcEngineEx*)RtcEngine)->joinChannelEx(token, connection, options, eventHandler); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::startRtmpStreamWithoutTranscodingEx(const char* url, const RtcConnection& connection) - { + int AgoraUERtcEngine::leaveChannelEx(agora::rtc::RtcConnection const& connection) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->startRtmpStreamWithoutTranscodingEx(url, connection); + return ((IRtcEngineEx*)RtcEngine)->leaveChannelEx(connection); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::startRtmpStreamWithTranscodingEx(const char* url, const LiveTranscoding& transcoding, const RtcConnection& connection) + int AgoraUERtcEngine::leaveChannelEx(const RtcConnection& connection, const LeaveChannelOptions& options) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->startRtmpStreamWithTranscodingEx(url, transcoding, connection); + return ((IRtcEngineEx*)RtcEngine)->leaveChannelEx(connection, options); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::updateRtmpTranscodingEx(const LiveTranscoding& transcoding, const RtcConnection& connection) + + int 
AgoraUERtcEngine::leaveChannelWithUserAccountEx(const char* channelId, const char* userAccount) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->updateRtmpTranscodingEx(transcoding, connection); + return ((IRtcEngineEx*)RtcEngine)->leaveChannelWithUserAccountEx(channelId, userAccount); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::stopRtmpStreamEx(const char* url, const RtcConnection& connection) + int AgoraUERtcEngine::leaveChannelWithUserAccountEx(const char* channelId, const char* userAccount, const LeaveChannelOptions& options) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->stopRtmpStreamEx(url, connection); + return ((IRtcEngineEx*)RtcEngine)->leaveChannelWithUserAccountEx(channelId, userAccount, options); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - - int AgoraUERtcEngine::startOrUpdateChannelMediaRelayEx(const ChannelMediaRelayConfiguration& configuration, const RtcConnection& connection) - { + int AgoraUERtcEngine::updateChannelMediaOptionsEx(agora::rtc::ChannelMediaOptions const& options, agora::rtc::RtcConnection const& connection) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->startOrUpdateChannelMediaRelayEx(configuration, connection); + return ((IRtcEngineEx*)RtcEngine)->updateChannelMediaOptionsEx(options, connection); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::stopChannelMediaRelayEx(const RtcConnection& connection) - { + int AgoraUERtcEngine::setVideoEncoderConfigurationEx(agora::rtc::VideoEncoderConfiguration const& config, agora::rtc::RtcConnection const& connection) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->stopChannelMediaRelayEx(connection); + return ((IRtcEngineEx*)RtcEngine)->setVideoEncoderConfigurationEx(config, connection); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::pauseAllChannelMediaRelayEx(const RtcConnection& connection) - { + int 
AgoraUERtcEngine::muteRemoteAudioStreamEx(agora::rtc::uid_t uid, bool mute, agora::rtc::RtcConnection const& connection) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->stopChannelMediaRelayEx(connection); + return ((IRtcEngineEx*)RtcEngine)->muteRemoteAudioStreamEx(uid, mute, connection); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::resumeAllChannelMediaRelayEx(const RtcConnection& connection) - { + int AgoraUERtcEngine::muteRemoteVideoStreamEx(agora::rtc::uid_t uid, bool mute, agora::rtc::RtcConnection const& connection) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->resumeAllChannelMediaRelayEx(connection); + return ((IRtcEngineEx*)RtcEngine)->muteRemoteVideoStreamEx(uid, mute, connection); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::enableDualStreamModeEx(bool enabled, const SimulcastStreamConfig& streamConfig, const RtcConnection& connection) - { + + int AgoraUERtcEngine::setRemoteVideoStreamTypeEx(agora::rtc::uid_t uid, agora::rtc::VIDEO_STREAM_TYPE streamType, agora::rtc::RtcConnection const& connection) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->enableDualStreamModeEx(enabled, streamConfig, connection); + return ((IRtcEngineEx*)RtcEngine)->setRemoteVideoStreamTypeEx(uid, streamType, connection); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::setDualStreamModeEx(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig, const RtcConnection& connection) + int AgoraUERtcEngine::muteLocalAudioStreamEx(bool mute, const RtcConnection& connection) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->setDualStreamModeEx(mode, streamConfig, connection); + return ((IRtcEngineEx*)RtcEngine)->muteLocalAudioStreamEx(mute, connection); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - - int AgoraUERtcEngine::setSimulcastConfigEx(const SimulcastConfig& simulcastConfig, const RtcConnection& connection) + int 
AgoraUERtcEngine::muteLocalVideoStreamEx(bool mute, const RtcConnection& connection) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->setSimulcastConfigEx(simulcastConfig, connection); + return ((IRtcEngineEx*)RtcEngine)->muteLocalVideoStreamEx(mute, connection); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); - } - - int AgoraUERtcEngine::setHighPriorityUserListEx(uid_t* uidList, int uidNum, STREAM_FALLBACK_OPTIONS option, const RtcConnection& connection) + int AgoraUERtcEngine::muteAllRemoteAudioStreamsEx(bool mute, const RtcConnection& connection) { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->setHighPriorityUserListEx(uidList, uidNum, option, connection); + return ((IRtcEngineEx*)RtcEngine)->muteAllRemoteAudioStreamsEx(mute, connection); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::updateChannelMediaOptionsEx(agora::rtc::ChannelMediaOptions const& options, agora::rtc::RtcConnection const& connection) { + int AgoraUERtcEngine::muteAllRemoteVideoStreamsEx(bool mute, const RtcConnection& connection) + { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->updateChannelMediaOptionsEx(options, connection); + return ((IRtcEngineEx*)RtcEngine)->muteAllRemoteVideoStreamsEx(mute, connection); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::setVideoEncoderConfigurationEx(agora::rtc::VideoEncoderConfiguration const& config, agora::rtc::RtcConnection const& connection) { + int AgoraUERtcEngine::setSubscribeAudioBlocklistEx(uid_t* uidList, int uidNumber, const RtcConnection& connection) + { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->setVideoEncoderConfigurationEx(config, connection); + return ((IRtcEngineEx*)RtcEngine)->setSubscribeAudioBlocklistEx(uidList, uidNumber, connection); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::muteRemoteAudioStreamEx(agora::rtc::uid_t uid, bool mute, agora::rtc::RtcConnection const& connection) { + int 
AgoraUERtcEngine::setSubscribeAudioAllowlistEx(uid_t* uidList, int uidNumber, const RtcConnection& connection) + { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->muteRemoteAudioStreamEx(uid, mute, connection); + return ((IRtcEngineEx*)RtcEngine)->setSubscribeAudioAllowlistEx(uidList, uidNumber, connection); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::muteRemoteVideoStreamEx(agora::rtc::uid_t uid, bool mute, agora::rtc::RtcConnection const& connection) { + int AgoraUERtcEngine::setSubscribeVideoBlocklistEx(uid_t* uidList, int uidNumber, const RtcConnection& connection) + { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->muteRemoteVideoStreamEx(uid, mute, connection); + return ((IRtcEngineEx*)RtcEngine)->setSubscribeVideoBlocklistEx(uidList, uidNumber, connection); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } - int AgoraUERtcEngine::setRemoteVideoStreamTypeEx(agora::rtc::uid_t uid, agora::rtc::VIDEO_STREAM_TYPE streamType, agora::rtc::RtcConnection const& connection) { + int AgoraUERtcEngine::setSubscribeVideoAllowlistEx(uid_t* uidList, int uidNumber, const RtcConnection& connection) + { if (RtcEngine != nullptr) { - return ((IRtcEngineEx*)RtcEngine)->setRemoteVideoStreamTypeEx(uid, streamType, connection); + return ((IRtcEngineEx*)RtcEngine)->setSubscribeVideoAllowlistEx(uidList, uidNumber, connection); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } @@ -2711,6 +2696,16 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } + + int AgoraUERtcEngine::adjustUserPlaybackSignalVolumeEx(uid_t uid, int volume, const RtcConnection& connection) + { + if (RtcEngine != nullptr) { + return ((IRtcEngineEx*)RtcEngine)->adjustUserPlaybackSignalVolumeEx(uid, volume, connection); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + agora::rtc::CONNECTION_STATE_TYPE AgoraUERtcEngine::getConnectionStateEx(agora::rtc::RtcConnection const& connection) { if (RtcEngine != nullptr) { return 
((IRtcEngineEx*)RtcEngine)->getConnectionStateEx(connection); @@ -2774,6 +2769,71 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } + int AgoraUERtcEngine::startRtmpStreamWithoutTranscodingEx(const char* url, const RtcConnection& connection) + { + if (RtcEngine != nullptr) { + return ((IRtcEngineEx*)RtcEngine)->startRtmpStreamWithoutTranscodingEx(url, connection); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + int AgoraUERtcEngine::startRtmpStreamWithTranscodingEx(const char* url, const LiveTranscoding& transcoding, const RtcConnection& connection) + { + if (RtcEngine != nullptr) { + return ((IRtcEngineEx*)RtcEngine)->startRtmpStreamWithTranscodingEx(url, transcoding, connection); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + int AgoraUERtcEngine::updateRtmpTranscodingEx(const LiveTranscoding& transcoding, const RtcConnection& connection) + { + if (RtcEngine != nullptr) { + return ((IRtcEngineEx*)RtcEngine)->updateRtmpTranscodingEx(transcoding, connection); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + int AgoraUERtcEngine::stopRtmpStreamEx(const char* url, const RtcConnection& connection) + { + if (RtcEngine != nullptr) { + return ((IRtcEngineEx*)RtcEngine)->stopRtmpStreamEx(url, connection); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + + int AgoraUERtcEngine::startOrUpdateChannelMediaRelayEx(const ChannelMediaRelayConfiguration& configuration, const RtcConnection& connection) + { + if (RtcEngine != nullptr) { + return ((IRtcEngineEx*)RtcEngine)->startOrUpdateChannelMediaRelayEx(configuration, connection); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + int AgoraUERtcEngine::stopChannelMediaRelayEx(const RtcConnection& connection) + { + if (RtcEngine != nullptr) { + return ((IRtcEngineEx*)RtcEngine)->stopChannelMediaRelayEx(connection); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + int AgoraUERtcEngine::pauseAllChannelMediaRelayEx(const RtcConnection& connection) + { + if (RtcEngine != nullptr) { + 
return ((IRtcEngineEx*)RtcEngine)->pauseAllChannelMediaRelayEx(connection); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + int AgoraUERtcEngine::resumeAllChannelMediaRelayEx(const RtcConnection& connection) + { + if (RtcEngine != nullptr) { + return ((IRtcEngineEx*)RtcEngine)->resumeAllChannelMediaRelayEx(connection); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + int AgoraUERtcEngine::getUserInfoByUserAccountEx(char const* userAccount, rtc::UserInfo* userInfo, agora::rtc::RtcConnection const& connection) { if (RtcEngine != nullptr) { return ((IRtcEngineEx*)RtcEngine)->getUserInfoByUserAccountEx(userAccount, userInfo, connection); } return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } @@ -2788,6 +2848,41 @@ namespace agora { return AGORA_UE_ERR_CODE(ERROR_NULLPTR); } + int AgoraUERtcEngine::enableDualStreamModeEx(bool enabled, const SimulcastStreamConfig& streamConfig, const RtcConnection& connection) + { + if (RtcEngine != nullptr) { + return ((IRtcEngineEx*)RtcEngine)->enableDualStreamModeEx(enabled, streamConfig, connection); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + int AgoraUERtcEngine::setDualStreamModeEx(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig, const RtcConnection& connection) + { + if (RtcEngine != nullptr) { + return ((IRtcEngineEx*)RtcEngine)->setDualStreamModeEx(mode, streamConfig, connection); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + + int AgoraUERtcEngine::setSimulcastConfigEx(const SimulcastConfig& simulcastConfig, const RtcConnection& connection) + { + if (RtcEngine != nullptr) { + return ((IRtcEngineEx*)RtcEngine)->setSimulcastConfigEx(simulcastConfig, connection); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + + } + + + int AgoraUERtcEngine::setHighPriorityUserListEx(uid_t* uidList, int uidNum, STREAM_FALLBACK_OPTIONS option, const RtcConnection& connection) + { + if (RtcEngine != nullptr) { + return ((IRtcEngineEx*)RtcEngine)->setHighPriorityUserListEx(uidList, uidNum, option, connection); + } + return 
AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + int AgoraUERtcEngine::takeSnapshotEx(agora::rtc::RtcConnection const& connection, agora::rtc::uid_t uid, char const* filePath) { if (RtcEngine != nullptr) { return ((IRtcEngineEx*)RtcEngine)->takeSnapshotEx(connection, uid, filePath); @@ -2796,6 +2891,15 @@ namespace agora { } + int AgoraUERtcEngine::takeSnapshotEx(const RtcConnection& connection, uid_t uid, const media::SnapshotConfig& config) + { + if (RtcEngine != nullptr) { + return ((IRtcEngineEx*)RtcEngine)->takeSnapshotEx(connection, uid, config); + } + return AGORA_UE_ERR_CODE(ERROR_NULLPTR); + } + + int AgoraUERtcEngine::enableContentInspectEx(bool enabled, const media::ContentInspectConfig& config, const RtcConnection& connection) { if (RtcEngine != nullptr) { diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraBlueprintPlugin/AgoraBPuBaseDataTypes.h b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraBlueprintPlugin/AgoraBPuBaseDataTypes.h index 00cd64e1..4f38f2db 100644 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraBlueprintPlugin/AgoraBPuBaseDataTypes.h +++ b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraBlueprintPlugin/AgoraBPuBaseDataTypes.h @@ -3405,13 +3405,29 @@ struct FENUMWRAP_ENCODING_PREFERENCE { }; UENUM(BlueprintType) -enum class ECOMPRESSION_PREFERENCE : uint8 { +enum class EENUMCUSTOM_COMPRESSION_PREFERENCE :uint8 { + PREFER_COMPRESSION_AUTO = 0, + PREFER_LOW_LATENCY = 1, + PREFER_QUALITY = 2, +}; + +USTRUCT(BlueprintType) +struct FENUMWRAP_COMPRESSION_PREFERENCE { + + GENERATED_BODY() - PREFER_LOW_LATENCY = 0, +public: + // require to call [GetRawValue] method to get the raw value + UPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = "Agora|EENUMCUSTOM_COMPRESSION_PREFERENCE") + EENUMCUSTOM_COMPRESSION_PREFERENCE ValueWrapper = EENUMCUSTOM_COMPRESSION_PREFERENCE::PREFER_COMPRESSION_AUTO; - PREFER_QUALITY = 1, + 
AGORA_CREATE_UEENUM_CONVERT_STRUCT_INNER_3_ENTRIES(FENUMWRAP_COMPRESSION_PREFERENCE,agora::rtc::COMPRESSION_PREFERENCE, EENUMCUSTOM_COMPRESSION_PREFERENCE, + PREFER_COMPRESSION_AUTO, + PREFER_LOW_LATENCY, + PREFER_QUALITY) }; + USTRUCT(BlueprintType) struct FAdvanceOptions { @@ -3421,7 +3437,7 @@ struct FAdvanceOptions { FENUMWRAP_ENCODING_PREFERENCE encodingPreference = EENUMCUSTOM_ENCODING_PREFERENCE::PREFER_AUTO; UPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = "Agora|AdvanceOptions") - ECOMPRESSION_PREFERENCE compressionPreference = ECOMPRESSION_PREFERENCE::PREFER_QUALITY; + FENUMWRAP_COMPRESSION_PREFERENCE compressionPreference = EENUMCUSTOM_COMPRESSION_PREFERENCE::PREFER_QUALITY; UPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = "Agora|AdvanceOptions") bool encodeAlpha = false; @@ -3431,14 +3447,14 @@ struct FAdvanceOptions { FAdvanceOptions(const agora::rtc::AdvanceOptions & AgoraData){ encodingPreference = AgoraData.encodingPreference; - compressionPreference = static_cast<ECOMPRESSION_PREFERENCE>(AgoraData.compressionPreference); + compressionPreference = static_cast<EENUMCUSTOM_COMPRESSION_PREFERENCE>(AgoraData.compressionPreference); encodeAlpha = AgoraData.encodeAlpha; } agora::rtc::AdvanceOptions CreateAgoraData() const{ agora::rtc::AdvanceOptions AgoraData; AgoraData.encodingPreference = encodingPreference.GetRawValue(); - AgoraData.compressionPreference =static_cast<agora::rtc::COMPRESSION_PREFERENCE>(compressionPreference); + AgoraData.compressionPreference =static_cast<agora::rtc::COMPRESSION_PREFERENCE>(compressionPreference.GetRawValue()); AgoraData.encodeAlpha = encodeAlpha; return AgoraData; } @@ -4521,7 +4537,7 @@ struct FScreenCaptureSourceInfo { type = AgoraData.type; - sourceId = UABT::FromViewToInt(AgoraData.sourceId); + sourceId = AgoraData.sourceId; sourceName = UTF8_TO_TCHAR(AgoraData.sourceName); thumbImage = FThumbImageBuffer(AgoraData.thumbImage); iconImage = FThumbImageBuffer(AgoraData.iconImage); @@ -4532,14 +4548,14 @@ struct FScreenCaptureSourceInfo { position = FRectangle(AgoraData.position); #if defined(_WIN32) minimizeWindow = 
AgoraData.minimizeWindow; - sourceDisplayId = UABT::FromViewToInt(AgoraData.sourceDisplayId); + sourceDisplayId = AgoraData.sourceDisplayId; #endif } agora::rtc::ScreenCaptureSourceInfo CreateAgoraData() const { agora::rtc::ScreenCaptureSourceInfo AgoraData; AgoraData.type = static_cast(type.GetRawValue()); - AgoraData.sourceId = UABT::ToView(sourceId); + AgoraData.sourceId = sourceId; SET_UABT_FSTRING_TO_CONST_CHAR___MEMALLOC(AgoraData.sourceName, sourceName) AgoraData.thumbImage = thumbImage.CreateAgoraData(); AgoraData.iconImage = iconImage.CreateAgoraData(); @@ -4550,7 +4566,7 @@ struct FScreenCaptureSourceInfo { AgoraData.position = position.CreateAgoraData(); #if defined(_WIN32) AgoraData.minimizeWindow = minimizeWindow; - AgoraData.sourceDisplayId = UABT::ToView(sourceDisplayId); + AgoraData.sourceDisplayId = sourceDisplayId; #endif return AgoraData; } diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/AgoraUERTCEngine.h b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/AgoraUERTCEngine.h index e22e81ee..53858a2f 100644 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/AgoraUERTCEngine.h +++ b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/AgoraUERTCEngine.h @@ -34,6 +34,7 @@ namespace agora { static void Release(bool sync = false); static AgoraAppType RtcEngineAppType; + protected: //static variables static AgoraUERtcEngine* Instance; @@ -112,6 +113,7 @@ namespace agora { virtual int setFaceShapeAreaOptions(const FaceShapeAreaOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) override; virtual int getFaceShapeBeautyOptions(FaceShapeBeautyOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) override; virtual int getFaceShapeAreaOptions(agora::rtc::FaceShapeAreaOptions::FACE_SHAPE_AREA shapeArea, FaceShapeAreaOptions& options, agora::media::MEDIA_SOURCE_TYPE 
type = agora::media::PRIMARY_CAMERA_SOURCE) override; + virtual int setFilterEffectOptions(bool enabled, const FilterEffectOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) override; virtual int setLowlightEnhanceOptions(bool enabled, const LowlightEnhanceOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) override; virtual int setVideoDenoiserOptions(bool enabled, const VideoDenoiserOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) override; virtual int setColorEnhanceOptions(bool enabled, const ColorEnhanceOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) override; @@ -210,6 +212,8 @@ namespace agora { virtual int writeLog(commons::LOG_LEVEL level, const char* fmt, ...) override; virtual int setLocalRenderMode(media::base::RENDER_MODE_TYPE renderMode, VIDEO_MIRROR_MODE_TYPE mirrorMode) override; virtual int setRemoteRenderMode(uid_t uid, media::base::RENDER_MODE_TYPE renderMode, VIDEO_MIRROR_MODE_TYPE mirrorMode) override; + virtual int setLocalRenderTargetFps(VIDEO_SOURCE_TYPE sourceType, int targetFps) override; + virtual int setRemoteRenderTargetFps(int targetFps) override; virtual int setLocalRenderMode(media::base::RENDER_MODE_TYPE renderMode) __deprecated override; virtual int setLocalVideoMirrorMode(VIDEO_MIRROR_MODE_TYPE mirrorMode) __deprecated override; virtual int enableDualStreamMode(bool enabled) __deprecated override; @@ -304,7 +308,7 @@ namespace agora { virtual int setAudioSessionOperationRestriction(AUDIO_SESSION_OPERATION_RESTRICTION restriction) override; #endif #if defined(_WIN32) || (defined(__APPLE__) && !TARGET_OS_IPHONE && TARGET_OS_MAC) - virtual int startScreenCaptureByDisplayId(uint32_t displayId, const Rectangle& regionRect, const ScreenCaptureParameters& captureParams) override; + virtual int startScreenCaptureByDisplayId(int64_t displayId, const Rectangle& 
regionRect, const ScreenCaptureParameters& captureParams) override; #endif #if defined(_WIN32) virtual int startScreenCaptureByScreenRect(const Rectangle& screenRect, const Rectangle& regionRect, const ScreenCaptureParameters& captureParams) __deprecated override; @@ -313,7 +317,7 @@ namespace agora { virtual int getAudioDeviceInfo(DeviceInfo& deviceInfo) override; #endif #if defined(_WIN32) || (defined(__APPLE__) && TARGET_OS_MAC && !TARGET_OS_IPHONE) - virtual int startScreenCaptureByWindowId(view_t windowId, const Rectangle& regionRect, const ScreenCaptureParameters& captureParams) override; + virtual int startScreenCaptureByWindowId(int64_t windowId, const Rectangle& regionRect, const ScreenCaptureParameters& captureParams) override; virtual int setScreenCaptureContentHint(VIDEO_CONTENT_HINT contentHint) override; virtual int updateScreenCaptureRegion(const Rectangle& regionRect) override; virtual int updateScreenCaptureParameters(const ScreenCaptureParameters& captureParams) override; @@ -324,6 +328,10 @@ namespace agora { virtual int queryScreenCaptureCapability() override; virtual int queryCameraFocalLengthCapability(agora::rtc::FocalLengthInfo* focalLengthInfos, int& size) override; +#if defined(__ANDROID__) + virtual int setExternalMediaProjection(void* mediaProjection) override; +#endif + #endif #if defined(_WIN32) || defined(__APPLE__) || defined(__ANDROID__) virtual int setScreenCaptureScenario(SCREEN_SCENARIO_TYPE screenScenario) override; @@ -340,6 +348,9 @@ namespace agora { virtual int stopRtmpStream(const char* url) override; virtual int stopLocalVideoTranscoder() override; + virtual int startLocalAudioMixer(const LocalAudioMixerConfiguration& config) override; + virtual int updateLocalAudioMixerConfiguration(const LocalAudioMixerConfiguration& config) override; + virtual int stopLocalAudioMixer() override; virtual int startCameraCapture(VIDEO_SOURCE_TYPE sourceType, const CameraCapturerConfiguration& config) override; virtual int 
stopCameraCapture(VIDEO_SOURCE_TYPE sourceType) override; virtual int setCameraDeviceOrientation(VIDEO_SOURCE_TYPE type, VIDEO_ORIENTATION orientation) override; @@ -388,6 +399,7 @@ namespace agora { virtual int stopRhythmPlayer() override; virtual int configRhythmPlayer(const AgoraRhythmPlayerConfig& config) override; virtual int takeSnapshot(uid_t uid, const char* filePath) override; + virtual int takeSnapshot(uid_t uid, const media::SnapshotConfig& config) override; virtual int enableContentInspect(bool enabled, const media::ContentInspectConfig& config) override; virtual int adjustCustomAudioPublishVolume(track_id_t trackId, int volume) override; virtual int adjustCustomAudioPlayoutVolume(track_id_t trackId, int volume) override; @@ -406,10 +418,16 @@ namespace agora { virtual uint64_t getNtpWallTimeInMs() override; virtual bool isFeatureAvailableOnDevice(FeatureType type) override; virtual int sendAudioMetadata(const char* metadata, size_t length) override; + virtual int queryHDRCapability(VIDEO_MODULE_TYPE videoModule, HDR_CAPABILITY& capability) override; + + + // IRtcEngineEx virtual int joinChannelEx(const char* token, const RtcConnection& connection, const ChannelMediaOptions& options, IRtcEngineEventHandler* eventHandler) override; virtual int leaveChannelEx(const RtcConnection& connection) override; virtual int leaveChannelEx(const RtcConnection& connection, const LeaveChannelOptions& options) override; + virtual int leaveChannelWithUserAccountEx(const char* channelId, const char* userAccount) override; + virtual int leaveChannelWithUserAccountEx(const char* channelId, const char* userAccount, const LeaveChannelOptions& options) override; virtual int updateChannelMediaOptionsEx(const ChannelMediaOptions& options, const RtcConnection& connection) override; virtual int setVideoEncoderConfigurationEx(const VideoEncoderConfiguration& config, const RtcConnection& connection) override; virtual int muteRemoteAudioStreamEx(uid_t uid, bool mute, const 
RtcConnection& connection) override; @@ -456,6 +474,7 @@ namespace agora { const RtcConnection& connection) override; virtual int setHighPriorityUserListEx(uid_t* uidList, int uidNum, STREAM_FALLBACK_OPTIONS option, const RtcConnection& connection) override; virtual int takeSnapshotEx(const RtcConnection& connection, uid_t uid, const char* filePath) override; + virtual int takeSnapshotEx(const RtcConnection& connection, uid_t uid, const media::SnapshotConfig& config) override; virtual int enableContentInspectEx(bool enabled, const media::ContentInspectConfig& config, const RtcConnection& connection) override; virtual int startMediaRenderingTracingEx(const RtcConnection& connection) override; diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/AgoraBase.h b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/AgoraBase.h index 7ccb6891..f1208a20 100644 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/AgoraBase.h +++ b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/AgoraBase.h @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include "IAgoraParameter.h" #include "AgoraMediaBase.h" @@ -559,7 +561,8 @@ enum ERROR_CODE_TYPE { /** * 101: The App ID is invalid, usually because the data format of the App ID is incorrect. * - * Solution: Check the data format of your App ID. Ensure that you use the correct App ID to initialize the Agora service. + * Solution: Check the data format of your App ID. Ensure that you use the correct App ID to + * initialize the Agora service. */ ERR_INVALID_APP_ID = 101, /** @@ -578,9 +581,9 @@ enum ERROR_CODE_TYPE { * - Timeout for token authorization: Once a token is generated, you must use it to access the * Agora service within 24 hours. Otherwise, the token times out and you can no longer use it. 
* - The token privilege expires: To generate a token, you need to set a timestamp for the token - * privilege to expire. For example, If you set it as seven days, the token expires seven days after - * its usage. In that case, you can no longer access the Agora service. The users cannot make calls, - * or are kicked out of the channel. + * privilege to expire. For example, If you set it as seven days, the token expires seven days + * after its usage. In that case, you can no longer access the Agora service. The users cannot + * make calls, or are kicked out of the channel. * * Solution: Regardless of whether token authorization times out or the token privilege expires, * you need to generate a new token on your server, and try to join the channel. @@ -588,19 +591,19 @@ enum ERROR_CODE_TYPE { ERR_TOKEN_EXPIRED = 109, /** * 110: The token is invalid, usually for one of the following reasons: - * - Did not provide a token when joining a channel in a situation where the project has enabled the - * App Certificate. + * - Did not provide a token when joining a channel in a situation where the project has enabled + * the App Certificate. * - Tried to join a channel with a token in a situation where the project has not enabled the App * Certificate. - * - The App ID, user ID and channel name that you use to generate the token on the server do not match - * those that you use when joining a channel. + * - The App ID, user ID and channel name that you use to generate the token on the server do not + * match those that you use when joining a channel. * * Solution: - * - Before joining a channel, check whether your project has enabled the App certificate. If yes, you - * must provide a token when joining a channel; if no, join a channel without a token. 
- * - When using a token to join a channel, ensure that the App ID, user ID, and channel name that you - * use to generate the token is the same as the App ID that you use to initialize the Agora service, and - * the user ID and channel name that you use to join the channel. + * - Before joining a channel, check whether your project has enabled the App certificate. If yes, + * you must provide a token when joining a channel; if no, join a channel without a token. + * - When using a token to join a channel, ensure that the App ID, user ID, and channel name that + * you use to generate the token is the same as the App ID that you use to initialize the Agora + * service, and the user ID and channel name that you use to join the channel. */ ERR_INVALID_TOKEN = 110, /** @@ -672,13 +675,15 @@ enum ERROR_CODE_TYPE { ERR_LICENSE_CREDENTIAL_INVALID = 131, /** - * 134: The user account is invalid, usually because the data format of the user account is incorrect. + * 134: The user account is invalid, usually because the data format of the user account is + * incorrect. */ ERR_INVALID_USER_ACCOUNT = 134, /** 157: The necessary dynamical library is not integrated. For example, if you call - * the \ref agora::rtc::IRtcEngine::enableDeepLearningDenoise "enableDeepLearningDenoise" but do not integrate the dynamical - * library for the deep-learning noise reduction into your project, the SDK reports this error code. + * the \ref agora::rtc::IRtcEngine::enableDeepLearningDenoise "enableDeepLearningDenoise" but do + * not integrate the dynamical library for the deep-learning noise reduction into your project, + * the SDK reports this error code. 
* */ ERR_MODULE_NOT_FOUND = 157, @@ -698,7 +703,7 @@ enum ERROR_CODE_TYPE { ERR_CERT_REQUEST = 168, // PcmSend Error num - ERR_PCMSEND_FORMAT = 200, // unsupport pcm format + ERR_PCMSEND_FORMAT = 200, // unsupport pcm format ERR_PCMSEND_BUFFEROVERFLOW = 201, // buffer overflow, the pcm send rate too quickly /// @cond @@ -752,27 +757,27 @@ enum ERROR_CODE_TYPE { enum LICENSE_ERROR_TYPE { /** * 1: Invalid license - */ + */ LICENSE_ERR_INVALID = 1, /** * 2: License expired - */ + */ LICENSE_ERR_EXPIRE = 2, /** * 3: Exceed license minutes limit - */ + */ LICENSE_ERR_MINUTES_EXCEED = 3, /** * 4: License use in limited period - */ + */ LICENSE_ERR_LIMITED_PERIOD = 4, /** * 5: Same license used in different devices at the same time - */ + */ LICENSE_ERR_DIFF_DEVICES = 5, /** * 99: SDK internal error - */ + */ LICENSE_ERR_INTERNAL = 99, }; @@ -845,9 +850,9 @@ enum USER_OFFLINE_REASON_TYPE { */ USER_OFFLINE_QUIT = 0, /** - * 1: The SDK times out and the user drops offline because no data packet was received within a certain - * period of time. If a user quits the call and the message is not passed to the SDK (due to an - * unreliable channel), the SDK assumes that the user drops offline. + * 1: The SDK times out and the user drops offline because no data packet was received within a + * certain period of time. If a user quits the call and the message is not passed to the SDK (due + * to an unreliable channel), the SDK assumes that the user drops offline. */ USER_OFFLINE_DROPPED = 1, /** @@ -870,7 +875,7 @@ enum INTERFACE_ID_TYPE { AGORA_IID_STATE_SYNC = 13, AGORA_IID_META_SERVICE = 14, AGORA_IID_MUSIC_CONTENT_CENTER = 15, - AGORA_IID_H265_TRANSCODER = 16, + AGORA_IID_H265_TRANSCODER = 16, }; /** @@ -999,7 +1004,6 @@ enum FRAME_HEIGHT { FRAME_HEIGHT_540 = 540, }; - /** * Types of the video frame. */ @@ -1032,9 +1036,9 @@ enum ORIENTATION_MODE { ORIENTATION_MODE_ADAPTIVE = 0, /** * 1: Landscape mode. In this mode, the SDK always outputs videos in landscape (horizontal) mode. 
- * If the captured video is in portrait mode, the video encoder crops it to fit the output. Applies - * to situations where the receiving end cannot process the rotational information. For example, - * CDN live streaming. + * If the captured video is in portrait mode, the video encoder crops it to fit the output. + * Applies to situations where the receiving end cannot process the rotational information. For + * example, CDN live streaming. */ ORIENTATION_MODE_FIXED_LANDSCAPE = 1, /** @@ -1051,9 +1055,16 @@ */ enum DEGRADATION_PREFERENCE { /** - * 0: (Default) Prefers to reduce the video frame rate while maintaining video quality during video - * encoding under limited bandwidth. This degradation preference is suitable for scenarios where - * video quality is prioritized. + * -1: (Default) SDK uses degradation preference according to setVideoScenario API settings, real-time network state and other relevant data information. + * If API setVideoScenario set video scenario to APPLICATION_SCENARIO_LIVESHOW, then MAINTAIN_BALANCED is used. If not, then MAINTAIN_RESOLUTION is used. + * Also if network state has changed, SDK may change this parameter between MAINTAIN_FRAMERATE, MAINTAIN_BALANCED and MAINTAIN_RESOLUTION automatically to get the best QOE. + * We recommend using this option. + */ + MAINTAIN_AUTO = -1, + /** + * 0: (Deprecated) Prefers to reduce the video frame rate while maintaining video quality during + * video encoding under limited bandwidth. This degradation preference is suitable for scenarios + * where video quality is prioritized. * @note In the COMMUNICATION channel profile, the resolution of the video sent may change, so * remote users need to handle this issue. */ @@ -1066,9 +1077,9 @@ enum DEGRADATION_PREFERENCE { MAINTAIN_FRAMERATE = 1, /** * 2: Reduces the video frame rate and video quality simultaneously during video encoding under - * limited bandwidth. 
MAINTAIN_BALANCED has a lower reduction than MAINTAIN_QUALITY and MAINTAIN_FRAMERATE, - * and this preference is suitable for scenarios where both smoothness and video quality are a - * priority. + * limited bandwidth. MAINTAIN_BALANCED has a lower reduction than MAINTAIN_RESOLUTION and + * MAINTAIN_FRAMERATE, and this preference is suitable for scenarios where both smoothness and + * video quality are a priority. */ MAINTAIN_BALANCED = 2, /** @@ -1155,6 +1166,11 @@ enum VIDEO_CODEC_CAPABILITY_LEVEL { * The video codec types. */ enum VIDEO_CODEC_TYPE { + /** + * 0: (Default) SDK will automatically adjust the codec type according to country and region or real-time network state and other relevant data information. + * Also if network state is changed, SDK may change codec automatically to get the best QOE. + * We recommend use this option. + */ VIDEO_CODEC_NONE = 0, /** * 1: Standard VP8. @@ -1170,11 +1186,13 @@ enum VIDEO_CODEC_TYPE { VIDEO_CODEC_H265 = 3, /** * 6: Generic. This type is used for transmitting raw video data, such as encrypted video frames. - * The SDK returns this type of video frames in callbacks, and you need to decode and render the frames yourself. + * The SDK returns this type of video frames in callbacks, and you need to decode and render the + * frames yourself. */ VIDEO_CODEC_GENERIC = 6, /** * 7: Generic H264. + * @deprecated This codec type is deprecated. */ VIDEO_CODEC_GENERIC_H264 = 7, /** @@ -1237,7 +1255,8 @@ struct SenderOptions { */ TCcMode ccMode; /** - * The codec type used for the encoded images: \ref agora::rtc::VIDEO_CODEC_TYPE "VIDEO_CODEC_TYPE". + * The codec type used for the encoded images: \ref agora::rtc::VIDEO_CODEC_TYPE + * "VIDEO_CODEC_TYPE". */ VIDEO_CODEC_TYPE codecType; @@ -1249,12 +1268,14 @@ struct SenderOptions { * - \ref agora::rtc::STANDARD_BITRATE "STANDARD_BITRATE": (Recommended) Standard bitrate. * - Communication profile: The encoding bitrate equals the base bitrate. 
* - Live-broadcast profile: The encoding bitrate is twice the base bitrate. - * - \ref agora::rtc::COMPATIBLE_BITRATE "COMPATIBLE_BITRATE": Compatible bitrate. The bitrate stays the same + * - \ref agora::rtc::COMPATIBLE_BITRATE "COMPATIBLE_BITRATE": Compatible bitrate. The bitrate + stays the same * regardless of the profile. * * The Communication profile prioritizes smoothness, while the Live Broadcast * profile prioritizes video quality (requiring a higher bitrate). Agora - * recommends setting the bitrate mode as \ref agora::rtc::STANDARD_BITRATE "STANDARD_BITRATE" or simply to + * recommends setting the bitrate mode as \ref agora::rtc::STANDARD_BITRATE "STANDARD_BITRATE" or + simply to * address this difference. * * The following table lists the recommended video encoder configurations, @@ -1262,7 +1283,8 @@ struct SenderOptions { * bitrate based on this table. If the bitrate you set is beyond the proper * range, the SDK automatically sets it to within the range. - | Resolution | Frame Rate (fps) | Base Bitrate (Kbps, for Communication) | Live Bitrate (Kbps, for Live Broadcast)| + | Resolution | Frame Rate (fps) | Base Bitrate (Kbps, for Communication) | Live + Bitrate (Kbps, for Live Broadcast)| |------------------------|------------------|----------------------------------------|----------------------------------------| | 160 × 120 | 15 | 65 | 130 | | 120 × 120 | 15 | 50 | 100 | @@ -1299,10 +1321,7 @@ struct SenderOptions { */ int targetBitrate; - SenderOptions() - : ccMode(CC_ENABLED), - codecType(VIDEO_CODEC_H265), - targetBitrate(6500) {} + SenderOptions() : ccMode(CC_ENABLED), codecType(VIDEO_CODEC_H265), targetBitrate(6500) {} }; /** @@ -1365,8 +1384,8 @@ enum AUDIO_ENCODING_TYPE { */ AUDIO_ENCODING_TYPE_AAC_16000_LOW = 0x010101, /** - * AAC encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. 
+ * AAC encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_16000_MEDIUM = 0x010102, /** @@ -1375,18 +1394,18 @@ enum AUDIO_ENCODING_TYPE { */ AUDIO_ENCODING_TYPE_AAC_32000_LOW = 0x010201, /** - * AAC encoding format, 32000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * AAC encoding format, 32000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_32000_MEDIUM = 0x010202, /** - * AAC encoding format, 32000 Hz sampling rate, high sound quality. A file with an audio duration of - * 10 minutes is approximately 3.5 MB after encoding. + * AAC encoding format, 32000 Hz sampling rate, high sound quality. A file with an audio duration + * of 10 minutes is approximately 3.5 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_32000_HIGH = 0x010203, /** - * AAC encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * AAC encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_48000_MEDIUM = 0x010302, /** @@ -1400,18 +1419,18 @@ enum AUDIO_ENCODING_TYPE { */ AUDIO_ENCODING_TYPE_OPUS_16000_LOW = 0x020101, /** - * OPUS encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * OPUS encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_OPUS_16000_MEDIUM = 0x020102, /** - * OPUS encoding format, 48000 Hz sampling rate, medium sound quality. 
A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * OPUS encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_OPUS_48000_MEDIUM = 0x020302, /** - * OPUS encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio duration of - * 10 minutes is approximately 3.5 MB after encoding. + * OPUS encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio duration + * of 10 minutes is approximately 3.5 MB after encoding. */ AUDIO_ENCODING_TYPE_OPUS_48000_HIGH = 0x020303, }; @@ -1421,13 +1440,13 @@ enum AUDIO_ENCODING_TYPE { */ enum WATERMARK_FIT_MODE { /** - * Use the `positionInLandscapeMode` and `positionInPortraitMode` values you set in #WatermarkOptions. - * The settings in `WatermarkRatio` are invalid. + * Use the `positionInLandscapeMode` and `positionInPortraitMode` values you set in + * #WatermarkOptions. The settings in `WatermarkRatio` are invalid. */ FIT_MODE_COVER_POSITION, /** - * Use the value you set in `WatermarkRatio`. The settings in `positionInLandscapeMode` and `positionInPortraitMode` - * in `WatermarkOptions` are invalid. + * Use the value you set in `WatermarkRatio`. The settings in `positionInLandscapeMode` and + * `positionInPortraitMode` in `WatermarkOptions` are invalid. */ FIT_MODE_USE_IMAGE_RATIO }; @@ -1436,9 +1455,7 @@ enum WATERMARK_FIT_MODE { * The advanced settings of encoded audio frame. */ struct EncodedAudioFrameAdvancedSettings { - EncodedAudioFrameAdvancedSettings() - : speech(true), - sendEvenIfEmpty(true) {} + EncodedAudioFrameAdvancedSettings() : speech(true), sendEvenIfEmpty(true) {} /** * Determines whether the audio source is speech. 
@@ -1459,19 +1476,19 @@ struct EncodedAudioFrameAdvancedSettings { */ struct EncodedAudioFrameInfo { EncodedAudioFrameInfo() - : codec(AUDIO_CODEC_AACLC), - sampleRateHz(0), - samplesPerChannel(0), - numberOfChannels(0), - captureTimeMs(0) {} + : codec(AUDIO_CODEC_AACLC), + sampleRateHz(0), + samplesPerChannel(0), + numberOfChannels(0), + captureTimeMs(0) {} EncodedAudioFrameInfo(const EncodedAudioFrameInfo& rhs) - : codec(rhs.codec), - sampleRateHz(rhs.sampleRateHz), - samplesPerChannel(rhs.samplesPerChannel), - numberOfChannels(rhs.numberOfChannels), - advancedSettings(rhs.advancedSettings), - captureTimeMs(rhs.captureTimeMs) {} + : codec(rhs.codec), + sampleRateHz(rhs.sampleRateHz), + samplesPerChannel(rhs.samplesPerChannel), + numberOfChannels(rhs.numberOfChannels), + advancedSettings(rhs.advancedSettings), + captureTimeMs(rhs.captureTimeMs) {} /** * The audio codec: #AUDIO_CODEC_TYPE. */ @@ -1504,14 +1521,15 @@ struct EncodedAudioFrameInfo { * The definition of the AudioPcmDataInfo struct. */ struct AudioPcmDataInfo { - AudioPcmDataInfo() : samplesPerChannel(0), channelNum(0), samplesOut(0), elapsedTimeMs(0), ntpTimeMs(0) {} + AudioPcmDataInfo() + : samplesPerChannel(0), channelNum(0), samplesOut(0), elapsedTimeMs(0), ntpTimeMs(0) {} AudioPcmDataInfo(const AudioPcmDataInfo& rhs) - : samplesPerChannel(rhs.samplesPerChannel), - channelNum(rhs.channelNum), - samplesOut(rhs.samplesOut), - elapsedTimeMs(rhs.elapsedTimeMs), - ntpTimeMs(rhs.ntpTimeMs) {} + : samplesPerChannel(rhs.samplesPerChannel), + channelNum(rhs.channelNum), + samplesOut(rhs.samplesOut), + elapsedTimeMs(rhs.elapsedTimeMs), + ntpTimeMs(rhs.ntpTimeMs) {} /** * The sample count of the PCM data that you expect. @@ -1545,7 +1563,7 @@ enum H264PacketizeMode { /** * Single NAL unit mode. See RFC 6184. 
*/ - SingleNalUnit, // Mode 0 - only single NALU allowed + SingleNalUnit, // Mode 0 - only single NALU allowed }; /** @@ -1588,64 +1606,63 @@ enum VIDEO_STREAM_TYPE { }; struct VideoSubscriptionOptions { - /** - * The type of the video stream to subscribe to. - * - * The default value is `VIDEO_STREAM_HIGH`, which means the high-quality - * video stream. - */ - Optional type; - /** - * Whether to subscribe to encoded video data only: - * - `true`: Subscribe to encoded video data only. - * - `false`: (Default) Subscribe to decoded video data. - */ - Optional encodedFrameOnly; + /** + * The type of the video stream to subscribe to. + * + * The default value is `VIDEO_STREAM_HIGH`, which means the high-quality + * video stream. + */ + Optional type; + /** + * Whether to subscribe to encoded video data only: + * - `true`: Subscribe to encoded video data only. + * - `false`: (Default) Subscribe to decoded video data. + */ + Optional encodedFrameOnly; - VideoSubscriptionOptions() {} + VideoSubscriptionOptions() {} }; - /** The maximum length of the user account. */ -enum MAX_USER_ACCOUNT_LENGTH_TYPE -{ +enum MAX_USER_ACCOUNT_LENGTH_TYPE { /** The maximum length of the user account is 256 bytes. */ MAX_USER_ACCOUNT_LENGTH = 256 }; /** - * The definition of the EncodedVideoFrameInfo struct, which contains the information of the external encoded video frame. + * The definition of the EncodedVideoFrameInfo struct, which contains the information of the + * external encoded video frame. 
*/ struct EncodedVideoFrameInfo { EncodedVideoFrameInfo() - : uid(0), - codecType(VIDEO_CODEC_H264), - width(0), - height(0), - framesPerSecond(0), - frameType(VIDEO_FRAME_TYPE_BLANK_FRAME), - rotation(VIDEO_ORIENTATION_0), - trackId(0), - captureTimeMs(0), - decodeTimeMs(0), - streamType(VIDEO_STREAM_HIGH), - presentationMs(-1) {} + : uid(0), + codecType(VIDEO_CODEC_H264), + width(0), + height(0), + framesPerSecond(0), + frameType(VIDEO_FRAME_TYPE_BLANK_FRAME), + rotation(VIDEO_ORIENTATION_0), + trackId(0), + captureTimeMs(0), + decodeTimeMs(0), + streamType(VIDEO_STREAM_HIGH), + presentationMs(-1) {} EncodedVideoFrameInfo(const EncodedVideoFrameInfo& rhs) - : uid(rhs.uid), - codecType(rhs.codecType), - width(rhs.width), - height(rhs.height), - framesPerSecond(rhs.framesPerSecond), - frameType(rhs.frameType), - rotation(rhs.rotation), - trackId(rhs.trackId), - captureTimeMs(rhs.captureTimeMs), - decodeTimeMs(rhs.decodeTimeMs), - streamType(rhs.streamType), - presentationMs(rhs.presentationMs) {} + : uid(rhs.uid), + codecType(rhs.codecType), + width(rhs.width), + height(rhs.height), + framesPerSecond(rhs.framesPerSecond), + frameType(rhs.frameType), + rotation(rhs.rotation), + trackId(rhs.trackId), + captureTimeMs(rhs.captureTimeMs), + decodeTimeMs(rhs.decodeTimeMs), + streamType(rhs.streamType), + presentationMs(rhs.presentationMs) {} EncodedVideoFrameInfo& operator=(const EncodedVideoFrameInfo& rhs) { if (this == &rhs) return *this; @@ -1669,7 +1686,8 @@ struct EncodedVideoFrameInfo { */ uid_t uid; /** - * The codec type of the local video stream. See #VIDEO_CODEC_TYPE. The default value is `VIDEO_CODEC_H265 (3)`. + * The codec type of the local video stream. See #VIDEO_CODEC_TYPE. The default value is + * `VIDEO_CODEC_H265 (3)`. */ VIDEO_CODEC_TYPE codecType; /** @@ -1717,33 +1735,40 @@ struct EncodedVideoFrameInfo { }; /** -* Video compression preference. -*/ + * Video compression preference. 
+ */ enum COMPRESSION_PREFERENCE { /** - * (Default) Low latency is preferred, usually used in real-time communication where low latency is the number one priority. + * (Default) SDK uses compression preference according to setVideoScenario API settings, real-time network state and other relevant data information. + * If API setVideoScenario set video scenario to APPLICATION_SCENARIO_LIVESHOW, then PREFER_QUALITY is used. If not, then PREFER_LOW_LATENCY is used. + * Also if network state has changed, SDK may change this parameter between PREFER_QUALITY and PREFER_LOW_LATENCY automatically to get the best QOE. + * We recommend using this option. */ - PREFER_LOW_LATENCY, + PREFER_COMPRESSION_AUTO = -1, /** - * Prefer quality in sacrifice of a degree of latency, usually around 30ms ~ 150ms, depends target fps + * Prefer low latency, usually used in real-time communication where low latency is the number one priority. */ - PREFER_QUALITY, + PREFER_LOW_LATENCY = 0, + /** + * Prefer quality in sacrifice of a degree of latency, usually around 30ms ~ 150ms, depends target fps + */ + PREFER_QUALITY = 1, }; /** -* The video encoder type preference. -*/ + * The video encoder type preference. + */ enum ENCODING_PREFERENCE { /** - *Default . + *Default . */ PREFER_AUTO = -1, /** - * Software encoding. - */ + * Software encoding. + */ PREFER_SOFTWARE = 0, /** - * Hardware encoding + * Hardware encoding */ PREFER_HARDWARE = 1, }; @@ -1752,15 +1777,14 @@ enum ENCODING_PREFERENCE { * The definition of the AdvanceOptions struct. */ struct AdvanceOptions { - /** * The video encoder type preference.. */ ENCODING_PREFERENCE encodingPreference; /** - * Video compression preference. - */ + * Video compression preference. 
+ */ COMPRESSION_PREFERENCE compressionPreference; /** @@ -1770,7 +1794,7 @@ struct AdvanceOptions { bool encodeAlpha; AdvanceOptions() : encodingPreference(PREFER_AUTO), - compressionPreference(PREFER_LOW_LATENCY), + compressionPreference(PREFER_COMPRESSION_AUTO), encodeAlpha(false) {} AdvanceOptions(ENCODING_PREFERENCE encoding_preference, @@ -1785,7 +1809,6 @@ struct AdvanceOptions { compressionPreference == rhs.compressionPreference && encodeAlpha == rhs.encodeAlpha; } - }; /** @@ -1818,6 +1841,30 @@ enum CAMERA_FORMAT_TYPE { }; #endif +enum VIDEO_MODULE_TYPE { + /** Video capture module */ + VIDEO_MODULE_CAPTURER = 0, + /** Video software encoder module */ + VIDEO_MODULE_SOFTWARE_ENCODER = 1, + /** Video hardware encoder module */ + VIDEO_MODULE_HARDWARE_ENCODER = 2, + /** Video software decoder module */ + VIDEO_MODULE_SOFTWARE_DECODER = 3, + /** Video hardware decoder module */ + VIDEO_MODULE_HARDWARE_DECODER = 4, + /** Video render module */ + VIDEO_MODULE_RENDERER = 5, +}; + +enum HDR_CAPABILITY { + /** The result of static check is not reliable, by defualt*/ + HDR_CAPABILITY_UNKNOWN = -1, + /** The module you query doesn't support HDR */ + HDR_CAPABILITY_UNSUPPORTED = 0, + /** The module you query supports HDR */ + HDR_CAPABILITY_SUPPORTED = 1, +}; + /** Supported codec type bit mask. */ enum CODEC_CAP_MASK { /** 0: No codec support. */ @@ -1840,7 +1887,9 @@ struct CodecCapLevels { VIDEO_CODEC_CAPABILITY_LEVEL hwDecodingLevel; VIDEO_CODEC_CAPABILITY_LEVEL swDecodingLevel; - CodecCapLevels(): hwDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED), swDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED) {} + CodecCapLevels() + : hwDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED), + swDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED) {} }; /** The codec support information. 
*/ @@ -1852,10 +1901,11 @@ struct CodecCapInfo { /** The codec capability level, estimated based on the device hardware.*/ CodecCapLevels codecLevels; - CodecCapInfo(): codecType(VIDEO_CODEC_NONE), codecCapMask(0) {} + CodecCapInfo() : codecType(VIDEO_CODEC_NONE), codecCapMask(0) {} }; -/** FocalLengthInfo contains the IDs of the front and rear cameras, along with the wide-angle types. */ +/** FocalLengthInfo contains the IDs of the front and rear cameras, along with the wide-angle types. + */ struct FocalLengthInfo { /** The camera direction. */ int cameraDirection; @@ -1882,21 +1932,22 @@ struct VideoEncoderConfiguration { /** * The bitrate (Kbps) of the video. * - * Refer to the **Video Bitrate Table** below and set your bitrate. If you set a bitrate beyond the - * proper range, the SDK automatically adjusts it to a value within the range. You can also choose - * from the following options: + * Refer to the **Video Bitrate Table** below and set your bitrate. If you set a bitrate beyond + * the proper range, the SDK automatically adjusts it to a value within the range. You can also + * choose from the following options: * - * - #STANDARD_BITRATE: (Recommended) Standard bitrate mode. In this mode, the bitrates differ between - * the Live Broadcast and Communication profiles: + * - #STANDARD_BITRATE: (Recommended) Standard bitrate mode. In this mode, the bitrates differ + * between the Live Broadcast and Communication profiles: * - In the Communication profile, the video bitrate is the same as the base bitrate. * - In the Live Broadcast profile, the video bitrate is twice the base bitrate. - * - #COMPATIBLE_BITRATE: Compatible bitrate mode. The compatible bitrate mode. In this mode, the bitrate - * stays the same regardless of the profile. If you choose this mode for the Live Broadcast profile, - * the video frame rate may be lower than the set value. + * - #COMPATIBLE_BITRATE: Compatible bitrate mode. The compatible bitrate mode. 
In this mode, the + * bitrate stays the same regardless of the profile. If you choose this mode for the Live + * Broadcast profile, the video frame rate may be lower than the set value. * - * Agora uses different video codecs for different profiles to optimize the user experience. For example, - * the communication profile prioritizes the smoothness while the live-broadcast profile prioritizes the - * video quality (a higher bitrate). Therefore, We recommend setting this parameter as #STANDARD_BITRATE. + * Agora uses different video codecs for different profiles to optimize the user experience. For + * example, the communication profile prioritizes the smoothness while the live-broadcast profile + * prioritizes the video quality (a higher bitrate). Therefore, We recommend setting this + * parameter as #STANDARD_BITRATE. * * | Resolution | Frame Rate (fps) | Base Bitrate (Kbps) | Live Bitrate (Kbps)| * |------------------------|------------------|---------------------|--------------------| @@ -1964,7 +2015,8 @@ struct VideoEncoderConfiguration { /** * The mirror mode is disabled by default - * If mirror_type is set to VIDEO_MIRROR_MODE_ENABLED, then the video frame would be mirrored before encoding. + * If mirror_type is set to VIDEO_MIRROR_MODE_ENABLED, then the video frame would be mirrored + * before encoding. 
*/ VIDEO_MIRROR_MODE_TYPE mirrorMode; @@ -1980,9 +2032,9 @@ struct VideoEncoderConfiguration { bitrate(b), minBitrate(DEFAULT_MIN_BITRATE), orientationMode(m), - degradationPreference(MAINTAIN_QUALITY), + degradationPreference(MAINTAIN_AUTO), mirrorMode(mirror), - advanceOptions(PREFER_AUTO, PREFER_LOW_LATENCY, false) {} + advanceOptions(PREFER_AUTO, PREFER_COMPRESSION_AUTO, false) {} VideoEncoderConfiguration(int width, int height, int f, int b, ORIENTATION_MODE m, VIDEO_MIRROR_MODE_TYPE mirror = VIDEO_MIRROR_MODE_DISABLED) : codecType(VIDEO_CODEC_NONE), dimensions(width, height), @@ -1990,19 +2042,19 @@ struct VideoEncoderConfiguration { bitrate(b), minBitrate(DEFAULT_MIN_BITRATE), orientationMode(m), - degradationPreference(MAINTAIN_QUALITY), + degradationPreference(MAINTAIN_AUTO), mirrorMode(mirror), - advanceOptions(PREFER_AUTO, PREFER_LOW_LATENCY, false) {} + advanceOptions(PREFER_AUTO, PREFER_COMPRESSION_AUTO, false) {} VideoEncoderConfiguration(const VideoEncoderConfiguration& config) - : codecType(config.codecType), - dimensions(config.dimensions), - frameRate(config.frameRate), - bitrate(config.bitrate), - minBitrate(config.minBitrate), - orientationMode(config.orientationMode), - degradationPreference(config.degradationPreference), - mirrorMode(config.mirrorMode), - advanceOptions(config.advanceOptions) {} + : codecType(config.codecType), + dimensions(config.dimensions), + frameRate(config.frameRate), + bitrate(config.bitrate), + minBitrate(config.minBitrate), + orientationMode(config.orientationMode), + degradationPreference(config.degradationPreference), + mirrorMode(config.mirrorMode), + advanceOptions(config.advanceOptions) {} VideoEncoderConfiguration() : codecType(VIDEO_CODEC_NONE), dimensions(FRAME_WIDTH_960, FRAME_HEIGHT_540), @@ -2010,9 +2062,9 @@ struct VideoEncoderConfiguration { bitrate(STANDARD_BITRATE), minBitrate(DEFAULT_MIN_BITRATE), orientationMode(ORIENTATION_MODE_ADAPTIVE), - degradationPreference(MAINTAIN_QUALITY), + 
degradationPreference(MAINTAIN_AUTO), mirrorMode(VIDEO_MIRROR_MODE_DISABLED), - advanceOptions(PREFER_AUTO, PREFER_LOW_LATENCY, false) {} + advanceOptions(PREFER_AUTO, PREFER_COMPRESSION_AUTO, false) {} VideoEncoderConfiguration& operator=(const VideoEncoderConfiguration& rhs) { if (this == &rhs) return *this; @@ -2040,9 +2092,9 @@ struct DataStreamConfig { * * When you set the data packet to synchronize with the audio, then if the data packet delay is * within the audio delay, the SDK triggers the `onStreamMessage` callback when the synchronized - * audio packet is played out. Do not set this parameter as true if you need the receiver to receive - * the data packet immediately. Agora recommends that you set this parameter to `true` only when you - * need to implement specific functions, for example lyric synchronization. + * audio packet is played out. Do not set this parameter as true if you need the receiver to + * receive the data packet immediately. Agora recommends that you set this parameter to `true` + * only when you need to implement specific functions, for example lyric synchronization. */ bool syncWithAudio; /** @@ -2050,7 +2102,8 @@ struct DataStreamConfig { * - `true`: Guarantee that the receiver receives the data in the sent order. * - `false`: Do not guarantee that the receiver receives the data in the sent order. * - * Do not set this parameter as `true` if you need the receiver to receive the data packet immediately. + * Do not set this parameter as `true` if you need the receiver to receive the data packet + * immediately. 
*/ bool ordered; }; @@ -2060,16 +2113,16 @@ struct DataStreamConfig { */ enum SIMULCAST_STREAM_MODE { /* - * disable simulcast stream until receive request for enable simulcast stream by other broadcaster - */ + * disable simulcast stream until receive request for enable simulcast stream by other broadcaster + */ AUTO_SIMULCAST_STREAM = -1, /* - * disable simulcast stream - */ + * disable simulcast stream + */ DISABLE_SIMULCAST_STREAM = 0, /* - * always enable simulcast stream - */ + * always enable simulcast stream + */ ENABLE_SIMULCAST_STREAM = 1, }; @@ -2082,7 +2135,8 @@ struct SimulcastStreamConfig { */ VideoDimensions dimensions; /** - * The video bitrate (Kbps), represented by an instantaneous value. The default value of the log level is 5. + * The video bitrate (Kbps), represented by an instantaneous value. The default value of the log + * level is 5. */ int kBitrate; /** @@ -2187,28 +2241,31 @@ struct Rectangle { /** * The position and size of the watermark on the screen. * - * The position and size of the watermark on the screen are determined by `xRatio`, `yRatio`, and `widthRatio`: - * - (`xRatio`, `yRatio`) refers to the coordinates of the upper left corner of the watermark, which determines - * the distance from the upper left corner of the watermark to the upper left corner of the screen. - * The `widthRatio` determines the width of the watermark. + * The position and size of the watermark on the screen are determined by `xRatio`, `yRatio`, and + * `widthRatio`: + * - (`xRatio`, `yRatio`) refers to the coordinates of the upper left corner of the watermark, which + * determines the distance from the upper left corner of the watermark to the upper left corner of + * the screen. The `widthRatio` determines the width of the watermark. */ struct WatermarkRatio { /** * The x-coordinate of the upper left corner of the watermark. 
The horizontal position relative to - * the origin, where the upper left corner of the screen is the origin, and the x-coordinate is the - * upper left corner of the watermark. The value range is [0.0,1.0], and the default value is 0. + * the origin, where the upper left corner of the screen is the origin, and the x-coordinate is + * the upper left corner of the watermark. The value range is [0.0,1.0], and the default value is + * 0. */ float xRatio; /** - * The y-coordinate of the upper left corner of the watermark. The vertical position relative to the - * origin, where the upper left corner of the screen is the origin, and the y-coordinate is the upper - * left corner of the screen. The value range is [0.0,1.0], and the default value is 0. + * The y-coordinate of the upper left corner of the watermark. The vertical position relative to + * the origin, where the upper left corner of the screen is the origin, and the y-coordinate is + * the upper left corner of the screen. The value range is [0.0,1.0], and the default value is 0. */ float yRatio; /** - * The width of the watermark. The SDK calculates the height of the watermark proportionally according - * to this parameter value to ensure that the enlarged or reduced watermark image is not distorted. - * The value range is [0,1], and the default value is 0, which means no watermark is displayed. + * The width of the watermark. The SDK calculates the height of the watermark proportionally + * according to this parameter value to ensure that the enlarged or reduced watermark image is not + * distorted. The value range is [0,1], and the default value is 0, which means no watermark is + * displayed. 
*/ float widthRatio; @@ -2247,10 +2304,10 @@ struct WatermarkOptions { WATERMARK_FIT_MODE mode; WatermarkOptions() - : visibleInPreview(true), - positionInLandscapeMode(0, 0, 0, 0), - positionInPortraitMode(0, 0, 0, 0), - mode(FIT_MODE_COVER_POSITION) {} + : visibleInPreview(true), + positionInLandscapeMode(0, 0, 0, 0), + positionInPortraitMode(0, 0, 0, 0), + mode(FIT_MODE_COVER_POSITION) {} }; /** @@ -2321,7 +2378,8 @@ struct RtcStats { * The app CPU usage (%). * @note * - The value of `cpuAppUsage` is always reported as 0 in the `onLeaveChannel` callback. - * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system limitations. + * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system + * limitations. */ double cpuAppUsage; /** @@ -2331,13 +2389,15 @@ struct RtcStats { * value = (100 - System Idle Progress in Task Manager)/100. * @note * - The value of `cpuTotalUsage` is always reported as 0 in the `onLeaveChannel` callback. - * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system limitations. + * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system + * limitations. */ double cpuTotalUsage; /** * The round-trip time delay from the client to the local router. - * @note On Android, to get `gatewayRtt`, ensure that you add the `android.permission.ACCESS_WIFI_STATE` - * permission after `` in the `AndroidManifest.xml` file in your project. + * @note On Android, to get `gatewayRtt`, ensure that you add the + * `android.permission.ACCESS_WIFI_STATE` permission after `` in the + * `AndroidManifest.xml` file in your project. 
*/ int gatewayRtt; /** @@ -2414,39 +2474,39 @@ struct RtcStats { */ int rxPacketLossRate; RtcStats() - : duration(0), - txBytes(0), - rxBytes(0), - txAudioBytes(0), - txVideoBytes(0), - rxAudioBytes(0), - rxVideoBytes(0), - txKBitRate(0), - rxKBitRate(0), - rxAudioKBitRate(0), - txAudioKBitRate(0), - rxVideoKBitRate(0), - txVideoKBitRate(0), - lastmileDelay(0), - userCount(0), - cpuAppUsage(0.0), - cpuTotalUsage(0.0), - gatewayRtt(0), - memoryAppUsageRatio(0.0), - memoryTotalUsageRatio(0.0), - memoryAppUsageInKbytes(0), - connectTimeMs(0), - firstAudioPacketDuration(0), - firstVideoPacketDuration(0), - firstVideoKeyFramePacketDuration(0), - packetsBeforeFirstKeyFramePacket(0), - firstAudioPacketDurationAfterUnmute(0), - firstVideoPacketDurationAfterUnmute(0), - firstVideoKeyFramePacketDurationAfterUnmute(0), - firstVideoKeyFrameDecodedDurationAfterUnmute(0), - firstVideoKeyFrameRenderedDurationAfterUnmute(0), - txPacketLossRate(0), - rxPacketLossRate(0) {} + : duration(0), + txBytes(0), + rxBytes(0), + txAudioBytes(0), + txVideoBytes(0), + rxAudioBytes(0), + rxVideoBytes(0), + txKBitRate(0), + rxKBitRate(0), + rxAudioKBitRate(0), + txAudioKBitRate(0), + rxVideoKBitRate(0), + txVideoKBitRate(0), + lastmileDelay(0), + userCount(0), + cpuAppUsage(0.0), + cpuTotalUsage(0.0), + gatewayRtt(0), + memoryAppUsageRatio(0.0), + memoryTotalUsageRatio(0.0), + memoryAppUsageInKbytes(0), + connectTimeMs(0), + firstAudioPacketDuration(0), + firstVideoPacketDuration(0), + firstVideoKeyFramePacketDuration(0), + packetsBeforeFirstKeyFramePacket(0), + firstAudioPacketDurationAfterUnmute(0), + firstVideoPacketDurationAfterUnmute(0), + firstVideoKeyFramePacketDurationAfterUnmute(0), + firstVideoKeyFrameDecodedDurationAfterUnmute(0), + firstVideoKeyFrameRenderedDurationAfterUnmute(0), + txPacketLossRate(0), + rxPacketLossRate(0) {} }; /** @@ -2464,7 +2524,8 @@ enum CLIENT_ROLE_TYPE { }; /** - * Quality change of the local video in terms of target frame rate and target bit rate since last 
count. + * Quality change of the local video in terms of target frame rate and target bit rate since last + * count. */ enum QUALITY_ADAPT_INDICATION { /** @@ -2482,11 +2543,10 @@ enum QUALITY_ADAPT_INDICATION { }; /** - * The latency level of an audience member in interactive live streaming. This enum takes effect only - * when the user role is set to `CLIENT_ROLE_AUDIENCE`. + * The latency level of an audience member in interactive live streaming. This enum takes effect + * only when the user role is set to `CLIENT_ROLE_AUDIENCE`. */ -enum AUDIENCE_LATENCY_LEVEL_TYPE -{ +enum AUDIENCE_LATENCY_LEVEL_TYPE { /** * 1: Low latency. */ @@ -2500,15 +2560,14 @@ enum AUDIENCE_LATENCY_LEVEL_TYPE /** * The detailed options of a user. */ -struct ClientRoleOptions -{ +struct ClientRoleOptions { /** - * The latency level of an audience member in interactive live streaming. See `AUDIENCE_LATENCY_LEVEL_TYPE`. + * The latency level of an audience member in interactive live streaming. See + * `AUDIENCE_LATENCY_LEVEL_TYPE`. */ AUDIENCE_LATENCY_LEVEL_TYPE audienceLatencyLevel; - ClientRoleOptions() - : audienceLatencyLevel(AUDIENCE_LATENCY_LEVEL_ULTRA_LOW_LATENCY) {} + ClientRoleOptions() : audienceLatencyLevel(AUDIENCE_LATENCY_LEVEL_ULTRA_LOW_LATENCY) {} }; /** @@ -2542,8 +2601,8 @@ enum EXPERIENCE_POOR_REASON { */ WIRELESS_SIGNAL_POOR = 4, /** - * 8: The local user enables both Wi-Fi and bluetooth, and their signals interfere with each other. - * As a result, audio transmission quality is undermined. + * 8: The local user enables both Wi-Fi and bluetooth, and their signals interfere with each + * other. As a result, audio transmission quality is undermined. */ WIFI_BLUETOOTH_COEXIST = 8, }; @@ -2552,18 +2611,18 @@ enum EXPERIENCE_POOR_REASON { * Audio AINS mode */ enum AUDIO_AINS_MODE { - /** - * AINS mode with soft suppression level. - */ - AINS_MODE_BALANCED = 0, - /** - * AINS mode with high suppression level. 
- */ - AINS_MODE_AGGRESSIVE = 1, - /** - * AINS mode with high suppression level and ultra-low-latency - */ - AINS_MODE_ULTRALOWLATENCY = 2 + /** + * AINS mode with soft suppression level. + */ + AINS_MODE_BALANCED = 0, + /** + * AINS mode with high suppression level. + */ + AINS_MODE_AGGRESSIVE = 1, + /** + * AINS mode with high suppression level and ultra-low-latency + */ + AINS_MODE_ULTRALOWLATENCY = 2 }; /** @@ -2574,9 +2633,10 @@ enum AUDIO_PROFILE_TYPE { * 0: The default audio profile. * - For the Communication profile: * - Windows: A sample rate of 16 kHz, audio encoding, mono, and a bitrate of up to 16 Kbps. - * - Android/macOS/iOS: A sample rate of 32 kHz, audio encoding, mono, and a bitrate of up to 18 Kbps. - * of up to 16 Kbps. - * - For the Live-broadcast profile: A sample rate of 48 kHz, music encoding, mono, and a bitrate of up to 64 Kbps. + * - Android/macOS/iOS: A sample rate of 32 kHz, audio encoding, mono, and a bitrate of up to 18 + * Kbps. of up to 16 Kbps. + * - For the Live-broadcast profile: A sample rate of 48 kHz, music encoding, mono, and a bitrate + * of up to 64 Kbps. */ AUDIO_PROFILE_DEFAULT = 0, /** @@ -2590,8 +2650,8 @@ enum AUDIO_PROFILE_TYPE { /** * 3: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 80 Kbps. * - * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set `audioProcessingChannels` - * to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`. + * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set + * `audioProcessingChannels` to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`. */ AUDIO_PROFILE_MUSIC_STANDARD_STEREO = 3, /** @@ -2601,8 +2661,8 @@ enum AUDIO_PROFILE_TYPE { /** * 5: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 128 Kbps. * - * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set `audioProcessingChannels` - * to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`. 
+ * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set + * `audioProcessingChannels` to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`. */ AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO = 5, /** @@ -2634,7 +2694,8 @@ enum AUDIO_SCENARIO_TYPE { */ AUDIO_SCENARIO_CHATROOM = 5, /** - * 7: Real-time chorus scenario, where users have good network conditions and require ultra-low latency. + * 7: Real-time chorus scenario, where users have good network conditions and require ultra-low + * latency. */ AUDIO_SCENARIO_CHORUS = 7, /** @@ -2651,19 +2712,19 @@ enum AUDIO_SCENARIO_TYPE { * The format of the video frame. */ struct VideoFormat { - OPTIONAL_ENUM_SIZE_T { - /** The maximum value (px) of the width. */ - kMaxWidthInPixels = 3840, - /** The maximum value (px) of the height. */ - kMaxHeightInPixels = 2160, - /** The maximum value (fps) of the frame rate. */ - kMaxFps = 60, + OPTIONAL_ENUM_SIZE_T{ + /** The maximum value (px) of the width. */ + kMaxWidthInPixels = 3840, + /** The maximum value (px) of the height. */ + kMaxHeightInPixels = 2160, + /** The maximum value (fps) of the frame rate. */ + kMaxFps = 60, }; /** * The width (px) of the video. */ - int width; // Number of pixels. + int width; // Number of pixels. /** * The height (px) of the video. */ @@ -2687,9 +2748,7 @@ struct VideoFormat { bool operator==(const VideoFormat& fmt) const { return width == fmt.width && height == fmt.height && fps == fmt.fps; } - bool operator!=(const VideoFormat& fmt) const { - return !operator==(fmt); - } + bool operator!=(const VideoFormat& fmt) const { return !operator==(fmt); } }; /** @@ -2742,7 +2801,6 @@ enum SCREEN_SCENARIO_TYPE { SCREEN_SCENARIO_RDC = 4, }; - /** * The video application scenario type. */ @@ -2759,6 +2817,10 @@ enum VIDEO_APPLICATION_SCENARIO_TYPE { * 2: Video Call Scenario. This scenario is used to optimize the video experience in video application, like 1v1 video call. 
*/ APPLICATION_SCENARIO_1V1 = 2, + /** + * 3: Live Show Scenario. This scenario is used to optimize the video experience in video live show. + */ + APPLICATION_SCENARIO_LIVESHOW = 3, }; /** @@ -2789,7 +2851,8 @@ enum VIDEO_QOE_PREFERENCE_TYPE { */ enum CAPTURE_BRIGHTNESS_LEVEL_TYPE { /** -1: The SDK does not detect the brightness level of the video image. - * Wait a few seconds to get the brightness level from `CAPTURE_BRIGHTNESS_LEVEL_TYPE` in the next callback. + * Wait a few seconds to get the brightness level from `CAPTURE_BRIGHTNESS_LEVEL_TYPE` in the next + * callback. */ CAPTURE_BRIGHTNESS_LEVEL_INVALID = -1, /** 0: The brightness level of the video image is normal. @@ -2804,20 +2867,20 @@ enum CAPTURE_BRIGHTNESS_LEVEL_TYPE { }; enum CAMERA_STABILIZATION_MODE { - /** The camera stabilization mode is disabled. - */ + /** The camera stabilization mode is disabled. + */ CAMERA_STABILIZATION_MODE_OFF = -1, - /** device choose stabilization mode automatically. - */ + /** device choose stabilization mode automatically. + */ CAMERA_STABILIZATION_MODE_AUTO = 0, - /** stabilization mode level 1. - */ + /** stabilization mode level 1. + */ CAMERA_STABILIZATION_MODE_LEVEL_1 = 1, - /** stabilization mode level 2. - */ + /** stabilization mode level 2. + */ CAMERA_STABILIZATION_MODE_LEVEL_2 = 2, - /** stabilization mode level 3. - */ + /** stabilization mode level 3. + */ CAMERA_STABILIZATION_MODE_LEVEL_3 = 3, /** The maximum level of the camera stabilization mode. */ @@ -2855,7 +2918,8 @@ enum LOCAL_AUDIO_STREAM_REASON { */ LOCAL_AUDIO_STREAM_REASON_OK = 0, /** - * 1: No specified reason for the local audio failure. Remind your users to try to rejoin the channel. + * 1: No specified reason for the local audio failure. Remind your users to try to rejoin the + * channel. 
*/ LOCAL_AUDIO_STREAM_REASON_FAILURE = 1, /** @@ -2968,7 +3032,7 @@ enum LOCAL_VIDEO_STREAM_REASON { */ LOCAL_VIDEO_STREAM_REASON_DEVICE_NOT_FOUND = 8, /** - * 9: (macOS only) The video capture device currently in use is disconnected (such as being + * 9: (macOS and Windows only) The video capture device currently in use is disconnected (such as being * unplugged). */ LOCAL_VIDEO_STREAM_REASON_DEVICE_DISCONNECTED = 9, @@ -2983,8 +3047,8 @@ enum LOCAL_VIDEO_STREAM_REASON { */ LOCAL_VIDEO_STREAM_REASON_DEVICE_INTERRUPT = 14, /** - * 15: (Android only) The device may need to be shut down and restarted to restore camera function, - * or there may be a persistent hardware problem. + * 15: (Android only) The device may need to be shut down and restarted to restore camera + * function, or there may be a persistent hardware problem. */ LOCAL_VIDEO_STREAM_REASON_DEVICE_FATAL_ERROR = 15, /** @@ -3021,20 +3085,21 @@ enum LOCAL_VIDEO_STREAM_REASON { /** 22: No permision to capture screen. */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_NO_PERMISSION = 22, /** - * 24: (Windows Only) An unexpected error (possibly due to window block failure) occurs during the screen - * sharing process, resulting in performance degradation. However, the screen sharing process itself is - * functioning normally. + * 24: (Windows Only) An unexpected error (possibly due to window block failure) occurs during the + * screen sharing process, resulting in performance degradation. However, the screen sharing + * process itself is functioning normally. */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_AUTO_FALLBACK = 24, - /** 25: (Windows only) The local screen capture window is currently hidden and not visible on the desktop. */ + /** 25: (Windows only) The local screen capture window is currently hidden and not visible on the + desktop. */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_HIDDEN = 25, /** 26: (Windows only) The local screen capture window is recovered from its hidden state. 
*/ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_HIDDEN = 26, /** 27: (Windows and macOS only) The window is recovered from miniminzed */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_MINIMIZED = 27, - /** + /** * 28: The screen capture paused. - * + * * Common scenarios for reporting this error code: * - When the desktop switch to the secure desktop such as UAC dialog or the Winlogon desktop on * Windows platform, the SDK reports this error code. @@ -3050,41 +3115,41 @@ enum LOCAL_VIDEO_STREAM_REASON { /** * Remote audio states. */ -enum REMOTE_AUDIO_STATE -{ +enum REMOTE_AUDIO_STATE { /** * 0: The remote audio is in the default state. The SDK reports this state in the case of * `REMOTE_AUDIO_REASON_LOCAL_MUTED(3)`, `REMOTE_AUDIO_REASON_REMOTE_MUTED(5)`, or * `REMOTE_AUDIO_REASON_REMOTE_OFFLINE(7)`. */ - REMOTE_AUDIO_STATE_STOPPED = 0, // Default state, audio is started or remote user disabled/muted audio stream + REMOTE_AUDIO_STATE_STOPPED = + 0, // Default state, audio is started or remote user disabled/muted audio stream /** * 1: The first remote audio packet is received. */ REMOTE_AUDIO_STATE_STARTING = 1, // The first audio frame packet has been received /** - * 2: The remote audio stream is decoded and plays normally. The SDK reports this state in the case of - * `REMOTE_AUDIO_REASON_NETWORK_RECOVERY(2)`, `REMOTE_AUDIO_REASON_LOCAL_UNMUTED(4)`, or + * 2: The remote audio stream is decoded and plays normally. The SDK reports this state in the + * case of `REMOTE_AUDIO_REASON_NETWORK_RECOVERY(2)`, `REMOTE_AUDIO_REASON_LOCAL_UNMUTED(4)`, or * `REMOTE_AUDIO_REASON_REMOTE_UNMUTED(6)`. */ - REMOTE_AUDIO_STATE_DECODING = 2, // The first remote audio frame has been decoded or fronzen state ends + REMOTE_AUDIO_STATE_DECODING = + 2, // The first remote audio frame has been decoded or fronzen state ends /** * 3: The remote audio is frozen. The SDK reports this state in the case of * `REMOTE_AUDIO_REASON_NETWORK_CONGESTION(1)`. 
*/ - REMOTE_AUDIO_STATE_FROZEN = 3, // Remote audio is frozen, probably due to network issue + REMOTE_AUDIO_STATE_FROZEN = 3, // Remote audio is frozen, probably due to network issue /** * 4: The remote audio fails to start. The SDK reports this state in the case of * `REMOTE_AUDIO_REASON_INTERNAL(0)`. */ - REMOTE_AUDIO_STATE_FAILED = 4, // Remote audio play failed + REMOTE_AUDIO_STATE_FAILED = 4, // Remote audio play failed }; /** * Reasons for the remote audio state change. */ -enum REMOTE_AUDIO_STATE_REASON -{ +enum REMOTE_AUDIO_STATE_REASON { /** * 0: The SDK reports this reason when the video state changes. */ @@ -3138,7 +3203,8 @@ enum REMOTE_VIDEO_STATE { /** * 0: The remote video is in the default state. The SDK reports this state in the case of * `REMOTE_VIDEO_STATE_REASON_LOCAL_MUTED (3)`, `REMOTE_VIDEO_STATE_REASON_REMOTE_MUTED (5)`, - * `REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE (7)`, or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK (8)`. + * `REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE (7)`, or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK + * (8)`. */ REMOTE_VIDEO_STATE_STOPPED = 0, /** @@ -3146,9 +3212,10 @@ enum REMOTE_VIDEO_STATE { */ REMOTE_VIDEO_STATE_STARTING = 1, /** - * 2: The remote video stream is decoded and plays normally. The SDK reports this state in the case of - * `REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY (2)`, `REMOTE_VIDEO_STATE_REASON_LOCAL_UNMUTED (4)`, - * `REMOTE_VIDEO_STATE_REASON_REMOTE_UNMUTED (6)`, or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK_RECOVERY (9)`. + * 2: The remote video stream is decoded and plays normally. The SDK reports this state in the + * case of `REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY (2)`, + * `REMOTE_VIDEO_STATE_REASON_LOCAL_UNMUTED (4)`, `REMOTE_VIDEO_STATE_REASON_REMOTE_UNMUTED (6)`, + * or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK_RECOVERY (9)`. 
*/ REMOTE_VIDEO_STATE_DECODING = 2, /** 3: The remote video is frozen, probably due to @@ -3165,36 +3232,36 @@ enum REMOTE_VIDEO_STATE { */ enum REMOTE_VIDEO_STATE_REASON { /** - * 0: The SDK reports this reason when the video state changes. - */ + * 0: The SDK reports this reason when the video state changes. + */ REMOTE_VIDEO_STATE_REASON_INTERNAL = 0, /** - * 1: Network congestion. - */ + * 1: Network congestion. + */ REMOTE_VIDEO_STATE_REASON_NETWORK_CONGESTION = 1, /** - * 2: Network recovery. - */ + * 2: Network recovery. + */ REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY = 2, /** - * 3: The local user stops receiving the remote video stream or disables the video module. - */ + * 3: The local user stops receiving the remote video stream or disables the video module. + */ REMOTE_VIDEO_STATE_REASON_LOCAL_MUTED = 3, /** - * 4: The local user resumes receiving the remote video stream or enables the video module. - */ + * 4: The local user resumes receiving the remote video stream or enables the video module. + */ REMOTE_VIDEO_STATE_REASON_LOCAL_UNMUTED = 4, /** - * 5: The remote user stops sending the video stream or disables the video module. - */ + * 5: The remote user stops sending the video stream or disables the video module. + */ REMOTE_VIDEO_STATE_REASON_REMOTE_MUTED = 5, /** - * 6: The remote user resumes sending the video stream or enables the video module. - */ + * 6: The remote user resumes sending the video stream or enables the video module. + */ REMOTE_VIDEO_STATE_REASON_REMOTE_UNMUTED = 6, /** - * 7: The remote user leaves the channel. - */ + * 7: The remote user leaves the channel. + */ REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE = 7, /** 8: The remote audio-and-video stream falls back to the audio-only stream * due to poor network conditions. 
@@ -3210,7 +3277,7 @@ enum REMOTE_VIDEO_STATE_REASON { /** (Internal use only) 11: The remote video stream type change to high stream type */ REMOTE_VIDEO_STATE_REASON_VIDEO_STREAM_TYPE_CHANGE_TO_HIGH = 11, - /** (iOS only) 12: The app of the remote user is in background. + /** (iOS only) 12: The app of the remote user is in background. */ REMOTE_VIDEO_STATE_REASON_SDK_IN_BACKGROUND = 12, @@ -3248,10 +3315,14 @@ enum REMOTE_USER_STATE { */ struct VideoTrackInfo { VideoTrackInfo() - : isLocal(false), ownerUid(0), trackId(0), channelId(OPTIONAL_NULLPTR) - , codecType(VIDEO_CODEC_H265) - , encodedFrameOnly(false), sourceType(VIDEO_SOURCE_CAMERA_PRIMARY) - , observationPosition(agora::media::base::POSITION_POST_CAPTURER) {} + : isLocal(false), + ownerUid(0), + trackId(0), + channelId(OPTIONAL_NULLPTR), + codecType(VIDEO_CODEC_H265), + encodedFrameOnly(false), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + observationPosition(agora::media::base::POSITION_POST_CAPTURER) {} /** * Whether the video track is local or remote. * - true: The video track is local. @@ -3291,7 +3362,8 @@ struct VideoTrackInfo { }; /** - * The downscale level of the remote video stream . The higher the downscale level, the more the video downscales. + * The downscale level of the remote video stream . The higher the downscale level, the more the + * video downscales. */ enum REMOTE_VIDEO_DOWNSCALE_LEVEL { /** @@ -3340,7 +3412,8 @@ struct AudioVolumeInfo { * @note * - The `vad` parameter does not report the voice activity status of remote users. In a remote * user's callback, the value of `vad` is always 1. - * - To use this parameter, you must set `reportVad` to true when calling `enableAudioVolumeIndication`. + * - To use this parameter, you must set `reportVad` to true when calling + * `enableAudioVolumeIndication`. */ unsigned int vad; /** @@ -3464,7 +3537,8 @@ enum VIDEO_CODEC_PROFILE_TYPE { */ VIDEO_CODEC_PROFILE_BASELINE = 66, /** - * 77: Main video codec profile. 
Generally used in mainstream electronics, such as MP4 players, portable video players, PSP, and iPads. + * 77: Main video codec profile. Generally used in mainstream electronics, such as MP4 players, + * portable video players, PSP, and iPads. */ VIDEO_CODEC_PROFILE_MAIN = 77, /** @@ -3473,7 +3547,6 @@ enum VIDEO_CODEC_PROFILE_TYPE { VIDEO_CODEC_PROFILE_HIGH = 100, }; - /** * Self-defined audio codec profile. */ @@ -3495,8 +3568,7 @@ enum AUDIO_CODEC_PROFILE_TYPE { /** * Local audio statistics. */ -struct LocalAudioStats -{ +struct LocalAudioStats { /** * The number of audio channels. */ @@ -3514,7 +3586,8 @@ struct LocalAudioStats */ int internalCodec; /** - * The packet loss rate (%) from the local client to the Agora server before applying the anti-packet loss strategies. + * The packet loss rate (%) from the local client to the Agora server before applying the + * anti-packet loss strategies. */ unsigned short txPacketLossRate; /** @@ -3535,35 +3608,45 @@ struct LocalAudioStats int aecEstimatedDelay; }; - /** * States of the Media Push. */ enum RTMP_STREAM_PUBLISH_STATE { /** - * 0: The Media Push has not started or has ended. This state is also triggered after you remove a RTMP or RTMPS stream from the CDN by calling `removePublishStreamUrl`. + * 0: The Media Push has not started or has ended. This state is also triggered after you remove a + * RTMP or RTMPS stream from the CDN by calling `removePublishStreamUrl`. */ RTMP_STREAM_PUBLISH_STATE_IDLE = 0, /** - * 1: The SDK is connecting to Agora's streaming server and the CDN server. This state is triggered after you call the `addPublishStreamUrl` method. + * 1: The SDK is connecting to Agora's streaming server and the CDN server. This state is + * triggered after you call the `addPublishStreamUrl` method. */ RTMP_STREAM_PUBLISH_STATE_CONNECTING = 1, /** - * 2: The RTMP or RTMPS streaming publishes. The SDK successfully publishes the RTMP or RTMPS streaming and returns this state. 
+ * 2: The RTMP or RTMPS streaming publishes. The SDK successfully publishes the RTMP or RTMPS + * streaming and returns this state. */ RTMP_STREAM_PUBLISH_STATE_RUNNING = 2, /** - * 3: The RTMP or RTMPS streaming is recovering. When exceptions occur to the CDN, or the streaming is interrupted, the SDK tries to resume RTMP or RTMPS streaming and returns this state. - * - If the SDK successfully resumes the streaming, #RTMP_STREAM_PUBLISH_STATE_RUNNING (2) returns. - * - If the streaming does not resume within 60 seconds or server errors occur, #RTMP_STREAM_PUBLISH_STATE_FAILURE (4) returns. You can also reconnect to the server by calling the `removePublishStreamUrl` and `addPublishStreamUrl` methods. + * 3: The RTMP or RTMPS streaming is recovering. When exceptions occur to the CDN, or the + * streaming is interrupted, the SDK tries to resume RTMP or RTMPS streaming and returns this + * state. + * - If the SDK successfully resumes the streaming, #RTMP_STREAM_PUBLISH_STATE_RUNNING (2) + * returns. + * - If the streaming does not resume within 60 seconds or server errors occur, + * #RTMP_STREAM_PUBLISH_STATE_FAILURE (4) returns. You can also reconnect to the server by calling + * the `removePublishStreamUrl` and `addPublishStreamUrl` methods. */ RTMP_STREAM_PUBLISH_STATE_RECOVERING = 3, /** - * 4: The RTMP or RTMPS streaming fails. See the `errCode` parameter for the detailed error information. You can also call the `addPublishStreamUrl` method to publish the RTMP or RTMPS streaming again. + * 4: The RTMP or RTMPS streaming fails. See the `errCode` parameter for the detailed error + * information. You can also call the `addPublishStreamUrl` method to publish the RTMP or RTMPS + * streaming again. */ RTMP_STREAM_PUBLISH_STATE_FAILURE = 4, /** - * 5: The SDK is disconnecting to Agora's streaming server and the CDN server. This state is triggered after you call the `removePublishStreamUrl` method. 
+ * 5: The SDK is disconnecting to Agora's streaming server and the CDN server. This state is + * triggered after you call the `removePublishStreamUrl` method. */ RTMP_STREAM_PUBLISH_STATE_DISCONNECTING = 5, }; @@ -3577,8 +3660,10 @@ enum RTMP_STREAM_PUBLISH_REASON { */ RTMP_STREAM_PUBLISH_REASON_OK = 0, /** - * 1: Invalid argument used. If, for example, you do not call the `setLiveTranscoding` method to configure the LiveTranscoding parameters before calling the addPublishStreamUrl method, - * the SDK returns this error. Check whether you set the parameters in the `setLiveTranscoding` method properly. + * 1: Invalid argument used. If, for example, you do not call the `setLiveTranscoding` method to + * configure the LiveTranscoding parameters before calling the addPublishStreamUrl method, the SDK + * returns this error. Check whether you set the parameters in the `setLiveTranscoding` method + * properly. */ RTMP_STREAM_PUBLISH_REASON_INVALID_ARGUMENT = 1, /** @@ -3586,11 +3671,13 @@ enum RTMP_STREAM_PUBLISH_REASON { */ RTMP_STREAM_PUBLISH_REASON_ENCRYPTED_STREAM_NOT_ALLOWED = 2, /** - * 3: Timeout for the RTMP or RTMPS streaming. Call the `addPublishStreamUrl` method to publish the streaming again. + * 3: Timeout for the RTMP or RTMPS streaming. Call the `addPublishStreamUrl` method to publish + * the streaming again. */ RTMP_STREAM_PUBLISH_REASON_CONNECTION_TIMEOUT = 3, /** - * 4: An error occurs in Agora's streaming server. Call the `addPublishStreamUrl` method to publish the streaming again. + * 4: An error occurs in Agora's streaming server. Call the `addPublishStreamUrl` method to + * publish the streaming again. */ RTMP_STREAM_PUBLISH_REASON_INTERNAL_SERVER_ERROR = 4, /** @@ -3614,17 +3701,23 @@ enum RTMP_STREAM_PUBLISH_REASON { */ RTMP_STREAM_PUBLISH_REASON_STREAM_NOT_FOUND = 9, /** - * 10: The format of the RTMP or RTMPS streaming URL is not supported. Check whether the URL format is correct. 
+ * 10: The format of the RTMP or RTMPS streaming URL is not supported. Check whether the URL + * format is correct. */ RTMP_STREAM_PUBLISH_REASON_FORMAT_NOT_SUPPORTED = 10, /** - * 11: The user role is not host, so the user cannot use the CDN live streaming function. Check your application code logic. + * 11: The user role is not host, so the user cannot use the CDN live streaming function. Check + * your application code logic. */ - RTMP_STREAM_PUBLISH_REASON_NOT_BROADCASTER = 11, // Note: match to ERR_PUBLISH_STREAM_NOT_BROADCASTER in AgoraBase.h + RTMP_STREAM_PUBLISH_REASON_NOT_BROADCASTER = + 11, // Note: match to ERR_PUBLISH_STREAM_NOT_BROADCASTER in AgoraBase.h /** - * 13: The `updateRtmpTranscoding` or `setLiveTranscoding` method is called to update the transcoding configuration in a scenario where there is streaming without transcoding. Check your application code logic. + * 13: The `updateRtmpTranscoding` or `setLiveTranscoding` method is called to update the + * transcoding configuration in a scenario where there is streaming without transcoding. Check + * your application code logic. */ - RTMP_STREAM_PUBLISH_REASON_TRANSCODING_NO_MIX_STREAM = 13, // Note: match to ERR_PUBLISH_STREAM_TRANSCODING_NO_MIX_STREAM in AgoraBase.h + RTMP_STREAM_PUBLISH_REASON_TRANSCODING_NO_MIX_STREAM = + 13, // Note: match to ERR_PUBLISH_STREAM_TRANSCODING_NO_MIX_STREAM in AgoraBase.h /** * 14: Errors occurred in the host's network. */ @@ -3632,11 +3725,13 @@ enum RTMP_STREAM_PUBLISH_REASON { /** * 15: Your App ID does not have permission to use the CDN live streaming function. */ - RTMP_STREAM_PUBLISH_REASON_INVALID_APPID = 15, // Note: match to ERR_PUBLISH_STREAM_APPID_INVALID in AgoraBase.h + RTMP_STREAM_PUBLISH_REASON_INVALID_APPID = + 15, // Note: match to ERR_PUBLISH_STREAM_APPID_INVALID in AgoraBase.h /** invalid privilege. */ RTMP_STREAM_PUBLISH_REASON_INVALID_PRIVILEGE = 16, /** - * 100: The streaming has been stopped normally. 
After you call `removePublishStreamUrl` to stop streaming, the SDK returns this value. + * 100: The streaming has been stopped normally. After you call `removePublishStreamUrl` to stop + * streaming, the SDK returns this value. */ RTMP_STREAM_UNPUBLISH_REASON_OK = 100, }; @@ -3644,11 +3739,13 @@ enum RTMP_STREAM_PUBLISH_REASON { /** Events during the RTMP or RTMPS streaming. */ enum RTMP_STREAMING_EVENT { /** - * 1: An error occurs when you add a background image or a watermark image to the RTMP or RTMPS stream. + * 1: An error occurs when you add a background image or a watermark image to the RTMP or RTMPS + * stream. */ RTMP_STREAMING_EVENT_FAILED_LOAD_IMAGE = 1, /** - * 2: The streaming URL is already being used for CDN live streaming. If you want to start new streaming, use a new streaming URL. + * 2: The streaming URL is already being used for CDN live streaming. If you want to start new + * streaming, use a new streaming URL. */ RTMP_STREAMING_EVENT_URL_ALREADY_IN_USE = 2, /** @@ -3666,15 +3763,18 @@ enum RTMP_STREAMING_EVENT { */ typedef struct RtcImage { /** - *The HTTP/HTTPS URL address of the image in the live video. The maximum length of this parameter is 1024 bytes. + *The HTTP/HTTPS URL address of the image in the live video. The maximum length of this parameter + *is 1024 bytes. */ const char* url; /** - * The x coordinate (pixel) of the image on the video frame (taking the upper left corner of the video frame as the origin). + * The x coordinate (pixel) of the image on the video frame (taking the upper left corner of the + * video frame as the origin). */ int x; /** - * The y coordinate (pixel) of the image on the video frame (taking the upper left corner of the video frame as the origin). + * The y coordinate (pixel) of the image on the video frame (taking the upper left corner of the + * video frame as the origin). 
*/ int y; /** @@ -3705,18 +3805,21 @@ typedef struct RtcImage { /** * The configuration for advanced features of the RTMP or RTMPS streaming with transcoding. * - * If you want to enable the advanced features of streaming with transcoding, contact support@agora.io. + * If you want to enable the advanced features of streaming with transcoding, contact + * support@agora.io. */ struct LiveStreamAdvancedFeature { LiveStreamAdvancedFeature() : featureName(OPTIONAL_NULLPTR), opened(false) {} - LiveStreamAdvancedFeature(const char* feat_name, bool open) : featureName(feat_name), opened(open) {} + LiveStreamAdvancedFeature(const char* feat_name, bool open) + : featureName(feat_name), opened(open) {} /** The advanced feature for high-quality video with a lower bitrate. */ // static const char* LBHQ = "lbhq"; /** The advanced feature for the optimized video encoder. */ // static const char* VEO = "veo"; /** - * The feature names, including LBHQ (high-quality video with a lower bitrate) and VEO (optimized video encoder). + * The feature names, including LBHQ (high-quality video with a lower bitrate) and VEO (optimized + * video encoder). */ const char* featureName; @@ -3726,15 +3829,15 @@ struct LiveStreamAdvancedFeature { * - `false`: (Default) Disable the advanced feature. */ bool opened; -} ; +}; /** * Connection state types. */ -enum CONNECTION_STATE_TYPE -{ +enum CONNECTION_STATE_TYPE { /** - * 1: The SDK is disconnected from the Agora edge server. The state indicates the SDK is in one of the following phases: + * 1: The SDK is disconnected from the Agora edge server. The state indicates the SDK is in one of + * the following phases: * - The initial state before calling the `joinChannel` method. * - The app calls the `leaveChannel` method. */ @@ -3786,11 +3889,15 @@ struct TranscodingUser { */ uid_t uid; /** - * The x coordinate (pixel) of the host's video on the output video frame (taking the upper left corner of the video frame as the origin). 
The value range is [0, width], where width is the `width` set in `LiveTranscoding`. + * The x coordinate (pixel) of the host's video on the output video frame (taking the upper left + * corner of the video frame as the origin). The value range is [0, width], where width is the + * `width` set in `LiveTranscoding`. */ int x; /** - * The y coordinate (pixel) of the host's video on the output video frame (taking the upper left corner of the video frame as the origin). The value range is [0, height], where height is the `height` set in `LiveTranscoding`. + * The y coordinate (pixel) of the host's video on the output video frame (taking the upper left + * corner of the video frame as the origin). The value range is [0, height], where height is the + * `height` set in `LiveTranscoding`. */ int y; /** @@ -3807,7 +3914,7 @@ struct TranscodingUser { * - 100: The host's video is the top layer. * * If the value is beyond this range, the SDK reports the error code `ERR_INVALID_ARGUMENT`. - */ + */ int zOrder; /** * The transparency of the host's video. The value range is [0.0, 1.0]. @@ -3816,28 +3923,29 @@ struct TranscodingUser { */ double alpha; /** - * The audio channel used by the host's audio in the output audio. The default value is 0, and the value range is [0, 5]. - * - `0`: (Recommended) The defaut setting, which supports dual channels at most and depends on the upstream of the host. - * - `1`: The host's audio uses the FL audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `2`: The host's audio uses the FC audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `3`: The host's audio uses the FR audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `4`: The host's audio uses the BL audio channel. 
If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `5`: The host's audio uses the BR audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `0xFF` or a value greater than 5: The host's audio is muted, and the Agora server removes the host's audio. + * The audio channel used by the host's audio in the output audio. The default value is 0, and the + * value range is [0, 5]. + * - `0`: (Recommended) The defaut setting, which supports dual channels at most and depends on + * the upstream of the host. + * - `1`: The host's audio uses the FL audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `2`: The host's audio uses the FC audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `3`: The host's audio uses the FR audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `4`: The host's audio uses the BL audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `5`: The host's audio uses the BR audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `0xFF` or a value greater than 5: The host's audio is muted, and the Agora server removes the + * host's audio. * * @note If the value is not `0`, a special player is required. */ int audioChannel; TranscodingUser() - : uid(0), - x(0), - y(0), - width(0), - height(0), - zOrder(0), - alpha(1.0), - audioChannel(0) {} + : uid(0), x(0), y(0), width(0), height(0), zOrder(0), alpha(1.0), audioChannel(0) {} }; /** @@ -3860,10 +3968,12 @@ struct LiveTranscoding { int height; /** Bitrate of the CDN live output video stream. The default value is 400 Kbps. 
- Set this parameter according to the Video Bitrate Table. If you set a bitrate beyond the proper range, the SDK automatically adapts it to a value within the range. + Set this parameter according to the Video Bitrate Table. If you set a bitrate beyond the proper + range, the SDK automatically adapts it to a value within the range. */ int videoBitrate; - /** Frame rate of the output video stream set for the CDN live streaming. The default value is 15 fps, and the value range is (0,30]. + /** Frame rate of the output video stream set for the CDN live streaming. The default value is 15 + fps, and the value range is (0,30]. @note The Agora server adjusts any value over 30 to 30. */ @@ -3884,7 +3994,8 @@ struct LiveTranscoding { @note If you set this parameter to other values, Agora adjusts it to the default value of 100. */ VIDEO_CODEC_PROFILE_TYPE videoCodecProfile; - /** The background color in RGB hex value. Value only. Do not include a preceeding #. For example, 0xFFB6C1 (light pink). The default value is 0x000000 (black). + /** The background color in RGB hex value. Value only. Do not include a preceeding #. For example, + * 0xFFB6C1 (light pink). The default value is 0x000000 (black). */ unsigned int backgroundColor; /** Video codec profile types for Media Push. See VIDEO_CODEC_TYPE_FOR_STREAM. */ @@ -3893,10 +4004,12 @@ struct LiveTranscoding { * The value range is [0, 17]. */ unsigned int userCount; - /** Manages the user layout configuration in the Media Push. Agora supports a maximum of 17 transcoding users in a Media Push channel. See `TranscodingUser`. + /** Manages the user layout configuration in the Media Push. Agora supports a maximum of 17 + * transcoding users in a Media Push channel. See `TranscodingUser`. */ TranscodingUser* transcodingUsers; - /** Reserved property. Extra user-defined information to send SEI for the H.264/H.265 video stream to the CDN live client. Maximum length: 4096 Bytes. + /** Reserved property. 
Extra user-defined information to send SEI for the H.264/H.265 video stream + to the CDN live client. Maximum length: 4096 Bytes. For more information on SEI frame, see [SEI-related questions](https://docs.agora.io/en/faq/sei). */ @@ -3907,31 +4020,38 @@ struct LiveTranscoding { const char* metadata; /** The watermark on the live video. The image format needs to be PNG. See `RtcImage`. - You can add one watermark, or add multiple watermarks using an array. This parameter is used with `watermarkCount`. + You can add one watermark, or add multiple watermarks using an array. This parameter is used with + `watermarkCount`. */ RtcImage* watermark; /** - * The number of watermarks on the live video. The total number of watermarks and background images can range from 0 to 10. This parameter is used with `watermark`. + * The number of watermarks on the live video. The total number of watermarks and background + * images can range from 0 to 10. This parameter is used with `watermark`. */ unsigned int watermarkCount; - /** The number of background images on the live video. The image format needs to be PNG. See `RtcImage`. + /** The number of background images on the live video. The image format needs to be PNG. See + * `RtcImage`. * - * You can add a background image or use an array to add multiple background images. This parameter is used with `backgroundImageCount`. + * You can add a background image or use an array to add multiple background images. This + * parameter is used with `backgroundImageCount`. */ RtcImage* backgroundImage; /** - * The number of background images on the live video. The total number of watermarks and background images can range from 0 to 10. This parameter is used with `backgroundImage`. + * The number of background images on the live video. The total number of watermarks and + * background images can range from 0 to 10. This parameter is used with `backgroundImage`. 
*/ unsigned int backgroundImageCount; /** The audio sampling rate (Hz) of the output media stream. See #AUDIO_SAMPLE_RATE_TYPE. */ AUDIO_SAMPLE_RATE_TYPE audioSampleRate; - /** Bitrate (Kbps) of the audio output stream for Media Push. The default value is 48, and the highest value is 128. + /** Bitrate (Kbps) of the audio output stream for Media Push. The default value is 48, and the + * highest value is 128. */ int audioBitrate; - /** The number of audio channels for Media Push. Agora recommends choosing 1 (mono), or 2 (stereo) audio channels. Special players are required if you choose 3, 4, or 5. + /** The number of audio channels for Media Push. Agora recommends choosing 1 (mono), or 2 (stereo) + * audio channels. Special players are required if you choose 3, 4, or 5. * - 1: (Default) Mono. * - 2: Stereo. * - 3: Three audio channels. @@ -3942,7 +4062,8 @@ struct LiveTranscoding { /** Audio codec profile type for Media Push. See #AUDIO_CODEC_PROFILE_TYPE. */ AUDIO_CODEC_PROFILE_TYPE audioCodecProfile; - /** Advanced features of the RTMP or RTMPS streaming with transcoding. See LiveStreamAdvancedFeature. + /** Advanced features of the RTMP or RTMPS streaming with transcoding. See + * LiveStreamAdvancedFeature. */ LiveStreamAdvancedFeature* advancedFeatures; @@ -3959,7 +4080,7 @@ struct LiveTranscoding { videoCodecProfile(VIDEO_CODEC_PROFILE_HIGH), backgroundColor(0x000000), videoCodecType(VIDEO_CODEC_H264_FOR_STREAM), - userCount(0), + userCount(0), transcodingUsers(OPTIONAL_NULLPTR), transcodingExtraInfo(OPTIONAL_NULLPTR), metadata(OPTIONAL_NULLPTR), @@ -3985,12 +4106,14 @@ struct TranscodingVideoStream { VIDEO_SOURCE_TYPE sourceType; /** * The ID of the remote user. - * @note Use this parameter only when the source type of the video for the video mixing on the local client is `VIDEO_SOURCE_REMOTE`. + * @note Use this parameter only when the source type of the video for the video mixing on the + * local client is `VIDEO_SOURCE_REMOTE`. 
*/ uid_t remoteUserUid; /** * The URL of the image. - * @note Use this parameter only when the source type of the video for the video mixing on the local client is `RTC_IMAGE`. + * @note Use this parameter only when the source type of the video for the video mixing on the + * local client is `RTC_IMAGE`. */ const char* imageUrl; /** @@ -3998,11 +4121,13 @@ struct TranscodingVideoStream { */ int mediaPlayerId; /** - * The horizontal displacement of the top-left corner of the video for the video mixing on the client relative to the top-left corner (origin) of the canvas for this video mixing. + * The horizontal displacement of the top-left corner of the video for the video mixing on the + * client relative to the top-left corner (origin) of the canvas for this video mixing. */ int x; /** - * The vertical displacement of the top-left corner of the video for the video mixing on the client relative to the top-left corner (origin) of the canvas for this video mixing. + * The vertical displacement of the top-left corner of the video for the video mixing on the + * client relative to the top-left corner (origin) of the canvas for this video mixing. */ int y; /** @@ -4014,13 +4139,16 @@ struct TranscodingVideoStream { */ int height; /** - * The number of the layer to which the video for the video mixing on the local client belongs. The value range is [0,100]. + * The number of the layer to which the video for the video mixing on the local client belongs. + * The value range is [0,100]. * - 0: (Default) The layer is at the bottom. * - 100: The layer is at the top. */ int zOrder; /** - * The transparency of the video for the video mixing on the local client. The value range is [0.0,1.0]. 0.0 means the transparency is completely transparent. 1.0 means the transparency is opaque. + * The transparency of the video for the video mixing on the local client. The value range is + * [0.0,1.0]. 0.0 means the transparency is completely transparent. 
1.0 means the transparency is + * opaque. */ double alpha; /** @@ -4032,16 +4160,16 @@ struct TranscodingVideoStream { bool mirror; TranscodingVideoStream() - : sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), - remoteUserUid(0), - imageUrl(OPTIONAL_NULLPTR), - x(0), - y(0), - width(0), - height(0), - zOrder(0), - alpha(1.0), - mirror(false) {} + : sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + remoteUserUid(0), + imageUrl(OPTIONAL_NULLPTR), + x(0), + y(0), + width(0), + height(0), + zOrder(0), + alpha(1.0), + mirror(false) {} }; /** @@ -4057,17 +4185,25 @@ struct LocalTranscoderConfiguration { */ TranscodingVideoStream* videoInputStreams; /** - * The encoding configuration of the mixed video stream after the video mixing on the local client. See VideoEncoderConfiguration. + * The encoding configuration of the mixed video stream after the video mixing on the local + * client. See VideoEncoderConfiguration. */ VideoEncoderConfiguration videoOutputConfiguration; /** - * Whether to use the timestamp when the primary camera captures the video frame as the timestamp of the mixed video frame. - * - true: (Default) Use the timestamp of the captured video frame as the timestamp of the mixed video frame. - * - false: Do not use the timestamp of the captured video frame as the timestamp of the mixed video frame. Instead, use the timestamp when the mixed video frame is constructed. + * Whether to use the timestamp when the primary camera captures the video frame as the timestamp + * of the mixed video frame. + * - true: (Default) Use the timestamp of the captured video frame as the timestamp of the mixed + * video frame. + * - false: Do not use the timestamp of the captured video frame as the timestamp of the mixed + * video frame. Instead, use the timestamp when the mixed video frame is constructed. 
*/ bool syncWithPrimaryCamera; - LocalTranscoderConfiguration() : streamCount(0), videoInputStreams(OPTIONAL_NULLPTR), videoOutputConfiguration(), syncWithPrimaryCamera(true) {} + LocalTranscoderConfiguration() + : streamCount(0), + videoInputStreams(OPTIONAL_NULLPTR), + videoOutputConfiguration(), + syncWithPrimaryCamera(true) {} }; enum VIDEO_TRANSCODER_ERROR { @@ -4097,6 +4233,77 @@ enum VIDEO_TRANSCODER_ERROR { VT_ERR_INTERNAL = 20 }; + +/** + * The audio streams for the video mixing on the local client. + */ +struct MixedAudioStream { + /** + * The source type of audio for the audio mixing on the local client. See #AUDIO_SOURCE_TYPE. + */ + AUDIO_SOURCE_TYPE sourceType; + /** + * The ID of the remote user. + * @note Use this parameter only when the source type is `AUDIO_SOURCE_REMOTE`. + */ + uid_t remoteUserUid; + /** + * The channel ID of the remote user. + * @note Use this parameter only when the source type is `AUDIO_SOURCE_REMOTE`. + */ + const char* channelName; + /** + * The track ID of the local track. + * @note Use this parameter only when the source type is `AUDIO_SOURCE_REMOTE`. + */ + track_id_t trackId; + + MixedAudioStream(AUDIO_SOURCE_TYPE source) + : sourceType(source), + remoteUserUid(0), + channelName(NULL), + trackId(-1) {} + + MixedAudioStream(AUDIO_SOURCE_TYPE source, track_id_t track) + : sourceType(source), + trackId(track) {} + + MixedAudioStream(AUDIO_SOURCE_TYPE source, uid_t uid, const char* channel) + : sourceType(source), + remoteUserUid(uid), + channelName(channel) {} + + MixedAudioStream(AUDIO_SOURCE_TYPE source, uid_t uid, const char* channel, track_id_t track) + : sourceType(source), + remoteUserUid(uid), + channelName(channel), + trackId(track) {} + +}; + +/** + * The configuration of the audio mixing on the local client. + */ +struct LocalAudioMixerConfiguration { + /** + * The number of the audio streams for the audio mixing on the local client. 
+ */ + unsigned int streamCount; + /** + * The source of the streams to be mixed. + */ + MixedAudioStream* sourceStreams; + + /** + * Whether to use the timestamp that follows the local mic's audio frame. + * - true: (Default) Use the timestamp of the captured audio frame as the timestamp of the mixed audio frame. + * - false: Do not use the timestamp of the captured audio frame as the timestamp of the mixed audio frame. Instead, use the timestamp when the mixed audio frame is constructed. + */ + bool syncWithLocalMic; + + LocalAudioMixerConfiguration() : streamCount(0), syncWithLocalMic(true) {} +}; + /** * Configurations of the last-mile network test. */ @@ -4115,12 +4322,14 @@ struct LastmileProbeConfig { */ bool probeDownlink; /** - * The expected maximum sending bitrate (bps) of the local user. The value range is [100000, 5000000]. We recommend setting this parameter - * according to the bitrate value set by `setVideoEncoderConfiguration`. + * The expected maximum sending bitrate (bps) of the local user. The value range is [100000, + * 5000000]. We recommend setting this parameter according to the bitrate value set by + * `setVideoEncoderConfiguration`. */ unsigned int expectedUplinkBitrate; /** - * The expected maximum receiving bitrate (bps) of the local user. The value range is [100000,5000000]. + * The expected maximum receiving bitrate (bps) of the local user. The value range is + * [100000,5000000]. */ unsigned int expectedDownlinkBitrate; }; @@ -4134,11 +4343,13 @@ enum LASTMILE_PROBE_RESULT_STATE { */ LASTMILE_PROBE_RESULT_COMPLETE = 1, /** - * 2: The last-mile network probe test is incomplete because the bandwidth estimation is not available due to limited test resources. + * 2: The last-mile network probe test is incomplete because the bandwidth estimation is not + * available due to limited test resources. */ LASTMILE_PROBE_RESULT_INCOMPLETE_NO_BWE = 2, /** - * 3: The last-mile network probe test is not carried out, probably due to poor network conditions. 
+ * 3: The last-mile network probe test is not carried out, probably due to poor network + * conditions. */ LASTMILE_PROBE_RESULT_UNAVAILABLE = 3 }; @@ -4160,9 +4371,7 @@ struct LastmileProbeOneWayResult { */ unsigned int availableBandwidth; - LastmileProbeOneWayResult() : packetLossRate(0), - jitter(0), - availableBandwidth(0) {} + LastmileProbeOneWayResult() : packetLossRate(0), jitter(0), availableBandwidth(0) {} }; /** @@ -4186,16 +4395,13 @@ struct LastmileProbeResult { */ unsigned int rtt; - LastmileProbeResult() - : state(LASTMILE_PROBE_RESULT_UNAVAILABLE), - rtt(0) {} + LastmileProbeResult() : state(LASTMILE_PROBE_RESULT_UNAVAILABLE), rtt(0) {} }; /** * Reasons causing the change of the connection state. */ -enum CONNECTION_CHANGED_REASON_TYPE -{ +enum CONNECTION_CHANGED_REASON_TYPE { /** * 0: The SDK is connecting to the server. */ @@ -4209,11 +4415,13 @@ enum CONNECTION_CHANGED_REASON_TYPE */ CONNECTION_CHANGED_INTERRUPTED = 2, /** - * 3: The connection between the SDK and the server is banned by the server. This error occurs when the user is kicked out of the channel by the server. + * 3: The connection between the SDK and the server is banned by the server. This error occurs + * when the user is kicked out of the channel by the server. */ CONNECTION_CHANGED_BANNED_BY_SERVER = 3, /** - * 4: The SDK fails to join the channel. When the SDK fails to join the channel for more than 20 minutes, this error occurs and the SDK stops reconnecting to the channel. + * 4: The SDK fails to join the channel. When the SDK fails to join the channel for more than 20 + * minutes, this error occurs and the SDK stops reconnecting to the channel. */ CONNECTION_CHANGED_JOIN_FAILED = 4, /** @@ -4225,13 +4433,17 @@ enum CONNECTION_CHANGED_REASON_TYPE */ CONNECTION_CHANGED_INVALID_APP_ID = 6, /** - * 7: The connection fails because the channel name is not valid. Please rejoin the channel with a valid channel name. + * 7: The connection fails because the channel name is not valid. 
Please rejoin the channel with a + * valid channel name. */ CONNECTION_CHANGED_INVALID_CHANNEL_NAME = 7, /** * 8: The connection fails because the token is not valid. Typical reasons include: - * - The App Certificate for the project is enabled in Agora Console, but you do not use a token when joining the channel. If you enable the App Certificate, you must use a token to join the channel. - * - The `uid` specified when calling `joinChannel` to join the channel is inconsistent with the `uid` passed in when generating the token. + * - The App Certificate for the project is enabled in Agora Console, but you do not use a token + * when joining the channel. If you enable the App Certificate, you must use a token to join the + * channel. + * - The `uid` specified when calling `joinChannel` to join the channel is inconsistent with the + * `uid` passed in when generating the token. */ CONNECTION_CHANGED_INVALID_TOKEN = 8, /** @@ -4240,8 +4452,10 @@ enum CONNECTION_CHANGED_REASON_TYPE CONNECTION_CHANGED_TOKEN_EXPIRED = 9, /** * 10: The connection is rejected by the server. Typical reasons include: - * - The user is already in the channel and still calls a method, for example, `joinChannel`, to join the channel. Stop calling this method to clear this error. - * - The user tries to join the channel when conducting a pre-call test. The user needs to call the channel after the call test ends. + * - The user is already in the channel and still calls a method, for example, `joinChannel`, to + * join the channel. Stop calling this method to clear this error. + * - The user tries to join the channel when conducting a pre-call test. The user needs to call + * the channel after the call test ends. */ CONNECTION_CHANGED_REJECTED_BY_SERVER = 10, /** @@ -4253,11 +4467,13 @@ enum CONNECTION_CHANGED_REASON_TYPE */ CONNECTION_CHANGED_RENEW_TOKEN = 12, /** - * 13: The IP address of the client has changed, possibly because the network type, IP address, or port has been changed. 
+ * 13: The IP address of the client has changed, possibly because the network type, IP address, or + * port has been changed. */ CONNECTION_CHANGED_CLIENT_IP_ADDRESS_CHANGED = 13, /** - * 14: Timeout for the keep-alive of the connection between the SDK and the Agora edge server. The connection state changes to CONNECTION_STATE_RECONNECTING. + * 14: Timeout for the keep-alive of the connection between the SDK and the Agora edge server. The + * connection state changes to CONNECTION_STATE_RECONNECTING. */ CONNECTION_CHANGED_KEEP_ALIVE_TIMEOUT = 14, /** @@ -4354,11 +4570,13 @@ enum WLACC_SUGGEST_ACTION { */ WLACC_SUGGEST_ACTION_CONNECT_SSID = 1, /** - * The user is advised to check whether the AP supports 5G band and enable 5G band (the aciton link is attached), or purchases an AP that supports 5G. AP does not support 5G band. + * The user is advised to check whether the AP supports 5G band and enable 5G band (the action + * link is attached), or purchases an AP that supports 5G. AP does not support 5G band. */ WLACC_SUGGEST_ACTION_CHECK_5G = 2, /** - * The user is advised to change the SSID of the 2.4G or 5G band (the aciton link is attached). The SSID of the 2.4G band AP is the same as that of the 5G band. + * The user is advised to change the SSID of the 2.4G or 5G band (the action link is attached). + * The SSID of the 2.4G band AP is the same as that of the 5G band. */ WLACC_SUGGEST_ACTION_MODIFY_SSID = 3, }; @@ -4447,8 +4665,9 @@ struct VideoCanvas { uid_t uid; /** - * The uid of video stream composing the video stream from transcoder which will be drawn on this video canvas. - */ + * The uid of video stream composing the video stream from transcoder which will be drawn on this + * video canvas. + */ uid_t subviewUid; /** * Video display window. @@ -4467,7 +4686,7 @@ struct VideoCanvas { * The video mirror mode. See \ref VIDEO_MIRROR_MODE_TYPE "VIDEO_MIRROR_MODE_TYPE". * The default value is VIDEO_MIRROR_MODE_AUTO. 
* @note - * - For the mirror mode of the local video view: + * - For the mirror mode of the local video view: * If you use a front camera, the SDK enables the mirror mode by default; * if you use a rear camera, the SDK disables the mirror mode by default. * - For the remote user: The mirror mode is disabled by default. @@ -4484,14 +4703,14 @@ struct VideoCanvas { */ VIDEO_SOURCE_TYPE sourceType; /** - * The media player id of AgoraMediaPlayer. It should set this parameter when the + * The media player id of AgoraMediaPlayer. It should set this parameter when the * sourceType is VIDEO_SOURCE_MEDIA_PLAYER to show the video that AgoraMediaPlayer is playing. * You can get this value by calling the method \ref getMediaPlayerId(). */ int mediaPlayerId; /** - * If you want to display a certain part of a video frame, you can set - * this value to crop the video frame to show. + * If you want to display a certain part of a video frame, you can set + * this value to crop the video frame to show. * The default value is empty(that is, if it has zero width or height), which means no cropping. 
*/ Rectangle cropArea; @@ -4508,62 +4727,115 @@ struct VideoCanvas { media::base::VIDEO_MODULE_POSITION position; VideoCanvas() - : uid(0), subviewUid(0), view(NULL), backgroundColor(0x00000000), renderMode(media::base::RENDER_MODE_HIDDEN), mirrorMode(VIDEO_MIRROR_MODE_AUTO), - setupMode(VIDEO_VIEW_SETUP_REPLACE), sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), mediaPlayerId(-ERR_NOT_READY), - cropArea(0, 0, 0, 0), enableAlphaMask(false), position(media::base::POSITION_POST_CAPTURER) {} + : uid(0), + subviewUid(0), + view(NULL), + backgroundColor(0x00000000), + renderMode(media::base::RENDER_MODE_HIDDEN), + mirrorMode(VIDEO_MIRROR_MODE_AUTO), + setupMode(VIDEO_VIEW_SETUP_REPLACE), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + mediaPlayerId(-ERR_NOT_READY), + cropArea(0, 0, 0, 0), + enableAlphaMask(false), + position(media::base::POSITION_POST_CAPTURER) {} VideoCanvas(view_t v, media::base::RENDER_MODE_TYPE m, VIDEO_MIRROR_MODE_TYPE mt) - : uid(0), subviewUid(0), view(v), backgroundColor(0x00000000), renderMode(m), mirrorMode(mt), setupMode(VIDEO_VIEW_SETUP_REPLACE), - sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), mediaPlayerId(-ERR_NOT_READY), - cropArea(0, 0, 0, 0), enableAlphaMask(false), position(media::base::POSITION_POST_CAPTURER) {} + : uid(0), + subviewUid(0), + view(v), + backgroundColor(0x00000000), + renderMode(m), + mirrorMode(mt), + setupMode(VIDEO_VIEW_SETUP_REPLACE), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + mediaPlayerId(-ERR_NOT_READY), + cropArea(0, 0, 0, 0), + enableAlphaMask(false), + position(media::base::POSITION_POST_CAPTURER) {} VideoCanvas(view_t v, media::base::RENDER_MODE_TYPE m, VIDEO_MIRROR_MODE_TYPE mt, uid_t u) - : uid(u), subviewUid(0), view(v), backgroundColor(0x00000000), renderMode(m), mirrorMode(mt), setupMode(VIDEO_VIEW_SETUP_REPLACE), - sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), mediaPlayerId(-ERR_NOT_READY), - cropArea(0, 0, 0, 0), enableAlphaMask(false), position(media::base::POSITION_POST_CAPTURER) {} - - VideoCanvas(view_t v, 
media::base::RENDER_MODE_TYPE m, VIDEO_MIRROR_MODE_TYPE mt, uid_t u, uid_t subu) - : uid(u), subviewUid(subu), view(v), backgroundColor(0x00000000), renderMode(m), mirrorMode(mt), setupMode(VIDEO_VIEW_SETUP_REPLACE), - sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), mediaPlayerId(-ERR_NOT_READY), - cropArea(0, 0, 0, 0), enableAlphaMask(false), position(media::base::POSITION_POST_CAPTURER) {} + : uid(u), + subviewUid(0), + view(v), + backgroundColor(0x00000000), + renderMode(m), + mirrorMode(mt), + setupMode(VIDEO_VIEW_SETUP_REPLACE), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + mediaPlayerId(-ERR_NOT_READY), + cropArea(0, 0, 0, 0), + enableAlphaMask(false), + position(media::base::POSITION_POST_CAPTURER) {} + + VideoCanvas(view_t v, media::base::RENDER_MODE_TYPE m, VIDEO_MIRROR_MODE_TYPE mt, uid_t u, + uid_t subu) + : uid(u), + subviewUid(subu), + view(v), + backgroundColor(0x00000000), + renderMode(m), + mirrorMode(mt), + setupMode(VIDEO_VIEW_SETUP_REPLACE), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + mediaPlayerId(-ERR_NOT_READY), + cropArea(0, 0, 0, 0), + enableAlphaMask(false), + position(media::base::POSITION_POST_CAPTURER) {} }; /** Image enhancement options. */ struct BeautyOptions { /** The contrast level. - */ + */ enum LIGHTENING_CONTRAST_LEVEL { - /** Low contrast level. */ - LIGHTENING_CONTRAST_LOW = 0, - /** (Default) Normal contrast level. */ - LIGHTENING_CONTRAST_NORMAL = 1, - /** High contrast level. */ - LIGHTENING_CONTRAST_HIGH = 2, + /** Low contrast level. */ + LIGHTENING_CONTRAST_LOW = 0, + /** (Default) Normal contrast level. */ + LIGHTENING_CONTRAST_NORMAL = 1, + /** High contrast level. */ + LIGHTENING_CONTRAST_HIGH = 2, }; - /** The contrast level, used with the `lighteningLevel` parameter. The larger the value, the greater the contrast between light and dark. See #LIGHTENING_CONTRAST_LEVEL. - */ + /** The contrast level, used with the `lighteningLevel` parameter. The larger the value, the + * greater the contrast between light and dark. 
See #LIGHTENING_CONTRAST_LEVEL. + */ LIGHTENING_CONTRAST_LEVEL lighteningContrastLevel; - /** The brightness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The greater the value, the greater the degree of whitening. */ + /** The brightness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. + * The greater the value, the greater the degree of whitening. */ float lighteningLevel; - /** The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The greater the value, the greater the degree of skin grinding. - */ + /** The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The greater the value, + * the greater the degree of skin grinding. + */ float smoothnessLevel; - /** The redness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The larger the value, the greater the rosy degree. - */ + /** The redness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The + * larger the value, the greater the rosy degree. + */ float rednessLevel; - /** The sharpness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The larger the value, the greater the sharpening degree. - */ + /** The sharpness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. + * The larger the value, the greater the sharpening degree. 
+ */ float sharpnessLevel; - BeautyOptions(LIGHTENING_CONTRAST_LEVEL contrastLevel, float lightening, float smoothness, float redness, float sharpness) : lighteningContrastLevel(contrastLevel), lighteningLevel(lightening), smoothnessLevel(smoothness), rednessLevel(redness), sharpnessLevel(sharpness) {} + BeautyOptions(LIGHTENING_CONTRAST_LEVEL contrastLevel, float lightening, float smoothness, + float redness, float sharpness) + : lighteningContrastLevel(contrastLevel), + lighteningLevel(lightening), + smoothnessLevel(smoothness), + rednessLevel(redness), + sharpnessLevel(sharpness) {} - BeautyOptions() : lighteningContrastLevel(LIGHTENING_CONTRAST_NORMAL), lighteningLevel(0), smoothnessLevel(0), rednessLevel(0), sharpnessLevel(0) {} + BeautyOptions() + : lighteningContrastLevel(LIGHTENING_CONTRAST_NORMAL), + lighteningLevel(0), + smoothnessLevel(0), + rednessLevel(0), + sharpnessLevel(0) {} }; /** Face shape area options. This structure defines options for facial adjustments on different facial areas. @@ -4645,12 +4917,45 @@ struct FaceShapeBeautyOptions { FaceShapeBeautyOptions() : shapeStyle(FACE_SHAPE_BEAUTY_STYLE_FEMALE), styleIntensity(50) {} }; +/** Filter effect options. This structure defines options for filter effect. + * + * @since v4.4.1 + */ +struct FilterEffectOptions { + /** + * The local absolute path of the custom 3D Cube path. Only cube format is supported. + * The cube file must strictly comply with the Cube LUT Specification; otherwise, the filter effects will not take effect. + * + * The following is an example of the Cube file format. The cube file starts with `LUT_3D_SIZE`, which indicates the cube size. In filter effects, the cube size is limited to 32. + + * LUT_3D_SIZE 32 + * 0.0039215689 0 0.0039215682 + * 0.0086021447 0.0037950677 0 + * 0.0728652592 0.0039215689 0 + * ... + * + * The SDK provides a built-in cube named `built_in_whiten.cube` for whitening. 
To use this cube, specify the path to `built_in_whiten_filter` + */ + const char * path; + + /** + * The intensity of specified filter effect. The value ranges from 0.0 to 1.0. The default value is 0.5. The greater the value, the stronger the intensity of the filter. + */ + float strength; + + FilterEffectOptions(const char * lut3dPath, float filterStrength) : path(lut3dPath), strength(filterStrength) {} + + FilterEffectOptions() : path(OPTIONAL_NULLPTR), strength(0.5) {} +}; + struct LowlightEnhanceOptions { /** * The low-light enhancement mode. */ enum LOW_LIGHT_ENHANCE_MODE { - /** 0: (Default) Automatic mode. The SDK automatically enables or disables the low-light enhancement feature according to the ambient light to compensate for the lighting level or prevent overexposure, as necessary. */ + /** 0: (Default) Automatic mode. The SDK automatically enables or disables the low-light + enhancement feature according to the ambient light to compensate for the lighting level or + prevent overexposure, as necessary. */ LOW_LIGHT_ENHANCE_AUTO = 0, /** Manual mode. Users need to enable or disable the low-light enhancement feature manually. */ LOW_LIGHT_ENHANCE_MANUAL = 1, @@ -4660,11 +4965,14 @@ struct LowlightEnhanceOptions { */ enum LOW_LIGHT_ENHANCE_LEVEL { /** - * 0: (Default) Promotes video quality during low-light enhancement. It processes the brightness, details, and noise of the video image. The performance consumption is moderate, the processing speed is moderate, and the overall video quality is optimal. + * 0: (Default) Promotes video quality during low-light enhancement. It processes the + * brightness, details, and noise of the video image. The performance consumption is moderate, + * the processing speed is moderate, and the overall video quality is optimal. */ LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY = 0, /** - * Promotes performance during low-light enhancement. It processes the brightness and details of the video image. The processing speed is faster. 
+ * Promotes performance during low-light enhancement. It processes the brightness and details of + * the video image. The processing speed is faster. */ LOW_LIGHT_ENHANCE_LEVEL_FAST = 1, }; @@ -4677,9 +4985,11 @@ struct LowlightEnhanceOptions { */ LOW_LIGHT_ENHANCE_LEVEL level; - LowlightEnhanceOptions(LOW_LIGHT_ENHANCE_MODE lowlightMode, LOW_LIGHT_ENHANCE_LEVEL lowlightLevel) : mode(lowlightMode), level(lowlightLevel) {} + LowlightEnhanceOptions(LOW_LIGHT_ENHANCE_MODE lowlightMode, LOW_LIGHT_ENHANCE_LEVEL lowlightLevel) + : mode(lowlightMode), level(lowlightLevel) {} - LowlightEnhanceOptions() : mode(LOW_LIGHT_ENHANCE_AUTO), level(LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY) {} + LowlightEnhanceOptions() + : mode(LOW_LIGHT_ENHANCE_AUTO), level(LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY) {} }; /** * The video noise reduction options. @@ -4690,7 +5000,8 @@ struct VideoDenoiserOptions { /** The video noise reduction mode. */ enum VIDEO_DENOISER_MODE { - /** 0: (Default) Automatic mode. The SDK automatically enables or disables the video noise reduction feature according to the ambient light. */ + /** 0: (Default) Automatic mode. The SDK automatically enables or disables the video noise + reduction feature according to the ambient light. */ VIDEO_DENOISER_AUTO = 0, /** Manual mode. Users need to enable or disable the video noise reduction feature manually. */ VIDEO_DENOISER_MANUAL = 1, @@ -4700,21 +5011,20 @@ struct VideoDenoiserOptions { */ enum VIDEO_DENOISER_LEVEL { /** - * 0: (Default) Promotes video quality during video noise reduction. `HIGH_QUALITY` balances performance consumption and video noise reduction quality. - * The performance consumption is moderate, the video noise reduction speed is moderate, and the overall video quality is optimal. + * 0: (Default) Promotes video quality during video noise reduction. `HIGH_QUALITY` balances + * performance consumption and video noise reduction quality. 
The performance consumption is + * moderate, the video noise reduction speed is moderate, and the overall video quality is + * optimal. */ VIDEO_DENOISER_LEVEL_HIGH_QUALITY = 0, /** - * Promotes reducing performance consumption during video noise reduction. `FAST` prioritizes reducing performance consumption over video noise reduction quality. - * The performance consumption is lower, and the video noise reduction speed is faster. To avoid a noticeable shadowing effect (shadows trailing behind moving objects) in the processed video, Agora recommends that you use `FAST` when the camera is fixed. + * Promotes reducing performance consumption during video noise reduction. `FAST` prioritizes + * reducing performance consumption over video noise reduction quality. The performance + * consumption is lower, and the video noise reduction speed is faster. To avoid a noticeable + * shadowing effect (shadows trailing behind moving objects) in the processed video, Agora + * recommends that you use `FAST` when the camera is fixed. */ VIDEO_DENOISER_LEVEL_FAST = 1, - /** - * Enhanced video noise reduction. `STRENGTH` prioritizes video noise reduction quality over reducing performance consumption. - * The performance consumption is higher, the video noise reduction speed is slower, and the video noise reduction quality is better. - * If `HIGH_QUALITY` is not enough for your video noise reduction needs, you can use `STRENGTH`. - */ - VIDEO_DENOISER_LEVEL_STRENGTH = 2, }; /** The video noise reduction mode. See #VIDEO_DENOISER_MODE. 
*/ @@ -4724,7 +5034,8 @@ struct VideoDenoiserOptions { */ VIDEO_DENOISER_LEVEL level; - VideoDenoiserOptions(VIDEO_DENOISER_MODE denoiserMode, VIDEO_DENOISER_LEVEL denoiserLevel) : mode(denoiserMode), level(denoiserLevel) {} + VideoDenoiserOptions(VIDEO_DENOISER_MODE denoiserMode, VIDEO_DENOISER_LEVEL denoiserLevel) + : mode(denoiserMode), level(denoiserLevel) {} VideoDenoiserOptions() : mode(VIDEO_DENOISER_AUTO), level(VIDEO_DENOISER_LEVEL_HIGH_QUALITY) {} }; @@ -4734,17 +5045,24 @@ struct VideoDenoiserOptions { * @since v4.0.0 */ struct ColorEnhanceOptions { - /** The level of color enhancement. The value range is [0.0,1.0]. `0.0` is the default value, which means no color enhancement is applied to the video. The higher the value, the higher the level of color enhancement. + /** The level of color enhancement. The value range is [0.0,1.0]. `0.0` is the default value, + * which means no color enhancement is applied to the video. The higher the value, the higher the + * level of color enhancement. */ float strengthLevel; - /** The level of skin tone protection. The value range is [0.0,1.0]. `0.0` means no skin tone protection. The higher the value, the higher the level of skin tone protection. - * The default value is `1.0`. When the level of color enhancement is higher, the portrait skin tone can be significantly distorted, so you need to set the level of skin tone protection; when the level of skin tone protection is higher, the color enhancement effect can be slightly reduced. - * Therefore, to get the best color enhancement effect, Agora recommends that you adjust `strengthLevel` and `skinProtectLevel` to get the most appropriate values. + /** The level of skin tone protection. The value range is [0.0,1.0]. `0.0` means no skin tone + * protection. The higher the value, the higher the level of skin tone protection. The default + * value is `1.0`. 
When the level of color enhancement is higher, the portrait skin tone can be + * significantly distorted, so you need to set the level of skin tone protection; when the level + * of skin tone protection is higher, the color enhancement effect can be slightly reduced. + * Therefore, to get the best color enhancement effect, Agora recommends that you adjust + * `strengthLevel` and `skinProtectLevel` to get the most appropriate values. */ float skinProtectLevel; - ColorEnhanceOptions(float stength, float skinProtect) : strengthLevel(stength), skinProtectLevel(skinProtect) {} + ColorEnhanceOptions(float stength, float skinProtect) + : strengthLevel(stength), skinProtectLevel(skinProtect) {} ColorEnhanceOptions() : strengthLevel(0), skinProtectLevel(1) {} }; @@ -4768,12 +5086,12 @@ struct VirtualBackgroundSource { * The background source is a file in PNG or JPG format. */ BACKGROUND_IMG = 2, - /** + /** * The background source is the blurred original video frame. * */ BACKGROUND_BLUR = 3, - /** - * The background source is a file in MP4, AVI, MKV, FLV format. + /** + * The background source is a file in MP4, AVI, MKV, FLV format. * */ BACKGROUND_VIDEO = 4, }; @@ -4781,11 +5099,14 @@ struct VirtualBackgroundSource { /** The degree of blurring applied to the background source. */ enum BACKGROUND_BLUR_DEGREE { - /** 1: The degree of blurring applied to the custom background image is low. The user can almost see the background clearly. */ + /** 1: The degree of blurring applied to the custom background image is low. The user can almost + see the background clearly. */ BLUR_DEGREE_LOW = 1, - /** 2: The degree of blurring applied to the custom background image is medium. It is difficult for the user to recognize details in the background. */ + /** 2: The degree of blurring applied to the custom background image is medium. It is difficult + for the user to recognize details in the background. 
*/ BLUR_DEGREE_MEDIUM = 2, - /** 3: (Default) The degree of blurring applied to the custom background image is high. The user can barely see any distinguishing features in the background. */ + /** 3: (Default) The degree of blurring applied to the custom background image is high. The user + can barely see any distinguishing features in the background. */ BLUR_DEGREE_HIGH = 3, }; @@ -4794,34 +5115,41 @@ struct VirtualBackgroundSource { BACKGROUND_SOURCE_TYPE background_source_type; /** - * The color of the custom background image. The format is a hexadecimal integer defined by RGB, without the # sign, - * such as 0xFFB6C1 for light pink. The default value is 0xFFFFFF, which signifies white. The value range - * is [0x000000,0xFFFFFF]. If the value is invalid, the SDK replaces the original background image with a white - * background image. + * The color of the custom background image. The format is a hexadecimal integer defined by RGB, + * without the # sign, such as 0xFFB6C1 for light pink. The default value is 0xFFFFFF, which + * signifies white. The value range is [0x000000,0xFFFFFF]. If the value is invalid, the SDK + * replaces the original background image with a white background image. * - * @note This parameter takes effect only when the type of the custom background image is `BACKGROUND_COLOR`. + * @note This parameter takes effect only when the type of the custom background image is + * `BACKGROUND_COLOR`. */ unsigned int color; /** - * The local absolute path of the custom background image. PNG and JPG formats are supported. If the path is invalid, - * the SDK replaces the original background image with a white background image. + * The local absolute path of the custom background image. PNG and JPG formats are supported. If + * the path is invalid, the SDK replaces the original background image with a white background + * image. * - * @note This parameter takes effect only when the type of the custom background image is `BACKGROUND_IMG`. 
+ * @note This parameter takes effect only when the type of the custom background image is + * `BACKGROUND_IMG`. */ const char* source; /** The degree of blurring applied to the custom background image. See BACKGROUND_BLUR_DEGREE. - * @note This parameter takes effect only when the type of the custom background image is `BACKGROUND_BLUR`. + * @note This parameter takes effect only when the type of the custom background image is + * `BACKGROUND_BLUR`. */ BACKGROUND_BLUR_DEGREE blur_degree; - VirtualBackgroundSource() : background_source_type(BACKGROUND_COLOR), color(0xffffff), source(OPTIONAL_NULLPTR), blur_degree(BLUR_DEGREE_HIGH) {} + VirtualBackgroundSource() + : background_source_type(BACKGROUND_COLOR), + color(0xffffff), + source(OPTIONAL_NULLPTR), + blur_degree(BLUR_DEGREE_HIGH) {} }; struct SegmentationProperty { - - enum SEG_MODEL_TYPE { + enum SEG_MODEL_TYPE { SEG_MODEL_AI = 1, SEG_MODEL_GREEN = 2 @@ -4831,34 +5159,33 @@ struct SegmentationProperty { float greenCapacity; - - SegmentationProperty() : modelType(SEG_MODEL_AI), greenCapacity(0.5){} + SegmentationProperty() : modelType(SEG_MODEL_AI), greenCapacity(0.5) {} }; /** The type of custom audio track -*/ + */ enum AUDIO_TRACK_TYPE { - /** + /** * -1: Invalid audio track */ AUDIO_TRACK_INVALID = -1, - /** + /** * 0: Mixable audio track - * You can push more than one mixable Audio tracks into one RTC connection(channel id + uid), + * You can push more than one mixable Audio tracks into one RTC connection(channel id + uid), * and SDK will mix these tracks into one audio track automatically. * However, compare to direct audio track, mixable track might cause extra 30ms+ delay. */ AUDIO_TRACK_MIXABLE = 0, /** * 1: Direct audio track - * You can only push one direct (non-mixable) audio track into one RTC connection(channel id + uid). - * Compare to mixable stream, you can have lower lantency using direct audio track. 
+ * You can only push one direct (non-mixable) audio track into one RTC connection(channel id + + * uid). Compare to mixable stream, you can have lower lantency using direct audio track. */ AUDIO_TRACK_DIRECT = 1, }; /** The configuration of custom audio track -*/ + */ struct AudioTrackConfig { /** * Enable local playback, enabled by default @@ -4867,8 +5194,7 @@ struct AudioTrackConfig { */ bool enableLocalPlayback; - AudioTrackConfig() - : enableLocalPlayback(true) {} + AudioTrackConfig() : enableLocalPlayback(true) {} }; /** @@ -4915,11 +5241,12 @@ enum VOICE_BEAUTIFIER_PRESET { CHAT_BEAUTIFIER_VITALITY = 0x01010300, /** * Singing beautifier effect. - * - If you call `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER), you can beautify a male-sounding voice and add a reverberation effect - * that sounds like singing in a small room. Agora recommends not using `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER) to process - * a female-sounding voice; otherwise, you may experience vocal distortion. - * - If you call `setVoiceBeautifierParameters`(SINGING_BEAUTIFIER, param1, param2), you can beautify a male- or - * female-sounding voice and add a reverberation effect. + * - If you call `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER), you can beautify a male-sounding + * voice and add a reverberation effect that sounds like singing in a small room. Agora recommends + * not using `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER) to process a female-sounding voice; + * otherwise, you may experience vocal distortion. + * - If you call `setVoiceBeautifierParameters`(SINGING_BEAUTIFIER, param1, param2), you can + * beautify a male- or female-sounding voice and add a reverberation effect. */ SINGING_BEAUTIFIER = 0x01020100, /** A more vigorous voice. @@ -4949,8 +5276,9 @@ enum VOICE_BEAUTIFIER_PRESET { /** * A ultra-high quality voice, which makes the audio clearer and restores more details. 
* - To achieve better audio effect quality, Agora recommends that you call `setAudioProfile` - * and set the `profile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` - * and `scenario` to `AUDIO_SCENARIO_HIGH_DEFINITION(6)` before calling `setVoiceBeautifierPreset`. + * and set the `profile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or + * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` and `scenario` to + * `AUDIO_SCENARIO_HIGH_DEFINITION(6)` before calling `setVoiceBeautifierPreset`. * - If you have an audio capturing device that can already restore audio details to a high * degree, Agora recommends that you do not enable ultra-high quality; otherwise, the SDK may * over-restore audio details, and you may not hear the anticipated voice effect. @@ -4960,7 +5288,9 @@ enum VOICE_BEAUTIFIER_PRESET { /** Preset voice effects. * - * For better voice effects, Agora recommends setting the `profile` parameter of `setAudioProfile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` before using the following presets: + * For better voice effects, Agora recommends setting the `profile` parameter of `setAudioProfile` + * to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` before using + * the following presets: * * - `ROOM_ACOUSTICS_KTV` * - `ROOM_ACOUSTICS_VOCAL_CONCERT` @@ -5008,8 +5338,8 @@ enum AUDIO_EFFECT_PRESET { */ ROOM_ACOUSTICS_ETHEREAL = 0x02010700, /** A 3D voice effect that makes the voice appear to be moving around the user. The default cycle - * period of the 3D voice effect is 10 seconds. To change the cycle period, call `setAudioEffectParameters` - * after this method. + * period of the 3D voice effect is 10 seconds. To change the cycle period, call + * `setAudioEffectParameters` after this method. 
* * @note * - Before using this preset, set the `profile` parameter of `setAudioProfile` to @@ -5031,12 +5361,12 @@ enum AUDIO_EFFECT_PRESET { */ ROOM_ACOUSTICS_VIRTUAL_SURROUND_SOUND = 0x02010900, /** The voice effect for chorus. - * + * * @note: To achieve better audio effect quality, Agora recommends calling \ref * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `profile` parameter to * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` before * setting this enumerator. - */ + */ ROOM_ACOUSTICS_CHORUS = 0x02010D00, /** A middle-aged man's voice. * @@ -5047,14 +5377,14 @@ enum AUDIO_EFFECT_PRESET { VOICE_CHANGER_EFFECT_UNCLE = 0x02020100, /** A senior man's voice. * - * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you may - * not hear the anticipated voice effect. + * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you + * may not hear the anticipated voice effect. */ VOICE_CHANGER_EFFECT_OLDMAN = 0x02020200, /** A boy's voice. * - * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you may - * not hear the anticipated voice effect. + * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you + * may not hear the anticipated voice effect. */ VOICE_CHANGER_EFFECT_BOY = 0x02020300, /** A young woman's voice. @@ -5066,8 +5396,8 @@ enum AUDIO_EFFECT_PRESET { VOICE_CHANGER_EFFECT_SISTER = 0x02020400, /** A girl's voice. * - * @note Agora recommends using this enumerator to process a female-sounding voice; otherwise, you may - * not hear the anticipated voice effect. + * @note Agora recommends using this enumerator to process a female-sounding voice; otherwise, you + * may not hear the anticipated voice effect. 
*/ VOICE_CHANGER_EFFECT_GIRL = 0x02020500, /** The voice of Pig King, a character in Journey to the West who has a voice like a growling @@ -5092,8 +5422,8 @@ enum AUDIO_EFFECT_PRESET { */ STYLE_TRANSFORMATION_POPULAR = 0x02030200, /** A pitch correction effect that corrects the user's pitch based on the pitch of the natural C - * major scale. After setting this voice effect, you can call `setAudioEffectParameters` to adjust - * the basic mode of tuning and the pitch of the main tone. + * major scale. After setting this voice effect, you can call `setAudioEffectParameters` to adjust + * the basic mode of tuning and the pitch of the main tone. */ PITCH_CORRECTION = 0x02040100, @@ -5108,16 +5438,20 @@ enum VOICE_CONVERSION_PRESET { /** Turn off voice conversion and use the original voice. */ VOICE_CONVERSION_OFF = 0x00000000, - /** A gender-neutral voice. To avoid audio distortion, ensure that you use this enumerator to process a female-sounding voice. + /** A gender-neutral voice. To avoid audio distortion, ensure that you use this enumerator to + * process a female-sounding voice. */ VOICE_CHANGER_NEUTRAL = 0x03010100, - /** A sweet voice. To avoid audio distortion, ensure that you use this enumerator to process a female-sounding voice. + /** A sweet voice. To avoid audio distortion, ensure that you use this enumerator to process a + * female-sounding voice. */ VOICE_CHANGER_SWEET = 0x03010200, - /** A steady voice. To avoid audio distortion, ensure that you use this enumerator to process a male-sounding voice. + /** A steady voice. To avoid audio distortion, ensure that you use this enumerator to process a + * male-sounding voice. */ VOICE_CHANGER_SOLID = 0x03010300, - /** A deep voice. To avoid audio distortion, ensure that you use this enumerator to process a male-sounding voice. + /** A deep voice. To avoid audio distortion, ensure that you use this enumerator to process a + * male-sounding voice. 
*/ VOICE_CHANGER_BASS = 0x03010400, /** A voice like a cartoon character. @@ -5224,9 +5558,9 @@ struct ScreenCaptureParameters { */ VideoDimensions dimensions; /** - * On Windows and macOS, it represents the video encoding frame rate (fps) of the shared screen stream. - * The frame rate (fps) of the shared region. The default value is 5. We do not recommend setting - * this to a value greater than 15. + * On Windows and macOS, it represents the video encoding frame rate (fps) of the shared screen + * stream. The frame rate (fps) of the shared region. The default value is 5. We do not recommend + * setting this to a value greater than 15. */ int frameRate; /** @@ -5241,52 +5575,109 @@ struct ScreenCaptureParameters { */ bool captureMouseCursor; /** - * Whether to bring the window to the front when calling the `startScreenCaptureByWindowId` method to share it: + * Whether to bring the window to the front when calling the `startScreenCaptureByWindowId` method + * to share it: * - `true`: Bring the window to the front. * - `false`: (Default) Do not bring the window to the front. - */ + */ bool windowFocus; /** - * A list of IDs of windows to be blocked. When calling `startScreenCaptureByDisplayId` to start screen sharing, - * you can use this parameter to block a specified window. When calling `updateScreenCaptureParameters` to update - * screen sharing configurations, you can use this parameter to dynamically block the specified windows during - * screen sharing. + * A list of IDs of windows to be blocked. When calling `startScreenCaptureByDisplayId` to start + * screen sharing, you can use this parameter to block a specified window. When calling + * `updateScreenCaptureParameters` to update screen sharing configurations, you can use this + * parameter to dynamically block the specified windows during screen sharing. */ - view_t *excludeWindowList; + view_t* excludeWindowList; /** * The number of windows to be blocked. 
*/ int excludeWindowCount; /** The width (px) of the border. Defaults to 0, and the value range is [0,50]. - * - */ + * + */ int highLightWidth; /** The color of the border in RGBA format. The default value is 0xFF8CBF26. - * - */ + * + */ unsigned int highLightColor; /** Whether to place a border around the shared window or screen: - * - true: Place a border. - * - false: (Default) Do not place a border. - * - * @note When you share a part of a window or screen, the SDK places a border around the entire window or screen if you set `enableHighLight` as true. - * - */ + * - true: Place a border. + * - false: (Default) Do not place a border. + * + * @note When you share a part of a window or screen, the SDK places a border around the entire + * window or screen if you set `enableHighLight` as true. + * + */ bool enableHighLight; ScreenCaptureParameters() - : dimensions(1920, 1080), frameRate(5), bitrate(STANDARD_BITRATE), captureMouseCursor(true), windowFocus(false), excludeWindowList(OPTIONAL_NULLPTR), excludeWindowCount(0), highLightWidth(0), highLightColor(0), enableHighLight(false) {} + : dimensions(1920, 1080), + frameRate(5), + bitrate(STANDARD_BITRATE), + captureMouseCursor(true), + windowFocus(false), + excludeWindowList(OPTIONAL_NULLPTR), + excludeWindowCount(0), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} ScreenCaptureParameters(const VideoDimensions& d, int f, int b) - : dimensions(d), frameRate(f), bitrate(b), captureMouseCursor(true), windowFocus(false), excludeWindowList(OPTIONAL_NULLPTR), excludeWindowCount(0), highLightWidth(0), highLightColor(0), enableHighLight(false) {} + : dimensions(d), + frameRate(f), + bitrate(b), + captureMouseCursor(true), + windowFocus(false), + excludeWindowList(OPTIONAL_NULLPTR), + excludeWindowCount(0), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} ScreenCaptureParameters(int width, int height, int f, int b) - : dimensions(width, height), frameRate(f), bitrate(b), 
captureMouseCursor(true), windowFocus(false), excludeWindowList(OPTIONAL_NULLPTR), excludeWindowCount(0), highLightWidth(0), highLightColor(0), enableHighLight(false){} + : dimensions(width, height), + frameRate(f), + bitrate(b), + captureMouseCursor(true), + windowFocus(false), + excludeWindowList(OPTIONAL_NULLPTR), + excludeWindowCount(0), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} ScreenCaptureParameters(int width, int height, int f, int b, bool cur, bool fcs) - : dimensions(width, height), frameRate(f), bitrate(b), captureMouseCursor(cur), windowFocus(fcs), excludeWindowList(OPTIONAL_NULLPTR), excludeWindowCount(0), highLightWidth(0), highLightColor(0), enableHighLight(false) {} - ScreenCaptureParameters(int width, int height, int f, int b, view_t *ex, int cnt) - : dimensions(width, height), frameRate(f), bitrate(b), captureMouseCursor(true), windowFocus(false), excludeWindowList(ex), excludeWindowCount(cnt), highLightWidth(0), highLightColor(0), enableHighLight(false) {} - ScreenCaptureParameters(int width, int height, int f, int b, bool cur, bool fcs, view_t *ex, int cnt) - : dimensions(width, height), frameRate(f), bitrate(b), captureMouseCursor(cur), windowFocus(fcs), excludeWindowList(ex), excludeWindowCount(cnt), highLightWidth(0), highLightColor(0), enableHighLight(false) {} + : dimensions(width, height), + frameRate(f), + bitrate(b), + captureMouseCursor(cur), + windowFocus(fcs), + excludeWindowList(OPTIONAL_NULLPTR), + excludeWindowCount(0), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} + ScreenCaptureParameters(int width, int height, int f, int b, view_t* ex, int cnt) + : dimensions(width, height), + frameRate(f), + bitrate(b), + captureMouseCursor(true), + windowFocus(false), + excludeWindowList(ex), + excludeWindowCount(cnt), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} + ScreenCaptureParameters(int width, int height, int f, int b, bool cur, bool fcs, view_t* ex, + int cnt) 
+ : dimensions(width, height), + frameRate(f), + bitrate(b), + captureMouseCursor(cur), + windowFocus(fcs), + excludeWindowList(ex), + excludeWindowCount(cnt), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} }; /** @@ -5294,15 +5685,18 @@ struct ScreenCaptureParameters { */ enum AUDIO_RECORDING_QUALITY_TYPE { /** - * 0: Low quality. The sample rate is 32 kHz, and the file size is around 1.2 MB after 10 minutes of recording. + * 0: Low quality. The sample rate is 32 kHz, and the file size is around 1.2 MB after 10 minutes + * of recording. */ AUDIO_RECORDING_QUALITY_LOW = 0, /** - * 1: Medium quality. The sample rate is 32 kHz, and the file size is around 2 MB after 10 minutes of recording. + * 1: Medium quality. The sample rate is 32 kHz, and the file size is around 2 MB after 10 minutes + * of recording. */ AUDIO_RECORDING_QUALITY_MEDIUM = 1, /** - * 2: High quality. The sample rate is 32 kHz, and the file size is around 3.75 MB after 10 minutes of recording. + * 2: High quality. The sample rate is 32 kHz, and the file size is around 3.75 MB after 10 + * minutes of recording. */ AUDIO_RECORDING_QUALITY_HIGH = 2, /** @@ -5334,16 +5728,16 @@ enum AUDIO_FILE_RECORDING_TYPE { */ enum AUDIO_ENCODED_FRAME_OBSERVER_POSITION { /** - * 1: Only records the audio of the local user. - */ + * 1: Only records the audio of the local user. + */ AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD = 1, /** - * 2: Only records the audio of all remote users. - */ + * 2: Only records the audio of all remote users. + */ AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK = 2, /** - * 3: Records the mixed audio of the local and all remote users. - */ + * 3: Records the mixed audio of the local and all remote users. + */ AUDIO_ENCODED_FRAME_OBSERVER_POSITION_MIXED = 3, }; @@ -5352,7 +5746,8 @@ enum AUDIO_ENCODED_FRAME_OBSERVER_POSITION { */ struct AudioRecordingConfiguration { /** - * The absolute path (including the filename extensions) of the recording file. 
For example: `C:\music\audio.mp4`. + * The absolute path (including the filename extensions) of the recording file. For example: + * `C:\music\audio.mp4`. * @note Ensure that the directory for the log files exists and is writable. */ const char* filePath; @@ -5368,8 +5763,9 @@ struct AudioRecordingConfiguration { * - (Default) 32000 * - 44100 * - 48000 - * @note If you set this parameter to 44100 or 48000, Agora recommends recording WAV files, or AAC files with quality - * to be `AUDIO_RECORDING_QUALITY_MEDIUM` or `AUDIO_RECORDING_QUALITY_HIGH` for better recording quality. + * @note If you set this parameter to 44100 or 48000, Agora recommends recording WAV files, or AAC + * files with quality to be `AUDIO_RECORDING_QUALITY_MEDIUM` or `AUDIO_RECORDING_QUALITY_HIGH` for + * better recording quality. */ int sampleRate; /** @@ -5390,131 +5786,142 @@ struct AudioRecordingConfiguration { int recordingChannel; AudioRecordingConfiguration() - : filePath(OPTIONAL_NULLPTR), - encode(false), - sampleRate(32000), - fileRecordingType(AUDIO_FILE_RECORDING_MIXED), - quality(AUDIO_RECORDING_QUALITY_LOW), - recordingChannel(1) {} - - AudioRecordingConfiguration(const char* file_path, int sample_rate, AUDIO_RECORDING_QUALITY_TYPE quality_type, int channel) - : filePath(file_path), - encode(false), - sampleRate(sample_rate), - fileRecordingType(AUDIO_FILE_RECORDING_MIXED), - quality(quality_type), - recordingChannel(channel) {} - - AudioRecordingConfiguration(const char* file_path, bool enc, int sample_rate, AUDIO_FILE_RECORDING_TYPE type, AUDIO_RECORDING_QUALITY_TYPE quality_type, int channel) - : filePath(file_path), - encode(enc), - sampleRate(sample_rate), - fileRecordingType(type), - quality(quality_type), - recordingChannel(channel) {} - - AudioRecordingConfiguration(const AudioRecordingConfiguration &rhs) - : filePath(rhs.filePath), - encode(rhs.encode), - sampleRate(rhs.sampleRate), - fileRecordingType(rhs.fileRecordingType), - quality(rhs.quality), - 
recordingChannel(rhs.recordingChannel) {} + : filePath(OPTIONAL_NULLPTR), + encode(false), + sampleRate(32000), + fileRecordingType(AUDIO_FILE_RECORDING_MIXED), + quality(AUDIO_RECORDING_QUALITY_LOW), + recordingChannel(1) {} + + AudioRecordingConfiguration(const char* file_path, int sample_rate, + AUDIO_RECORDING_QUALITY_TYPE quality_type, int channel) + : filePath(file_path), + encode(false), + sampleRate(sample_rate), + fileRecordingType(AUDIO_FILE_RECORDING_MIXED), + quality(quality_type), + recordingChannel(channel) {} + + AudioRecordingConfiguration(const char* file_path, bool enc, int sample_rate, + AUDIO_FILE_RECORDING_TYPE type, + AUDIO_RECORDING_QUALITY_TYPE quality_type, int channel) + : filePath(file_path), + encode(enc), + sampleRate(sample_rate), + fileRecordingType(type), + quality(quality_type), + recordingChannel(channel) {} + + AudioRecordingConfiguration(const AudioRecordingConfiguration& rhs) + : filePath(rhs.filePath), + encode(rhs.encode), + sampleRate(rhs.sampleRate), + fileRecordingType(rhs.fileRecordingType), + quality(rhs.quality), + recordingChannel(rhs.recordingChannel) {} }; /** * Observer settings for the encoded audio. */ struct AudioEncodedFrameObserverConfig { - /** - * Audio profile. For details, see `AUDIO_ENCODED_FRAME_OBSERVER_POSITION`. - */ - AUDIO_ENCODED_FRAME_OBSERVER_POSITION postionType; - /** - * Audio encoding type. For details, see `AUDIO_ENCODING_TYPE`. - */ - AUDIO_ENCODING_TYPE encodingType; - - AudioEncodedFrameObserverConfig() - : postionType(AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK), - encodingType(AUDIO_ENCODING_TYPE_OPUS_48000_MEDIUM){} + /** + * Audio profile. For details, see `AUDIO_ENCODED_FRAME_OBSERVER_POSITION`. + */ + AUDIO_ENCODED_FRAME_OBSERVER_POSITION postionType; + /** + * Audio encoding type. For details, see `AUDIO_ENCODING_TYPE`. 
+ */ + AUDIO_ENCODING_TYPE encodingType; + AudioEncodedFrameObserverConfig() + : postionType(AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK), + encodingType(AUDIO_ENCODING_TYPE_OPUS_48000_MEDIUM) {} }; /** * The encoded audio observer. */ class IAudioEncodedFrameObserver { -public: -/** -* Gets the encoded audio data of the local user. -* -* After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD`, -* you can get the encoded audio data of the local user from this callback. -* -* @param frameBuffer The pointer to the audio frame buffer. -* @param length The data length (byte) of the audio frame. -* @param audioEncodedFrameInfo Audio information after encoding. For details, see `EncodedAudioFrameInfo`. -*/ -virtual void onRecordAudioEncodedFrame(const uint8_t* frameBuffer, int length, const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; + public: + /** + * Gets the encoded audio data of the local user. + * + * After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as + * `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD`, you can get the encoded audio data of the local + * user from this callback. + * + * @param frameBuffer The pointer to the audio frame buffer. + * @param length The data length (byte) of the audio frame. + * @param audioEncodedFrameInfo Audio information after encoding. For details, see + * `EncodedAudioFrameInfo`. + */ + virtual void onRecordAudioEncodedFrame(const uint8_t* frameBuffer, int length, + const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; -/** -* Gets the encoded audio data of all remote users. -* -* After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK`, -* you can get encoded audio data of all remote users through this callback. -* -* @param frameBuffer The pointer to the audio frame buffer. -* @param length The data length (byte) of the audio frame. 
-* @param audioEncodedFrameInfo Audio information after encoding. For details, see `EncodedAudioFrameInfo`. -*/ -virtual void onPlaybackAudioEncodedFrame(const uint8_t* frameBuffer, int length, const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; + /** + * Gets the encoded audio data of all remote users. + * + * After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as + * `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK`, you can get encoded audio data of all remote + * users through this callback. + * + * @param frameBuffer The pointer to the audio frame buffer. + * @param length The data length (byte) of the audio frame. + * @param audioEncodedFrameInfo Audio information after encoding. For details, see + * `EncodedAudioFrameInfo`. + */ + virtual void onPlaybackAudioEncodedFrame(const uint8_t* frameBuffer, int length, + const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; -/** -* Gets the mixed and encoded audio data of the local and all remote users. -* -* After calling `registerAudioEncodedFrameObserver` and setting the audio profile as `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_MIXED`, -* you can get the mixed and encoded audio data of the local and all remote users through this callback. -* -* @param frameBuffer The pointer to the audio frame buffer. -* @param length The data length (byte) of the audio frame. -* @param audioEncodedFrameInfo Audio information after encoding. For details, see `EncodedAudioFrameInfo`. -*/ -virtual void onMixedAudioEncodedFrame(const uint8_t* frameBuffer, int length, const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; + /** + * Gets the mixed and encoded audio data of the local and all remote users. + * + * After calling `registerAudioEncodedFrameObserver` and setting the audio profile as + * `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_MIXED`, you can get the mixed and encoded audio data of + * the local and all remote users through this callback. 
+ * + * @param frameBuffer The pointer to the audio frame buffer. + * @param length The data length (byte) of the audio frame. + * @param audioEncodedFrameInfo Audio information after encoding. For details, see + * `EncodedAudioFrameInfo`. + */ + virtual void onMixedAudioEncodedFrame(const uint8_t* frameBuffer, int length, + const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; -virtual ~IAudioEncodedFrameObserver () {} + virtual ~IAudioEncodedFrameObserver() {} }; /** The region for connection, which is the region where the server the SDK connects to is located. */ enum AREA_CODE { - /** - * Mainland China. - */ - AREA_CODE_CN = 0x00000001, - /** - * North America. - */ - AREA_CODE_NA = 0x00000002, - /** - * Europe. - */ - AREA_CODE_EU = 0x00000004, - /** - * Asia, excluding Mainland China. - */ - AREA_CODE_AS = 0x00000008, - /** - * Japan. - */ - AREA_CODE_JP = 0x00000010, - /** - * India. - */ - AREA_CODE_IN = 0x00000020, - /** - * (Default) Global. - */ - AREA_CODE_GLOB = (0xFFFFFFFF) + /** + * Mainland China. + */ + AREA_CODE_CN = 0x00000001, + /** + * North America. + */ + AREA_CODE_NA = 0x00000002, + /** + * Europe. + */ + AREA_CODE_EU = 0x00000004, + /** + * Asia, excluding Mainland China. + */ + AREA_CODE_AS = 0x00000008, + /** + * Japan. + */ + AREA_CODE_JP = 0x00000010, + /** + * India. + */ + AREA_CODE_IN = 0x00000020, + /** + * (Default) Global. + */ + AREA_CODE_GLOB = (0xFFFFFFFF) }; /** @@ -5568,8 +5975,9 @@ enum CHANNEL_MEDIA_RELAY_ERROR { RELAY_ERROR_SERVER_ERROR_RESPONSE = 1, /** 2: No server response. You can call the `leaveChannel` method to leave the channel. * - * This error can also occur if your project has not enabled co-host token authentication. You can contact technical - * support to enable the service for cohosting across channels before starting a channel media relay. + * This error can also occur if your project has not enabled co-host token authentication. 
You can + * contact technical support to enable the service for cohosting across channels before starting a + * channel media relay. */ RELAY_ERROR_SERVER_NO_RESPONSE = 2, /** 3: The SDK fails to access the service, probably due to limited resources of the server. @@ -5587,8 +5995,8 @@ enum CHANNEL_MEDIA_RELAY_ERROR { /** 7: The server fails to send the media stream. */ RELAY_ERROR_FAILED_PACKET_SENT_TO_DEST = 7, - /** 8: The SDK disconnects from the server due to poor network connections. You can call the `leaveChannel` method to - * leave the channel. + /** 8: The SDK disconnects from the server due to poor network connections. You can call the + * `leaveChannel` method to leave the channel. */ RELAY_ERROR_SERVER_CONNECTION_LOST = 8, /** 9: An internal error occurs in the server. @@ -5606,8 +6014,8 @@ enum CHANNEL_MEDIA_RELAY_ERROR { * The state code of the channel media relay. */ enum CHANNEL_MEDIA_RELAY_STATE { - /** 0: The initial state. After you successfully stop the channel media relay by calling `stopChannelMediaRelay`, - * the `onChannelMediaRelayStateChanged` callback returns this state. + /** 0: The initial state. After you successfully stop the channel media relay by calling + * `stopChannelMediaRelay`, the `onChannelMediaRelayStateChanged` callback returns this state. */ RELAY_STATE_IDLE = 0, /** 1: The SDK tries to relay the media stream to the destination channel. @@ -5625,15 +6033,15 @@ enum CHANNEL_MEDIA_RELAY_STATE { */ struct ChannelMediaInfo { /** The user ID. - */ + */ uid_t uid; /** The channel name. The default value is NULL, which means that the SDK - * applies the current channel name. - */ + * applies the current channel name. + */ const char* channelName; /** The token that enables the user to join the channel. The default value - * is NULL, which means that the SDK applies the current token. - */ + * is NULL, which means that the SDK applies the current token. 
+ */ const char* token; ChannelMediaInfo() : uid(0), channelName(NULL), token(NULL) {} @@ -5644,31 +6052,32 @@ struct ChannelMediaInfo { */ struct ChannelMediaRelayConfiguration { /** The information of the source channel `ChannelMediaInfo`. It contains the following members: - * - `channelName`: The name of the source channel. The default value is `NULL`, which means the SDK applies the name - * of the current channel. - * - `uid`: The unique ID to identify the relay stream in the source channel. The default value is 0, which means the - * SDK generates a random UID. You must set it as 0. - * - `token`: The token for joining the source channel. It is generated with the `channelName` and `uid` you set in - * `srcInfo`. - * - If you have not enabled the App Certificate, set this parameter as the default value `NULL`, which means the - * SDK applies the App ID. - * - If you have enabled the App Certificate, you must use the token generated with the `channelName` and `uid`, and - * the `uid` must be set as 0. + * - `channelName`: The name of the source channel. The default value is `NULL`, which means the + * SDK applies the name of the current channel. + * - `uid`: The unique ID to identify the relay stream in the source channel. The default value is + * 0, which means the SDK generates a random UID. You must set it as 0. + * - `token`: The token for joining the source channel. It is generated with the `channelName` and + * `uid` you set in `srcInfo`. + * - If you have not enabled the App Certificate, set this parameter as the default value + * `NULL`, which means the SDK applies the App ID. + * - If you have enabled the App Certificate, you must use the token generated with the + * `channelName` and `uid`, and the `uid` must be set as 0. */ ChannelMediaInfo* srcInfo; - /** The information of the destination channel `ChannelMediaInfo`. It contains the following members: + /** The information of the destination channel `ChannelMediaInfo`. 
It contains the following + * members: * - `channelName`: The name of the destination channel. * - `uid`: The unique ID to identify the relay stream in the destination channel. The value * ranges from 0 to (2^32-1). To avoid UID conflicts, this `UID` must be different from any * other `UID` in the destination channel. The default value is 0, which means the SDK generates * a random `UID`. Do not set this parameter as the `UID` of the host in the destination channel, * and ensure that this `UID` is different from any other `UID` in the channel. - * - `token`: The token for joining the destination channel. It is generated with the `channelName` - * and `uid` you set in `destInfos`. + * - `token`: The token for joining the destination channel. It is generated with the + * `channelName` and `uid` you set in `destInfos`. * - If you have not enabled the App Certificate, set this parameter as the default value NULL, * which means the SDK applies the App ID. - * If you have enabled the App Certificate, you must use the token generated with the `channelName` - * and `uid`. + * If you have enabled the App Certificate, you must use the token generated with the + * `channelName` and `uid`. */ ChannelMediaInfo* destInfos; /** The number of destination channels. 
The default value is 0, and the value range is from 0 to @@ -5677,7 +6086,8 @@ struct ChannelMediaRelayConfiguration { */ int destCount; - ChannelMediaRelayConfiguration() : srcInfo(OPTIONAL_NULLPTR), destInfos(OPTIONAL_NULLPTR), destCount(0) {} + ChannelMediaRelayConfiguration() + : srcInfo(OPTIONAL_NULLPTR), destInfos(OPTIONAL_NULLPTR), destCount(0) {} }; /** @@ -5722,11 +6132,11 @@ struct DownlinkNetworkInfo { expected_bitrate_bps(-1) {} PeerDownlinkInfo(const PeerDownlinkInfo& rhs) - : stream_type(rhs.stream_type), + : stream_type(rhs.stream_type), current_downscale_level(rhs.current_downscale_level), expected_bitrate_bps(rhs.expected_bitrate_bps) { if (rhs.userId != OPTIONAL_NULLPTR) { - const int len = std::strlen(rhs.userId); + const size_t len = std::strlen(rhs.userId); char* buf = new char[len + 1]; std::memcpy(buf, rhs.userId, len); buf[len] = '\0'; @@ -5741,7 +6151,7 @@ struct DownlinkNetworkInfo { current_downscale_level = rhs.current_downscale_level; expected_bitrate_bps = rhs.expected_bitrate_bps; if (rhs.userId != OPTIONAL_NULLPTR) { - const int len = std::strlen(rhs.userId); + const size_t len = std::strlen(rhs.userId); char* buf = new char[len + 1]; std::memcpy(buf, rhs.userId, len); buf[len] = '\0'; @@ -5775,18 +6185,18 @@ struct DownlinkNetworkInfo { int total_received_video_count; DownlinkNetworkInfo() - : lastmile_buffer_delay_time_ms(-1), - bandwidth_estimation_bps(-1), - total_downscale_level_count(-1), - peer_downlink_info(OPTIONAL_NULLPTR), - total_received_video_count(-1) {} + : lastmile_buffer_delay_time_ms(-1), + bandwidth_estimation_bps(-1), + total_downscale_level_count(-1), + peer_downlink_info(OPTIONAL_NULLPTR), + total_received_video_count(-1) {} DownlinkNetworkInfo(const DownlinkNetworkInfo& info) - : lastmile_buffer_delay_time_ms(info.lastmile_buffer_delay_time_ms), - bandwidth_estimation_bps(info.bandwidth_estimation_bps), - total_downscale_level_count(info.total_downscale_level_count), - peer_downlink_info(OPTIONAL_NULLPTR), - 
total_received_video_count(info.total_received_video_count) { + : lastmile_buffer_delay_time_ms(info.lastmile_buffer_delay_time_ms), + bandwidth_estimation_bps(info.bandwidth_estimation_bps), + total_downscale_level_count(info.total_downscale_level_count), + peer_downlink_info(OPTIONAL_NULLPTR), + total_received_video_count(info.total_received_video_count) { if (total_received_video_count <= 0) return; peer_downlink_info = new PeerDownlinkInfo[total_received_video_count]; for (int i = 0; i < total_received_video_count; ++i) @@ -5840,7 +6250,8 @@ enum ENCRYPTION_MODE { * salt (`encryptionKdfSalt`). */ AES_128_GCM2 = 7, - /** 8: 256-bit AES encryption, GCM mode. This encryption mode requires the setting of salt (`encryptionKdfSalt`). + /** 8: 256-bit AES encryption, GCM mode. This encryption mode requires the setting of salt + * (`encryptionKdfSalt`). */ AES_256_GCM2 = 8, /** Enumerator boundary. @@ -5858,30 +6269,31 @@ struct EncryptionConfig { /** * Encryption key in string type with unlimited length. Agora recommends using a 32-byte key. * - * @note If you do not set an encryption key or set it as NULL, you cannot use the built-in encryption, and the SDK returns #ERR_INVALID_ARGUMENT (-2). + * @note If you do not set an encryption key or set it as NULL, you cannot use the built-in + * encryption, and the SDK returns #ERR_INVALID_ARGUMENT (-2). */ const char* encryptionKey; /** - * Salt, 32 bytes in length. Agora recommends that you use OpenSSL to generate salt on the server side. + * Salt, 32 bytes in length. Agora recommends that you use OpenSSL to generate salt on the server + * side. * * @note This parameter takes effect only in `AES_128_GCM2` or `AES_256_GCM2` encrypted mode. * In this case, ensure that this parameter is not 0. 
*/ uint8_t encryptionKdfSalt[32]; - + bool datastreamEncryptionEnabled; EncryptionConfig() - : encryptionMode(AES_128_GCM2), - encryptionKey(OPTIONAL_NULLPTR), - datastreamEncryptionEnabled(false) - { + : encryptionMode(AES_128_GCM2), + encryptionKey(OPTIONAL_NULLPTR), + datastreamEncryptionEnabled(false) { memset(encryptionKdfSalt, 0, sizeof(encryptionKdfSalt)); } /// @cond const char* getEncryptionString() const { - switch(encryptionMode) { + switch (encryptionMode) { case AES_128_XTS: return "aes-128-xts"; case AES_128_ECB: @@ -5909,30 +6321,31 @@ struct EncryptionConfig { /** Encryption error type. */ enum ENCRYPTION_ERROR_TYPE { - /** - * 0: Internal reason. - */ - ENCRYPTION_ERROR_INTERNAL_FAILURE = 0, - /** - * 1: MediaStream decryption errors. Ensure that the receiver and the sender use the same encryption mode and key. - */ - ENCRYPTION_ERROR_DECRYPTION_FAILURE = 1, - /** - * 2: MediaStream encryption errors. - */ - ENCRYPTION_ERROR_ENCRYPTION_FAILURE = 2, - /** - * 3: DataStream decryption errors. Ensure that the receiver and the sender use the same encryption mode and key. - */ - ENCRYPTION_ERROR_DATASTREAM_DECRYPTION_FAILURE = 3, - /** - * 4: DataStream encryption errors. - */ - ENCRYPTION_ERROR_DATASTREAM_ENCRYPTION_FAILURE = 4, + /** + * 0: Internal reason. + */ + ENCRYPTION_ERROR_INTERNAL_FAILURE = 0, + /** + * 1: MediaStream decryption errors. Ensure that the receiver and the sender use the same + * encryption mode and key. + */ + ENCRYPTION_ERROR_DECRYPTION_FAILURE = 1, + /** + * 2: MediaStream encryption errors. + */ + ENCRYPTION_ERROR_ENCRYPTION_FAILURE = 2, + /** + * 3: DataStream decryption errors. Ensure that the receiver and the sender use the same + * encryption mode and key. + */ + ENCRYPTION_ERROR_DATASTREAM_DECRYPTION_FAILURE = 3, + /** + * 4: DataStream encryption errors. 
+ */ + ENCRYPTION_ERROR_DATASTREAM_ENCRYPTION_FAILURE = 4, }; -enum UPLOAD_ERROR_REASON -{ +enum UPLOAD_ERROR_REASON { UPLOAD_SUCCESS = 0, UPLOAD_NET_ERROR = 1, UPLOAD_SERVER_ERROR = 2, @@ -5967,7 +6380,8 @@ enum STREAM_SUBSCRIBE_STATE { * - Calls `muteLocalAudioStream(true)` or `muteLocalVideoStream(true)` to stop sending local * media stream. * - Calls `disableAudio` or `disableVideo `to disable the local audio or video module. - * - Calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the local audio or video capture. + * - Calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the local audio or + * video capture. * - The role of the remote user is audience. * - The local user calls the following methods to stop receiving remote streams: * - Calls `muteRemoteAudioStream(true)`, `muteAllRemoteAudioStreams(true)` to stop receiving the remote audio streams. @@ -5994,9 +6408,12 @@ enum STREAM_PUBLISH_STATE { PUB_STATE_IDLE = 0, /** * 1: Fails to publish the local stream. Possible reasons: - * - The local user calls `muteLocalAudioStream(true)` or `muteLocalVideoStream(true)` to stop sending the local media stream. - * - The local user calls `disableAudio` or `disableVideo` to disable the local audio or video module. - * - The local user calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the local audio or video capture. + * - The local user calls `muteLocalAudioStream(true)` or `muteLocalVideoStream(true)` to stop + * sending the local media stream. + * - The local user calls `disableAudio` or `disableVideo` to disable the local audio or video + * module. + * - The local user calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the + * local audio or video capture. * - The role of the local user is audience. 
*/ PUB_STATE_NO_PUBLISHED = 1, @@ -6022,10 +6439,15 @@ struct EchoTestConfiguration { int intervalInSeconds; EchoTestConfiguration(view_t v, bool ea, bool ev, const char* t, const char* c, const int is) - : view(v), enableAudio(ea), enableVideo(ev), token(t), channelId(c), intervalInSeconds(is) {} + : view(v), enableAudio(ea), enableVideo(ev), token(t), channelId(c), intervalInSeconds(is) {} EchoTestConfiguration() - : view(OPTIONAL_NULLPTR), enableAudio(true), enableVideo(true), token(OPTIONAL_NULLPTR), channelId(OPTIONAL_NULLPTR), intervalInSeconds(2) {} + : view(OPTIONAL_NULLPTR), + enableAudio(true), + enableVideo(true), + token(OPTIONAL_NULLPTR), + channelId(OPTIONAL_NULLPTR), + intervalInSeconds(2) {} }; /** @@ -6041,9 +6463,7 @@ struct UserInfo { */ char userAccount[MAX_USER_ACCOUNT_LENGTH]; - UserInfo() : uid(0) { - userAccount[0] = '\0'; - } + UserInfo() : uid(0) { userAccount[0] = '\0'; } }; /** @@ -6053,21 +6473,22 @@ enum EAR_MONITORING_FILTER_TYPE { /** * 1: Do not add an audio filter to the in-ear monitor. */ - EAR_MONITORING_FILTER_NONE = (1<<0), + EAR_MONITORING_FILTER_NONE = (1 << 0), /** * 2: Enable audio filters to the in-ear monitor. If you implement functions such as voice * beautifier and audio effect, users can hear the voice after adding these effects. */ - EAR_MONITORING_FILTER_BUILT_IN_AUDIO_FILTERS = (1<<1), + EAR_MONITORING_FILTER_BUILT_IN_AUDIO_FILTERS = (1 << 1), /** * 4: Enable noise suppression to the in-ear monitor. */ - EAR_MONITORING_FILTER_NOISE_SUPPRESSION = (1<<2), + EAR_MONITORING_FILTER_NOISE_SUPPRESSION = (1 << 2), /** * 32768: Enable audio filters by reuse post-processing filter to the in-ear monitor. - * This bit is intended to be used in exclusive mode, which means, if this bit is set, all other bits will be disregarded. + * This bit is intended to be used in exclusive mode, which means, if this bit is set, all other + * bits will be disregarded. 
*/ - EAR_MONITORING_FILTER_REUSE_POST_PROCESSING_FILTER = (1<<15), + EAR_MONITORING_FILTER_REUSE_POST_PROCESSING_FILTER = (1 << 15), }; /** @@ -6139,7 +6560,7 @@ struct ScreenVideoParameters { * profiles](https://docs.agora.io/en/Interactive%20Broadcast/game_streaming_video_profile?platform=Android#recommended-video-profiles). */ int frameRate = 15; - /** + /** * The video encoding bitrate (Kbps). For recommended values, see [Recommended video * profiles](https://docs.agora.io/en/Interactive%20Broadcast/game_streaming_video_profile?platform=Android#recommended-video-profiles). */ @@ -6230,7 +6651,7 @@ struct VideoRenderingTracingInfo { int elapsedTime; /** * Elapsed time from the start tracing time to the time when join channel. - * + * * **Note** * If the start tracing time is behind the time when join channel, this value will be negative. */ @@ -6241,7 +6662,7 @@ struct VideoRenderingTracingInfo { int join2JoinSuccess; /** * Elapsed time from finishing joining channel to remote user joined. - * + * * **Note** * If the start tracing time is after the time finishing join channel, this value will be * the elapsed time from the start tracing time to remote user joined. The minimum value is 0. @@ -6249,7 +6670,7 @@ struct VideoRenderingTracingInfo { int joinSuccess2RemoteJoined; /** * Elapsed time from remote user joined to set the view. - * + * * **Note** * If the start tracing time is after the time when remote user joined, this value will be * the elapsed time from the start tracing time to set the view. The minimum value is 0. @@ -6257,7 +6678,7 @@ struct VideoRenderingTracingInfo { int remoteJoined2SetView; /** * Elapsed time from remote user joined to the time subscribing remote video stream. - * + * * **Note** * If the start tracing time is after the time when remote user joined, this value will be * the elapsed time from the start tracing time to the time subscribing remote video stream. 
@@ -6266,7 +6687,7 @@ struct VideoRenderingTracingInfo { int remoteJoined2UnmuteVideo; /** * Elapsed time from remote user joined to the remote video packet received. - * + * * **Note** * If the start tracing time is after the time when remote user joined, this value will be * the elapsed time from the start tracing time to the time subscribing remote video stream. @@ -6286,7 +6707,6 @@ enum CONFIG_FETCH_TYPE { CONFIG_FETCH_TYPE_JOIN_CHANNEL = 2, }; - /** The local proxy mode type. */ enum LOCAL_PROXY_MODE { /** 0: Connect local proxy with high priority, if not connected to local proxy, fallback to sdrtn. @@ -6315,7 +6735,8 @@ struct LogUploadServerInfo { LogUploadServerInfo() : serverDomain(NULL), serverPath(NULL), serverPort(0), serverHttps(true) {} - LogUploadServerInfo(const char* domain, const char* path, int port, bool https) : serverDomain(domain), serverPath(path), serverPort(port), serverHttps(https) {} + LogUploadServerInfo(const char* domain, const char* path, int port, bool https) + : serverDomain(domain), serverPath(path), serverPort(port), serverHttps(https) {} }; struct AdvancedConfigInfo { @@ -6337,8 +6758,9 @@ struct LocalAccessPointConfiguration { /** The number of local access point domain. */ int domainListSize; - /** Certificate domain name installed on specific local access point. pass "" means using sni domain on specific local access point - * SNI(Server Name Indication) is an extension to the TLS protocol. + /** Certificate domain name installed on specific local access point. pass "" means using sni + * domain on specific local access point SNI(Server Name Indication) is an extension to the TLS + * protocol. */ const char* verifyDomainName; /** Local proxy connection mode, connectivity first or local only. 
@@ -6353,23 +6775,42 @@ struct LocalAccessPointConfiguration { - false: not disable vos-aut */ bool disableAut; - LocalAccessPointConfiguration() : ipList(NULL), ipListSize(0), domainList(NULL), domainListSize(0), verifyDomainName(NULL), mode(ConnectivityFirst), disableAut(true) {} + LocalAccessPointConfiguration() + : ipList(NULL), + ipListSize(0), + domainList(NULL), + domainListSize(0), + verifyDomainName(NULL), + mode(ConnectivityFirst), + disableAut(true) {} +}; + +enum RecorderStreamType { + RTC, + PREVIEW, }; /** * The information about recorded media streams. */ struct RecorderStreamInfo { - const char* channelId; - /** - * The user ID. - */ - uid_t uid; - /** - * The channel ID of the audio/video stream needs to be recorded. - */ - RecorderStreamInfo() : channelId(NULL), uid(0) {} - RecorderStreamInfo(const char* channelId, uid_t uid) : channelId(channelId), uid(uid) {} + /** + * The channel ID of the audio/video stream needs to be recorded. + */ + const char* channelId; + /** + * The user ID. + */ + uid_t uid; + /** + * The Recoder Stream type. 
+ */ + RecorderStreamType type; + RecorderStreamInfo() : channelId(NULL), uid(0), type(RTC) {} + RecorderStreamInfo(const char* channelId, uid_t uid) + : channelId(channelId), uid(uid), type(RTC) {} + RecorderStreamInfo(const char* channelId, uid_t uid, RecorderStreamType) + : channelId(channelId), uid(uid), type(RTC) {} }; } // namespace rtc @@ -6396,12 +6837,12 @@ class AParameter : public agora::util::AutoPtr { }; class LicenseCallback { - public: - virtual ~LicenseCallback() {} - virtual void onCertificateRequired() = 0; - virtual void onLicenseRequest() = 0; - virtual void onLicenseValidated() = 0; - virtual void onLicenseError(int result) = 0; + public: + virtual ~LicenseCallback() {} + virtual void onCertificateRequired() = 0; + virtual void onLicenseRequest() = 0; + virtual void onLicenseValidated() = 0; + virtual void onLicenseError(int result) = 0; }; } // namespace base @@ -6445,44 +6886,51 @@ struct SpatialAudioParams { }; /** * Layout info of video stream which compose a transcoder video stream. -*/ -struct VideoLayout -{ + */ +struct VideoLayout { /** * Channel Id from which this video stream come from. - */ + */ const char* channelId; /** * User id of video stream. - */ + */ rtc::uid_t uid; /** * User account of video stream. - */ + */ user_id_t strUid; /** * x coordinate of video stream on a transcoded video stream canvas. - */ + */ uint32_t x; /** * y coordinate of video stream on a transcoded video stream canvas. - */ + */ uint32_t y; /** * width of video stream on a transcoded video stream canvas. - */ + */ uint32_t width; /** * height of video stream on a transcoded video stream canvas. - */ + */ uint32_t height; /** * video state of video stream on a transcoded video stream canvas. * 0 for normal video , 1 for placeholder image showed , 2 for black image. 
- */ - uint32_t videoState; + */ + uint32_t videoState; - VideoLayout() : channelId(OPTIONAL_NULLPTR), uid(0), strUid(OPTIONAL_NULLPTR), x(0), y(0), width(0), height(0), videoState(0) {} + VideoLayout() + : channelId(OPTIONAL_NULLPTR), + uid(0), + strUid(OPTIONAL_NULLPTR), + x(0), + y(0), + width(0), + height(0), + videoState(0) {} }; } // namespace agora @@ -6509,7 +6957,7 @@ AGORA_API int AGORA_CALL setAgoraSdkExternalSymbolLoader(void* (*func)(const cha * @note For license only, everytime will generate a different credential. * So, just need to call once for a device, and then save the credential */ -AGORA_API int AGORA_CALL createAgoraCredential(agora::util::AString &credential); +AGORA_API int AGORA_CALL createAgoraCredential(agora::util::AString& credential); /** * Verify given certificate and return the result @@ -6524,8 +6972,10 @@ AGORA_API int AGORA_CALL createAgoraCredential(agora::util::AString &credential) * @return The description of the error code. * @note For license only. */ -AGORA_API int AGORA_CALL getAgoraCertificateVerifyResult(const char *credential_buf, int credential_len, - const char *certificate_buf, int certificate_len); +AGORA_API int AGORA_CALL getAgoraCertificateVerifyResult(const char* credential_buf, + int credential_len, + const char* certificate_buf, + int certificate_len); /** * @brief Implement the agora::base::LicenseCallback, @@ -6534,7 +6984,7 @@ AGORA_API int AGORA_CALL getAgoraCertificateVerifyResult(const char *credential_ * @param [in] callback The object of agora::LiceseCallback, * set the callback to null before delete it. 
*/ -AGORA_API void setAgoraLicenseCallback(agora::base::LicenseCallback *callback); +AGORA_API void setAgoraLicenseCallback(agora::base::LicenseCallback* callback); /** * @brief Get the LicenseCallback pointer if already setup, @@ -6550,18 +7000,15 @@ AGORA_API agora::base::LicenseCallback* getAgoraLicenseCallback(); * typical scenario is as follows: * * ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - * | // custom audio/video base capture time, e.g. the first audio/video capture time. | - * | int64_t custom_capture_time_base; | - * | | - * | int64_t agora_monotonic_time = getAgoraCurrentMonotonicTimeInMs(); | - * | | - * | // offset is fixed once calculated in the begining. | - * | const int64_t offset = agora_monotonic_time - custom_capture_time_base; | - * | | - * | // realtime_custom_audio/video_capture_time is the origin capture time that customer provided.| - * | // actual_audio/video_capture_time is the actual capture time transfered to sdk. | - * | int64_t actual_audio_capture_time = realtime_custom_audio_capture_time + offset; | - * | int64_t actual_video_capture_time = realtime_custom_video_capture_time + offset; | + * | // custom audio/video base capture time, e.g. the first audio/video capture time. | | int64_t + * custom_capture_time_base; | | | | + * int64_t agora_monotonic_time = getAgoraCurrentMonotonicTimeInMs(); | + * | | | // offset is fixed once calculated in the begining. | | const int64_t offset = + * agora_monotonic_time - custom_capture_time_base; | | | | // + * realtime_custom_audio/video_capture_time is the origin capture time that customer provided.| | // + * actual_audio/video_capture_time is the actual capture time transfered to sdk. 
| | + * int64_t actual_audio_capture_time = realtime_custom_audio_capture_time + offset; | + * | int64_t actual_video_capture_time = realtime_custom_video_capture_time + offset; | * ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ * * @return diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/AgoraMediaBase.h b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/AgoraMediaBase.h index 26b2eb2e..638fd7da 100644 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/AgoraMediaBase.h +++ b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/AgoraMediaBase.h @@ -63,8 +63,8 @@ struct ExtensionContext { /** -* Video source types definition. -**/ + * Video source types definition. + **/ enum VIDEO_SOURCE_TYPE { /** Video captured by the camera. */ @@ -115,17 +115,45 @@ enum VIDEO_SOURCE_TYPE { */ VIDEO_SOURCE_SCREEN_FOURTH = 14, /** Video for voice drive. - */ - VIDEO_SOURCE_SPEECH_DRIVEN = 15, + */ + VIDEO_SOURCE_SPEECH_DRIVEN = 15, VIDEO_SOURCE_UNKNOWN = 100 }; +/** +* Audio source types definition. +**/ +enum AUDIO_SOURCE_TYPE { + /** Audio captured by the mic. + */ + AUDIO_SOURCE_MICROPHONE = 0, + /** Not define. + */ + AUDIO_SOURCE_CUSTOM = 1, + /** Audio for media player sharing. + */ + AUDIO_SOURCE_MEDIA_PLAYER = 2, + /** Audio for screen audio. + */ + AUDIO_SOURCE_LOOPBACK_RECORDING = 3, + /** Audio captured by mixed source. + */ + AUDIO_SOURCE_MIXED_STREAM = 4, + /** Remote audio received from network. + */ + AUDIO_SOURCE_REMOTE_USER = 5, + /** Remote audio received from network by channel. + */ + AUDIO_SOURCE_REMOTE_CHANNEL = 6, + + AUDIO_SOURCE_UNKNOWN = 100 +}; + /** * Audio routes. */ -enum AudioRoute -{ +enum AudioRoute { /** * -1: The default audio route. 
*/ @@ -191,23 +219,21 @@ struct AudioParameters { size_t channels; size_t frames_per_buffer; - AudioParameters() - : sample_rate(0), - channels(0), - frames_per_buffer(0) {} + AudioParameters() : sample_rate(0), channels(0), frames_per_buffer(0) {} }; /** * The use mode of the audio data. */ enum RAW_AUDIO_FRAME_OP_MODE_TYPE { - /** 0: Read-only mode: Users only read the data from `AudioFrame` without modifying anything. + /** 0: Read-only mode: Users only read the data from `AudioFrame` without modifying anything. * For example, when users acquire the data with the Agora SDK, then start the media push. */ RAW_AUDIO_FRAME_OP_MODE_READ_ONLY = 0, - /** 2: Read and write mode: Users read the data from `AudioFrame`, modify it, and then play it. - * For example, when users have their own audio-effect processing module and perform some voice pre-processing, such as a voice change. + /** 2: Read and write mode: Users read the data from `AudioFrame`, modify it, and then play it. + * For example, when users have their own audio-effect processing module and perform some voice + * pre-processing, such as a voice change. */ RAW_AUDIO_FRAME_OP_MODE_READ_WRITE = 2, }; @@ -215,7 +241,7 @@ enum RAW_AUDIO_FRAME_OP_MODE_TYPE { } // namespace rtc namespace media { - /** +/** * The type of media device. 
*/ enum MEDIA_SOURCE_TYPE { @@ -290,23 +316,23 @@ enum CONTENT_INSPECT_RESULT { }; enum CONTENT_INSPECT_TYPE { -/** - * (Default) content inspect type invalid - */ -CONTENT_INSPECT_INVALID = 0, -/** - * @deprecated - * Content inspect type moderation - */ -CONTENT_INSPECT_MODERATION __deprecated = 1, -/** - * Content inspect type supervise - */ -CONTENT_INSPECT_SUPERVISION = 2, -/** - * Content inspect type image moderation - */ -CONTENT_INSPECT_IMAGE_MODERATION = 3 + /** + * (Default) content inspect type invalid + */ + CONTENT_INSPECT_INVALID = 0, + /** + * @deprecated + * Content inspect type moderation + */ + CONTENT_INSPECT_MODERATION __deprecated = 1, + /** + * Content inspect type supervise + */ + CONTENT_INSPECT_SUPERVISION = 2, + /** + * Content inspect type image moderation + */ + CONTENT_INSPECT_IMAGE_MODERATION = 3 }; struct ContentInspectModule { @@ -338,15 +364,14 @@ struct ContentInspectConfig { /**The content inspect module count. */ int moduleCount; - ContentInspectConfig& operator=(const ContentInspectConfig& rth) - { - extraInfo = rth.extraInfo; - serverConfig = rth.serverConfig; - moduleCount = rth.moduleCount; - memcpy(&modules, &rth.modules, MAX_CONTENT_INSPECT_MODULE_COUNT * sizeof(ContentInspectModule)); - return *this; - } - ContentInspectConfig() :extraInfo(NULL), serverConfig(NULL), moduleCount(0){} + ContentInspectConfig& operator=(const ContentInspectConfig& rth) { + extraInfo = rth.extraInfo; + serverConfig = rth.serverConfig; + moduleCount = rth.moduleCount; + memcpy(&modules, &rth.modules, MAX_CONTENT_INSPECT_MODULE_COUNT * sizeof(ContentInspectModule)); + return *this; + } + ContentInspectConfig() : extraInfo(NULL), serverConfig(NULL), moduleCount(0) {} }; namespace base { @@ -368,9 +393,7 @@ struct PacketOptions { uint32_t timestamp; // Audio level indication. 
uint8_t audioLevelIndication; - PacketOptions() - : timestamp(0), - audioLevelIndication(127) {} + PacketOptions() : timestamp(0), audioLevelIndication(127) {} }; /** @@ -386,9 +409,7 @@ struct AudioEncodedFrameInfo { * The codec of the packet. */ uint8_t codec; - AudioEncodedFrameInfo() - : sendTs(0), - codec(0) {} + AudioEncodedFrameInfo() : sendTs(0), codec(0) {} }; /** @@ -398,17 +419,18 @@ struct AudioPcmFrame { /** * The buffer size of the PCM audio frame. */ - OPTIONAL_ENUM_SIZE_T { - // Stereo, 32 kHz, 60 ms (2 * 32 * 60) - /** - * The max number of the samples of the data. - * - * When the number of audio channel is two, the sample rate is 32 kHZ, - * the buffer length of the data is 60 ms, the number of the samples of the data is 3840 (2 x 32 x 60). - */ - kMaxDataSizeSamples = 3840, - /** The max number of the bytes of the data. */ - kMaxDataSizeBytes = kMaxDataSizeSamples * sizeof(int16_t), + OPTIONAL_ENUM_SIZE_T{ + // Stereo, 32 kHz, 60 ms (2 * 32 * 60) + /** + * The max number of the samples of the data. + * + * When the number of audio channel is two, the sample rate is 32 kHZ, + * the buffer length of the data is 60 ms, the number of the samples of the data is 3840 (2 x + * 32 x 60). + */ + kMaxDataSizeSamples = 3840, + /** The max number of the bytes of the data. */ + kMaxDataSizeBytes = kMaxDataSizeSamples * sizeof(int16_t), }; /** The timestamp (ms) of the audio frame. @@ -553,7 +575,8 @@ enum VIDEO_PIXEL_FORMAT { */ VIDEO_PIXEL_I422 = 16, /** - * 17: ID3D11Texture2D, only support DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8A8_TYPELESS, DXGI_FORMAT_NV12 texture format + * 17: ID3D11Texture2D, only support DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8A8_TYPELESS, + * DXGI_FORMAT_NV12 texture format */ VIDEO_TEXTURE_ID3D11TEXTURE2D = 17, /** @@ -608,12 +631,12 @@ enum CAMERA_VIDEO_SOURCE_TYPE { * This interface provides access to metadata information. 
*/ class IVideoFrameMetaInfo { - public: - enum META_INFO_KEY { - KEY_FACE_CAPTURE = 0, - }; - virtual ~IVideoFrameMetaInfo() {}; - virtual const char* getMetaInfoStr(META_INFO_KEY key) const = 0; + public: + enum META_INFO_KEY { + KEY_FACE_CAPTURE = 0, + }; + virtual ~IVideoFrameMetaInfo(){}; + virtual const char* getMetaInfoStr(META_INFO_KEY key) const = 0; }; struct ColorSpace { @@ -829,7 +852,7 @@ struct ExternalVideoFrame { d3d11Texture2d(NULL), textureSliceIndex(0){} - /** + /** * The EGL context type. */ enum EGL_CONTEXT_TYPE { @@ -869,6 +892,7 @@ struct ExternalVideoFrame { * The pixel format: #VIDEO_PIXEL_FORMAT */ VIDEO_PIXEL_FORMAT format; + /** * The video buffer. */ @@ -903,30 +927,32 @@ struct ExternalVideoFrame { */ int cropBottom; /** - * [Raw data related parameter] The clockwise rotation information of the video frame. You can set the - * rotation angle as 0, 90, 180, or 270. The default value is 0. + * [Raw data related parameter] The clockwise rotation information of the video frame. You can set + * the rotation angle as 0, 90, 180, or 270. The default value is 0. */ int rotation; /** - * The timestamp (ms) of the incoming video frame. An incorrect timestamp results in a frame loss or - * unsynchronized audio and video. - * + * The timestamp (ms) of the incoming video frame. An incorrect timestamp results in a frame loss + * or unsynchronized audio and video. + * * Please refer to getAgoraCurrentMonotonicTimeInMs or getCurrentMonotonicTimeInMs * to determine how to fill this filed. */ long long timestamp; /** * [Texture-related parameter] - * When using the OpenGL interface (javax.microedition.khronos.egl.*) defined by Khronos, set EGLContext to this field. - * When using the OpenGL interface (android.opengl.*) defined by Android, set EGLContext to this field. + * When using the OpenGL interface (javax.microedition.khronos.egl.*) defined by Khronos, set + * EGLContext to this field. 
When using the OpenGL interface (android.opengl.*) defined by + * Android, set EGLContext to this field. */ - void *eglContext; + void* eglContext; /** * [Texture related parameter] Texture ID used by the video frame. */ EGL_CONTEXT_TYPE eglType; /** - * [Texture related parameter] Incoming 4 × 4 transformational matrix. The typical value is a unit matrix. + * [Texture related parameter] Incoming 4 × 4 transformational matrix. The typical value is + * a unit matrix. */ int textureId; /** @@ -1060,8 +1086,8 @@ struct VideoFrame { */ int rotation; /** - * The timestamp to render the video stream. Use this parameter for audio-video synchronization when - * rendering the video. + * The timestamp to render the video stream. Use this parameter for audio-video synchronization + * when rendering the video. * * @note This parameter is for rendering the video, not capturing the video. */ @@ -1089,7 +1115,8 @@ struct VideoFrame { */ int textureId; /** - * [Texture related parameter] The pointer of ID3D11Texture2D used by the video frame,for Windows only. + * [Texture related parameter] The pointer of ID3D11Texture2D used by the video frame,for Windows + * only. */ void* d3d11Texture2d; /** @@ -1117,7 +1144,8 @@ struct VideoFrame { */ void* pixelBuffer; /** - * The pointer to IVideoFrameMetaInfo, which is the interface to get metainfo contents from VideoFrame. + * The pointer to IVideoFrameMetaInfo, which is the interface to get metainfo contents from + * VideoFrame. */ IVideoFrameMetaInfo* metaInfo; @@ -1141,7 +1169,8 @@ class IVideoFrameObserver { * Occurs each time the player receives a video frame. * * After registering the video frame observer, - * the callback occurs each time the player receives a video frame to report the detailed information of the video frame. + * the callback occurs each time the player receives a video frame to report the detailed + * information of the video frame. * @param frame The detailed information of the video frame. See {@link VideoFrame}. 
*/ virtual void onFrame(const VideoFrame* frame) = 0; @@ -1179,6 +1208,30 @@ enum VIDEO_MODULE_POSITION { } // namespace base +/** Definition of SnapshotConfig. + */ +struct SnapshotConfig { + /** + * The local path (including filename extensions) of the snapshot. For example: + * - Windows: `C:\Users\\AppData\Local\Agora\\example.jpg` + * - iOS: `/App Sandbox/Library/Caches/example.jpg` + * - macOS: `~/Library/Logs/example.jpg` + * - Android: `/storage/emulated/0/Android/data//files/example.jpg` + */ + const char* filePath; + + /** + * The position of the video observation. See VIDEO_MODULE_POSITION. + * + * Allowed values vary depending on the `uid` parameter passed in `takeSnapshot` or `takeSnapshotEx`: + * - uid = 0: Position 2, 4 and 8 are allowed. + * - uid != 0: Only position 2 is allowed. + * + */ + media::base::VIDEO_MODULE_POSITION position; + SnapshotConfig() :filePath(NULL), position(media::base::POSITION_PRE_ENCODER) {} +}; + /** * The audio frame observer. */ @@ -1238,7 +1291,7 @@ class IAudioFrameObserverBase { */ int samplesPerSec; /** - * The data buffer of the audio frame. When the audio frame uses a stereo channel, the data + * The data buffer of the audio frame. When the audio frame uses a stereo channel, the data * buffer is interleaved. * * Buffer data size: buffer = samplesPerChannel × channels × bytesPerSample. @@ -1247,14 +1300,14 @@ class IAudioFrameObserverBase { /** * The timestamp to render the audio data. * - * You can use this timestamp to restore the order of the captured audio frame, and synchronize - * audio and video frames in video scenarios, including scenarios where external video sources + * You can use this timestamp to restore the order of the captured audio frame, and synchronize + * audio and video frames in video scenarios, including scenarios where external video sources * are used. */ int64_t renderTimeMs; /** * A reserved parameter. 
- * + * * You can use this presentationMs parameter to indicate the presenation milisecond timestamp, * this will then filled into audio4 extension part, the remote side could use this pts in av * sync process with video frame. @@ -1263,11 +1316,11 @@ class IAudioFrameObserverBase { /** * The pts timestamp of this audio frame. * - * This timestamp is used to indicate the origin pts time of the frame, and sync with video frame by - * the pts time stamp + * This timestamp is used to indicate the origin pts time of the frame, and sync with video + * frame by the pts time stamp */ int64_t presentationMs; - /** + /** * The number of the audio track. */ int audioTrackNumber; @@ -1276,17 +1329,18 @@ class IAudioFrameObserverBase { */ uint32_t rtpTimestamp; - AudioFrame() : type(FRAME_TYPE_PCM16), - samplesPerChannel(0), - bytesPerSample(rtc::TWO_BYTES_PER_SAMPLE), - channels(0), - samplesPerSec(0), - buffer(NULL), - renderTimeMs(0), - avsync_type(0), - presentationMs(0), - audioTrackNumber(0), - rtpTimestamp(0) {} + AudioFrame() + : type(FRAME_TYPE_PCM16), + samplesPerChannel(0), + bytesPerSample(rtc::TWO_BYTES_PER_SAMPLE), + channels(0), + samplesPerSec(0), + buffer(NULL), + renderTimeMs(0), + avsync_type(0), + presentationMs(0), + audioTrackNumber(0), + rtpTimestamp(0) {} }; enum AUDIO_FRAME_POSITION { @@ -1335,8 +1389,17 @@ class IAudioFrameObserverBase { */ int samples_per_call; - AudioParams() : sample_rate(0), channels(0), mode(rtc::RAW_AUDIO_FRAME_OP_MODE_READ_ONLY), samples_per_call(0) {} - AudioParams(int samplerate, int channel, rtc::RAW_AUDIO_FRAME_OP_MODE_TYPE type, int samplesPerCall) : sample_rate(samplerate), channels(channel), mode(type), samples_per_call(samplesPerCall) {} + AudioParams() + : sample_rate(0), + channels(0), + mode(rtc::RAW_AUDIO_FRAME_OP_MODE_READ_ONLY), + samples_per_call(0) {} + AudioParams(int samplerate, int channel, rtc::RAW_AUDIO_FRAME_OP_MODE_TYPE type, + int samplesPerCall) + : sample_rate(samplerate), + channels(channel), + 
mode(type), + samples_per_call(samplesPerCall) {} }; public: @@ -1386,10 +1449,11 @@ class IAudioFrameObserverBase { * - true: The before-mixing playback audio frame is valid and is encoded and sent. * - false: The before-mixing playback audio frame is invalid and is not encoded or sent. */ - virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, base::user_id_t userId, AudioFrame& audioFrame) { - (void) channelId; - (void) userId; - (void) audioFrame; + virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, base::user_id_t userId, + AudioFrame& audioFrame) { + (void)channelId; + (void)userId; + (void)audioFrame; return true; } @@ -1398,12 +1462,19 @@ class IAudioFrameObserverBase { * @return A bit mask that controls the frame position of the audio observer. * @note - Use '|' (the OR operator) to observe multiple frame positions. *

- * After you successfully register the audio observer, the SDK triggers this callback each time it receives a audio frame. You can determine which position to observe by setting the return value. - * The SDK provides 4 positions for observer. Each position corresponds to a callback function: - * - `AUDIO_FRAME_POSITION_PLAYBACK (1 << 0)`: The position for playback audio frame is received, which corresponds to the \ref onPlaybackFrame "onPlaybackFrame" callback. - * - `AUDIO_FRAME_POSITION_RECORD (1 << 1)`: The position for record audio frame is received, which corresponds to the \ref onRecordFrame "onRecordFrame" callback. - * - `AUDIO_FRAME_POSITION_MIXED (1 << 2)`: The position for mixed audio frame is received, which corresponds to the \ref onMixedFrame "onMixedFrame" callback. - * - `AUDIO_FRAME_POSITION_BEFORE_MIXING (1 << 3)`: The position for playback audio frame before mixing is received, which corresponds to the \ref onPlaybackFrameBeforeMixing "onPlaybackFrameBeforeMixing" callback. + * After you successfully register the audio observer, the SDK triggers this callback each time it + * receives a audio frame. You can determine which position to observe by setting the return + * value. The SDK provides 4 positions for observer. Each position corresponds to a callback + * function: + * - `AUDIO_FRAME_POSITION_PLAYBACK (1 << 0)`: The position for playback audio frame is received, + * which corresponds to the \ref onPlaybackFrame "onPlaybackFrame" callback. + * - `AUDIO_FRAME_POSITION_RECORD (1 << 1)`: The position for record audio frame is received, + * which corresponds to the \ref onRecordFrame "onRecordFrame" callback. + * - `AUDIO_FRAME_POSITION_MIXED (1 << 2)`: The position for mixed audio frame is received, which + * corresponds to the \ref onMixedFrame "onMixedFrame" callback. 
+ * - `AUDIO_FRAME_POSITION_BEFORE_MIXING (1 << 3)`: The position for playback audio frame before + * mixing is received, which corresponds to the \ref onPlaybackFrameBeforeMixing + * "onPlaybackFrameBeforeMixing" callback. * @return The bit mask that controls the audio observation positions. * See AUDIO_FRAME_POSITION. */ @@ -1475,25 +1546,25 @@ class IAudioFrameObserver : public IAudioFrameObserverBase { * - true: The before-mixing playback audio frame is valid and is encoded and sent. * - false: The before-mixing playback audio frame is invalid and is not encoded or sent. */ - virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, rtc::uid_t uid, AudioFrame& audioFrame) = 0; + virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, rtc::uid_t uid, + AudioFrame& audioFrame) = 0; }; struct AudioSpectrumData { /** * The audio spectrum data of audio. */ - const float *audioSpectrumData; + const float* audioSpectrumData; /** * The data length of audio spectrum data. */ int dataLength; AudioSpectrumData() : audioSpectrumData(NULL), dataLength(0) {} - AudioSpectrumData(const float *data, int length) : - audioSpectrumData(data), dataLength(length) {} + AudioSpectrumData(const float* data, int length) : audioSpectrumData(data), dataLength(length) {} }; -struct UserAudioSpectrumInfo { +struct UserAudioSpectrumInfo { /** * User ID of the speaker. */ @@ -1505,14 +1576,15 @@ struct UserAudioSpectrumInfo { UserAudioSpectrumInfo() : uid(0) {} - UserAudioSpectrumInfo(agora::rtc::uid_t uid, const float* data, int length) : uid(uid), spectrumData(data, length) {} + UserAudioSpectrumInfo(agora::rtc::uid_t uid, const float* data, int length) + : uid(uid), spectrumData(data, length) {} }; /** * The IAudioSpectrumObserver class. 
*/ class IAudioSpectrumObserver { -public: + public: virtual ~IAudioSpectrumObserver() {} /** @@ -1521,7 +1593,8 @@ class IAudioSpectrumObserver { * This callback reports the audio spectrum data of the local audio at the moment * in the channel. * - * You can set the time interval of this callback using \ref ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor". + * You can set the time interval of this callback using \ref + * ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor". * * @param data The audio spectrum data of local audio. * - true: Processed. @@ -1534,10 +1607,12 @@ class IAudioSpectrumObserver { * This callback reports the IDs and audio spectrum data of the loudest speakers at the moment * in the channel. * - * You can set the time interval of this callback using \ref ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor". + * You can set the time interval of this callback using \ref + * ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor". * - * @param spectrums The pointer to \ref agora::media::UserAudioSpectrumInfo "UserAudioSpectrumInfo", which is an array containing - * the user ID and audio spectrum data for each speaker. + * @param spectrums The pointer to \ref agora::media::UserAudioSpectrumInfo + * "UserAudioSpectrumInfo", which is an array containing the user ID and audio spectrum data for + * each speaker. * - This array contains the following members: * - `uid`, which is the UID of each remote speaker * - `spectrumData`, which reports the audio spectrum of each remote speaker. @@ -1545,7 +1620,8 @@ class IAudioSpectrumObserver { * - true: Processed. * - false: Not processed. */ - virtual bool onRemoteAudioSpectrum(const UserAudioSpectrumInfo* spectrums, unsigned int spectrumNumber) = 0; + virtual bool onRemoteAudioSpectrum(const UserAudioSpectrumInfo* spectrums, + unsigned int spectrumNumber) = 0; }; /** @@ -1563,8 +1639,9 @@ class IVideoEncodedFrameObserver { * - true: Accept. 
* - false: Do not accept. */ - virtual bool onEncodedVideoFrameReceived(rtc::uid_t uid, const uint8_t* imageBuffer, size_t length, - const rtc::EncodedVideoFrameInfo& videoEncodedFrameInfo) = 0; + virtual bool onEncodedVideoFrameReceived( + rtc::uid_t uid, const uint8_t* imageBuffer, size_t length, + const rtc::EncodedVideoFrameInfo& videoEncodedFrameInfo) = 0; virtual ~IVideoEncodedFrameObserver() {} }; @@ -1581,16 +1658,18 @@ class IVideoFrameObserver { enum VIDEO_FRAME_PROCESS_MODE { /** * Read-only mode. - * + * * In this mode, you do not modify the video frame. The video frame observer is a renderer. */ - PROCESS_MODE_READ_ONLY, // Observer works as a pure renderer and will not modify the original frame. + PROCESS_MODE_READ_ONLY, // Observer works as a pure renderer and will not modify the original + // frame. /** * Read and write mode. - * + * * In this mode, you modify the video frame. The video frame observer is a video filter. */ - PROCESS_MODE_READ_WRITE, // Observer works as a filter that will process the video frame and affect the following frame processing in SDK. + PROCESS_MODE_READ_WRITE, // Observer works as a filter that will process the video frame and + // affect the following frame processing in SDK. }; public: @@ -1599,38 +1678,43 @@ class IVideoFrameObserver { /** * Occurs each time the SDK receives a video frame captured by the local camera. * - * After you successfully register the video frame observer, the SDK triggers this callback each time - * a video frame is received. In this callback, you can get the video data captured by the local - * camera. You can then pre-process the data according to your scenarios. + * After you successfully register the video frame observer, the SDK triggers this callback each + * time a video frame is received. In this callback, you can get the video data captured by the + * local camera. You can then pre-process the data according to your scenarios. 
* * After pre-processing, you can send the processed video data back to the SDK by setting the * `videoFrame` parameter in this callback. * * @note - * - If you get the video data in RGBA color encoding format, Agora does not support using this callback to send the processed data in RGBA color encoding format back to the SDK. - * - The video data that this callback gets has not been pre-processed, such as watermarking, cropping content, rotating, or image enhancement. + * - If you get the video data in RGBA color encoding format, Agora does not support using this + * callback to send the processed data in RGBA color encoding format back to the SDK. + * - The video data that this callback gets has not been pre-processed, such as watermarking, + * cropping content, rotating, or image enhancement. * * @param videoFrame A pointer to the video frame: VideoFrame * @param sourceType source type of video frame. See #VIDEO_SOURCE_TYPE. * @return Determines whether to ignore the current video frame if the pre-processing fails: * - true: Do not ignore. * - false: Ignore, in which case this method does not sent the current video frame to the SDK. - */ - virtual bool onCaptureVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType, VideoFrame& videoFrame) = 0; + */ + virtual bool onCaptureVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType, + VideoFrame& videoFrame) = 0; /** * Occurs each time the SDK receives a video frame before encoding. * - * After you successfully register the video frame observer, the SDK triggers this callback each time - * when it receives a video frame. In this callback, you can get the video data before encoding. You can then - * process the data according to your particular scenarios. + * After you successfully register the video frame observer, the SDK triggers this callback each + * time when it receives a video frame. In this callback, you can get the video data before + * encoding. You can then process the data according to your particular scenarios. 
* * After processing, you can send the processed video data back to the SDK by setting the * `videoFrame` parameter in this callback. * * @note - * - To get the video data captured from the second screen before encoding, you need to set (1 << 2) as a frame position through `getObservedFramePosition`. - * - The video data that this callback gets has been pre-processed, such as watermarking, cropping content, rotating, or image enhancement. + * - To get the video data captured from the second screen before encoding, you need to set (1 << + * 2) as a frame position through `getObservedFramePosition`. + * - The video data that this callback gets has been pre-processed, such as watermarking, cropping + * content, rotating, or image enhancement. * - This callback does not support sending processed RGBA video data back to the SDK. * * @param videoFrame A pointer to the video frame: VideoFrame @@ -1639,7 +1723,8 @@ class IVideoFrameObserver { * - true: Do not ignore. * - false: Ignore, in which case this method does not sent the current video frame to the SDK. */ - virtual bool onPreEncodeVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType, VideoFrame& videoFrame) = 0; + virtual bool onPreEncodeVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType, + VideoFrame& videoFrame) = 0; /** * Occurs each time the SDK receives a video frame decoded by the MediaPlayer. @@ -1650,10 +1735,13 @@ class IVideoFrameObserver { * * After pre-processing, you can send the processed video data back to the SDK by setting the * `videoFrame` parameter in this callback. - * + * * @note - * - This callback will not be affected by the return values of \ref getVideoFrameProcessMode "getVideoFrameProcessMode", \ref getRotationApplied "getRotationApplied", \ref getMirrorApplied "getMirrorApplied", \ref getObservedFramePosition "getObservedFramePosition". 
- * - On Android, this callback is not affected by the return value of \ref getVideoFormatPreference "getVideoFormatPreference" + * - This callback will not be affected by the return values of \ref getVideoFrameProcessMode + * "getVideoFrameProcessMode", \ref getRotationApplied "getRotationApplied", \ref getMirrorApplied + * "getMirrorApplied", \ref getObservedFramePosition "getObservedFramePosition". + * - On Android, this callback is not affected by the return value of \ref + * getVideoFormatPreference "getVideoFormatPreference" * * @param videoFrame A pointer to the video frame: VideoFrame * @param mediaPlayerId ID of the mediaPlayer. @@ -1666,13 +1754,13 @@ class IVideoFrameObserver { /** * Occurs each time the SDK receives a video frame sent by the remote user. * - * After you successfully register the video frame observer, the SDK triggers this callback each time a - * video frame is received. In this callback, you can get the video data sent by the remote user. You - * can then post-process the data according to your scenarios. + * After you successfully register the video frame observer, the SDK triggers this callback each + * time a video frame is received. In this callback, you can get the video data sent by the remote + * user. You can then post-process the data according to your scenarios. + * + * After post-processing, you can send the processed data back to the SDK by setting the + * `videoFrame` parameter in this callback. * - * After post-processing, you can send the processed data back to the SDK by setting the `videoFrame` - * parameter in this callback. - * * @note This callback does not support sending processed RGBA video data back to the SDK. * * @param channelId The channel name @@ -1682,45 +1770,48 @@ class IVideoFrameObserver { * - true: Do not ignore. * - false: Ignore, in which case this method does not sent the current video frame to the SDK. 
*/ - virtual bool onRenderVideoFrame(const char* channelId, rtc::uid_t remoteUid, VideoFrame& videoFrame) = 0; + virtual bool onRenderVideoFrame(const char* channelId, rtc::uid_t remoteUid, + VideoFrame& videoFrame) = 0; virtual bool onTranscodedVideoFrame(VideoFrame& videoFrame) = 0; /** - * Occurs each time the SDK receives a video frame and prompts you to set the process mode of the video frame. - * - * After you successfully register the video frame observer, the SDK triggers this callback each time it receives - * a video frame. You need to set your preferred process mode in the return value of this callback. + * Occurs each time the SDK receives a video frame and prompts you to set the process mode of the + * video frame. + * + * After you successfully register the video frame observer, the SDK triggers this callback each + * time it receives a video frame. You need to set your preferred process mode in the return value + * of this callback. * @return VIDEO_FRAME_PROCESS_MODE. */ - virtual VIDEO_FRAME_PROCESS_MODE getVideoFrameProcessMode() { - return PROCESS_MODE_READ_ONLY; - } + virtual VIDEO_FRAME_PROCESS_MODE getVideoFrameProcessMode() { return PROCESS_MODE_READ_ONLY; } /** * Sets the format of the raw video data output by the SDK. * - * If you want to get raw video data in a color encoding format other than YUV 420, register this callback when - * calling `registerVideoFrameObserver`. After you successfully register the video frame observer, the SDK triggers - * this callback each time it receives a video frame. You need to set your preferred video data in the return value - * of this callback. - * - * @note If you want the video captured by the sender to be the original format, set the original video data format - * to VIDEO_PIXEL_DEFAULT in the return value. On different platforms, the original video pixel format is also - * different, for the actual video pixel format, see `VideoFrame`. 
- * + * If you want to get raw video data in a color encoding format other than YUV 420, register this + * callback when calling `registerVideoFrameObserver`. After you successfully register the video + * frame observer, the SDK triggers this callback each time it receives a video frame. You need to + * set your preferred video data in the return value of this callback. + * + * @note If you want the video captured by the sender to be the original format, set the original + * video data format to VIDEO_PIXEL_DEFAULT in the return value. On different platforms, the + * original video pixel format is also different, for the actual video pixel format, see + * `VideoFrame`. + * * @return Sets the video format. See VIDEO_PIXEL_FORMAT. */ virtual base::VIDEO_PIXEL_FORMAT getVideoFormatPreference() { return base::VIDEO_PIXEL_DEFAULT; } /** - * Occurs each time the SDK receives a video frame, and prompts you whether to rotate the captured video. - * - * If you want to rotate the captured video according to the rotation member in the `VideoFrame` class, register this - * callback by calling `registerVideoFrameObserver`. After you successfully register the video frame observer, the - * SDK triggers this callback each time it receives a video frame. You need to set whether to rotate the video frame - * in the return value of this callback. - * + * Occurs each time the SDK receives a video frame, and prompts you whether to rotate the captured + * video. + * + * If you want to rotate the captured video according to the rotation member in the `VideoFrame` + * class, register this callback by calling `registerVideoFrameObserver`. After you successfully + * register the video frame observer, the SDK triggers this callback each time it receives a video + * frame. You need to set whether to rotate the video frame in the return value of this callback. + * * @note This function only supports video data in RGBA or YUV420. * * @return Determines whether to rotate. 
@@ -1730,13 +1821,15 @@ class IVideoFrameObserver { virtual bool getRotationApplied() { return false; } /** - * Occurs each time the SDK receives a video frame and prompts you whether or not to mirror the captured video. - * - * If the video data you want to obtain is a mirror image of the original video, you need to register this callback - * when calling `registerVideoFrameObserver`. After you successfully register the video frame observer, the SDK - * triggers this callback each time it receives a video frame. You need to set whether or not to mirror the video - * frame in the return value of this callback. - * + * Occurs each time the SDK receives a video frame and prompts you whether or not to mirror the + * captured video. + * + * If the video data you want to obtain is a mirror image of the original video, you need to + * register this callback when calling `registerVideoFrameObserver`. After you successfully + * register the video frame observer, the SDK triggers this callback each time it receives a video + * frame. You need to set whether or not to mirror the video frame in the return value of this + * callback. + * * @note This function only supports video data in RGBA and YUV420 formats. * * @return Determines whether to mirror. @@ -1748,19 +1841,24 @@ class IVideoFrameObserver { /** * Sets the frame position for the video observer. * - * After you successfully register the video observer, the SDK triggers this callback each time it receives - * a video frame. You can determine which position to observe by setting the return value. The SDK provides - * 3 positions for observer. Each position corresponds to a callback function: + * After you successfully register the video observer, the SDK triggers this callback each time it + * receives a video frame. You can determine which position to observe by setting the return + * value. The SDK provides 3 positions for observer. 
Each position corresponds to a callback + * function: * - * POSITION_POST_CAPTURER(1 << 0): The position after capturing the video data, which corresponds to the onCaptureVideoFrame callback. - * POSITION_PRE_RENDERER(1 << 1): The position before receiving the remote video data, which corresponds to the onRenderVideoFrame callback. - * POSITION_PRE_ENCODER(1 << 2): The position before encoding the video data, which corresponds to the onPreEncodeVideoFrame callback. + * POSITION_POST_CAPTURER(1 << 0): The position after capturing the video data, which corresponds + * to the onCaptureVideoFrame callback. POSITION_PRE_RENDERER(1 << 1): The position before + * receiving the remote video data, which corresponds to the onRenderVideoFrame callback. + * POSITION_PRE_ENCODER(1 << 2): The position before encoding the video data, which corresponds to + * the onPreEncodeVideoFrame callback. * * To observe multiple frame positions, use '|' (the OR operator). - * This callback observes POSITION_POST_CAPTURER(1 << 0) and POSITION_PRE_RENDERER(1 << 1) by default. - * To conserve the system consumption, you can reduce the number of frame positions that you want to observe. + * This callback observes POSITION_POST_CAPTURER(1 << 0) and POSITION_PRE_RENDERER(1 << 1) by + * default. To conserve the system consumption, you can reduce the number of frame positions that + * you want to observe. * - * @return A bit mask that controls the frame position of the video observer: VIDEO_OBSERVER_POSITION. + * @return A bit mask that controls the frame position of the video observer: + * VIDEO_OBSERVER_POSITION. */ virtual uint32_t getObservedFramePosition() { return base::POSITION_POST_CAPTURER | base::POSITION_PRE_RENDERER; @@ -1854,7 +1952,8 @@ enum RecorderReasonCode { */ RECORDER_REASON_WRITE_FAILED = 1, /** - * 2: The SDK does not detect audio and video streams to be recorded, or audio and video streams are interrupted for more than five seconds during recording. 
+ * 2: The SDK does not detect audio and video streams to be recorded, or audio and video streams + * are interrupted for more than five seconds during recording. */ RECORDER_REASON_NO_STREAM = 2, /** @@ -1882,7 +1981,8 @@ struct MediaRecorderConfiguration { */ const char* storagePath; /** - * The format of the recording file. See \ref agora::rtc::MediaRecorderContainerFormat "MediaRecorderContainerFormat". + * The format of the recording file. See \ref agora::rtc::MediaRecorderContainerFormat + * "MediaRecorderContainerFormat". */ MediaRecorderContainerFormat containerFormat; /** @@ -1900,23 +2000,70 @@ struct MediaRecorderConfiguration { * callback to report the updated recording information. */ int recorderInfoUpdateInterval; - - MediaRecorderConfiguration() : storagePath(NULL), containerFormat(FORMAT_MP4), streamType(STREAM_TYPE_BOTH), maxDurationMs(120000), recorderInfoUpdateInterval(0) {} - MediaRecorderConfiguration(const char* path, MediaRecorderContainerFormat format, MediaRecorderStreamType type, int duration, int interval) : storagePath(path), containerFormat(format), streamType(type), maxDurationMs(duration), recorderInfoUpdateInterval(interval) {} + /** + * The video width + */ + int width; + /** + * The video height + */ + int height; + /** + * The video fps + */ + int fps; + /** + * The audio sample rate + */ + int sample_rate; + /** + * The audio channel nums + */ + int channel_num; + /** + * The video source just for out channel recoder + */ + agora::rtc::VIDEO_SOURCE_TYPE videoSourceType; + + MediaRecorderConfiguration() + : storagePath(NULL), + containerFormat(FORMAT_MP4), + streamType(STREAM_TYPE_BOTH), + maxDurationMs(120000), + recorderInfoUpdateInterval(0), + width(1280), + height(720), + fps(30), + sample_rate(48000), + channel_num(1), + videoSourceType(rtc::VIDEO_SOURCE_CAMERA_PRIMARY) {} + MediaRecorderConfiguration(const char* path, MediaRecorderContainerFormat format, + MediaRecorderStreamType type, int duration, int interval) + : 
storagePath(path), + containerFormat(format), + streamType(type), + maxDurationMs(duration), + recorderInfoUpdateInterval(interval), + width(1280), + height(720), + fps(30), + sample_rate(48000), + channel_num(1), + videoSourceType(rtc::VIDEO_SOURCE_CAMERA_PRIMARY) {} }; class IFaceInfoObserver { -public: - /** - * Occurs when the face info is received. - * @param outFaceInfo The output face info. - * @return - * - true: The face info is valid. - * - false: The face info is invalid. + public: + /** + * Occurs when the face info is received. + * @param outFaceInfo The output face info. + * @return + * - true: The face info is valid. + * - false: The face info is invalid. */ - virtual bool onFaceInfo(const char* outFaceInfo) = 0; - - virtual ~IFaceInfoObserver() {} + virtual bool onFaceInfo(const char* outFaceInfo) = 0; + + virtual ~IFaceInfoObserver() {} }; /** @@ -1939,7 +2086,8 @@ struct RecorderInfo { unsigned int fileSize; RecorderInfo() : fileName(NULL), durationMs(0), fileSize(0) {} - RecorderInfo(const char* name, unsigned int dur, unsigned int size) : fileName(name), durationMs(dur), fileSize(size) {} + RecorderInfo(const char* name, unsigned int dur, unsigned int size) + : fileName(name), durationMs(dur), fileSize(size) {} }; class IMediaRecorderObserver { @@ -1949,30 +2097,35 @@ class IMediaRecorderObserver { * * @since v4.0.0 * - * When the local audio and video recording state changes, the SDK triggers this callback to report the current - * recording state and the reason for the change. + * When the local audio and video recording state changes, the SDK triggers this callback to + * report the current recording state and the reason for the change. * * @param channelId The channel name. * @param uid ID of the user. * @param state The current recording state. See \ref agora::media::RecorderState "RecorderState". - * @param reason The reason for the state change. See \ref agora::media::RecorderReasonCode "RecorderReasonCode". 
+ * @param reason The reason for the state change. See \ref agora::media::RecorderReasonCode + * "RecorderReasonCode". */ - virtual void onRecorderStateChanged(const char* channelId, rtc::uid_t uid, RecorderState state, RecorderReasonCode reason) = 0; + virtual void onRecorderStateChanged(const char* channelId, rtc::uid_t uid, RecorderState state, + RecorderReasonCode reason) = 0; /** * Occurs when the recording information is updated. * * @since v4.0.0 * - * After you successfully register this callback and enable the local audio and video recording, the SDK periodically triggers - * the `onRecorderInfoUpdated` callback based on the set value of `recorderInfoUpdateInterval`. This callback reports the - * filename, duration, and size of the current recording file. + * After you successfully register this callback and enable the local audio and video recording, + * the SDK periodically triggers the `onRecorderInfoUpdated` callback based on the set value of + * `recorderInfoUpdateInterval`. This callback reports the filename, duration, and size of the + * current recording file. * * @param channelId The channel name. * @param uid ID of the user. - * @param info Information about the recording file. See \ref agora::media::RecorderInfo "RecorderInfo". + * @param info Information about the recording file. See \ref agora::media::RecorderInfo + * "RecorderInfo". 
* */ - virtual void onRecorderInfoUpdated(const char* channelId, rtc::uid_t uid, const RecorderInfo& info) = 0; + virtual void onRecorderInfoUpdated(const char* channelId, rtc::uid_t uid, + const RecorderInfo& info) = 0; virtual ~IMediaRecorderObserver() {} }; diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/IAgoraMediaEngine.h b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/IAgoraMediaEngine.h index 2ad93eef..00c6e872 100644 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/IAgoraMediaEngine.h +++ b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/IAgoraMediaEngine.h @@ -141,6 +141,24 @@ class IMediaEngine { bool enabled, bool useTexture, EXTERNAL_VIDEO_SOURCE_TYPE sourceType = VIDEO_FRAME, rtc::SenderOptions encodedVideoOption = rtc::SenderOptions()) = 0; +#if defined(__ANDROID__) + /** + * Sets the remote eglContext. + * + * When the engine is destroyed, the SDK will automatically release the eglContext. + * + * @param eglContext. + * + * @note + * setExternalRemoteEglContext needs to be called before joining the channel. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setExternalRemoteEglContext(void* eglContext) = 0; +#endif + /** * Sets the external audio source. 
* diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/IAgoraMediaRecorder.h b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/IAgoraMediaRecorder.h index 33f5a30e..b2558f5e 100644 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/IAgoraMediaRecorder.h +++ b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/IAgoraMediaRecorder.h @@ -7,7 +7,6 @@ #include "AgoraBase.h" #include "AgoraMediaBase.h" -#include "IAgoraRtcEngineEx.h" namespace agora { namespace rtc { diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/IAgoraRtcEngine.h b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/IAgoraRtcEngine.h index 9f52693c..76932118 100644 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/IAgoraRtcEngine.h +++ b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/IAgoraRtcEngine.h @@ -97,12 +97,14 @@ enum AUDIO_MIXING_REASON_TYPE { AUDIO_MIXING_REASON_TOO_FREQUENT_CALL = 702, /** 703: The audio mixing file playback is interrupted. */ AUDIO_MIXING_REASON_INTERRUPTED_EOF = 703, - /** 715: The audio mixing file is played once. */ + /** 721: The audio mixing file is played once. */ AUDIO_MIXING_REASON_ONE_LOOP_COMPLETED = 721, - /** 716: The audio mixing file is all played out. */ + /** 723: The audio mixing file is all played out. */ AUDIO_MIXING_REASON_ALL_LOOPS_COMPLETED = 723, - /** 716: The audio mixing file stopped by user */ + /** 724: The audio mixing file stopped by user */ AUDIO_MIXING_REASON_STOPPED_BY_USER = 724, + /** 726: The audio mixing playback has resumed by user */ + AUDIO_MIXING_REASON_RESUMED_BY_USER = 726, /** 0: The SDK can open the audio mixing file. 
*/ AUDIO_MIXING_REASON_OK = 0, }; @@ -854,7 +856,7 @@ struct ScreenCaptureConfiguration { /** * (macOS only) The display ID of the screen. */ - uint32_t displayId; + int64_t displayId; /** * (Windows only) The relative position of the shared screen to the virtual screen. * @note This parameter takes effect only when you want to capture the screen on Windows. @@ -864,7 +866,7 @@ struct ScreenCaptureConfiguration { * (For Windows and macOS only) The window ID. * @note This parameter takes effect only when you want to capture the window. */ - view_t windowId; + int64_t windowId; /** * (For Windows and macOS only) The screen capture configuration. For details, see ScreenCaptureParameters. */ @@ -944,7 +946,7 @@ struct ScreenCaptureSourceInfo { /** * The window ID for a window or the display ID for a screen. */ - view_t sourceId; + int64_t sourceId; /** * The name of the window or screen. UTF-8 encoding. */ @@ -987,11 +989,11 @@ struct ScreenCaptureSourceInfo { * ID to the display monitor that has the largest area of intersection with the window, Otherwise * the return value is -2. 
*/ - view_t sourceDisplayId; - ScreenCaptureSourceInfo() : type(ScreenCaptureSourceType_Unknown), sourceId(nullptr), sourceName(nullptr), - processPath(nullptr), sourceTitle(nullptr), primaryMonitor(false), isOccluded(false), minimizeWindow(false), sourceDisplayId((view_t)-2) {} + int64_t sourceDisplayId; + ScreenCaptureSourceInfo() : type(ScreenCaptureSourceType_Unknown), sourceId(0), sourceName(nullptr), + processPath(nullptr), sourceTitle(nullptr), primaryMonitor(false), isOccluded(false), minimizeWindow(false), sourceDisplayId(-2) {} #else - ScreenCaptureSourceInfo() : type(ScreenCaptureSourceType_Unknown), sourceId(nullptr), sourceName(nullptr), processPath(nullptr), sourceTitle(nullptr), primaryMonitor(false), isOccluded(false) {} + ScreenCaptureSourceInfo() : type(ScreenCaptureSourceType_Unknown), sourceId(0), sourceName(nullptr), processPath(nullptr), sourceTitle(nullptr), primaryMonitor(false), isOccluded(false) {} #endif }; /** @@ -4187,6 +4189,32 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int getFaceShapeAreaOptions(agora::rtc::FaceShapeAreaOptions::FACE_SHAPE_AREA shapeArea, FaceShapeAreaOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; + /** + * Sets filter effect options. + * + * @since v4.4.1 + * You can call this method to enable the filter effect feature and set the options of the filter effect. + * + * @note + * - Before calling this method, ensure that you have integrated the following dynamic library into your project: + * - Android: `libagora_clear_vision_extension.so` + * - iOS/macOS: `AgoraClearVisionExtension.xcframework` + * - Windows: `libagora_clear_vision_extension.dll` + * - Call this method after calling the \ref IRtcEngine::enableVideo "enableVideo" method. + * - You can call this method either before or after joining a channel. + * - The filter effect feature has specific performance requirements for devices. 
If your device overheats after enabling the filter effect, Agora recommends disabling it entirely. + * + * @param enabled. Whether to enable filter effect: + * - `true`: Enable. + * - `false`: (Default) Disable. + * @param options. Set the filter effect options. See FilterEffectOptions. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setFilterEffectOptions(bool enabled, const FilterEffectOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; + /** * Sets low-light enhancement. * @@ -4198,9 +4226,9 @@ class IRtcEngine : public agora::base::IEngineBase { * * @note * - Before calling this method, ensure that you have integrated the following dynamic library into your project: - * - Android: `libagora_segmentation_extension.so` - * - iOS/macOS: `AgoraVideoSegmentationExtension.xcframework` - * - Windows: `libagora_segmentation_extension.dll` + * - Android: `libagora_clear_vision_extension.so` + * - iOS/macOS: `AgoraClearVisionExtension.xcframework` + * - Windows: `libagora_clear_vision_extension.dll` * - Call this method after \ref IRtcEngine::enableVideo "enableVideo". * - The low-light enhancement feature has certain performance requirements on devices. If your device overheats after you enable low-light enhancement, Agora recommends modifying the low-light enhancement options to a less performance-consuming level or disabling low-light enhancement entirely. 
* @@ -4225,9 +4253,9 @@ class IRtcEngine : public agora::base::IEngineBase { * * @note * - Before calling this method, ensure that you have integrated the following dynamic library into your project: - * - Android: `libagora_segmentation_extension.so` - * - iOS/macOS: `AgoraVideoSegmentationExtension.xcframework` - * - Windows: `libagora_segmentation_extension.dll` + * - Android: `libagora_clear_vision_extension.so` + * - iOS/macOS: `AgoraClearVisionExtension.xcframework` + * - Windows: `libagora_clear_vision_extension.dll` * - Call this method after \ref IRtcEngine::enableVideo "enableVideo". * - The video noise reduction feature has certain performance requirements on devices. If your device overheats after you enable video noise reduction, Agora recommends modifying the video noise reduction options to a less performance-consuming level or disabling video noise reduction entirely. * @@ -4252,9 +4280,9 @@ class IRtcEngine : public agora::base::IEngineBase { * * @note * - Before calling this method, ensure that you have integrated the following dynamic library into your project: - * - Android: `libagora_segmentation_extension.so` - * - iOS/macOS: `AgoraVideoSegmentationExtension.xcframework` - * - Windows: `libagora_segmentation_extension.dll` + * - Android: `libagora_clear_vision_extension.so` + * - iOS/macOS: `AgoraClearVisionExtension.xcframework` + * - Windows: `libagora_clear_vision_extension.dll` * - Call this method after \ref IRtcEngine::enableVideo "enableVideo". * - The color enhancement feature has certain performance requirements on devices. If your device overheats after you enable color enhancement, Agora recommends modifying the color enhancement options to a less performance-consuming level or disabling color enhancement entirely. 
* @@ -6000,7 +6028,26 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int setRemoteRenderMode(uid_t uid, media::base::RENDER_MODE_TYPE renderMode, VIDEO_MIRROR_MODE_TYPE mirrorMode) = 0; - + /** + * Sets the target frames per second (FPS) for the local render target. + * + * @param sourceType The type of video source. + * @param targetFps The target frames per second to be set. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setLocalRenderTargetFps(VIDEO_SOURCE_TYPE sourceType, int targetFps) = 0; + /** + * Sets the target frames per second (FPS) for the remote render target. + * + * @param targetFps The target frames per second to be set for the remote render target. + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setRemoteRenderTargetFps(int targetFps) = 0; // The following APIs are either deprecated and going to deleted. /** @@ -7021,7 +7068,7 @@ class IRtcEngine : public agora::base::IEngineBase { - ERR_INVALID_ARGUMENT (2): The argument is invalid. - ERR_NOT_INITIALIZED (7): You have not initialized IRtcEngine when try to start screen capture. */ - virtual int startScreenCaptureByDisplayId(uint32_t displayId, const Rectangle& regionRect, + virtual int startScreenCaptureByDisplayId(int64_t displayId, const Rectangle& regionRect, const ScreenCaptureParameters& captureParams) = 0; #endif // __APPLE__ && TARGET_OS_MAC && !TARGET_OS_IPHONE @@ -7084,7 +7131,7 @@ class IRtcEngine : public agora::base::IEngineBase { * - ERR_INVALID_ARGUMENT (2): The argument is invalid. * - ERR_NOT_INITIALIZED (7): You have not initialized IRtcEngine when try to start screen capture. */ - virtual int startScreenCaptureByWindowId(view_t windowId, const Rectangle& regionRect, + virtual int startScreenCaptureByWindowId(int64_t windowId, const Rectangle& regionRect, const ScreenCaptureParameters& captureParams) = 0; /** @@ -7186,6 +7233,26 @@ class IRtcEngine : public agora::base::IEngineBase { * - < 0: Failure.. 
*/ virtual int queryCameraFocalLengthCapability(agora::rtc::FocalLengthInfo* focalLengthInfos, int& size) = 0; + +#if defined(__ANDROID__) + /** + * Sets screen sharing using the Android native class MediaProjection. + * + * When screen capture is stopped, the SDK will automatically release the MediaProjection internally. + * + * @param mediaProjection MediaProjection is an Android class that provides access to screen capture and recording capabilities. + * + * @note + * Additional MediaProjection is primarily used for specific scenarios, + * such as IOT custom devices or subprocess screen sharing. + * + * @return + * - 0: Success. + * - < 0: Failure. + * @technical preview + */ + virtual int setExternalMediaProjection(void* mediaProjection) = 0; +#endif #endif #if defined(_WIN32) || defined(__APPLE__) || defined(__ANDROID__) @@ -7346,6 +7413,40 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int stopRtmpStream(const char* url) = 0; virtual int stopLocalVideoTranscoder() = 0; + + /** + * Starts the local audio with a mixed audio stream. + * @param config Sets the mixed audio stream source settings. + * @return + * - 0: Success. + * - < 0: Failure. + * - #ERR_NOT_INITIALIZED (7): You have not initialized the RTC engine when publishing the + * stream. + */ + virtual int startLocalAudioMixer(const LocalAudioMixerConfiguration& config) = 0; + + /** + * Update the source stream settings for the mixed audio stream. + * @param config Update the source audio stream settings. See LocalAudioMixerConfiguration. + * @return + * - 0: Success. + * - < 0: Failure. + * - #ERR_NOT_INITIALIZED (7): You have not initialized the RTC engine when publishing the + stream. + */ + virtual int updateLocalAudioMixerConfiguration(const LocalAudioMixerConfiguration& config) = 0; + + /** + * Stops a mixed audio track. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - #ERR_NOT_INITIALIZED (7): You have not initialized the RTC engine when publishing the + * stream. 
+ */ + virtual int stopLocalAudioMixer() = 0; + /** * Starts video capture with a camera. * @@ -8106,6 +8207,32 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int takeSnapshot(uid_t uid, const char* filePath) = 0; + /** + * Takes a snapshot of a video stream. + * + * This method takes a snapshot of a video stream from the specified user, generates a JPG + * image, and saves it to the specified path. + * + * The method is asynchronous, and the SDK has not taken the snapshot when the method call + * returns. After a successful method call, the SDK triggers the `onSnapshotTaken` callback + * to report whether the snapshot is successfully taken, as well as the details for that + * snapshot. + * + * @note + * - Call this method after joining a channel. + * - This method takes a snapshot of the published video stream specified in `ChannelMediaOptions`. + * + * @param uid The user ID. Set uid as 0 if you want to take a snapshot of the local user's video. + * @param config The configuration for the take snapshot. See SnapshotConfig. + * + * Ensure that the path you specify exists and is writable. + * @return + * - 0 : Success. + * - < 0: Failure. + * - -4: Incorrect observation position. Modify the input observation position according to the requirements specified in SnapshotConfig. + */ + virtual int takeSnapshot(uid_t uid, const media::SnapshotConfig& config) = 0; + /** Enables the content inspect. @param enabled Whether to enable content inspect: - `true`: Yes. @@ -8332,6 +8459,17 @@ class IRtcEngine : public agora::base::IEngineBase { * @technical preview */ virtual int sendAudioMetadata(const char* metadata, size_t length) = 0; + + /** + * @brief Queries the HDR capability of the video module + * @param videoModule The video module. See VIDEO_MODULE_TYPE + * @param capability HDR capability of video module. 
See HDR_CAPABILITY + * @return + * - 0: success + * - <0: failure + * @technical preview + */ + virtual int queryHDRCapability(VIDEO_MODULE_TYPE videoModule, HDR_CAPABILITY& capability) = 0; }; // The following types are either deprecated or not implmented yet. diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/IAgoraRtcEngineEx.h b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/IAgoraRtcEngineEx.h index 099de84d..6a233f4e 100644 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/IAgoraRtcEngineEx.h +++ b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/IAgoraRtcEngineEx.h @@ -1127,6 +1127,55 @@ class IRtcEngineEx : public IRtcEngine { */ virtual int leaveChannelEx(const RtcConnection& connection, const LeaveChannelOptions& options) = 0; + /** + * Leaves a channel with the channel ID and user account. + * + * This method allows a user to leave the channel, for example, by hanging up or exiting a call. + * + * This method is an asynchronous call, which means that the result of this method returns even before + * the user has not actually left the channel. Once the user successfully leaves the channel, the + * SDK triggers the \ref IRtcEngineEventHandler::onLeaveChannel "onLeaveChannel" callback. + * + * @param channelId The channel name. The maximum length of this parameter is 64 bytes. Supported character scopes are: + * - All lowercase English letters: a to z. + * - All uppercase English letters: A to Z. + * - All numeric characters: 0 to 9. + * - The space character. + * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * @param userAccount The user account. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. 
Supported character scopes are: + * - All lowercase English letters: a to z. + * - All uppercase English letters: A to Z. + * - All numeric characters: 0 to 9. + * - The space character. + * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int leaveChannelWithUserAccountEx(const char* channelId, const char* userAccount) = 0; + + /** + * Leaves a channel with the channel ID and user account and sets the options for leaving. + * + * @param channelId The channel name. The maximum length of this parameter is 64 bytes. Supported character scopes are: + * - All lowercase English letters: a to z. + * - All uppercase English letters: A to Z. + * - All numeric characters: 0 to 9. + * - The space character. + * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * @param userAccount The user account. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported character scopes are: + * - All lowercase English letters: a to z. + * - All uppercase English letters: A to Z. + * - All numeric characters: 0 to 9. + * - The space character. + * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * @param options The options for leaving the channel. See #LeaveChannelOptions. + * @return int + * - 0: Success. + * - < 0: Failure. + */ + virtual int leaveChannelWithUserAccountEx(const char* channelId, const char* userAccount, const LeaveChannelOptions& options) = 0; + /** * Updates the channel media options after joining the channel. 
* @@ -1915,6 +1964,33 @@ class IRtcEngineEx : public IRtcEngine { */ virtual int takeSnapshotEx(const RtcConnection& connection, uid_t uid, const char* filePath) = 0; + /** + * Takes a snapshot of a video stream. + * + * This method takes a snapshot of a video stream from the specified user, generates a JPG + * image, and saves it to the specified path. + * + * The method is asynchronous, and the SDK has not taken the snapshot when the method call + * returns. After a successful method call, the SDK triggers the `onSnapshotTaken` callback + * to report whether the snapshot is successfully taken, as well as the details for that + * snapshot. + * + * @note + * - Call this method after joining a channel. + * - This method takes a snapshot of the published video stream specified in `ChannelMediaOptions`. + * + * @param connection The RtcConnection object. + * @param uid The user ID. Set uid as 0 if you want to take a snapshot of the local user's video. + * @param config The configuration for the take snapshot. See SnapshotConfig. + * + * Ensure that the path you specify exists and is writable. + * @return + * - 0 : Success. + * - < 0: Failure. + * - -4: Incorrect observation position. Modify the input observation position according to the requirements specified in SnapshotConfig. + */ + virtual int takeSnapshotEx(const RtcConnection& connection, uid_t uid, const media::SnapshotConfig& config) = 0; + /** Enables video screenshot and upload with the connection ID. @param enabled Whether to enable video screenshot and upload: - `true`: Yes. 
diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp.h b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp.h index 4f4c42ca..f6d0ce69 100644 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp.h +++ b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp.h @@ -6,9 +6,9 @@ */ #pragma once -#include "rte_cpp_error.h" // IWYU pragma: export -#include "rte_cpp_player.h" // IWYU pragma: export -#include "rte_cpp_rte.h" // IWYU pragma: export -#include "rte_cpp_canvas.h" // IWYU pragma: export -#include "rte_cpp_string.h" // IWYU pragma: export -#include "rte_cpp_callback_utils.h" // IWYU pragma: export +#include "rte_base/rte_cpp_error.h" // IWYU pragma: export +#include "rte_base/rte_cpp_player.h" // IWYU pragma: export +#include "rte_base/rte_cpp_rte.h" // IWYU pragma: export +#include "rte_base/rte_cpp_canvas.h" // IWYU pragma: export +#include "rte_base/rte_cpp_string.h" // IWYU pragma: export +#include "rte_base/rte_cpp_callback_utils.h" // IWYU pragma: export diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_callback_utils.h b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_callback_utils.h deleted file mode 100644 index 48d9d545..00000000 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_callback_utils.h +++ /dev/null @@ -1,132 +0,0 @@ -#pragma once -#include -#include "rte_cpp_error.h" -#include "internal/c/handle.h" - -namespace rte { - -template -class SingleUseCallback { - public: - - using CallbackType = std::function; - - SingleUseCallback(){}; - - void Store(T* self, CallbackType cb, void* cb_data){ - self_ = self; - cb_ = cb; - cb_data_ = cb_data; - } - - void Invoke(RteError* err){ - if(cb_ != nullptr){ - cb_(self_, cb_data_, err); - - self_ = 
nullptr; - cb_ = nullptr; - cb_data_ = nullptr; - } - } - - bool Invalid(){ - return cb_ == nullptr; - } - - CallbackType cb_; - void* cb_data_; - T* self_; -}; // class SingleUseCallback - -template -class CallbackContext { - public: - - using CallbackType = std::function; - using CallbackTypeWithCppError = std::function; - - CallbackContext(T* self, CallbackType cb, void* cb_data) - :self_(self), cb_(cb), cb_data_(cb_data) {} - - CallbackContext(T* self, CallbackTypeWithCppError cb, void* cb_data) - :self_(self), cb_with_cpp_error_(cb), cb_data_(cb_data) {} - - CallbackType cb_; - CallbackTypeWithCppError cb_with_cpp_error_; - void* cb_data_; - T* self_; -}; - -template -void CallbackFunc(FromeType* self, void* cb_data, RteError* err){ - auto *ctx = static_cast*>(cb_data); - - if(ctx->cb_with_cpp_error_ != nullptr){ - rte::Error cpp_err(err); - ctx->cb_with_cpp_error_( self != nullptr ? ctx->self_ : nullptr, ctx->cb_data_, &cpp_err); - } - - if(ctx->cb_ != nullptr){ - ctx->cb_(self != nullptr ? ctx->self_ : nullptr, ctx->cb_data_, err); - } - - delete ctx; -} - -template -class CallbackContextWithArgs { - public: - - using CallbackType = std::function; - using CallbackTypeWithCppError = std::function; - - CallbackContextWithArgs(T* self, CallbackType cb, void* cb_data) - :self_(self), cb_(cb), cb_data_(cb_data) {} - - CallbackContextWithArgs(T* self, CallbackTypeWithCppError cb, void* cb_data) - :self_(self), cb_with_cpp_error_(cb), cb_data_(cb_data) {} - - CallbackType cb_; - CallbackTypeWithCppError cb_with_cpp_error_; - void* cb_data_; - T* self_; -}; - -template -void CallbackFuncWithArgs(FromeType* self, Args... 
args, void* cb_data, RteError* err){ - auto *ctx = static_cast*>(cb_data); - - if(ctx->cb_with_cpp_error_ != nullptr){ - Error cpp_err(err); - ctx->cb_with_cpp_error_(ctx->self_, args..., ctx->cb_data_, &cpp_err); - } - - if(ctx->cb_ != nullptr){ - ctx->cb_(ctx->self_, args..., ctx->cb_data_, err); - } - delete ctx; -} - -template -class ObserverDestroyContext { - public: - - using ObserverDestroyer = std::function; - - ObserverDestroyContext(ObserverDestroyer destroyer, void* cb_data) - :destroyer_(destroyer), cb_data_(cb_data) {} - - ObserverDestroyer destroyer_; - void* cb_data_; -}; - -template -void ObserverDestroyProxy(FromeType* observer, void* cb_data){ - auto *ctx = static_cast*>(cb_data); - if(ctx->destroyer_ != nullptr){ - ctx->destroyer_(static_cast(observer->base_observer.me_in_target_lang), ctx->cb_data_); - } - delete ctx; -} - -} // namespace rte diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_canvas.h b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_canvas.h deleted file mode 100644 index b5635c46..00000000 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_canvas.h +++ /dev/null @@ -1,109 +0,0 @@ -#pragma once - -#include "internal/c/c_player.h" -#include "internal/c/handle.h" -#include "internal/c/track/canvas.h" - -#include "rte_cpp_error.h" -#include "rte_cpp_rte.h" -#include "rte_cpp_callback_utils.h" - - -namespace rte { - -using VideoRenderMode = ::RteVideoRenderMode; -using VideoMirrorMode = ::RteVideoMirrorMode; -using ViewConfig = ::RteViewConfig; -using View = ::RteView; -using Rect = ::RteRect; - -class CanvasInitialConfig { - public: - CanvasInitialConfig() {RteCanvasInitialConfigInit(&c_canvas_initial_config, nullptr);} - ~CanvasInitialConfig() {RteCanvasInitialConfigDeinit(&c_canvas_initial_config, nullptr);} - - private: - friend class Canvas; - ::RteCanvasInitialConfig 
c_canvas_initial_config; -}; - - -class CanvasConfig { - public: - CanvasConfig() {RteCanvasConfigInit(&c_canvas_config, nullptr);} - ~CanvasConfig() {RteCanvasConfigDeinit(&c_canvas_config, nullptr);} - - void SetRenderMode(VideoRenderMode mode, Error *err) { - RteCanvasConfigSetVideoRenderMode(&c_canvas_config, mode, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - VideoRenderMode GetRenderMode(Error *err) { - VideoRenderMode mode; - RteCanvasConfigGetVideoRenderMode(&c_canvas_config, &mode, err != nullptr ? err->get_underlying_impl() : nullptr); - return mode; - } - - void SetMirrorMode(VideoMirrorMode mode, Error *err) { - RteCanvasConfigSetVideoMirrorMode(&c_canvas_config, mode, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - VideoMirrorMode GetMirrorMode(Error *err) { - VideoMirrorMode mode; - RteCanvasConfigGetVideoMirrorMode(&c_canvas_config, &mode, err != nullptr ? err->get_underlying_impl() : nullptr); - return mode; - } - - void SetCropArea(RteRect &crop_area, Error *err) { - RteCanvasConfigSetCropArea(&c_canvas_config, crop_area, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - RteRect GetCropArea(Error *err) { - RteRect crop_area; - RteCanvasConfigGetCropArea(&c_canvas_config, &crop_area, err != nullptr ? err->get_underlying_impl() : nullptr); - return crop_area; - } - - private: - friend class Canvas; - ::RteCanvasConfig c_canvas_config; -}; - -class Canvas { - public: - Canvas(Rte *rte, CanvasInitialConfig *initial_config) { - c_canvas = ::RteCanvasCreate(&rte->c_rte, &initial_config->c_canvas_initial_config, nullptr); - }; - ~Canvas() { RteCanvasDestroy(&c_canvas, nullptr); }; - - void Destroy(Error *err = nullptr) { - RteCanvasDestroy(&c_canvas, - err != nullptr ? 
err->get_underlying_impl() : nullptr); - } - - Canvas(const Canvas& other) = delete; - Canvas(Canvas&& other) = delete; - Canvas& operator=(const Canvas& other) = delete; - Canvas& operator=(Canvas&& other) = delete; - - void GetConfigs(CanvasConfig *config, Error *err) { - RteCanvasGetConfigs(&c_canvas, &config->c_canvas_config, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - void SetConfigs(CanvasConfig *config, std::function cb, void *cb_data) { - CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - RteCanvasSetConfigs(&c_canvas, &config->c_canvas_config, &CallbackFunc<::RteCanvas, Canvas>, callbackCtx); - } - - void AddView(View *view, ViewConfig *config, std::function cb, void *cb_data) { - CallbackContextWithArgs *ctx = new CallbackContextWithArgs(this, cb, cb_data); - RteCanvasAddView(&c_canvas, view, config, &CallbackFuncWithArgs<::RteCanvas, Canvas, View*>, ctx); - } - - private: - - friend class Player; - - ::RteCanvas c_canvas; -}; - -} // namespace rte \ No newline at end of file diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_error.h b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_error.h deleted file mode 100644 index 4016f9c7..00000000 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_error.h +++ /dev/null @@ -1,67 +0,0 @@ -/** - * - * Agora Real Time Engagement - * Copyright (c) 2024 Agora IO. All rights reserved. 
- * - */ -#pragma once - -#include -#include - -#include "internal/c/c_error.h" -#include "internal/c/utils/string.h" - -namespace rte { - - -class Rte; -class Player; -class Canvas; -class Config; -class PlayerConfig; -class CanvasConfig; - -class Error { - public: - - using ErrorCode = ::RteErrorCode; - - Error() : c_error(RteErrorCreate()) {} - explicit Error(::RteError *error) : c_error(error), c_error_owned(false) {} - - ~Error() { - if (c_error != nullptr && c_error_owned) { - RteErrorDestroy(c_error); - } - } - - // @{ - Error(Error &other) = delete; - Error(Error &&other) = delete; - Error &operator=(const Error &cmd) = delete; - Error &operator=(Error &&cmd) = delete; - // @} - - void Set(ErrorCode code, const char *message) { - RteErrorSet(c_error, code, "%s", message); - } - - ErrorCode Code() const { return c_error != nullptr ? c_error->code : kRteErrorDefault; } - - const char *Message() const { - if(c_error != nullptr && c_error->message != nullptr){ - return RteStringCStr(c_error->message, nullptr); - } - return ""; - } - - ::RteError *get_underlying_impl() const { return c_error; } - - private: - - ::RteError *c_error; - bool c_error_owned = true; -}; - -} // namespace rte diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_player.h b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_player.h deleted file mode 100644 index 1c3c7138..00000000 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_player.h +++ /dev/null @@ -1,443 +0,0 @@ -/** - * - * Agora Real Time Engagement - * Copyright (c) 2024 Agora IO. All rights reserved. 
- * - */ -#pragma once -#include - -#include "internal/c/c_rte.h" -#include "internal/c/c_player.h" - -#include "rte_cpp_error.h" -#include "rte_cpp_callback_utils.h" -#include "rte_cpp_canvas.h" -#include "rte_cpp_string.h" -#include "rte_cpp_stream.h" - -struct RtePlayerObserver; - -namespace rte { - - -using PlayerState = ::RtePlayerState; -using PlayerEvent = ::RtePlayerEvent; -using PlayerMetadataType = ::RtePlayerMetadataType; -using PlayerInfo = ::RtePlayerInfo; -using PlayerStats = ::RtePlayerStats; -using PlayerCustomSourceProvider = ::RtePlayerCustomSourceProvider; - -class PlayerInitialConfig {}; - -static void onStateChanged(::RtePlayerObserver *observer, - RtePlayerState old_state, RtePlayerState new_state, - RteError *err); - -static void onPositionChanged(::RtePlayerObserver *observer, uint64_t curr_time, - uint64_t utc_time); - -static void onResolutionChanged(::RtePlayerObserver *observer, int width, int height); - -static void onEvent(::RtePlayerObserver *observer, RtePlayerEvent event); - -static void onMetadata(::RtePlayerObserver *observer, RtePlayerMetadataType type, - const uint8_t *data, size_t length); - -static void onPlayerInfoUpdated(::RtePlayerObserver *observer, const RtePlayerInfo *info); - -static void onAudioVolumeIndication(::RtePlayerObserver *observer, int32_t volume); - - -class PlayerObserver { - public: - PlayerObserver() : c_rte_observer(::RtePlayerObserverCreate(nullptr)) { - - c_rte_observer->base_observer.me_in_target_lang = this; - - c_rte_observer->on_state_changed = rte::onStateChanged; - c_rte_observer->on_position_changed = rte::onPositionChanged; - c_rte_observer->on_resolution_changed = rte::onResolutionChanged; - c_rte_observer->on_event = rte::onEvent; - c_rte_observer->on_metadata = rte::onMetadata; - c_rte_observer->on_player_info_updated = rte::onPlayerInfoUpdated; - c_rte_observer->on_audio_volume_indication = rte::onAudioVolumeIndication; - } - virtual ~PlayerObserver(){ 
RtePlayerObserverDestroy(c_rte_observer, nullptr); } - - // @{ - PlayerObserver(PlayerObserver &other) = delete; - PlayerObserver(PlayerObserver &&other) = delete; - PlayerObserver &operator=(const PlayerObserver &cmd) = delete; - PlayerObserver &operator=(PlayerObserver &&cmd) = delete; - // @} - - virtual void onStateChanged(PlayerState old_state, PlayerState new_state, - rte::Error *err) = 0; - virtual void onPositionChanged(uint64_t curr_time, - uint64_t utc_time) = 0; - virtual void onResolutionChanged(int width, int height) = 0; - virtual void onEvent(PlayerEvent event) = 0; - virtual void onMetadata(PlayerMetadataType type, - const uint8_t *data, size_t length) = 0; - - virtual void onPlayerInfoUpdated(const PlayerInfo *info) = 0; - - virtual void onAudioVolumeIndication(int32_t volume) = 0; - - private: - friend class Player; - - ::RtePlayerObserver *c_rte_observer; -}; - -void onStateChanged(::RtePlayerObserver *observer, - RtePlayerState old_state, RtePlayerState new_state, - RteError *err){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - Error cpp_err(err); - player_observer->onStateChanged(old_state, new_state, &cpp_err); - } -} -void onPositionChanged(::RtePlayerObserver *observer, uint64_t curr_time, - uint64_t utc_time){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onPositionChanged(curr_time, utc_time); - } -} - -void onResolutionChanged(::RtePlayerObserver *observer, int width, int height){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onResolutionChanged(width, height); - } -} - -void onEvent(::RtePlayerObserver *observer, RtePlayerEvent event){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onEvent(event); - } - 
-} - -void onMetadata(::RtePlayerObserver *observer, RtePlayerMetadataType type, - const uint8_t *data, size_t length){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onMetadata(type, data, length); - } -} - -void onPlayerInfoUpdated(::RtePlayerObserver *observer, const RtePlayerInfo *info){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onPlayerInfoUpdated(info); - } -} - -void onAudioVolumeIndication(::RtePlayerObserver *observer, int32_t volume){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onAudioVolumeIndication(volume); - } -} - -class PlayerConfig { - public: - PlayerConfig() { RtePlayerConfigInit(&c_rte_player_config, nullptr); } - ~PlayerConfig() { RtePlayerConfigDeinit(&c_rte_player_config, nullptr); } - - // @{ - PlayerConfig(PlayerConfig &other) = delete; - PlayerConfig(PlayerConfig &&other) = delete; - PlayerConfig &operator=(const PlayerConfig &cmd) = delete; - PlayerConfig &operator=(PlayerConfig &&cmd) = delete; - // @} - - void SetAutoPlay(bool auto_play, Error *err) { - RtePlayerConfigSetAutoPlay(&c_rte_player_config, auto_play, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - bool GetAutoPlay(Error *err) { - bool auto_play; - RtePlayerConfigGetAutoPlay(&c_rte_player_config, &auto_play, - err != nullptr ? err->get_underlying_impl() : nullptr); - return auto_play; - } - - void SetPlaybackSpeed(int32_t speed, Error *err) { - RtePlayerConfigSetPlaybackSpeed(&c_rte_player_config, speed, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetPlaybackSpeed(Error *err) { - int32_t speed; - RtePlayerConfigGetPlaybackSpeed(&c_rte_player_config, &speed, - err != nullptr ? 
err->get_underlying_impl() : nullptr); - return speed; - } - - void SetPlayoutAudioTrackIdx(int idx, Error *err) { - RtePlayerConfigSetPlayoutAudioTrackIdx(&c_rte_player_config, idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetPlayoutAudioTrackIdx(Error *err) { - int32_t idx; - RtePlayerConfigGetPlayoutAudioTrackIdx(&c_rte_player_config, &idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - return idx; - } - - void SetPublishAudioTrackIdx(int32_t idx, Error *err) { - RtePlayerConfigSetPublishAudioTrackIdx(&c_rte_player_config, idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetPublishAudioTrackIdx(Error *err) { - int32_t idx; - RtePlayerConfigGetPublishAudioTrackIdx(&c_rte_player_config, &idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - return idx; - } - - void SetAudioTrackIdx(int32_t idx, Error *err) { - RtePlayerConfigSetAudioTrackIdx(&c_rte_player_config, idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetAudioTrackIdx(Error *err) { - int32_t idx; - RtePlayerConfigGetAudioTrackIdx(&c_rte_player_config, &idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - return idx; - } - - void SetSubtitleTrackIdx(int32_t idx, Error *err) { - RtePlayerConfigSetSubtitleTrackIdx(&c_rte_player_config, idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetSubtitleTrackIdx(Error *err) { - int32_t idx; - RtePlayerConfigGetSubtitleTrackIdx(&c_rte_player_config, &idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - return idx; - } - - void SetExternalSubtitleTrackIdx(int32_t idx, Error *err) { - RtePlayerConfigSetExternalSubtitleTrackIdx(&c_rte_player_config, idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetExternalSubtitleTrackIdx(Error *err) { - int32_t idx; - RtePlayerConfigGetExternalSubtitleTrackIdx(&c_rte_player_config, &idx, - err != nullptr ? 
err->get_underlying_impl() : nullptr); - return idx; - } - - void SetAudioPitch(int32_t audio_pitch, Error *err) { - RtePlayerConfigSetAudioPitch(&c_rte_player_config, audio_pitch, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetAudioPitch(Error *err) { - int32_t audio_pitch; - RtePlayerConfigGetAudioPitch(&c_rte_player_config, &audio_pitch, - err != nullptr ? err->get_underlying_impl() : nullptr); - return audio_pitch; - } - - void SetPlayoutVolume(int32_t volume, Error *err) { - RtePlayerConfigSetPlayoutVolume(&c_rte_player_config, volume, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetPlayoutVolume(Error *err) { - int32_t volume; - RtePlayerConfigGetPlayoutVolume(&c_rte_player_config, &volume, - err != nullptr ? err->get_underlying_impl() : nullptr); - return volume; - } - - void SetAudioPlaybackDelay(int32_t delay, Error *err) { - RtePlayerConfigSetAudioPlaybackDelay(&c_rte_player_config, delay, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetAudioPlaybackDelay(Error *err) { - int32_t delay; - RtePlayerConfigGetAudioPlaybackDelay(&c_rte_player_config, &delay, - err != nullptr ? err->get_underlying_impl() : nullptr); - return delay; - } - - void SetAudioDualMonoMode(RteAudioDualMonoMode mode, Error *err) { - RtePlayerConfigSetAudioDualMonoMode(&c_rte_player_config, mode, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - RteAudioDualMonoMode GetAudioDualMonoMode(Error *err) { - RteAudioDualMonoMode mode; - RtePlayerConfigGetAudioDualMonoMode(&c_rte_player_config, &mode, - err != nullptr ? err->get_underlying_impl() : nullptr); - return mode; - } - - void SetPublishVolume(int32_t volume, Error *err) { - RtePlayerConfigSetPublishVolume(&c_rte_player_config, volume, - err != nullptr ? 
err->get_underlying_impl() : nullptr); - } - - int32_t GetPublishVolume(Error *err) { - int32_t volume; - RtePlayerConfigGetPublishVolume(&c_rte_player_config, &volume, - err != nullptr ? err->get_underlying_impl() : nullptr); - return volume; - } - - void SetLoopCount(int32_t count, Error *err) { - RtePlayerConfigSetLoopCount(&c_rte_player_config, count, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetLoopCount(Error *err) { - int32_t count; - RtePlayerConfigGetLoopCount(&c_rte_player_config, &count, - err != nullptr ? err->get_underlying_impl() : nullptr); - return count; - } - - void SetJsonParameter(const char *json_parameter, Error *err) { - String str(json_parameter); - RtePlayerConfigSetJsonParameter(&c_rte_player_config, str.get_underlying_impl(), - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - const char *GetJsonParameter(Error *err) { - String str; - RtePlayerConfigGetJsonParameter(&c_rte_player_config, str.get_underlying_impl(), - err != nullptr ? err->get_underlying_impl() : nullptr); - return str.Cstr(); - } - - private: - ::RtePlayerConfig* get_underlying_impl() { return &c_rte_player_config; } - - private: - friend class Player; - - ::RtePlayerConfig c_rte_player_config; -}; - - -class Player { - public: - explicit Player(Rte *self, PlayerInitialConfig *config = nullptr) - : c_rte(::RtePlayerCreate(&self->c_rte, nullptr, nullptr)) {}; - ~Player() { RtePlayerDestroy(&c_rte, nullptr); }; - - void Destroy(Error *err = nullptr){ - RtePlayerDestroy(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); -}; - - Player(Player &other) = default; - Player(Player &&other) = default; - - // @{ - Player &operator=(const Player &cmd) = delete; - Player &operator=(Player &&cmd) = delete; - // @} - - void PreloadWithUrl(const char* url, Error* err) { - RtePlayerPreloadWithUrl(&c_rte, url, err != nullptr ? 
err->get_underlying_impl() : nullptr); - }; - - void OpenWithUrl(const char* url, uint64_t start_time, std::function cb, - void* cb_data) { - CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - RtePlayerOpenWithUrl(&c_rte, url, start_time, &CallbackFunc<::RtePlayer, Player>, callbackCtx); - }; - - void OpenWithCustomSourceProvider(PlayerCustomSourceProvider* provider, uint64_t start_time, - std::function cb, - void* cb_data) { - CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - RtePlayerOpenWithCustomSourceProvider(&c_rte, provider, start_time, &CallbackFunc<::RtePlayer, Player>, callbackCtx); - }; - - - void OpenWithStream(Stream* stream, std::function cb, - void* cb_data) { - CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - RtePlayerOpenWithStream(&c_rte, stream != nullptr ? &stream->c_rte_stream : nullptr, &CallbackFunc<::RtePlayer, Player>, callbackCtx); - }; - - void GetStats(std::function cb, void *cb_data){ - CallbackContextWithArgs *ctx = new CallbackContextWithArgs(this, cb, cb_data); - RtePlayerGetStats(&c_rte, &CallbackFuncWithArgs<::RtePlayer, Player, rte::PlayerStats*>, ctx); - } - - void SetCanvas(Canvas *canvas, Error *err) { - RtePlayerSetCanvas(&c_rte, canvas != nullptr ? &canvas->c_canvas : nullptr, err != nullptr ? err->get_underlying_impl() : nullptr); - }; - - void Play(Error* err) { - RtePlayerPlay(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); - } - void Stop(Error* err) { - RtePlayerStop(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); - } - void Pause(Error* err) { - RtePlayerPause(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); - } - void Seek(uint64_t new_time, Error* err) { - RtePlayerSeek(&c_rte, new_time, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - void MuteAudio(bool mute, Error* err) { - RtePlayerMuteAudio(&c_rte, mute, err != nullptr ? 
err->get_underlying_impl() : nullptr); - } - - void MuteVideo(bool mute, Error* err) { - RtePlayerMuteVideo(&c_rte, mute, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - uint64_t GetPosition(Error *err){ - return RtePlayerGetPosition(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); - } - void GetInfo(PlayerInfo *info, Error *err){ - RtePlayerGetInfo(&c_rte, info, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - void GetConfigs(PlayerConfig* config, Error* err) { - RtePlayerGetConfigs(&c_rte, config->get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - } - - void SetConfigs(PlayerConfig* config, std::function cb, - void* cb_data) { - - rte::CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - RtePlayerSetConfigs(&c_rte, config->get_underlying_impl(), &CallbackFunc<::RtePlayer, Player>, callbackCtx); - } - - bool RegisterObserver(PlayerObserver *observer, Error *err) { - return RtePlayerRegisterObserver( - &c_rte, observer->c_rte_observer, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - void UnregisterObserver(PlayerObserver *observer, Error *err){ - RtePlayerUnregisterObserver(&c_rte, observer->c_rte_observer, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - private: - ::RtePlayer c_rte; -}; - -} // namespace rte diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_rte.h b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_rte.h deleted file mode 100644 index 95b32602..00000000 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_rte.h +++ /dev/null @@ -1,218 +0,0 @@ -/** - * - * Agora Real Time Engagement - * Copyright (c) 2024 Agora IO. All rights reserved. 
- * - */ -#pragma once - -#include "internal/c/c_rte.h" -#include "internal/c/bridge.h" - -#include "rte_cpp_error.h" -#include "rte_cpp_callback_utils.h" -#include "rte_cpp_string.h" - - -struct RteObserver; -struct RteInitialConfig; -struct RteConfig; - -namespace rte { - -class Player; - -class RteInitialConfig { - ::RteInitialConfig *c_rte_init_cfg; -}; - -class RteObserver { - public: - RteObserver(): c_rte_observer(::RteObserverCreate(nullptr)) { - c_rte_observer->base_observer.me_in_target_lang = this;} - ~RteObserver() { RteObserverDestroy(c_rte_observer, nullptr); } - - // @{ - RteObserver(RteObserver &other) = delete; - RteObserver(RteObserver &&other) = delete; - RteObserver &operator=(const RteObserver &cmd) = delete; - RteObserver &operator=(RteObserver &&cmd) = delete; - // @} - - private: - friend class Rte; - - ::RteObserver *c_rte_observer; -}; - -class Config { - public: - Config() {RteConfigInit(&c_rte_config, nullptr);} - ~Config() {RteConfigDeinit(&c_rte_config, nullptr);} - - // @{ - Config(Config &other) = delete; - Config(Config &&other) = delete; - Config &operator=(const Config &cmd) = delete; - Config &operator=(Config &&cmd) = delete; - // @} - - void SetAppId(const char *app_id, Error *err){ - String str(app_id); - RteConfigSetAppId(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - } - - const char* GetAppId(Error *err){ - String str; - RteConfigGetAppId(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - return str.Cstr(); - } - - void SetLogFolder(const char *log_folder, Error *err){ - String str(log_folder); - RteConfigSetLogFolder(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - } - - const char* GetLogFolder(Error *err){ - String str; - RteConfigGetLogFolder(&c_rte_config, str.get_underlying_impl(), err != nullptr ? 
err->get_underlying_impl() : nullptr); - return str.Cstr(); - } - - void SetLogFileSize(size_t log_file_size, Error *err){ - RteConfigSetLogFileSize(&c_rte_config, log_file_size, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - size_t GetLogFileSize(Error *err){ - size_t log_file_size; - RteConfigGetLogFileSize(&c_rte_config, &log_file_size, err != nullptr ? err->get_underlying_impl() : nullptr); - return log_file_size; - } - - void SetAreaCode(int32_t area_code, Error *err){ - RteConfigSetAreaCode(&c_rte_config, area_code, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetAreaCode(Error *err){ - int32_t area_code; - RteConfigGetAreaCode(&c_rte_config, &area_code, err != nullptr ? err->get_underlying_impl() : nullptr); - return area_code; - } - - void SetCloudProxy(const char *cloud_proxy, Error *err){ - String str(cloud_proxy); - RteConfigSetCloudProxy(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - } - - const char* GetCloudProxy(Error *err){ - String str; - RteConfigGetCloudProxy(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - return str.Cstr(); - } - - void SetJsonParameter(const char *json_parameter, Error *err){ - String str(json_parameter); - RteConfigSetJsonParameter(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - } - - const char* GetJsonParameter(Error *err){ - String str; - RteConfigGetJsonParameter(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - return str.Cstr(); - } - - private: - ::RteConfig* get_underlying_impl() { return &c_rte_config; } - - private: - friend class Rte; - ::RteConfig c_rte_config; -}; - -class Rte { - public: - - static Rte GetFromBridge(Error* err = nullptr){ - Rte rte( RteGetFromBridge(err != nullptr ? 
err->get_underlying_impl() : nullptr)); - return rte; - } - - explicit Rte(::RteInitialConfig *config = nullptr): c_rte(::RteCreate(config, nullptr)) {} - ~Rte()=default; - - void Destroy(Error *err = nullptr) { - RteDestroy(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - bool RegisterObserver(RteObserver *observer, Error *err){ - return RteRegisterObserver(&c_rte, observer->c_rte_observer, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - bool UnregisterObserver(RteObserver *observer, Error *err){ - return RteUnregisterObserver(&c_rte, observer->c_rte_observer, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - bool InitMediaEngine(std::function cb, void *cb_data, Error *err = nullptr){ - auto* ctx = new CallbackContext(this, cb, cb_data); - return RteInitMediaEngine(&c_rte, &CallbackFunc<::Rte, Rte>, ctx, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - Rte(Rte &other) = default; - Rte(Rte &&other) = default; - - // @{ - Rte &operator=(const Rte &cmd) = delete; - Rte &operator=(Rte &&cmd) = delete; - // @} - - void GetConfigs(Config *config, Error *err){ - RteGetConfigs(&c_rte, config != nullptr ? config->get_underlying_impl(): nullptr, err != nullptr ? err->get_underlying_impl() : nullptr); - } - bool SetConfigs(Config *config, std::function cb, void *cb_data, Error *err = nullptr){ - CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - return RteSetConfigs(&c_rte, config != nullptr ? config->get_underlying_impl(): nullptr, &CallbackFunc<::Rte, Rte>, callbackCtx, err != nullptr ? 
err->get_underlying_impl() : nullptr); - } - - private: - - explicit Rte(::Rte other) { c_rte = other; } - - private: - friend class Player; - friend class Canvas; - - ::Rte c_rte; - -// struct RteInitMediaEngineCtx { -// RteInitMediaEngineCtx(InitMediaEngineCb cb, void *cb_data) -// : cb(cb), cb_data(cb_data) {} - -// ~RteInitMediaEngineCtx() = default; - -// // @{ -// RteInitMediaEngineCtx(RteInitMediaEngineCtx &other) = delete; -// RteInitMediaEngineCtx(RteInitMediaEngineCtx &&other) = delete; -// RteInitMediaEngineCtx &operator=(const RteInitMediaEngineCtx &cmd) = delete; -// RteInitMediaEngineCtx &operator=(RteInitMediaEngineCtx &&cmd) = delete; -// // @} - -// InitMediaEngineCb cb; -// void *cb_data; -// }; - -// static void RteInitMediaEngineCtxProxy(::Rte *self, void *cb_data, -// ::RteError *err){ -// auto *ctx = static_cast(cb_data); - -// Rte rte; -// rte.c_rte = *self; - -// Error cpp_err(err); -// ctx->cb(&rte, ctx->cb_data, &cpp_err); - -// delete ctx; -// } -}; - -} // namespace rte diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_stream.h b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_stream.h deleted file mode 100644 index bc3df3fd..00000000 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_stream.h +++ /dev/null @@ -1,25 +0,0 @@ -/** - * - * Agora Real Time Engagement - * Copyright (c) 2024 Agora IO. All rights reserved. 
- * - */ -#pragma once -#include "internal/c/stream/stream.h" - -namespace rte { - -class Stream { - - public: - Stream() = default; - ~Stream() = default; - - private: - friend class Rte; - friend class Player; - - ::RteStream c_rte_stream; -}; - -} // namespace rte \ No newline at end of file diff --git a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_string.h b/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_string.h deleted file mode 100644 index be4642f9..00000000 --- a/Agora-Unreal-SDK-CPP/AgoraPlugin/Source/AgoraPlugin/Public/AgoraCppPlugin/include/rte_cpp_string.h +++ /dev/null @@ -1,61 +0,0 @@ -/** - * - * Agora Real Time Engagement - * Copyright (c) 2024 Agora IO. All rights reserved. - * - */ -#pragma once -#include "internal/c/utils/string.h" - -namespace rte { - -class Config; -class PlayerConfig; - -class String { - public: - - String(){ - c_rte_string = RteStringCreate(nullptr); - RteStringInit(c_rte_string, nullptr); - } - - String(const char* str) { - c_rte_string = RteStringCreate(nullptr); - RteStringInit(c_rte_string, nullptr); - if(nullptr != str){ - RteStringInitWithCStr(c_rte_string, str, nullptr); - } - } - - ~String() { - RteStringDeinit(c_rte_string, nullptr); - RteStringDestroy(c_rte_string, nullptr); - } - - void Format(const char* fmt, ...) { - va_list args; - va_start(args, fmt); - RteStringInitWithValue(c_rte_string, nullptr, fmt, args); - va_end(args); - } - - void Copy(const String &other) { - RteStringCopy(c_rte_string, other.c_rte_string, nullptr); - } - - const char* Cstr() const { - return RteStringCStr(c_rte_string, nullptr); - } - - friend class Config; - friend class PlayerConfig; - - private: - ::RteString* get_underlying_impl() const { return c_rte_string; } - - private: - ::RteString *c_rte_string; -}; - -} // namespace rte