diff --git a/plugin.xml b/plugin.xml index c42209f..17e1a61 100644 --- a/plugin.xml +++ b/plugin.xml @@ -20,128 +20,60 @@ - + + - - - - - - - - - - - - - - - - - - - + + - + + - - - - - - - - - - - - - - - - - - + - - - - - - - - - - - - - - - - - - - - - - - - - + + - - + + - - - - - + - - - + + + + + + + + + - - - - - - - - - - - - - - - - - + - + + @@ -158,8 +90,8 @@ - -用户没有允许需要的权限,加入通话失败 + + 用户没有允许需要的权限,加入通话失败 基础功能 语音通话 双人/多人语音通话、包含静音/免提等功能 @@ -211,7 +143,7 @@ - + @@ -258,7 +190,7 @@ - + diff --git a/src/android/java/com/tencent/trtc/CordovaEventKit.java b/src/android/java/com/tencent/trtc/CordovaEventKit.java index 8af6b43..6ff1c90 100644 --- a/src/android/java/com/tencent/trtc/CordovaEventKit.java +++ b/src/android/java/com/tencent/trtc/CordovaEventKit.java @@ -17,6 +17,7 @@ public class CordovaEventKit { } public static void init(CordovaPlugin plugin){ + Log.d(TAG, "init"); if(CordovaEventKit.kit == null){ CordovaEventKit.kit = new CordovaEventKit(plugin); } diff --git a/src/android/java/com/tencent/trtc/CustomVideoView.java b/src/android/java/com/tencent/trtc/CustomVideoView.java index 286f6e5..0a62564 100644 --- a/src/android/java/com/tencent/trtc/CustomVideoView.java +++ b/src/android/java/com/tencent/trtc/CustomVideoView.java @@ -69,7 +69,7 @@ public class CustomVideoView extends RelativeLayout { Log.d(TAG,"TRTC - changeUser: main?:"+mainView+",alwaysHide?:"+alwaysHide+",user:" + (userInfo == null ? null : userInfo.getPersonid())); this.setBackgroundColor(Color.TRANSPARENT); if(userInfo == null){ - setVisibility(View.GONE);; + setVisibility(View.GONE); }else { setVisibility(alwaysHide?INVISIBLE:VISIBLE); } @@ -88,7 +88,9 @@ public class CustomVideoView extends RelativeLayout { mTRTCCloud.setRemoteRenderParams(this.userInfo.getPersonid(),mainView ? 
TRTCCloudDef.TRTC_VIDEO_STREAM_TYPE_BIG : TRTCCloudDef.TRTC_VIDEO_STREAM_TYPE_SUB,params); try { - Thread.sleep(1000); + if(this.userInfo.isShareUser()){ + Thread.sleep(1000); + } } catch (InterruptedException e) { e.printStackTrace(); } @@ -133,12 +135,14 @@ public class CustomVideoView extends RelativeLayout { } public CustomVideoView setMainView(boolean mainView) { + Log.d(TAG,"TRTC - setMainView mainView:"+mainView); if(this.mTRTCCloud == null){ this.mTRTCCloud = TRTCCloud.sharedInstance(mContext); } this.mainView = mainView; if(!mainView){ Events.addListener("subview.always.hide",(extra)->{ + Log.d(TAG,"TRTC - event listener (subview.always.hide):"+extra); this.alwaysHide = extra.getBoolean("alwaysHide",false); if(this.alwaysHide){ this.setVisibility(INVISIBLE); @@ -161,6 +165,7 @@ public class CustomVideoView extends RelativeLayout { }); } Events.addListener("userinfo.update",(extra) -> { + Log.d(TAG,"TRTC - event listener (userinfo.update):"+extra + ",userinfo:"+this.userInfo); if(this.userInfo !=null && !this.userInfo.isLocal() && extra.getString("userId").equals(this.userInfo.getPersonid())){ this.userInfo.setDisplayName(extra.getString("displayName")); this.titleView.setText(this.userInfo.getDisplayName() + (this.userInfo.isShareUser() ? 
"的屏幕分享": "")); diff --git a/src/android/java/com/tencent/trtc/videocall/UserInfo.java b/src/android/java/com/tencent/trtc/videocall/UserInfo.java index 9527f43..8149d07 100644 --- a/src/android/java/com/tencent/trtc/videocall/UserInfo.java +++ b/src/android/java/com/tencent/trtc/videocall/UserInfo.java @@ -68,4 +68,14 @@ public class UserInfo { } return false; } + + @Override + public String toString() { + return "UserInfo{" + + "personid='" + personid + '\'' + + ", displayName='" + displayName + '\'' + + ", local=" + local + + ", frontCamera=" + frontCamera + + '}'; + } } diff --git a/src/android/java/com/tencent/trtc/videocall/VideoCallingActivity.java b/src/android/java/com/tencent/trtc/videocall/VideoCallingActivity.java index a598278..380e32d 100644 --- a/src/android/java/com/tencent/trtc/videocall/VideoCallingActivity.java +++ b/src/android/java/com/tencent/trtc/videocall/VideoCallingActivity.java @@ -347,6 +347,7 @@ public class VideoCallingActivity extends TRTCBaseActivity implements View.OnCli @Override public void onUserVideoAvailable(String userId, boolean available) { + super.onUserVideoAvailable(userId, available); Log.d(TAG, "onUserVideoAvailable userId " + userId + ", mUserCount " + mUserCount + ",available " + available); UserInfo info = new UserInfo().setPersonid(userId); int index = mUserList.indexOf(info); diff --git a/src/ios/BoringSSL.xcframework/Info.plist b/src/ios/BoringSSL.xcframework/Info.plist new file mode 100644 index 0000000..23fa145 --- /dev/null +++ b/src/ios/BoringSSL.xcframework/Info.plist @@ -0,0 +1,26 @@ + + + + + AvailableLibraries + + + LibraryIdentifier + ios-arm64_armv7 + LibraryPath + BoringSSL.framework + SupportedArchitectures + + arm64 + armv7 + + SupportedPlatform + ios + + + CFBundlePackageType + XFWK + XCFrameworkFormatVersion + 1.0 + + diff --git a/src/ios/BoringSSL.xcframework/ios-arm64_armv7/BoringSSL.framework/BoringSSL b/src/ios/BoringSSL.xcframework/ios-arm64_armv7/BoringSSL.framework/BoringSSL new file mode 
100755 index 0000000..10b2d12 Binary files /dev/null and b/src/ios/BoringSSL.xcframework/ios-arm64_armv7/BoringSSL.framework/BoringSSL differ diff --git a/src/ios/BoringSSL.xcframework/ios-arm64_armv7/BoringSSL.framework/Headers/BoringSSL.h b/src/ios/BoringSSL.xcframework/ios-arm64_armv7/BoringSSL.framework/Headers/BoringSSL.h new file mode 100644 index 0000000..5174846 --- /dev/null +++ b/src/ios/BoringSSL.xcframework/ios-arm64_armv7/BoringSSL.framework/Headers/BoringSSL.h @@ -0,0 +1,17 @@ +// +// BoringSSL.h +// BoringSSL +// +// + +#import + +//! Project version number for BoringSSL. +FOUNDATION_EXPORT double BoringSSL_VersionNumber; + +//! Project version string for BoringSSL. +FOUNDATION_EXPORT const unsigned char BoringSSL_VersionString[]; + +// In this header, you should import all the public headers of your framework using statements like #import + + diff --git a/src/ios/BoringSSL.xcframework/ios-arm64_armv7/BoringSSL.framework/Headers/boringssl_prefix.pch b/src/ios/BoringSSL.xcframework/ios-arm64_armv7/BoringSSL.framework/Headers/boringssl_prefix.pch new file mode 100644 index 0000000..cb53928 --- /dev/null +++ b/src/ios/BoringSSL.xcframework/ios-arm64_armv7/BoringSSL.framework/Headers/boringssl_prefix.pch @@ -0,0 +1,2761 @@ +#define ASN1_INTEGER_dup qcloudssl_ASN1_INTEGER_dup +#define ASN1_PRINTABLESTRING_free qcloudssl_ASN1_PRINTABLESTRING_free +#define ECDSA_SIG_to_bytes qcloudssl_ECDSA_SIG_to_bytes +#define EVP_PKEY_derive qcloudssl_EVP_PKEY_derive +#define SSL_use_PrivateKey qcloudssl_SSL_use_PrivateKey +#define PEM_dek_info qcloudssl_PEM_dek_info +#define ed25519_asn1_meth qcloudssl_ed25519_asn1_meth +#define d2i_ECDSA_SIG qcloudssl_d2i_ECDSA_SIG +#define X509_ATTRIBUTE_create_by_OBJ qcloudssl_X509_ATTRIBUTE_create_by_OBJ +#define DSA_generate_key qcloudssl_DSA_generate_key +#define X509V3_EXT_add_list qcloudssl_X509V3_EXT_add_list +#define ASN1_OCTET_STRING_dup qcloudssl_ASN1_OCTET_STRING_dup +#define SSL_set_max_proto_version 
qcloudssl_SSL_set_max_proto_version +#define EC_KEY_dup qcloudssl_EC_KEY_dup +#define SSL_set_verify_depth qcloudssl_SSL_set_verify_depth +#define EVP_aead_aes_128_ctr_hmac_sha256 qcloudssl_EVP_aead_aes_128_ctr_hmac_sha256 +#define X509v3_get_ext_by_critical qcloudssl_X509v3_get_ext_by_critical +#define d2i_DSAPublicKey qcloudssl_d2i_DSAPublicKey +#define c2i_ASN1_INTEGER qcloudssl_c2i_ASN1_INTEGER +#define EVP_PKEY_derive_init qcloudssl_EVP_PKEY_derive_init +#define SHA256_Update qcloudssl_SHA256_Update +#define AES_cfb128_encrypt qcloudssl_AES_cfb128_encrypt +#define DSA_do_sign qcloudssl_DSA_do_sign +#define X509at_add1_attr_by_txt qcloudssl_X509at_add1_attr_by_txt +#define X509_TRUST_cleanup qcloudssl_X509_TRUST_cleanup +#define i2d_PKCS8_PRIV_KEY_INFO qcloudssl_i2d_PKCS8_PRIV_KEY_INFO +#define BN_mod_exp_mont qcloudssl_BN_mod_exp_mont +#define EVP_PKEY_CTX_new_id qcloudssl_EVP_PKEY_CTX_new_id +#define ec_GFp_simple_is_on_curve qcloudssl_ec_GFp_simple_is_on_curve +#define EC_GROUP_get_cofactor qcloudssl_EC_GROUP_get_cofactor +#define d2i_X509_SIG qcloudssl_d2i_X509_SIG +#define X509_STORE_CTX_set_ex_data qcloudssl_X509_STORE_CTX_set_ex_data +#define X509_ATTRIBUTE_create_by_txt qcloudssl_X509_ATTRIBUTE_create_by_txt +#define EC_GROUP_get0_order qcloudssl_EC_GROUP_get0_order +#define ASN1_tag2bit qcloudssl_ASN1_tag2bit +#define ASN1_NULL_new qcloudssl_ASN1_NULL_new +#define d2i_X509_CERT_AUX qcloudssl_d2i_X509_CERT_AUX +#define PKCS12_parse qcloudssl_PKCS12_parse +#define PEM_def_callback qcloudssl_PEM_def_callback +#define X509_STORE_load_locations qcloudssl_X509_STORE_load_locations +#define X509_STORE_set_trust qcloudssl_X509_STORE_set_trust +#define BUF_memdup qcloudssl_BUF_memdup +#define d2i_PKCS8PrivateKey_bio qcloudssl_d2i_PKCS8PrivateKey_bio +#define GENERAL_SUBTREE_free qcloudssl_GENERAL_SUBTREE_free +#define d2i_X509_bio qcloudssl_d2i_X509_bio +#define ASN1_STRING_cmp qcloudssl_ASN1_STRING_cmp +#define SSL_get_client_random 
qcloudssl_SSL_get_client_random +#define d2i_RSAPublicKey_fp qcloudssl_d2i_RSAPublicKey_fp +#define SSL_CTX_set1_param qcloudssl_SSL_CTX_set1_param +#define ASN1_STRING_TABLE_add qcloudssl_ASN1_STRING_TABLE_add +#define PEM_read_bio_PKCS8_PRIV_KEY_INFO qcloudssl_PEM_read_bio_PKCS8_PRIV_KEY_INFO +#define ASN1_TIME_diff qcloudssl_ASN1_TIME_diff +#define SXNETID_new qcloudssl_SXNETID_new +#define CBB_add_u8_length_prefixed qcloudssl_CBB_add_u8_length_prefixed +#define X509_PUBKEY_it qcloudssl_X509_PUBKEY_it +#define CTR_DRBG_reseed qcloudssl_CTR_DRBG_reseed +#define X509_CRL_diff qcloudssl_X509_CRL_diff +#define sk_find qcloudssl_sk_find +#define DES_ede2_cbc_encrypt qcloudssl_DES_ede2_cbc_encrypt +#define ERR_load_SSL_strings qcloudssl_ERR_load_SSL_strings +#define i2d_RSA_PSS_PARAMS qcloudssl_i2d_RSA_PSS_PARAMS +#define SSL_check_private_key qcloudssl_SSL_check_private_key +#define bn_mul_words qcloudssl_bn_mul_words +#define d2i_ECPrivateKey qcloudssl_d2i_ECPrivateKey +#define PEM_read_bio_X509_CRL qcloudssl_PEM_read_bio_X509_CRL +#define DSA_sign_setup qcloudssl_DSA_sign_setup +#define SSL_set_mtu qcloudssl_SSL_set_mtu +#define ec_GFp_mont_field_decode qcloudssl_ec_GFp_mont_field_decode +#define CBS_len qcloudssl_CBS_len +#define bn_cmp_words qcloudssl_bn_cmp_words +#define BN_bin2bn qcloudssl_BN_bin2bn +#define BIO_new_connect qcloudssl_BIO_new_connect +#define DSA_sign qcloudssl_DSA_sign +#define SSL_CTX_enable_signed_cert_timestamps qcloudssl_SSL_CTX_enable_signed_cert_timestamps +#define EC_GROUP_set_asn1_flag qcloudssl_EC_GROUP_set_asn1_flag +#define PEM_write_X509 qcloudssl_PEM_write_X509 +#define SSL_CTX_set_tlsext_ticket_key_cb qcloudssl_SSL_CTX_set_tlsext_ticket_key_cb +#define i2a_ASN1_INTEGER qcloudssl_i2a_ASN1_INTEGER +#define OBJ_txt2nid qcloudssl_OBJ_txt2nid +#define BN_print qcloudssl_BN_print +#define X509_PUBKEY_free qcloudssl_X509_PUBKEY_free +#define PEM_write qcloudssl_PEM_write +#define v3_idp qcloudssl_v3_idp +#define 
SSL_CTX_clear_extra_chain_certs qcloudssl_SSL_CTX_clear_extra_chain_certs +#define EVP_aead_aes_128_gcm_tls12 qcloudssl_EVP_aead_aes_128_gcm_tls12 +#define X509_check_ca qcloudssl_X509_check_ca +#define EVP_aead_aes_256_cbc_sha384_tls qcloudssl_EVP_aead_aes_256_cbc_sha384_tls +#define i2d_CRL_DIST_POINTS qcloudssl_i2d_CRL_DIST_POINTS +#define NETSCAPE_SPKI_sign qcloudssl_NETSCAPE_SPKI_sign +#define EVP_MD_CTX_md qcloudssl_EVP_MD_CTX_md +#define ERR_remove_thread_state qcloudssl_ERR_remove_thread_state +#define EC_KEY_up_ref qcloudssl_EC_KEY_up_ref +#define EVP_CipherUpdate qcloudssl_EVP_CipherUpdate +#define PEM_write_RSAPublicKey qcloudssl_PEM_write_RSAPublicKey +#define bn_wexpand qcloudssl_bn_wexpand +#define bn_add_words qcloudssl_bn_add_words +#define v3_policy_mappings qcloudssl_v3_policy_mappings +#define aes_ctr_set_key qcloudssl_aes_ctr_set_key +#define EC_KEY_get0_private_key qcloudssl_EC_KEY_get0_private_key +#define X509v3_get_ext_by_NID qcloudssl_X509v3_get_ext_by_NID +#define X509V3_NAME_from_section qcloudssl_X509V3_NAME_from_section +#define ASN1_BIT_STRING_free qcloudssl_ASN1_BIT_STRING_free +#define d2i_AUTHORITY_KEYID qcloudssl_d2i_AUTHORITY_KEYID +#define BN_sub qcloudssl_BN_sub +#define PEM_write_X509_REQ qcloudssl_PEM_write_X509_REQ +#define EC_KEY_set_ex_data qcloudssl_EC_KEY_set_ex_data +#define CRYPTO_gcm128_aad qcloudssl_CRYPTO_gcm128_aad +#define ASN1_item_new qcloudssl_ASN1_item_new +#define SSL_set_msg_callback_arg qcloudssl_SSL_set_msg_callback_arg +#define d2i_ASN1_ENUMERATED qcloudssl_d2i_ASN1_ENUMERATED +#define EC_POINT_cmp qcloudssl_EC_POINT_cmp +#define DES_encrypt3 qcloudssl_DES_encrypt3 +#define SSL_get_read_ahead qcloudssl_SSL_get_read_ahead +#define BIO_ctrl_get_read_request qcloudssl_BIO_ctrl_get_read_request +#define ec_GFp_mont_field_encode qcloudssl_ec_GFp_mont_field_encode +#define BN_mod_inverse_blinded qcloudssl_BN_mod_inverse_blinded +#define SSL_cutthrough_complete qcloudssl_SSL_cutthrough_complete +#define 
SSL_CTX_enable_tls_channel_id qcloudssl_SSL_CTX_enable_tls_channel_id +#define BN_mul qcloudssl_BN_mul +#define SSL_CTX_set_max_send_fragment qcloudssl_SSL_CTX_set_max_send_fragment +#define SSL_CTX_set_custom_verify qcloudssl_SSL_CTX_set_custom_verify +#define EVP_PKEY_CTX_set_rsa_pss_saltlen qcloudssl_EVP_PKEY_CTX_set_rsa_pss_saltlen +#define X509V3_parse_list qcloudssl_X509V3_parse_list +#define i2d_ECPrivateKey_fp qcloudssl_i2d_ECPrivateKey_fp +#define X509_CRL_print qcloudssl_X509_CRL_print +#define SPAKE2_CTX_new qcloudssl_SPAKE2_CTX_new +#define X509_VAL_new qcloudssl_X509_VAL_new +#define SSL_get_peer_finished qcloudssl_SSL_get_peer_finished +#define d2i_ASN1_UTCTIME qcloudssl_d2i_ASN1_UTCTIME +#define SSL_CTX_set_msg_callback_arg qcloudssl_SSL_CTX_set_msg_callback_arg +#define SSL_add_file_cert_subjects_to_stack qcloudssl_SSL_add_file_cert_subjects_to_stack +#define X509_REQ_add_extensions_nid qcloudssl_X509_REQ_add_extensions_nid +#define PEM_read_X509_AUX qcloudssl_PEM_read_X509_AUX +#define BN_mod_mul_montgomery qcloudssl_BN_mod_mul_montgomery +#define EVP_VerifyUpdate qcloudssl_EVP_VerifyUpdate +#define EVP_PKEY_CTX_set_rsa_keygen_pubexp qcloudssl_EVP_PKEY_CTX_set_rsa_keygen_pubexp +#define SSL_get_wfd qcloudssl_SSL_get_wfd +#define ASN1_PRINTABLESTRING_new qcloudssl_ASN1_PRINTABLESTRING_new +#define X509_add1_ext_i2d qcloudssl_X509_add1_ext_i2d +#define ec_GFp_mont_group_init qcloudssl_ec_GFp_mont_group_init +#define EVP_aead_aes_256_ctr_hmac_sha256 qcloudssl_EVP_aead_aes_256_ctr_hmac_sha256 +#define SSL_CTX_set_tlsext_use_srtp qcloudssl_SSL_CTX_set_tlsext_use_srtp +#define i2d_X509 qcloudssl_i2d_X509 +#define AUTHORITY_INFO_ACCESS_new qcloudssl_AUTHORITY_INFO_ACCESS_new +#define ERR_load_crypto_strings qcloudssl_ERR_load_crypto_strings +#define EVP_CIPHER_CTX_cipher qcloudssl_EVP_CIPHER_CTX_cipher +#define CBB_flush qcloudssl_CBB_flush +#define X509_VERIFY_PARAM_set_depth qcloudssl_X509_VERIFY_PARAM_set_depth +#define BN_is_negative 
qcloudssl_BN_is_negative +#define SSL_set_connect_state qcloudssl_SSL_set_connect_state +#define EVP_sha512 qcloudssl_EVP_sha512 +#define EVP_get_cipherbynid qcloudssl_EVP_get_cipherbynid +#define CBS_get_u16_length_prefixed qcloudssl_CBS_get_u16_length_prefixed +#define SSL_want qcloudssl_SSL_want +#define d2i_CERTIFICATEPOLICIES qcloudssl_d2i_CERTIFICATEPOLICIES +#define GENERAL_NAME_it qcloudssl_GENERAL_NAME_it +#define CRYPTO_STATIC_MUTEX_lock_read qcloudssl_CRYPTO_STATIC_MUTEX_lock_read +#define i2d_X509_CINF qcloudssl_i2d_X509_CINF +#define DES_ede3_cbc_encrypt qcloudssl_DES_ede3_cbc_encrypt +#define i2d_PUBKEY_fp qcloudssl_i2d_PUBKEY_fp +#define X509_CINF_it qcloudssl_X509_CINF_it +#define X509_STORE_set_verify_cb qcloudssl_X509_STORE_set_verify_cb +#define EDIPARTYNAME_new qcloudssl_EDIPARTYNAME_new +#define X509_VERIFY_PARAM_set_hostflags qcloudssl_X509_VERIFY_PARAM_set_hostflags +#define SSL_CTX_enable_ocsp_stapling qcloudssl_SSL_CTX_enable_ocsp_stapling +#define d2i_ASN1_BIT_STRING qcloudssl_d2i_ASN1_BIT_STRING +#define DTLSv1_set_initial_timeout_duration qcloudssl_DTLSv1_set_initial_timeout_duration +#define BIO_get_retry_reason qcloudssl_BIO_get_retry_reason +#define d2i_RSA_PUBKEY_fp qcloudssl_d2i_RSA_PUBKEY_fp +#define SSL_set_SSL_CTX qcloudssl_SSL_set_SSL_CTX +#define SSL_CTX_set_tls13_variant qcloudssl_SSL_CTX_set_tls13_variant +#define X509at_get_attr_by_OBJ qcloudssl_X509at_get_attr_by_OBJ +#define i2d_GENERAL_NAMES qcloudssl_i2d_GENERAL_NAMES +#define SSL_CTX_set0_client_CAs qcloudssl_SSL_CTX_set0_client_CAs +#define CRYPTO_once qcloudssl_CRYPTO_once +#define EVP_EncodedLength qcloudssl_EVP_EncodedLength +#define ec_GFp_simple_point_set_affine_coordinates qcloudssl_ec_GFp_simple_point_set_affine_coordinates +#define sk_free qcloudssl_sk_free +#define SSL_CTX_set1_curves qcloudssl_SSL_CTX_set1_curves +#define d2i_PKCS12_fp qcloudssl_d2i_PKCS12_fp +#define BN_rand_range qcloudssl_BN_rand_range +#define EVP_PKEY_verify_recover 
qcloudssl_EVP_PKEY_verify_recover +#define X509_NAME_ENTRY_dup qcloudssl_X509_NAME_ENTRY_dup +#define ASN1_TYPE_get qcloudssl_ASN1_TYPE_get +#define SSL_dup_CA_list qcloudssl_SSL_dup_CA_list +#define asn1_enc_init qcloudssl_asn1_enc_init +#define d2i_PKCS8_PRIV_KEY_INFO_fp qcloudssl_d2i_PKCS8_PRIV_KEY_INFO_fp +#define i2d_RSAPublicKey_fp qcloudssl_i2d_RSAPublicKey_fp +#define X509_ATTRIBUTE_count qcloudssl_X509_ATTRIBUTE_count +#define SSL_CTX_set_ex_data qcloudssl_SSL_CTX_set_ex_data +#define X509_get_default_cert_file_env qcloudssl_X509_get_default_cert_file_env +#define DH_get0_pqg qcloudssl_DH_get0_pqg +#define X509_TRUST_get_flags qcloudssl_X509_TRUST_get_flags +#define PEM_write_bio_RSAPublicKey qcloudssl_PEM_write_bio_RSAPublicKey +#define EC_POINT_point2oct qcloudssl_EC_POINT_point2oct +#define CERTIFICATEPOLICIES_it qcloudssl_CERTIFICATEPOLICIES_it +#define EVP_DigestSignUpdate qcloudssl_EVP_DigestSignUpdate +#define X509V3_set_nconf qcloudssl_X509V3_set_nconf +#define ASN1_STRING_set0 qcloudssl_ASN1_STRING_set0 +#define X509_REVOKED_delete_ext qcloudssl_X509_REVOKED_delete_ext +#define X509V3_get_d2i qcloudssl_X509V3_get_d2i +#define EVP_aead_aes_256_gcm_tls12 qcloudssl_EVP_aead_aes_256_gcm_tls12 +#define X509_NAME_entry_count qcloudssl_X509_NAME_entry_count +#define SSL_CTX_set_tmp_rsa qcloudssl_SSL_CTX_set_tmp_rsa +#define d2i_ASN1_UTF8STRING qcloudssl_d2i_ASN1_UTF8STRING +#define BN_BLINDING_new qcloudssl_BN_BLINDING_new +#define SSL_CTX_set_channel_id_cb qcloudssl_SSL_CTX_set_channel_id_cb +#define EVP_DigestVerifyInit qcloudssl_EVP_DigestVerifyInit +#define EVP_PKEY_set_type qcloudssl_EVP_PKEY_set_type +#define SSL_get0_alpn_selected qcloudssl_SSL_get0_alpn_selected +#define ASN1_i2d_bio qcloudssl_ASN1_i2d_bio +#define CRYPTO_get_thread_local qcloudssl_CRYPTO_get_thread_local +#define DTLSv1_2_method qcloudssl_DTLSv1_2_method +#define EVP_PKEY_get0_RSA qcloudssl_EVP_PKEY_get0_RSA +#define d2i_PUBKEY qcloudssl_d2i_PUBKEY +#define SSL_alert_desc_string 
qcloudssl_SSL_alert_desc_string +#define i2d_PrivateKey_fp qcloudssl_i2d_PrivateKey_fp +#define X509_STORE_add_cert qcloudssl_X509_STORE_add_cert +#define ASN1_TIME_check qcloudssl_ASN1_TIME_check +#define SHA384_Update qcloudssl_SHA384_Update +#define i2d_ECParameters qcloudssl_i2d_ECParameters +#define PEM_read_bio_DSAPrivateKey qcloudssl_PEM_read_bio_DSAPrivateKey +#define GENERAL_NAME_set0_value qcloudssl_GENERAL_NAME_set0_value +#define CMAC_Init qcloudssl_CMAC_Init +#define X509_LOOKUP_file qcloudssl_X509_LOOKUP_file +#define TLSv1_client_method qcloudssl_TLSv1_client_method +#define d2i_DSAparams qcloudssl_d2i_DSAparams +#define BN_asc2bn qcloudssl_BN_asc2bn +#define GENERAL_NAMES_it qcloudssl_GENERAL_NAMES_it +#define d2i_NOTICEREF qcloudssl_d2i_NOTICEREF +#define BIO_copy_next_retry qcloudssl_BIO_copy_next_retry +#define CRYPTO_STATIC_MUTEX_lock_write qcloudssl_CRYPTO_STATIC_MUTEX_lock_write +#define HMAC_Update qcloudssl_HMAC_Update +#define SSL_CTX_sess_cache_full qcloudssl_SSL_CTX_sess_cache_full +#define PEM_read_bio_EC_PUBKEY qcloudssl_PEM_read_bio_EC_PUBKEY +#define BIO_up_ref qcloudssl_BIO_up_ref +#define EVP_PKEY_cmp_parameters qcloudssl_EVP_PKEY_cmp_parameters +#define CMAC_CTX_free qcloudssl_CMAC_CTX_free +#define AUTHORITY_KEYID_free qcloudssl_AUTHORITY_KEYID_free +#define X509_STORE_CTX_get_ex_data qcloudssl_X509_STORE_CTX_get_ex_data +#define EC_KEY_marshal_private_key qcloudssl_EC_KEY_marshal_private_key +#define EVP_sha1 qcloudssl_EVP_sha1 +#define i2d_DIST_POINT_NAME qcloudssl_i2d_DIST_POINT_NAME +#define PKCS5_pbe2_encrypt_init qcloudssl_PKCS5_pbe2_encrypt_init +#define BIO_set_close qcloudssl_BIO_set_close +#define i2s_ASN1_INTEGER qcloudssl_i2s_ASN1_INTEGER +#define X509_REQ_it qcloudssl_X509_REQ_it +#define SSL_clear_chain_certs qcloudssl_SSL_clear_chain_certs +#define PEM_write_DSAparams qcloudssl_PEM_write_DSAparams +#define DTLS_method qcloudssl_DTLS_method +#define PEM_write_bio_PUBKEY qcloudssl_PEM_write_bio_PUBKEY +#define 
X509_load_cert_crl_file qcloudssl_X509_load_cert_crl_file +#define AES_cbc_encrypt qcloudssl_AES_cbc_encrypt +#define EVP_aead_aes_128_cbc_sha1_tls qcloudssl_EVP_aead_aes_128_cbc_sha1_tls +#define X509V3_EXT_add_nconf qcloudssl_X509V3_EXT_add_nconf +#define SSL_clear_mode qcloudssl_SSL_clear_mode +#define OBJ_create qcloudssl_OBJ_create +#define DH_generate_key qcloudssl_DH_generate_key +#define ec_pkey_meth qcloudssl_ec_pkey_meth +#define i2d_AUTHORITY_KEYID qcloudssl_i2d_AUTHORITY_KEYID +#define X509_VERIFY_PARAM_set_flags qcloudssl_X509_VERIFY_PARAM_set_flags +#define BN_BLINDING_convert qcloudssl_BN_BLINDING_convert +#define EC_GROUP_new_by_curve_name qcloudssl_EC_GROUP_new_by_curve_name +#define EVP_aes_192_ecb qcloudssl_EVP_aes_192_ecb +#define BN_div_word qcloudssl_BN_div_word +#define X509_CRL_new qcloudssl_X509_CRL_new +#define SSL_CTX_get_ciphers qcloudssl_SSL_CTX_get_ciphers +#define X509_STORE_CTX_get0_param qcloudssl_X509_STORE_CTX_get0_param +#define policy_cache_free qcloudssl_policy_cache_free +#define OBJ_find_sigid_algs qcloudssl_OBJ_find_sigid_algs +#define EVP_VerifyFinal qcloudssl_EVP_VerifyFinal +#define X509V3_add_value_bool qcloudssl_X509V3_add_value_bool +#define d2i_DIRECTORYSTRING qcloudssl_d2i_DIRECTORYSTRING +#define ERR_print_errors_cb qcloudssl_ERR_print_errors_cb +#define DTLS_client_method qcloudssl_DTLS_client_method +#define ASN1_BOOLEAN_it qcloudssl_ASN1_BOOLEAN_it +#define SSL_CTX_set_allow_unknown_alpn_protos qcloudssl_SSL_CTX_set_allow_unknown_alpn_protos +#define PEM_write_X509_AUX qcloudssl_PEM_write_X509_AUX +#define kBoringSSLRSASqrtTwoLen qcloudssl_kBoringSSLRSASqrtTwoLen +#define ASN1_OCTET_STRING_NDEF_it qcloudssl_ASN1_OCTET_STRING_NDEF_it +#define SSL_CTX_set0_buffer_pool qcloudssl_SSL_CTX_set0_buffer_pool +#define PEM_write_PKCS8_PRIV_KEY_INFO qcloudssl_PEM_write_PKCS8_PRIV_KEY_INFO +#define ec_GFp_mont_group_set_curve qcloudssl_ec_GFp_mont_group_set_curve +#define ASN1_INTEGER_free qcloudssl_ASN1_INTEGER_free 
+#define EVP_CipherFinal_ex qcloudssl_EVP_CipherFinal_ex +#define SSL_CTX_sess_set_cache_size qcloudssl_SSL_CTX_sess_set_cache_size +#define ASN1_STRING_print_ex_fp qcloudssl_ASN1_STRING_print_ex_fp +#define i2d_PKCS8_PRIV_KEY_INFO_fp qcloudssl_i2d_PKCS8_PRIV_KEY_INFO_fp +#define EVP_PKEY_CTX_set_rsa_padding qcloudssl_EVP_PKEY_CTX_set_rsa_padding +#define EC_GROUP_get_curve_GFp qcloudssl_EC_GROUP_get_curve_GFp +#define BIO_write_filename qcloudssl_BIO_write_filename +#define CRYPTO_get_ex_new_index qcloudssl_CRYPTO_get_ex_new_index +#define BIO_indent qcloudssl_BIO_indent +#define SSL_alert_type_string qcloudssl_SSL_alert_type_string +#define BIO_free qcloudssl_BIO_free +#define EVP_MD_CTX_init qcloudssl_EVP_MD_CTX_init +#define ec_point_set_Jprojective_coordinates_GFp qcloudssl_ec_point_set_Jprojective_coordinates_GFp +#define i2d_X509_CRL_bio qcloudssl_i2d_X509_CRL_bio +#define v3_ext_ku qcloudssl_v3_ext_ku +#define ec_GFp_simple_field_sqr qcloudssl_ec_GFp_simple_field_sqr +#define SHA224_Init qcloudssl_SHA224_Init +#define SSL_need_tmp_RSA qcloudssl_SSL_need_tmp_RSA +#define BIO_set_write_buffer_size qcloudssl_BIO_set_write_buffer_size +#define SSL_state_string_long qcloudssl_SSL_state_string_long +#define NOTICEREF_it qcloudssl_NOTICEREF_it +#define sk_pop_free qcloudssl_sk_pop_free +#define BASIC_CONSTRAINTS_free qcloudssl_BASIC_CONSTRAINTS_free +#define BIO_wpending qcloudssl_BIO_wpending +#define X509_STORE_CTX_set0_crls qcloudssl_X509_STORE_CTX_set0_crls +#define v3_nscert qcloudssl_v3_nscert +#define d2i_RSA_PSS_PARAMS qcloudssl_d2i_RSA_PSS_PARAMS +#define d2i_X509_REQ_INFO qcloudssl_d2i_X509_REQ_INFO +#define d2i_ASN1_NULL qcloudssl_d2i_ASN1_NULL +#define ED25519_sign qcloudssl_ED25519_sign +#define X509_CINF_new qcloudssl_X509_CINF_new +#define ASN1_BIT_STRING_set_bit qcloudssl_ASN1_BIT_STRING_set_bit +#define SSLv23_server_method qcloudssl_SSLv23_server_method +#define SSL_CTX_set0_chain qcloudssl_SSL_CTX_set0_chain +#define d2i_BASIC_CONSTRAINTS 
qcloudssl_d2i_BASIC_CONSTRAINTS +#define d2i_X509_NAME qcloudssl_d2i_X509_NAME +#define i2d_DSAPrivateKey_fp qcloudssl_i2d_DSAPrivateKey_fp +#define X509_get_serialNumber qcloudssl_X509_get_serialNumber +#define crypto_gcm_clmul_enabled qcloudssl_crypto_gcm_clmul_enabled +#define i2d_RSAPublicKey qcloudssl_i2d_RSAPublicKey +#define v3_alt qcloudssl_v3_alt +#define EVP_PKEY_print_params qcloudssl_EVP_PKEY_print_params +#define X509_STORE_CTX_set_purpose qcloudssl_X509_STORE_CTX_set_purpose +#define i2o_ECPublicKey qcloudssl_i2o_ECPublicKey +#define i2d_ASN1_BIT_STRING qcloudssl_i2d_ASN1_BIT_STRING +#define i2d_ASN1_UNIVERSALSTRING qcloudssl_i2d_ASN1_UNIVERSALSTRING +#define bn_set_words qcloudssl_bn_set_words +#define BIO_s_connect qcloudssl_BIO_s_connect +#define X509V3_EXT_i2d qcloudssl_X509V3_EXT_i2d +#define X509_issuer_and_serial_cmp qcloudssl_X509_issuer_and_serial_cmp +#define SSL_get_max_cert_list qcloudssl_SSL_get_max_cert_list +#define PEM_read_DSAparams qcloudssl_PEM_read_DSAparams +#define EVP_PKEY_new qcloudssl_EVP_PKEY_new +#define X509V3_get_value_bool qcloudssl_X509V3_get_value_bool +#define NETSCAPE_SPKI_it qcloudssl_NETSCAPE_SPKI_it +#define SSL_set_options qcloudssl_SSL_set_options +#define SSL_CTX_use_PrivateKey qcloudssl_SSL_CTX_use_PrivateKey +#define EVP_AEAD_CTX_aead qcloudssl_EVP_AEAD_CTX_aead +#define EVP_Digest qcloudssl_EVP_Digest +#define d2i_X509_AUX qcloudssl_d2i_X509_AUX +#define X509_get0_signature qcloudssl_X509_get0_signature +#define X509_STORE_CTX_get_error qcloudssl_X509_STORE_CTX_get_error +#define X25519_public_from_private qcloudssl_X25519_public_from_private +#define bn_cmp_part_words qcloudssl_bn_cmp_part_words +#define X509V3_EXT_nconf qcloudssl_X509V3_EXT_nconf +#define DSA_get0_key qcloudssl_DSA_get0_key +#define X509_NAME_print_ex_fp qcloudssl_X509_NAME_print_ex_fp +#define X509_ALGOR_set_md qcloudssl_X509_ALGOR_set_md +#define EVP_aes_128_ecb qcloudssl_EVP_aes_128_ecb +#define EVP_DecodeInit qcloudssl_EVP_DecodeInit 
+#define EC_GROUP_get_curve_name qcloudssl_EC_GROUP_get_curve_name +#define DH_marshal_parameters qcloudssl_DH_marshal_parameters +#define ASN1_d2i_bio qcloudssl_ASN1_d2i_bio +#define SSL_CTX_sess_get_cache_size qcloudssl_SSL_CTX_sess_get_cache_size +#define EC_KEY_set_group qcloudssl_EC_KEY_set_group +#define RAND_get_rand_method qcloudssl_RAND_get_rand_method +#define ec_GFp_simple_set_Jprojective_coordinates_GFp qcloudssl_ec_GFp_simple_set_Jprojective_coordinates_GFp +#define X509_TRUST_get0 qcloudssl_X509_TRUST_get0 +#define X509_CERT_AUX_it qcloudssl_X509_CERT_AUX_it +#define i2d_X509_SIG qcloudssl_i2d_X509_SIG +#define BN_rshift1 qcloudssl_BN_rshift1 +#define ASN1_digest qcloudssl_ASN1_digest +#define SSL_renegotiate qcloudssl_SSL_renegotiate +#define HKDF_extract qcloudssl_HKDF_extract +#define X509_check_purpose qcloudssl_X509_check_purpose +#define i2d_ASN1_UTF8STRING qcloudssl_i2d_ASN1_UTF8STRING +#define X509_REQ_digest qcloudssl_X509_REQ_digest +#define EVP_CIPHER_CTX_free qcloudssl_EVP_CIPHER_CTX_free +#define DES_decrypt3 qcloudssl_DES_decrypt3 +#define DSA_get_ex_new_index qcloudssl_DSA_get_ex_new_index +#define i2d_X509_bio qcloudssl_i2d_X509_bio +#define ASN1_GENERALIZEDTIME_free qcloudssl_ASN1_GENERALIZEDTIME_free +#define EXTENDED_KEY_USAGE_free qcloudssl_EXTENDED_KEY_USAGE_free +#define ASN1_OCTET_STRING_set qcloudssl_ASN1_OCTET_STRING_set +#define SSL_CTX_get_mode qcloudssl_SSL_CTX_get_mode +#define d2i_ASN1_TYPE qcloudssl_d2i_ASN1_TYPE +#define DSA_get0_pqg qcloudssl_DSA_get0_pqg +#define X509_PURPOSE_add qcloudssl_X509_PURPOSE_add +#define EC_KEY_generate_key_fips qcloudssl_EC_KEY_generate_key_fips +#define i2d_DSAPrivateKey qcloudssl_i2d_DSAPrivateKey +#define SSL_CTX_up_ref qcloudssl_SSL_CTX_up_ref +#define X509_NAME_get_text_by_NID qcloudssl_X509_NAME_get_text_by_NID +#define d2i_SSL_SESSION_bio qcloudssl_d2i_SSL_SESSION_bio +#define X509V3_EXT_d2i qcloudssl_X509V3_EXT_d2i +#define X509_EXTENSION_get_object 
qcloudssl_X509_EXTENSION_get_object +#define X509_print_ex qcloudssl_X509_print_ex +#define EVP_PKEY_up_ref qcloudssl_EVP_PKEY_up_ref +#define ASN1_BIT_STRING_check qcloudssl_ASN1_BIT_STRING_check +#define BN_mod_sqrt qcloudssl_BN_mod_sqrt +#define RSA_get0_factors qcloudssl_RSA_get0_factors +#define USERNOTICE_it qcloudssl_USERNOTICE_it +#define GENERAL_SUBTREE_it qcloudssl_GENERAL_SUBTREE_it +#define SSL_is_dtls qcloudssl_SSL_is_dtls +#define RSA_PSS_PARAMS_it qcloudssl_RSA_PSS_PARAMS_it +#define BIO_test_flags qcloudssl_BIO_test_flags +#define X509_STORE_CTX_trusted_stack qcloudssl_X509_STORE_CTX_trusted_stack +#define BN_value_one qcloudssl_BN_value_one +#define EVP_PKEY_encrypt qcloudssl_EVP_PKEY_encrypt +#define SSL_COMP_free_compression_methods qcloudssl_SSL_COMP_free_compression_methods +#define EVP_aes_256_ecb qcloudssl_EVP_aes_256_ecb +#define GENERAL_NAME_get0_otherName qcloudssl_GENERAL_NAME_get0_otherName +#define PKCS8_pkey_set0 qcloudssl_PKCS8_pkey_set0 +#define BIO_clear_flags qcloudssl_BIO_clear_flags +#define X509_REQ_get_attr_by_OBJ qcloudssl_X509_REQ_get_attr_by_OBJ +#define PROXY_CERT_INFO_EXTENSION_it qcloudssl_PROXY_CERT_INFO_EXTENSION_it +#define i2d_PKCS8PrivateKeyInfo_fp qcloudssl_i2d_PKCS8PrivateKeyInfo_fp +#define X509_LOOKUP_init qcloudssl_X509_LOOKUP_init +#define CRYPTO_get_ex_data qcloudssl_CRYPTO_get_ex_data +#define OBJ_txt2obj qcloudssl_OBJ_txt2obj +#define SSL_set_signed_cert_timestamp_list qcloudssl_SSL_set_signed_cert_timestamp_list +#define bn_sqr_comba4 qcloudssl_bn_sqr_comba4 +#define X509_CRL_INFO_new qcloudssl_X509_CRL_INFO_new +#define ERR_load_BIO_strings qcloudssl_ERR_load_BIO_strings +#define EVP_MD_CTX_create qcloudssl_EVP_MD_CTX_create +#define SSL_CTX_set_client_CA_list qcloudssl_SSL_CTX_set_client_CA_list +#define BN_CTX_free qcloudssl_BN_CTX_free +#define PEM_read_X509 qcloudssl_PEM_read_X509 +#define d2i_ASN1_T61STRING qcloudssl_d2i_ASN1_T61STRING +#define SSL_set_ocsp_response qcloudssl_SSL_set_ocsp_response 
+#define CBS_data qcloudssl_CBS_data +#define SSL_get_rfd qcloudssl_SSL_get_rfd +#define EVP_AEAD_CTX_tag_len qcloudssl_EVP_AEAD_CTX_tag_len +#define SSL_CIPHER_get_auth_nid qcloudssl_SSL_CIPHER_get_auth_nid +#define RSA_public_encrypt qcloudssl_RSA_public_encrypt +#define X509_STORE_set_depth qcloudssl_X509_STORE_set_depth +#define BIO_push qcloudssl_BIO_push +#define a2d_ASN1_OBJECT qcloudssl_a2d_ASN1_OBJECT +#define i2d_NETSCAPE_SPKI qcloudssl_i2d_NETSCAPE_SPKI +#define EVP_PKEY_keygen qcloudssl_EVP_PKEY_keygen +#define ERR_add_error_data qcloudssl_ERR_add_error_data +#define CBB_finish qcloudssl_CBB_finish +#define EVP_PKEY_print_private qcloudssl_EVP_PKEY_print_private +#define ASN1_OCTET_STRING_it qcloudssl_ASN1_OCTET_STRING_it +#define X509_REVOKED_add1_ext_i2d qcloudssl_X509_REVOKED_add1_ext_i2d +#define EVP_aead_aes_128_cbc_sha1_tls_implicit_iv qcloudssl_EVP_aead_aes_128_cbc_sha1_tls_implicit_iv +#define ec_GFp_nistp_recode_scalar_bits qcloudssl_ec_GFp_nistp_recode_scalar_bits +#define OPENSSL_gmtime_adj qcloudssl_OPENSSL_gmtime_adj +#define ASN1_STRING_set_by_NID qcloudssl_ASN1_STRING_set_by_NID +#define CRYPTO_ctr128_encrypt_ctr32 qcloudssl_CRYPTO_ctr128_encrypt_ctr32 +#define RSA_generate_key_fips qcloudssl_RSA_generate_key_fips +#define SSL_add1_chain_cert qcloudssl_SSL_add1_chain_cert +#define SSL_get_shutdown qcloudssl_SSL_get_shutdown +#define SSL_CTX_load_verify_locations qcloudssl_SSL_CTX_load_verify_locations +#define X509_STORE_CTX_new qcloudssl_X509_STORE_CTX_new +#define ASN1_item_pack qcloudssl_ASN1_item_pack +#define i2d_EDIPARTYNAME qcloudssl_i2d_EDIPARTYNAME +#define AES_ctr128_encrypt qcloudssl_AES_ctr128_encrypt +#define BIO_mem_contents qcloudssl_BIO_mem_contents +#define BN_div qcloudssl_BN_div +#define SSL_set_wfd qcloudssl_SSL_set_wfd +#define CONF_VALUE_new qcloudssl_CONF_VALUE_new +#define SSL_set_ex_data qcloudssl_SSL_set_ex_data +#define CRYPTO_set_dynlock_lock_callback qcloudssl_CRYPTO_set_dynlock_lock_callback +#define 
X509_get_issuer_name qcloudssl_X509_get_issuer_name +#define ec_GFp_simple_invert qcloudssl_ec_GFp_simple_invert +#define DH_generate_parameters_ex qcloudssl_DH_generate_parameters_ex +#define X509_STORE_CTX_get_explicit_policy qcloudssl_X509_STORE_CTX_get_explicit_policy +#define BN_free qcloudssl_BN_free +#define X509_REQ_set_subject_name qcloudssl_X509_REQ_set_subject_name +#define X509_EXTENSION_set_critical qcloudssl_X509_EXTENSION_set_critical +#define i2d_GENERAL_NAME qcloudssl_i2d_GENERAL_NAME +#define OPENSSL_tolower qcloudssl_OPENSSL_tolower +#define SSL_CTX_set_max_proto_version qcloudssl_SSL_CTX_set_max_proto_version +#define BN_generate_dsa_nonce qcloudssl_BN_generate_dsa_nonce +#define BN_hex2bn qcloudssl_BN_hex2bn +#define CRYPTO_gcm128_tag qcloudssl_CRYPTO_gcm128_tag +#define X509_ATTRIBUTE_it qcloudssl_X509_ATTRIBUTE_it +#define BN_CTX_end qcloudssl_BN_CTX_end +#define BIO_shutdown_wr qcloudssl_BIO_shutdown_wr +#define d2i_ASN1_TIME qcloudssl_d2i_ASN1_TIME +#define sk_deep_copy qcloudssl_sk_deep_copy +#define kBoringSSLRSASqrtTwo qcloudssl_kBoringSSLRSASqrtTwo +#define SSL_set1_chain qcloudssl_SSL_set1_chain +#define BIO_read_filename qcloudssl_BIO_read_filename +#define RSA_encrypt qcloudssl_RSA_encrypt +#define AES_set_encrypt_key qcloudssl_AES_set_encrypt_key +#define i2d_EC_PUBKEY_fp qcloudssl_i2d_EC_PUBKEY_fp +#define d2i_PKCS8_PRIV_KEY_INFO_bio qcloudssl_d2i_PKCS8_PRIV_KEY_INFO_bio +#define X509_STORE_CTX_get0_policy_tree qcloudssl_X509_STORE_CTX_get0_policy_tree +#define SSL_CTX_use_certificate_ASN1 qcloudssl_SSL_CTX_use_certificate_ASN1 +#define i2d_PKCS8_bio qcloudssl_i2d_PKCS8_bio +#define EVP_PKEY_CTX_get_signature_md qcloudssl_EVP_PKEY_CTX_get_signature_md +#define BN_CTX_start qcloudssl_BN_CTX_start +#define SSL_CIPHER_get_kx_name qcloudssl_SSL_CIPHER_get_kx_name +#define X509V3_conf_free qcloudssl_X509V3_conf_free +#define d2i_SXNET qcloudssl_d2i_SXNET +#define rsa_default_decrypt qcloudssl_rsa_default_decrypt +#define 
i2d_ASN1_BMPSTRING qcloudssl_i2d_ASN1_BMPSTRING +#define CRYPTO_has_asm qcloudssl_CRYPTO_has_asm +#define MD5_Transform qcloudssl_MD5_Transform +#define PKCS12_free qcloudssl_PKCS12_free +#define X509_REVOKED_get_ext_by_OBJ qcloudssl_X509_REVOKED_get_ext_by_OBJ +#define BN_mod_add qcloudssl_BN_mod_add +#define DTLSv1_2_server_method qcloudssl_DTLSv1_2_server_method +#define i2d_X509_ALGORS qcloudssl_i2d_X509_ALGORS +#define EVP_PKEY_get1_RSA qcloudssl_EVP_PKEY_get1_RSA +#define X509_print_fp qcloudssl_X509_print_fp +#define SSL_CTX_set_dos_protection_cb qcloudssl_SSL_CTX_set_dos_protection_cb +#define X509_CRL_check_suiteb qcloudssl_X509_CRL_check_suiteb +#define BIO_set_conn_hostname qcloudssl_BIO_set_conn_hostname +#define SSL_set0_verify_cert_store qcloudssl_SSL_set0_verify_cert_store +#define d2i_X509_CRL qcloudssl_d2i_X509_CRL +#define BIO_append_filename qcloudssl_BIO_append_filename +#define d2i_X509_REQ_bio qcloudssl_d2i_X509_REQ_bio +#define X509_STORE_CTX_set_error qcloudssl_X509_STORE_CTX_set_error +#define ASN1_OBJECT_free qcloudssl_ASN1_OBJECT_free +#define EVP_aead_aes_128_gcm qcloudssl_EVP_aead_aes_128_gcm +#define EVP_AEAD_key_length qcloudssl_EVP_AEAD_key_length +#define X509_PURPOSE_set qcloudssl_X509_PURPOSE_set +#define CBS_get_u24 qcloudssl_CBS_get_u24 +#define EVP_aead_aes_128_gcm_siv qcloudssl_EVP_aead_aes_128_gcm_siv +#define SSL_get_peer_cert_chain qcloudssl_SSL_get_peer_cert_chain +#define X509V3_get_section qcloudssl_X509V3_get_section +#define SSL_CTX_set_verify_algorithm_prefs qcloudssl_SSL_CTX_set_verify_algorithm_prefs +#define EVP_des_ede3 qcloudssl_EVP_des_ede3 +#define asn1_refcount_dec_and_test_zero qcloudssl_asn1_refcount_dec_and_test_zero +#define SSL_total_renegotiations qcloudssl_SSL_total_renegotiations +#define SSL_CTX_set_msg_callback qcloudssl_SSL_CTX_set_msg_callback +#define CRYPTO_cfb128_encrypt qcloudssl_CRYPTO_cfb128_encrypt +#define EVP_CIPHER_CTX_iv_length qcloudssl_EVP_CIPHER_CTX_iv_length +#define 
X509_VERIFY_PARAM_get0_name qcloudssl_X509_VERIFY_PARAM_get0_name +#define PEM_read_X509_REQ qcloudssl_PEM_read_X509_REQ +#define BN_get_u64 qcloudssl_BN_get_u64 +#define SSL_CTX_add_client_CA qcloudssl_SSL_CTX_add_client_CA +#define BIO_read qcloudssl_BIO_read +#define ENGINE_load_builtin_engines qcloudssl_ENGINE_load_builtin_engines +#define SSL_alert_desc_string_long qcloudssl_SSL_alert_desc_string_long +#define EVP_CipherInit qcloudssl_EVP_CipherInit +#define i2d_SSL_SESSION_bio qcloudssl_i2d_SSL_SESSION_bio +#define BIO_set_retry_read qcloudssl_BIO_set_retry_read +#define ISSUING_DIST_POINT_new qcloudssl_ISSUING_DIST_POINT_new +#define X509_REQ_get_pubkey qcloudssl_X509_REQ_get_pubkey +#define X509_policy_check qcloudssl_X509_policy_check +#define X509V3_extensions_print qcloudssl_X509V3_extensions_print +#define SSL_get_verify_result qcloudssl_SSL_get_verify_result +#define X509_STORE_set1_param qcloudssl_X509_STORE_set1_param +#define asn1_do_adb qcloudssl_asn1_do_adb +#define ASN1_INTEGER_set qcloudssl_ASN1_INTEGER_set +#define EVP_SignUpdate qcloudssl_EVP_SignUpdate +#define ASN1_PRINTABLESTRING_it qcloudssl_ASN1_PRINTABLESTRING_it +#define X509_get_pubkey qcloudssl_X509_get_pubkey +#define SSL_new qcloudssl_SSL_new +#define PKCS8_PRIV_KEY_INFO_new qcloudssl_PKCS8_PRIV_KEY_INFO_new +#define PKEY_USAGE_PERIOD_it qcloudssl_PKEY_USAGE_PERIOD_it +#define CTR_DRBG_generate qcloudssl_CTR_DRBG_generate +#define x509_rsa_pss_to_ctx qcloudssl_x509_rsa_pss_to_ctx +#define EVP_aead_chacha20_poly1305 qcloudssl_EVP_aead_chacha20_poly1305 +#define EVP_des_cbc qcloudssl_EVP_des_cbc +#define CBB_reserve qcloudssl_CBB_reserve +#define sk_new_null qcloudssl_sk_new_null +#define PEM_write_bio_DHparams qcloudssl_PEM_write_bio_DHparams +#define ASN1_BMPSTRING_free qcloudssl_ASN1_BMPSTRING_free +#define pkcs8_pbe_decrypt qcloudssl_pkcs8_pbe_decrypt +#define AES_set_decrypt_key qcloudssl_AES_set_decrypt_key +#define X509_NAME_ENTRIES_it qcloudssl_X509_NAME_ENTRIES_it +#define 
EVP_aead_aes_256_cbc_sha1_ssl3 qcloudssl_EVP_aead_aes_256_cbc_sha1_ssl3 +#define RSA_free qcloudssl_RSA_free +#define RAND_set_urandom_fd qcloudssl_RAND_set_urandom_fd +#define ASN1_STRING_type qcloudssl_ASN1_STRING_type +#define SSL_CTX_sess_accept_renegotiate qcloudssl_SSL_CTX_sess_accept_renegotiate +#define PEM_write_bio_ECPrivateKey qcloudssl_PEM_write_bio_ECPrivateKey +#define NCONF_free qcloudssl_NCONF_free +#define d2i_ASN1_OCTET_STRING qcloudssl_d2i_ASN1_OCTET_STRING +#define CBB_init qcloudssl_CBB_init +#define PEM_write_X509_REQ_NEW qcloudssl_PEM_write_X509_REQ_NEW +#define X509_VERIFY_PARAM_set1_ip qcloudssl_X509_VERIFY_PARAM_set1_ip +#define CONF_modules_load_file qcloudssl_CONF_modules_load_file +#define OPENSSL_gmtime_diff qcloudssl_OPENSSL_gmtime_diff +#define SSL_alert_type_string_long qcloudssl_SSL_alert_type_string_long +#define ec_GFp_mont_field_mul qcloudssl_ec_GFp_mont_field_mul +#define PEM_write_bio_X509_CRL qcloudssl_PEM_write_bio_X509_CRL +#define BN_num_bits qcloudssl_BN_num_bits +#define X509at_add1_attr qcloudssl_X509at_add1_attr +#define ISSUING_DIST_POINT_free qcloudssl_ISSUING_DIST_POINT_free +#define SSL_CTX_set_chain_and_key qcloudssl_SSL_CTX_set_chain_and_key +#define AES_CMAC qcloudssl_AES_CMAC +#define i2d_PKCS8PrivateKeyInfo_bio qcloudssl_i2d_PKCS8PrivateKeyInfo_bio +#define EVP_PKEY_assign_RSA qcloudssl_EVP_PKEY_assign_RSA +#define PEM_write_PKCS8PrivateKey_nid qcloudssl_PEM_write_PKCS8PrivateKey_nid +#define X509_VERIFY_PARAM_get0 qcloudssl_X509_VERIFY_PARAM_get0 +#define ASN1_UNIVERSALSTRING_it qcloudssl_ASN1_UNIVERSALSTRING_it +#define EVP_sha384 qcloudssl_EVP_sha384 +#define SSL_get_servername_type qcloudssl_SSL_get_servername_type +#define SSL_CTX_set_cipher_list qcloudssl_SSL_CTX_set_cipher_list +#define PKCS8_marshal_encrypted_private_key qcloudssl_PKCS8_marshal_encrypted_private_key +#define i2d_ASN1_GENERALIZEDTIME qcloudssl_i2d_ASN1_GENERALIZEDTIME +#define DIRECTORYSTRING_new qcloudssl_DIRECTORYSTRING_new +#define 
PEM_read_bio qcloudssl_PEM_read_bio +#define DH_get0_key qcloudssl_DH_get0_key +#define i2d_EXTENDED_KEY_USAGE qcloudssl_i2d_EXTENDED_KEY_USAGE +#define RC4_set_key qcloudssl_RC4_set_key +#define ERR_remove_state qcloudssl_ERR_remove_state +#define i2d_ASN1_SEQUENCE_ANY qcloudssl_i2d_ASN1_SEQUENCE_ANY +#define ENGINE_set_RSA_method qcloudssl_ENGINE_set_RSA_method +#define EVP_aes_192_ctr qcloudssl_EVP_aes_192_ctr +#define DISPLAYTEXT_new qcloudssl_DISPLAYTEXT_new +#define SSL_set_alpn_protos qcloudssl_SSL_set_alpn_protos +#define SXNET_add_id_ulong qcloudssl_SXNET_add_id_ulong +#define CRYPTO_ghash_init qcloudssl_CRYPTO_ghash_init +#define X509_LOOKUP_shutdown qcloudssl_X509_LOOKUP_shutdown +#define EVP_PKEY_CTX_get_rsa_padding qcloudssl_EVP_PKEY_CTX_get_rsa_padding +#define X509_CRL_it qcloudssl_X509_CRL_it +#define DISPLAYTEXT_free qcloudssl_DISPLAYTEXT_free +#define SSL_CTX_set_signed_cert_timestamp_list qcloudssl_SSL_CTX_set_signed_cert_timestamp_list +#define X509_issuer_name_hash qcloudssl_X509_issuer_name_hash +#define PEM_read_bio_ECPrivateKey qcloudssl_PEM_read_bio_ECPrivateKey +#define PEM_read_bio_RSAPrivateKey qcloudssl_PEM_read_bio_RSAPrivateKey +#define kOpenSSLReasonValuesLen qcloudssl_kOpenSSLReasonValuesLen +#define v3_ocsp_accresp qcloudssl_v3_ocsp_accresp +#define BN_CTX_new qcloudssl_BN_CTX_new +#define i2v_GENERAL_NAME qcloudssl_i2v_GENERAL_NAME +#define EVP_VerifyInit qcloudssl_EVP_VerifyInit +#define SSL_CTX_set_psk_client_callback qcloudssl_SSL_CTX_set_psk_client_callback +#define NETSCAPE_SPKI_new qcloudssl_NETSCAPE_SPKI_new +#define SSL_SESSION_get_id qcloudssl_SSL_SESSION_get_id +#define CMAC_CTX_new qcloudssl_CMAC_CTX_new +#define X509_SIG_new qcloudssl_X509_SIG_new +#define SSL_CTX_set_select_certificate_cb qcloudssl_SSL_CTX_set_select_certificate_cb +#define ASN1_INTEGER_it qcloudssl_ASN1_INTEGER_it +#define ASN1_PRINTABLE_it qcloudssl_ASN1_PRINTABLE_it +#define ASN1_T61STRING_free qcloudssl_ASN1_T61STRING_free +#define 
SSL_set_read_ahead qcloudssl_SSL_set_read_ahead +#define EVP_PKEY_assign_EC_KEY qcloudssl_EVP_PKEY_assign_EC_KEY +#define i2a_ASN1_ENUMERATED qcloudssl_i2a_ASN1_ENUMERATED +#define DIST_POINT_new qcloudssl_DIST_POINT_new +#define EC_KEY_generate_key qcloudssl_EC_KEY_generate_key +#define SHA384_Init qcloudssl_SHA384_Init +#define CMAC_Final qcloudssl_CMAC_Final +#define EVP_aead_aes_128_cbc_sha1_ssl3 qcloudssl_EVP_aead_aes_128_cbc_sha1_ssl3 +#define BN_MONT_CTX_copy qcloudssl_BN_MONT_CTX_copy +#define X509_PUBKEY_set0_param qcloudssl_X509_PUBKEY_set0_param +#define EVP_EncryptInit qcloudssl_EVP_EncryptInit +#define BN_MONT_CTX_set_locked qcloudssl_BN_MONT_CTX_set_locked +#define EC_POINT_set_to_infinity qcloudssl_EC_POINT_set_to_infinity +#define CBB_add_asn1 qcloudssl_CBB_add_asn1 +#define BIO_set_retry_special qcloudssl_BIO_set_retry_special +#define SSL_free qcloudssl_SSL_free +#define X509_policy_node_get0_policy qcloudssl_X509_policy_node_get0_policy +#define ASN1_TYPE_cmp qcloudssl_ASN1_TYPE_cmp +#define ASN1_ENUMERATED_it qcloudssl_ASN1_ENUMERATED_it +#define RSA_get_ex_new_index qcloudssl_RSA_get_ex_new_index +#define i2d_RSAPrivateKey_fp qcloudssl_i2d_RSAPrivateKey_fp +#define RAND_set_rand_method qcloudssl_RAND_set_rand_method +#define SHA512 qcloudssl_SHA512 +#define i2d_X509_AUX qcloudssl_i2d_X509_AUX +#define BIO_ctrl_pending qcloudssl_BIO_ctrl_pending +#define policy_data_new qcloudssl_policy_data_new +#define X509_CRL_get0_by_serial qcloudssl_X509_CRL_get0_by_serial +#define ASN1_GENERALIZEDTIME_set qcloudssl_ASN1_GENERALIZEDTIME_set +#define X509_EXTENSION_create_by_NID qcloudssl_X509_EXTENSION_create_by_NID +#define X509_NAME_delete_entry qcloudssl_X509_NAME_delete_entry +#define d2i_RSAPublicKey_bio qcloudssl_d2i_RSAPublicKey_bio +#define BUF_strndup qcloudssl_BUF_strndup +#define SSL_set_psk_client_callback qcloudssl_SSL_set_psk_client_callback +#define i2d_EC_PUBKEY qcloudssl_i2d_EC_PUBKEY +#define EVP_PKEY_print_public 
qcloudssl_EVP_PKEY_print_public +#define SSL_set_tls13_variant qcloudssl_SSL_set_tls13_variant +#define BN_bn2hex qcloudssl_BN_bn2hex +#define d2i_POLICYINFO qcloudssl_d2i_POLICYINFO +#define HMAC_Final qcloudssl_HMAC_Final +#define ASN1_template_free qcloudssl_ASN1_template_free +#define X509_subject_name_hash qcloudssl_X509_subject_name_hash +#define UTF8_getc qcloudssl_UTF8_getc +#define ASN1_UTCTIME_free qcloudssl_ASN1_UTCTIME_free +#define EVP_MD_CTX_copy qcloudssl_EVP_MD_CTX_copy +#define EVP_CIPHER_CTX_get_app_data qcloudssl_EVP_CIPHER_CTX_get_app_data +#define BIO_set_nbio qcloudssl_BIO_set_nbio +#define X509_VERIFY_PARAM_set_purpose qcloudssl_X509_VERIFY_PARAM_set_purpose +#define SSL_CTX_set_info_callback qcloudssl_SSL_CTX_set_info_callback +#define EVP_aes_128_ctr qcloudssl_EVP_aes_128_ctr +#define EVP_PKEY_missing_parameters qcloudssl_EVP_PKEY_missing_parameters +#define X509_EXTENSION_dup qcloudssl_X509_EXTENSION_dup +#define OPENSSL_ia32cap_P qcloudssl_OPENSSL_ia32cap_P +#define X509_SIG_free qcloudssl_X509_SIG_free +#define ASN1_item_ndef_i2d qcloudssl_ASN1_item_ndef_i2d +#define PEM_read_ECPrivateKey qcloudssl_PEM_read_ECPrivateKey +#define NETSCAPE_SPKI_b64_decode qcloudssl_NETSCAPE_SPKI_b64_decode +#define SPAKE2_process_msg qcloudssl_SPAKE2_process_msg +#define v3_akey_id qcloudssl_v3_akey_id +#define d2i_USERNOTICE qcloudssl_d2i_USERNOTICE +#define ed25519_pkey_meth qcloudssl_ed25519_pkey_meth +#define PEM_write_bio_PKCS8PrivateKey qcloudssl_PEM_write_bio_PKCS8PrivateKey +#define CBS_skip qcloudssl_CBS_skip +#define DTLSv1_server_method qcloudssl_DTLSv1_server_method +#define CRYPTO_gcm128_setiv qcloudssl_CRYPTO_gcm128_setiv +#define X509_REQ_set_version qcloudssl_X509_REQ_set_version +#define SSL_CTX_set1_verify_cert_store qcloudssl_SSL_CTX_set1_verify_cert_store +#define PEM_read_PrivateKey qcloudssl_PEM_read_PrivateKey +#define CBS_get_last_u8 qcloudssl_CBS_get_last_u8 +#define pkcs12_key_gen qcloudssl_pkcs12_key_gen +#define 
d2i_AutoPrivateKey qcloudssl_d2i_AutoPrivateKey +#define ASN1_OCTET_STRING_free qcloudssl_ASN1_OCTET_STRING_free +#define EVP_PKEY_CTX_get_rsa_pss_saltlen qcloudssl_EVP_PKEY_CTX_get_rsa_pss_saltlen +#define BUF_MEM_grow_clean qcloudssl_BUF_MEM_grow_clean +#define SSL_CTX_get_quiet_shutdown qcloudssl_SSL_CTX_get_quiet_shutdown +#define SSL_get_structure_sizes qcloudssl_SSL_get_structure_sizes +#define ERR_pop_to_mark qcloudssl_ERR_pop_to_mark +#define X509_PUBKEY_set qcloudssl_X509_PUBKEY_set +#define OPENSSL_malloc qcloudssl_OPENSSL_malloc +#define X509_PKEY_new qcloudssl_X509_PKEY_new +#define X509_LOOKUP_free qcloudssl_X509_LOOKUP_free +#define DSA_new qcloudssl_DSA_new +#define EVP_DigestInit qcloudssl_EVP_DigestInit +#define PEM_write_bio_PKCS8 qcloudssl_PEM_write_bio_PKCS8 +#define X509_NAME_hash qcloudssl_X509_NAME_hash +#define ASN1_primitive_new qcloudssl_ASN1_primitive_new +#define BIO_set_fd qcloudssl_BIO_set_fd +#define RSA_parse_private_key qcloudssl_RSA_parse_private_key +#define SSL_set1_curves_list qcloudssl_SSL_set1_curves_list +#define CRYPTO_cfb128_8_encrypt qcloudssl_CRYPTO_cfb128_8_encrypt +#define SSL_CTX_set0_verify_cert_store qcloudssl_SSL_CTX_set0_verify_cert_store +#define rsa_asn1_meth qcloudssl_rsa_asn1_meth +#define SSL_use_certificate_file qcloudssl_SSL_use_certificate_file +#define X509_NAME_get_text_by_OBJ qcloudssl_X509_NAME_get_text_by_OBJ +#define SSLeay qcloudssl_SSLeay +#define ASN1_UNIVERSALSTRING_new qcloudssl_ASN1_UNIVERSALSTRING_new +#define CBS_copy_bytes qcloudssl_CBS_copy_bytes +#define d2i_PKCS8_fp qcloudssl_d2i_PKCS8_fp +#define SSL_use_PrivateKey_file qcloudssl_SSL_use_PrivateKey_file +#define CRYPTO_malloc_init qcloudssl_CRYPTO_malloc_init +#define GENERAL_SUBTREE_new qcloudssl_GENERAL_SUBTREE_new +#define PKCS8_decrypt qcloudssl_PKCS8_decrypt +#define PEM_read_bio_X509 qcloudssl_PEM_read_bio_X509 +#define SSL_clear qcloudssl_SSL_clear +#define EVP_md4 qcloudssl_EVP_md4 +#define SSL_set_rfd qcloudssl_SSL_set_rfd 
+#define SSL_get_extms_support qcloudssl_SSL_get_extms_support +#define X509_EXTENSION_new qcloudssl_X509_EXTENSION_new +#define X25519 qcloudssl_X25519 +#define DSAparams_dup qcloudssl_DSAparams_dup +#define EVP_EncryptFinal_ex qcloudssl_EVP_EncryptFinal_ex +#define d2i_ASN1_BMPSTRING qcloudssl_d2i_ASN1_BMPSTRING +#define EVP_md5_sha1 qcloudssl_EVP_md5_sha1 +#define X509_REQ_print qcloudssl_X509_REQ_print +#define EVP_CIPHER_key_length qcloudssl_EVP_CIPHER_key_length +#define PEM_X509_INFO_read_bio qcloudssl_PEM_X509_INFO_read_bio +#define ERR_print_errors qcloudssl_ERR_print_errors +#define OPENSSL_config qcloudssl_OPENSSL_config +#define SSL_get_srtp_profiles qcloudssl_SSL_get_srtp_profiles +#define X509V3_EXT_print_fp qcloudssl_X509V3_EXT_print_fp +#define SSL_CTX_sess_set_new_cb qcloudssl_SSL_CTX_sess_set_new_cb +#define NETSCAPE_SPKI_b64_encode qcloudssl_NETSCAPE_SPKI_b64_encode +#define d2i_X509_ALGORS qcloudssl_d2i_X509_ALGORS +#define SSL_CTX_get_verify_depth qcloudssl_SSL_CTX_get_verify_depth +#define EVP_cleanup qcloudssl_EVP_cleanup +#define EVP_aes_256_ctr qcloudssl_EVP_aes_256_ctr +#define d2i_PKCS12 qcloudssl_d2i_PKCS12 +#define i2v_ASN1_BIT_STRING qcloudssl_i2v_ASN1_BIT_STRING +#define X509_CRL_get_ext_by_critical qcloudssl_X509_CRL_get_ext_by_critical +#define X509_ALGOR_new qcloudssl_X509_ALGOR_new +#define d2i_EXTENDED_KEY_USAGE qcloudssl_d2i_EXTENDED_KEY_USAGE +#define EVP_CIPHER_CTX_copy qcloudssl_EVP_CIPHER_CTX_copy +#define X509_NAME_ENTRY_create_by_txt qcloudssl_X509_NAME_ENTRY_create_by_txt +#define X509_STORE_get1_certs qcloudssl_X509_STORE_get1_certs +#define ASN1_TYPE_set1 qcloudssl_ASN1_TYPE_set1 +#define X509_REQ_print_fp qcloudssl_X509_REQ_print_fp +#define EVP_PKEY_assign qcloudssl_EVP_PKEY_assign +#define EVP_DecryptUpdate qcloudssl_EVP_DecryptUpdate +#define i2d_DSAPrivateKey_bio qcloudssl_i2d_DSAPrivateKey_bio +#define PEM_write_PrivateKey qcloudssl_PEM_write_PrivateKey +#define EVP_PKEY_set1_DSA qcloudssl_EVP_PKEY_set1_DSA 
+#define BN_abs_is_word qcloudssl_BN_abs_is_word +#define EVP_MD_block_size qcloudssl_EVP_MD_block_size +#define SSL_set_private_key_method qcloudssl_SSL_set_private_key_method +#define X509_CRL_add0_revoked qcloudssl_X509_CRL_add0_revoked +#define i2d_DIST_POINT qcloudssl_i2d_DIST_POINT +#define X509_REQ_add1_attr_by_NID qcloudssl_X509_REQ_add1_attr_by_NID +#define ASN1_UTCTIME_print qcloudssl_ASN1_UTCTIME_print +#define EC_KEY_get0_group qcloudssl_EC_KEY_get0_group +#define EVP_SignInit_ex qcloudssl_EVP_SignInit_ex +#define SSL_certs_clear qcloudssl_SSL_certs_clear +#define PKCS8_encrypt qcloudssl_PKCS8_encrypt +#define ec_GFp_simple_point_init qcloudssl_ec_GFp_simple_point_init +#define DES_ncbc_encrypt qcloudssl_DES_ncbc_encrypt +#define EVP_PKCS82PKEY qcloudssl_EVP_PKCS82PKEY +#define d2i_DISPLAYTEXT qcloudssl_d2i_DISPLAYTEXT +#define PEM_X509_INFO_write_bio qcloudssl_PEM_X509_INFO_write_bio +#define ASN1_item_unpack qcloudssl_ASN1_item_unpack +#define PEM_read_bio_PKCS8 qcloudssl_PEM_read_bio_PKCS8 +#define EVP_EncodeUpdate qcloudssl_EVP_EncodeUpdate +#define SSL_set_early_data_enabled qcloudssl_SSL_set_early_data_enabled +#define ERR_error_string qcloudssl_ERR_error_string +#define SSL_set_psk_server_callback qcloudssl_SSL_set_psk_server_callback +#define PKCS5_PBKDF2_HMAC_SHA1 qcloudssl_PKCS5_PBKDF2_HMAC_SHA1 +#define NCONF_get_section qcloudssl_NCONF_get_section +#define sk_set_cmp_func qcloudssl_sk_set_cmp_func +#define ASN1_GENERALIZEDTIME_it qcloudssl_ASN1_GENERALIZEDTIME_it +#define X509_CRL_get_ext_by_OBJ qcloudssl_X509_CRL_get_ext_by_OBJ +#define SHA224_Update qcloudssl_SHA224_Update +#define RSA_get0_key qcloudssl_RSA_get0_key +#define X509_issuer_name_cmp qcloudssl_X509_issuer_name_cmp +#define BIO_find_type qcloudssl_BIO_find_type +#define EC_GROUP_cmp qcloudssl_EC_GROUP_cmp +#define SSL_CTX_set_alpn_protos qcloudssl_SSL_CTX_set_alpn_protos +#define SSL_CTX_set_retain_only_sha256_of_client_certs 
qcloudssl_SSL_CTX_set_retain_only_sha256_of_client_certs +#define d2i_X509_fp qcloudssl_d2i_X509_fp +#define ASN1_OCTET_STRING_new qcloudssl_ASN1_OCTET_STRING_new +#define GENERAL_NAME_set0_othername qcloudssl_GENERAL_NAME_set0_othername +#define POLICYINFO_free qcloudssl_POLICYINFO_free +#define CBS_get_asn1_uint64 qcloudssl_CBS_get_asn1_uint64 +#define d2i_DSA_PUBKEY_fp qcloudssl_d2i_DSA_PUBKEY_fp +#define SSL_CTX_set_signing_algorithm_prefs qcloudssl_SSL_CTX_set_signing_algorithm_prefs +#define EVP_rc2_40_cbc qcloudssl_EVP_rc2_40_cbc +#define X509at_add1_attr_by_NID qcloudssl_X509at_add1_attr_by_NID +#define BIO_int_ctrl qcloudssl_BIO_int_ctrl +#define EVP_md5 qcloudssl_EVP_md5 +#define SSL_enable_ocsp_stapling qcloudssl_SSL_enable_ocsp_stapling +#define ERR_free_strings qcloudssl_ERR_free_strings +#define AUTHORITY_KEYID_new qcloudssl_AUTHORITY_KEYID_new +#define X509_STORE_CTX_get0_current_issuer qcloudssl_X509_STORE_CTX_get0_current_issuer +#define DSA_up_ref qcloudssl_DSA_up_ref +#define X509_NAME_ENTRY_set_object qcloudssl_X509_NAME_ENTRY_set_object +#define EVP_PKEY_CTX_set_rsa_mgf1_md qcloudssl_EVP_PKEY_CTX_set_rsa_mgf1_md +#define X509at_get_attr_count qcloudssl_X509at_get_attr_count +#define X509_time_adj_ex qcloudssl_X509_time_adj_ex +#define DSA_set_ex_data qcloudssl_DSA_set_ex_data +#define SSL_COMP_get_compression_methods qcloudssl_SSL_COMP_get_compression_methods +#define SSL_CTX_get_cert_store qcloudssl_SSL_CTX_get_cert_store +#define GENERAL_NAME_cmp qcloudssl_GENERAL_NAME_cmp +#define SHA1 qcloudssl_SHA1 +#define ERR_load_ERR_strings qcloudssl_ERR_load_ERR_strings +#define ec_GFp_simple_point_finish qcloudssl_ec_GFp_simple_point_finish +#define i2d_PROXY_POLICY qcloudssl_i2d_PROXY_POLICY +#define i2d_X509_CRL qcloudssl_i2d_X509_CRL +#define EVP_PKEY_assign_DSA qcloudssl_EVP_PKEY_assign_DSA +#define SSL_renegotiate_pending qcloudssl_SSL_renegotiate_pending +#define i2d_ISSUING_DIST_POINT qcloudssl_i2d_ISSUING_DIST_POINT +#define 
BN_mod_exp_mont_consttime qcloudssl_BN_mod_exp_mont_consttime +#define ec_GFp_mont_group_finish qcloudssl_ec_GFp_mont_group_finish +#define BN_new qcloudssl_BN_new +#define EVP_PKEY_CTX_dup qcloudssl_EVP_PKEY_CTX_dup +#define SSL_CTX_set_timeout qcloudssl_SSL_CTX_set_timeout +#define X509_REVOKED_get_ext_by_NID qcloudssl_X509_REVOKED_get_ext_by_NID +#define BIO_pending qcloudssl_BIO_pending +#define X509_alias_set1 qcloudssl_X509_alias_set1 +#define BIO_new_socket qcloudssl_BIO_new_socket +#define UTF8_putc qcloudssl_UTF8_putc +#define c2i_ASN1_BIT_STRING qcloudssl_c2i_ASN1_BIT_STRING +#define EC_GROUP_method_of qcloudssl_EC_GROUP_method_of +#define CBB_add_u24 qcloudssl_CBB_add_u24 +#define d2i_X509_CINF qcloudssl_d2i_X509_CINF +#define d2i_PUBKEY_fp qcloudssl_d2i_PUBKEY_fp +#define PEM_do_header qcloudssl_PEM_do_header +#define SSL_CTX_sess_cb_hits qcloudssl_SSL_CTX_sess_cb_hits +#define PEM_read_PKCS8_PRIV_KEY_INFO qcloudssl_PEM_read_PKCS8_PRIV_KEY_INFO +#define TLSv1_1_server_method qcloudssl_TLSv1_1_server_method +#define X509_it qcloudssl_X509_it +#define SSL_CTX_add_server_custom_ext qcloudssl_SSL_CTX_add_server_custom_ext +#define X509_free qcloudssl_X509_free +#define policy_cache_set_mapping qcloudssl_policy_cache_set_mapping +#define X509_STORE_CTX_set0_param qcloudssl_X509_STORE_CTX_set0_param +#define X509V3_add_value_int qcloudssl_X509V3_add_value_int +#define X509_CRL_match qcloudssl_X509_CRL_match +#define i2d_X509_CERT_AUX qcloudssl_i2d_X509_CERT_AUX +#define BN_sqrt qcloudssl_BN_sqrt +#define MD4_Transform qcloudssl_MD4_Transform +#define BN_nnmod qcloudssl_BN_nnmod +#define i2d_ASN1_PRINTABLE qcloudssl_i2d_ASN1_PRINTABLE +#define RSA_check_fips qcloudssl_RSA_check_fips +#define BN_gcd qcloudssl_BN_gcd +#define SSL_get0_chain_certs qcloudssl_SSL_get0_chain_certs +#define EVP_PKEY_bits qcloudssl_EVP_PKEY_bits +#define CBB_add_u24_length_prefixed qcloudssl_CBB_add_u24_length_prefixed +#define X509_CRL_free qcloudssl_X509_CRL_free +#define 
BIO_set_ssl qcloudssl_BIO_set_ssl +#define EVP_aes_192_cbc qcloudssl_EVP_aes_192_cbc +#define RSA_is_opaque qcloudssl_RSA_is_opaque +#define PKCS8_PRIV_KEY_INFO_free qcloudssl_PKCS8_PRIV_KEY_INFO_free +#define USERNOTICE_new qcloudssl_USERNOTICE_new +#define X509_REQ_free qcloudssl_X509_REQ_free +#define sk_set qcloudssl_sk_set +#define SSL_CTX_get_extra_chain_certs qcloudssl_SSL_CTX_get_extra_chain_certs +#define X509_NAME_dup qcloudssl_X509_NAME_dup +#define CTR_DRBG_init qcloudssl_CTR_DRBG_init +#define SSL_SESSION_set_time qcloudssl_SSL_SESSION_set_time +#define X509_REVOKED_dup qcloudssl_X509_REVOKED_dup +#define SSL_read qcloudssl_SSL_read +#define X509_REQ_get_extensions qcloudssl_X509_REQ_get_extensions +#define ERR_error_string_n qcloudssl_ERR_error_string_n +#define sk_insert qcloudssl_sk_insert +#define d2i_ASN1_OBJECT qcloudssl_d2i_ASN1_OBJECT +#define PROXY_POLICY_free qcloudssl_PROXY_POLICY_free +#define EVP_PKEY_encrypt_init qcloudssl_EVP_PKEY_encrypt_init +#define ERR_print_errors_fp qcloudssl_ERR_print_errors_fp +#define d2i_ECParameters qcloudssl_d2i_ECParameters +#define sk_value qcloudssl_sk_value +#define X509_add_ext qcloudssl_X509_add_ext +#define c2i_ASN1_OBJECT qcloudssl_c2i_ASN1_OBJECT +#define PEM_X509_INFO_read qcloudssl_PEM_X509_INFO_read +#define SSL_load_client_CA_file qcloudssl_SSL_load_client_CA_file +#define EVP_PKEY_CTX_set0_rsa_oaep_label qcloudssl_EVP_PKEY_CTX_set0_rsa_oaep_label +#define X509_STORE_up_ref qcloudssl_X509_STORE_up_ref +#define ASN1_OCTET_STRING_cmp qcloudssl_ASN1_OCTET_STRING_cmp +#define SSL_set_chain_and_key qcloudssl_SSL_set_chain_and_key +#define X509_SIG_it qcloudssl_X509_SIG_it +#define BN_nnmod_pow2 qcloudssl_BN_nnmod_pow2 +#define BASIC_CONSTRAINTS_new qcloudssl_BASIC_CONSTRAINTS_new +#define ERR_get_error qcloudssl_ERR_get_error +#define X509_STORE_CTX_free qcloudssl_X509_STORE_CTX_free +#define X509_REVOKED_set_serialNumber qcloudssl_X509_REVOKED_set_serialNumber +#define EVP_CIPHER_CTX_key_length 
qcloudssl_EVP_CIPHER_CTX_key_length +#define BIO_read_asn1 qcloudssl_BIO_read_asn1 +#define RSA_private_key_from_bytes qcloudssl_RSA_private_key_from_bytes +#define CRYPTO_cbc128_encrypt qcloudssl_CRYPTO_cbc128_encrypt +#define CRYPTO_BUFFER_len qcloudssl_CRYPTO_BUFFER_len +#define X509_TRUST_get_trust qcloudssl_X509_TRUST_get_trust +#define DSA_dup_DH qcloudssl_DSA_dup_DH +#define EC_GROUP_get_degree qcloudssl_EC_GROUP_get_degree +#define PEM_read_DSA_PUBKEY qcloudssl_PEM_read_DSA_PUBKEY +#define SSL_CTX_set_psk_server_callback qcloudssl_SSL_CTX_set_psk_server_callback +#define asn1_ex_c2i qcloudssl_asn1_ex_c2i +#define ASN1_VISIBLESTRING_new qcloudssl_ASN1_VISIBLESTRING_new +#define i2d_DSA_PUBKEY_bio qcloudssl_i2d_DSA_PUBKEY_bio +#define ASN1_UTF8STRING_it qcloudssl_ASN1_UTF8STRING_it +#define EVP_aes_128_ofb qcloudssl_EVP_aes_128_ofb +#define ERR_get_error_line qcloudssl_ERR_get_error_line +#define BIO_s_mem qcloudssl_BIO_s_mem +#define SSL_set_retain_only_sha256_of_client_certs qcloudssl_SSL_set_retain_only_sha256_of_client_certs +#define BN_mod_sqr qcloudssl_BN_mod_sqr +#define RSA_sign_pss_mgf1 qcloudssl_RSA_sign_pss_mgf1 +#define EVP_set_buggy_rsa_parser qcloudssl_EVP_set_buggy_rsa_parser +#define CBB_cleanup qcloudssl_CBB_cleanup +#define x25519_ge_tobytes qcloudssl_x25519_ge_tobytes +#define BIO_next qcloudssl_BIO_next +#define ASN1_STRING_TABLE_get qcloudssl_ASN1_STRING_TABLE_get +#define X509_get1_ocsp qcloudssl_X509_get1_ocsp +#define PEM_write_DHparams qcloudssl_PEM_write_DHparams +#define X509_NAME_hash_old qcloudssl_X509_NAME_hash_old +#define CRYPTO_gcm128_encrypt qcloudssl_CRYPTO_gcm128_encrypt +#define v3_sinfo qcloudssl_v3_sinfo +#define POLICY_MAPPING_new qcloudssl_POLICY_MAPPING_new +#define X509_REQ_dup qcloudssl_X509_REQ_dup +#define BN_mod_lshift1 qcloudssl_BN_mod_lshift1 +#define RSA_add_pkcs1_prefix qcloudssl_RSA_add_pkcs1_prefix +#define OPENSSL_strncasecmp qcloudssl_OPENSSL_strncasecmp +#define X509_LOOKUP_hash_dir 
qcloudssl_X509_LOOKUP_hash_dir +#define SSL_get_ivs qcloudssl_SSL_get_ivs +#define ENGINE_get_ECDSA_method qcloudssl_ENGINE_get_ECDSA_method +#define X509_TRUST_get_count qcloudssl_X509_TRUST_get_count +#define BN_mod_lshift1_quick qcloudssl_BN_mod_lshift1_quick +#define ASN1_TIME_free qcloudssl_ASN1_TIME_free +#define SHA512_Transform qcloudssl_SHA512_Transform +#define SSL_CTX_new qcloudssl_SSL_CTX_new +#define TLS_server_method qcloudssl_TLS_server_method +#define SSL_SESSION_get_ex_data qcloudssl_SSL_SESSION_get_ex_data +#define lh_insert qcloudssl_lh_insert +#define SSL_CTX_get_max_cert_list qcloudssl_SSL_CTX_get_max_cert_list +#define RSA_PSS_PARAMS_new qcloudssl_RSA_PSS_PARAMS_new +#define SSL_get_current_cipher qcloudssl_SSL_get_current_cipher +#define CRYPTO_get_dynlock_create_callback qcloudssl_CRYPTO_get_dynlock_create_callback +#define ECDSA_size qcloudssl_ECDSA_size +#define EVP_aes_128_cbc qcloudssl_EVP_aes_128_cbc +#define SSL_get_default_timeout qcloudssl_SSL_get_default_timeout +#define BN_equal_consttime qcloudssl_BN_equal_consttime +#define EC_POINT_mul qcloudssl_EC_POINT_mul +#define SSL_SESSION_new qcloudssl_SSL_SESSION_new +#define ECDSA_do_verify qcloudssl_ECDSA_do_verify +#define X509_digest qcloudssl_X509_digest +#define SSL_in_init qcloudssl_SSL_in_init +#define X509_LOOKUP_by_fingerprint qcloudssl_X509_LOOKUP_by_fingerprint +#define ASN1_INTEGER_new qcloudssl_ASN1_INTEGER_new +#define X509_CINF_free qcloudssl_X509_CINF_free +#define EVP_DecryptInit qcloudssl_EVP_DecryptInit +#define SSL_set_session qcloudssl_SSL_set_session +#define d2i_X509_CRL_bio qcloudssl_d2i_X509_CRL_bio +#define SSL_add0_chain_cert qcloudssl_SSL_add0_chain_cert +#define BN_bn2le_padded qcloudssl_BN_bn2le_padded +#define PEM_ASN1_read qcloudssl_PEM_ASN1_read +#define SSL_CTX_set_max_cert_list qcloudssl_SSL_CTX_set_max_cert_list +#define CRYPTO_set_ex_data qcloudssl_CRYPTO_set_ex_data +#define CRYPTO_BUFFER_free qcloudssl_CRYPTO_BUFFER_free +#define 
DIST_POINT_set_dpname qcloudssl_DIST_POINT_set_dpname +#define MD4 qcloudssl_MD4 +#define EVP_DigestSign qcloudssl_EVP_DigestSign +#define OBJ_sn2nid qcloudssl_OBJ_sn2nid +#define X509_EXTENSION_set_object qcloudssl_X509_EXTENSION_set_object +#define X509_CRL_sort qcloudssl_X509_CRL_sort +#define X509_NAME_cmp qcloudssl_X509_NAME_cmp +#define SSL_CTX_free qcloudssl_SSL_CTX_free +#define CBS_get_optional_asn1_bool qcloudssl_CBS_get_optional_asn1_bool +#define MD5 qcloudssl_MD5 +#define X25519_keypair qcloudssl_X25519_keypair +#define EVP_PKEY_id qcloudssl_EVP_PKEY_id +#define EVP_aes_256_ofb qcloudssl_EVP_aes_256_ofb +#define lh_retrieve qcloudssl_lh_retrieve +#define d2i_RSAPublicKey qcloudssl_d2i_RSAPublicKey +#define i2s_ASN1_ENUMERATED_TABLE qcloudssl_i2s_ASN1_ENUMERATED_TABLE +#define d2i_ASN1_UNIVERSALSTRING qcloudssl_d2i_ASN1_UNIVERSALSTRING +#define ERR_peek_error_line_data qcloudssl_ERR_peek_error_line_data +#define x25519_ge_p1p1_to_p3 qcloudssl_x25519_ge_p1p1_to_p3 +#define CBS_get_asn1_element qcloudssl_CBS_get_asn1_element +#define d2i_ISSUING_DIST_POINT qcloudssl_d2i_ISSUING_DIST_POINT +#define BIO_get_mem_data qcloudssl_BIO_get_mem_data +#define CBS_asn1_ber_to_der qcloudssl_CBS_asn1_ber_to_der +#define SSL_CIPHER_get_rfc_name qcloudssl_SSL_CIPHER_get_rfc_name +#define SSL_CTX_set_default_verify_paths qcloudssl_SSL_CTX_set_default_verify_paths +#define X509_CRL_verify qcloudssl_X509_CRL_verify +#define DIST_POINT_NAME_free qcloudssl_DIST_POINT_NAME_free +#define SSL_get_rbio qcloudssl_SSL_get_rbio +#define DH_check_pub_key qcloudssl_DH_check_pub_key +#define EVP_EncryptUpdate qcloudssl_EVP_EncryptUpdate +#define d2i_DSAPrivateKey_fp qcloudssl_d2i_DSAPrivateKey_fp +#define EVP_SignFinal qcloudssl_EVP_SignFinal +#define CBS_get_optional_asn1_octet_string qcloudssl_CBS_get_optional_asn1_octet_string +#define i2d_ASN1_IA5STRING qcloudssl_i2d_ASN1_IA5STRING +#define X509V3_add_value qcloudssl_X509V3_add_value +#define ASN1_STRING_data 
qcloudssl_ASN1_STRING_data +#define BIO_free_all qcloudssl_BIO_free_all +#define sk_num qcloudssl_sk_num +#define ASN1_item_ex_i2d qcloudssl_ASN1_item_ex_i2d +#define CRYPTO_POLYVAL_finish qcloudssl_CRYPTO_POLYVAL_finish +#define i2d_RSA_PUBKEY_fp qcloudssl_i2d_RSA_PUBKEY_fp +#define RSA_get0_crt_params qcloudssl_RSA_get0_crt_params +#define X509_add1_reject_object qcloudssl_X509_add1_reject_object +#define X509_time_adj qcloudssl_X509_time_adj +#define PEM_read_bio_DSA_PUBKEY qcloudssl_PEM_read_bio_DSA_PUBKEY +#define X509V3_EXT_nconf_nid qcloudssl_X509V3_EXT_nconf_nid +#define X509_REQ_INFO_it qcloudssl_X509_REQ_INFO_it +#define EVP_aes_256_cbc qcloudssl_EVP_aes_256_cbc +#define SSL_set_tls_channel_id_enabled qcloudssl_SSL_set_tls_channel_id_enabled +#define BN_MONT_CTX_free qcloudssl_BN_MONT_CTX_free +#define X509v3_get_ext_by_OBJ qcloudssl_X509v3_get_ext_by_OBJ +#define ERR_peek_error_line qcloudssl_ERR_peek_error_line +#define SSL_CTX_set_client_cert_cb qcloudssl_SSL_CTX_set_client_cert_cb +#define HKDF qcloudssl_HKDF +#define X509_CRL_add1_ext_i2d qcloudssl_X509_CRL_add1_ext_i2d +#define X509_CRL_sign qcloudssl_X509_CRL_sign +#define EVP_PKEY_sign_init qcloudssl_EVP_PKEY_sign_init +#define ec_GFp_simple_field_mul qcloudssl_ec_GFp_simple_field_mul +#define ASN1_TYPE_free qcloudssl_ASN1_TYPE_free +#define X509_REQ_add1_attr_by_txt qcloudssl_X509_REQ_add1_attr_by_txt +#define PEM_write_DSA_PUBKEY qcloudssl_PEM_write_DSA_PUBKEY +#define ASN1_item_digest qcloudssl_ASN1_item_digest +#define X509_REQ_sign qcloudssl_X509_REQ_sign +#define SSL_get_psk_identity qcloudssl_SSL_get_psk_identity +#define SSL_use_psk_identity_hint qcloudssl_SSL_use_psk_identity_hint +#define CBS_get_asn1_implicit_string qcloudssl_CBS_get_asn1_implicit_string +#define DTLSv1_handle_timeout qcloudssl_DTLSv1_handle_timeout +#define BN_parse_asn1_unsigned qcloudssl_BN_parse_asn1_unsigned +#define DTLSv1_2_client_method qcloudssl_DTLSv1_2_client_method +#define BN_copy qcloudssl_BN_copy +#define 
RAND_poll qcloudssl_RAND_poll +#define X509_VAL_it qcloudssl_X509_VAL_it +#define EVP_Cipher qcloudssl_EVP_Cipher +#define OPENSSL_cleanse qcloudssl_OPENSSL_cleanse +#define SSL_get_curve_name qcloudssl_SSL_get_curve_name +#define SSLv23_method qcloudssl_SSLv23_method +#define X509_NAME_add_entry_by_NID qcloudssl_X509_NAME_add_entry_by_NID +#define X509_REVOKED_it qcloudssl_X509_REVOKED_it +#define d2i_X509 qcloudssl_d2i_X509 +#define X509_subject_name_hash_old qcloudssl_X509_subject_name_hash_old +#define ACCESS_DESCRIPTION_free qcloudssl_ACCESS_DESCRIPTION_free +#define RSA_size qcloudssl_RSA_size +#define HMAC_size qcloudssl_HMAC_size +#define EVP_MD_type qcloudssl_EVP_MD_type +#define dsa_asn1_meth qcloudssl_dsa_asn1_meth +#define rsa_greater_than_pow2 qcloudssl_rsa_greater_than_pow2 +#define i2d_PKCS8_fp qcloudssl_i2d_PKCS8_fp +#define EVP_has_aes_hardware qcloudssl_EVP_has_aes_hardware +#define CONF_modules_free qcloudssl_CONF_modules_free +#define BIO_puts qcloudssl_BIO_puts +#define DES_set_odd_parity qcloudssl_DES_set_odd_parity +#define X509_PURPOSE_get_count qcloudssl_X509_PURPOSE_get_count +#define X509_STORE_set_purpose qcloudssl_X509_STORE_set_purpose +#define CBB_len qcloudssl_CBB_len +#define EVP_CIPHER_CTX_flags qcloudssl_EVP_CIPHER_CTX_flags +#define X509_EXTENSIONS_it qcloudssl_X509_EXTENSIONS_it +#define SSL_set_max_send_fragment qcloudssl_SSL_set_max_send_fragment +#define X509_policy_tree_get0_user_policies qcloudssl_X509_policy_tree_get0_user_policies +#define X509_print_ex_fp qcloudssl_X509_print_ex_fp +#define SSL_get_server_random qcloudssl_SSL_get_server_random +#define ECDSA_SIG_max_len qcloudssl_ECDSA_SIG_max_len +#define x25519_sc_reduce qcloudssl_x25519_sc_reduce +#define i2s_ASN1_ENUMERATED qcloudssl_i2s_ASN1_ENUMERATED +#define lh_free qcloudssl_lh_free +#define X509_set_subject_name qcloudssl_X509_set_subject_name +#define EVP_PKEY_type qcloudssl_EVP_PKEY_type +#define ASN1_item_d2i_fp qcloudssl_ASN1_item_d2i_fp +#define 
ASN1_ENUMERATED_to_BN qcloudssl_ASN1_ENUMERATED_to_BN +#define X509_INFO_new qcloudssl_X509_INFO_new +#define X509_CRL_cmp qcloudssl_X509_CRL_cmp +#define SSL_set_tmp_rsa_callback qcloudssl_SSL_set_tmp_rsa_callback +#define X509_TRUST_get_by_id qcloudssl_X509_TRUST_get_by_id +#define SSL_CTX_get_channel_id_cb qcloudssl_SSL_CTX_get_channel_id_cb +#define SSL_get_SSL_CTX qcloudssl_SSL_get_SSL_CTX +#define CBB_add_asn1_uint64 qcloudssl_CBB_add_asn1_uint64 +#define X509_subject_name_cmp qcloudssl_X509_subject_name_cmp +#define CRYPTO_set_thread_local qcloudssl_CRYPTO_set_thread_local +#define SSL_CIPHER_standard_name qcloudssl_SSL_CIPHER_standard_name +#define DSA_generate_parameters_ex qcloudssl_DSA_generate_parameters_ex +#define BUF_MEM_free qcloudssl_BUF_MEM_free +#define SHA224 qcloudssl_SHA224 +#define SHA256_Final qcloudssl_SHA256_Final +#define ASN1_STRING_type_new qcloudssl_ASN1_STRING_type_new +#define EC_METHOD_get_field_type qcloudssl_EC_METHOD_get_field_type +#define ASN1_BMPSTRING_it qcloudssl_ASN1_BMPSTRING_it +#define ASN1_ENUMERATED_set qcloudssl_ASN1_ENUMERATED_set +#define BN_bn2bin_padded qcloudssl_BN_bn2bin_padded +#define SSL_export_keying_material qcloudssl_SSL_export_keying_material +#define DIST_POINT_NAME_new qcloudssl_DIST_POINT_NAME_new +#define SSL_SESSION_get_master_key qcloudssl_SSL_SESSION_get_master_key +#define X509_EXTENSION_get_data qcloudssl_X509_EXTENSION_get_data +#define X509_get_default_cert_file qcloudssl_X509_get_default_cert_file +#define SSL_CTX_check_private_key qcloudssl_SSL_CTX_check_private_key +#define bn_sqr_comba8 qcloudssl_bn_sqr_comba8 +#define RAND_egd qcloudssl_RAND_egd +#define d2i_ECPrivateKey_bio qcloudssl_d2i_ECPrivateKey_bio +#define X509_check_ip qcloudssl_X509_check_ip +#define ASN1_SET_ANY_it qcloudssl_ASN1_SET_ANY_it +#define v3_sxnet qcloudssl_v3_sxnet +#define X509_ALGOR_free qcloudssl_X509_ALGOR_free +#define BIO_new_file qcloudssl_BIO_new_file +#define i2d_BASIC_CONSTRAINTS 
qcloudssl_i2d_BASIC_CONSTRAINTS +#define BN_dup qcloudssl_BN_dup +#define EC_POINT_set_compressed_coordinates_GFp qcloudssl_EC_POINT_set_compressed_coordinates_GFp +#define X509_REQ_get1_email qcloudssl_X509_REQ_get1_email +#define SSL_set_tmp_ecdh qcloudssl_SSL_set_tmp_ecdh +#define X509_NAME_oneline qcloudssl_X509_NAME_oneline +#define d2i_EDIPARTYNAME qcloudssl_d2i_EDIPARTYNAME +#define X509_check_email qcloudssl_X509_check_email +#define i2d_X509_REVOKED qcloudssl_i2d_X509_REVOKED +#define i2d_PUBKEY qcloudssl_i2d_PUBKEY +#define X509_EXTENSION_get_critical qcloudssl_X509_EXTENSION_get_critical +#define SSL_set_cipher_list qcloudssl_SSL_set_cipher_list +#define PEM_read_RSAPublicKey qcloudssl_PEM_read_RSAPublicKey +#define X509_get_ex_data qcloudssl_X509_get_ex_data +#define BN_primality_test qcloudssl_BN_primality_test +#define HKDF_expand qcloudssl_HKDF_expand +#define X509_REVOKED_set_revocationDate qcloudssl_X509_REVOKED_set_revocationDate +#define OPENSSL_built_in_curves qcloudssl_OPENSSL_built_in_curves +#define X509_up_ref qcloudssl_X509_up_ref +#define EVP_AEAD_CTX_new qcloudssl_EVP_AEAD_CTX_new +#define d2i_GENERAL_NAME qcloudssl_d2i_GENERAL_NAME +#define PEM_bytes_read_bio qcloudssl_PEM_bytes_read_bio +#define X509_OBJECT_retrieve_by_subject qcloudssl_X509_OBJECT_retrieve_by_subject +#define X509_STORE_CTX_get1_chain qcloudssl_X509_STORE_CTX_get1_chain +#define CBS_get_any_asn1 qcloudssl_CBS_get_any_asn1 +#define RAND_load_file qcloudssl_RAND_load_file +#define X509_REQ_add1_attr_by_OBJ qcloudssl_X509_REQ_add1_attr_by_OBJ +#define ec_GFp_simple_points_make_affine qcloudssl_ec_GFp_simple_points_make_affine +#define CRYPTO_THREADID_set_pointer qcloudssl_CRYPTO_THREADID_set_pointer +#define X509_get_ext_by_OBJ qcloudssl_X509_get_ext_by_OBJ +#define BN_MONT_CTX_set qcloudssl_BN_MONT_CTX_set +#define X509_VERIFY_PARAM_clear_flags qcloudssl_X509_VERIFY_PARAM_clear_flags +#define EVP_DigestVerify qcloudssl_EVP_DigestVerify +#define d2i_EC_PUBKEY_fp 
qcloudssl_d2i_EC_PUBKEY_fp +#define EVP_AEAD_nonce_length qcloudssl_EVP_AEAD_nonce_length +#define X509_CERT_AUX_free qcloudssl_X509_CERT_AUX_free +#define NETSCAPE_SPKI_set_pubkey qcloudssl_NETSCAPE_SPKI_set_pubkey +#define BN_enhanced_miller_rabin_primality_test qcloudssl_BN_enhanced_miller_rabin_primality_test +#define EVP_PKEY_cmp qcloudssl_EVP_PKEY_cmp +#define X509_VAL_free qcloudssl_X509_VAL_free +#define sk_new qcloudssl_sk_new +#define RAND_bytes_with_additional_data qcloudssl_RAND_bytes_with_additional_data +#define EVP_PKEY_CTX_get_rsa_mgf1_md qcloudssl_EVP_PKEY_CTX_get_rsa_mgf1_md +#define X509_get_default_private_dir qcloudssl_X509_get_default_private_dir +#define ACCESS_DESCRIPTION_it qcloudssl_ACCESS_DESCRIPTION_it +#define bio_sock_error qcloudssl_bio_sock_error +#define CERTIFICATEPOLICIES_new qcloudssl_CERTIFICATEPOLICIES_new +#define DSA_verify qcloudssl_DSA_verify +#define X509_REVOKED_get_ext_count qcloudssl_X509_REVOKED_get_ext_count +#define SSL_set_state qcloudssl_SSL_set_state +#define RSA_padding_add_PKCS1_type_2 qcloudssl_RSA_padding_add_PKCS1_type_2 +#define FIPS_mode qcloudssl_FIPS_mode +#define RSA_decrypt qcloudssl_RSA_decrypt +#define ASN1_UTCTIME_adj qcloudssl_ASN1_UTCTIME_adj +#define SSL_COMP_get_name qcloudssl_SSL_COMP_get_name +#define i2d_ASN1_ENUMERATED qcloudssl_i2d_ASN1_ENUMERATED +#define X509_NAME_add_entry_by_OBJ qcloudssl_X509_NAME_add_entry_by_OBJ +#define X509_alias_get0 qcloudssl_X509_alias_get0 +#define SSL_get_certificate qcloudssl_SSL_get_certificate +#define ASN1_item_free qcloudssl_ASN1_item_free +#define d2i_PKCS8_bio qcloudssl_d2i_PKCS8_bio +#define SSL_set_shutdown qcloudssl_SSL_set_shutdown +#define bn_expand qcloudssl_bn_expand +#define EVP_MD_CTX_destroy qcloudssl_EVP_MD_CTX_destroy +#define CRYPTO_poly1305_update qcloudssl_CRYPTO_poly1305_update +#define ASN1_SEQUENCE_it qcloudssl_ASN1_SEQUENCE_it +#define SSL_SESSION_set_timeout qcloudssl_SSL_SESSION_set_timeout +#define TLSv1_server_method 
qcloudssl_TLSv1_server_method +#define METHOD_unref qcloudssl_METHOD_unref +#define d2i_DSA_PUBKEY_bio qcloudssl_d2i_DSA_PUBKEY_bio +#define X509_parse_from_buffer qcloudssl_X509_parse_from_buffer +#define CRYPTO_is_confidential_build qcloudssl_CRYPTO_is_confidential_build +#define BIO_rw_filename qcloudssl_BIO_rw_filename +#define EVP_AEAD_CTX_cleanup qcloudssl_EVP_AEAD_CTX_cleanup +#define X509_PURPOSE_get_by_sname qcloudssl_X509_PURPOSE_get_by_sname +#define PEM_ASN1_write_bio qcloudssl_PEM_ASN1_write_bio +#define PEM_get_EVP_CIPHER_INFO qcloudssl_PEM_get_EVP_CIPHER_INFO +#define name_cmp qcloudssl_name_cmp +#define SSL_CTX_get0_privatekey qcloudssl_SSL_CTX_get0_privatekey +#define X509_signature_dump qcloudssl_X509_signature_dump +#define SSL_use_certificate qcloudssl_SSL_use_certificate +#define ASN1_item_sign_ctx qcloudssl_ASN1_item_sign_ctx +#define BN_sub_word qcloudssl_BN_sub_word +#define lh_doall qcloudssl_lh_doall +#define SSL_reset_early_data_reject qcloudssl_SSL_reset_early_data_reject +#define EVP_CIPHER_CTX_block_size qcloudssl_EVP_CIPHER_CTX_block_size +#define X509_ATTRIBUTE_get0_data qcloudssl_X509_ATTRIBUTE_get0_data +#define ASN1_TIME_set_string qcloudssl_ASN1_TIME_set_string +#define BN_set_bit qcloudssl_BN_set_bit +#define i2a_ACCESS_DESCRIPTION qcloudssl_i2a_ACCESS_DESCRIPTION +#define CRYPTO_refcount_inc qcloudssl_CRYPTO_refcount_inc +#define EVP_CIPHER_flags qcloudssl_EVP_CIPHER_flags +#define X509_VERIFY_PARAM_free qcloudssl_X509_VERIFY_PARAM_free +#define PKCS12_verify_mac qcloudssl_PKCS12_verify_mac +#define X509_STORE_CTX_set_cert qcloudssl_X509_STORE_CTX_set_cert +#define d2i_X509_NAME_ENTRY qcloudssl_d2i_X509_NAME_ENTRY +#define RSA_padding_add_PKCS1_type_1 qcloudssl_RSA_padding_add_PKCS1_type_1 +#define CRYPTO_MUTEX_unlock_write qcloudssl_CRYPTO_MUTEX_unlock_write +#define X509_REQ_sign_ctx qcloudssl_X509_REQ_sign_ctx +#define X509_ATTRIBUTE_get0_object qcloudssl_X509_ATTRIBUTE_get0_object +#define X509_VERIFY_PARAM_get_depth 
qcloudssl_X509_VERIFY_PARAM_get_depth +#define RSA_sign qcloudssl_RSA_sign +#define bio_clear_socket_error qcloudssl_bio_clear_socket_error +#define X509_STORE_CTX_set_trust qcloudssl_X509_STORE_CTX_set_trust +#define X509_verify_cert_error_string qcloudssl_X509_verify_cert_error_string +#define DES_set_key qcloudssl_DES_set_key +#define BN_rand_range_ex qcloudssl_BN_rand_range_ex +#define RSAPrivateKey_dup qcloudssl_RSAPrivateKey_dup +#define PEM_read_bio_DSAparams qcloudssl_PEM_read_bio_DSAparams +#define BIO_number_read qcloudssl_BIO_number_read +#define i2d_ECDSA_SIG qcloudssl_i2d_ECDSA_SIG +#define PROXY_CERT_INFO_EXTENSION_free qcloudssl_PROXY_CERT_INFO_EXTENSION_free +#define EC_get_builtin_curves qcloudssl_EC_get_builtin_curves +#define ec_GFp_mont_field_sqr qcloudssl_ec_GFp_mont_field_sqr +#define RSA_marshal_public_key qcloudssl_RSA_marshal_public_key +#define X509_STORE_CTX_get_chain qcloudssl_X509_STORE_CTX_get_chain +#define CRYPTO_THREADID_set_callback qcloudssl_CRYPTO_THREADID_set_callback +#define ASN1_STRING_new qcloudssl_ASN1_STRING_new +#define BIO_set_conn_int_port qcloudssl_BIO_set_conn_int_port +#define EDIPARTYNAME_free qcloudssl_EDIPARTYNAME_free +#define SSL_get_options qcloudssl_SSL_get_options +#define PKEY_USAGE_PERIOD_free qcloudssl_PKEY_USAGE_PERIOD_free +#define RSA_set_ex_data qcloudssl_RSA_set_ex_data +#define level_add_node qcloudssl_level_add_node +#define OpenSSL_add_all_ciphers qcloudssl_OpenSSL_add_all_ciphers +#define BIO_vfree qcloudssl_BIO_vfree +#define SSL_get_info_callback qcloudssl_SSL_get_info_callback +#define d2i_PKEY_USAGE_PERIOD qcloudssl_d2i_PKEY_USAGE_PERIOD +#define BIO_method_type qcloudssl_BIO_method_type +#define SSL_set_bio qcloudssl_SSL_set_bio +#define ec_GFp_simple_add qcloudssl_ec_GFp_simple_add +#define SSLv3_client_method qcloudssl_SSLv3_client_method +#define SSL_get_selected_srtp_profile qcloudssl_SSL_get_selected_srtp_profile +#define OTHERNAME_it qcloudssl_OTHERNAME_it +#define ASN1_item_d2i 
qcloudssl_ASN1_item_d2i +#define BN_usub qcloudssl_BN_usub +#define X509_PURPOSE_get0_name qcloudssl_X509_PURPOSE_get0_name +#define d2i_SXNETID qcloudssl_d2i_SXNETID +#define d2i_GENERAL_NAMES qcloudssl_d2i_GENERAL_NAMES +#define X509_add1_trust_object qcloudssl_X509_add1_trust_object +#define SSL_set_purpose qcloudssl_SSL_set_purpose +#define PEM_read_PUBKEY qcloudssl_PEM_read_PUBKEY +#define SSL_get0_next_proto_negotiated qcloudssl_SSL_get0_next_proto_negotiated +#define X509_REQ_print_ex qcloudssl_X509_REQ_print_ex +#define PEM_read_bio_RSAPublicKey qcloudssl_PEM_read_bio_RSAPublicKey +#define CTR_DRBG_clear qcloudssl_CTR_DRBG_clear +#define bio_ip_and_port_to_socket_and_addr qcloudssl_bio_ip_and_port_to_socket_and_addr +#define lh_doall_arg qcloudssl_lh_doall_arg +#define X509_chain_up_ref qcloudssl_X509_chain_up_ref +#define SHA256_Init qcloudssl_SHA256_Init +#define CBS_contains_zero_byte qcloudssl_CBS_contains_zero_byte +#define X509V3_get_string qcloudssl_X509V3_get_string +#define ASN1_STRING_set_default_mask_asc qcloudssl_ASN1_STRING_set_default_mask_asc +#define d2i_ASN1_IA5STRING qcloudssl_d2i_ASN1_IA5STRING +#define EVP_AEAD_CTX_free qcloudssl_EVP_AEAD_CTX_free +#define POLICY_MAPPINGS_it qcloudssl_POLICY_MAPPINGS_it +#define d2i_ASN1_GENERALIZEDTIME qcloudssl_d2i_ASN1_GENERALIZEDTIME +#define TLSv1_2_client_method qcloudssl_TLSv1_2_client_method +#define X509_policy_tree_level_count qcloudssl_X509_policy_tree_level_count +#define BN_is_zero qcloudssl_BN_is_zero +#define PEM_write_SSL_SESSION qcloudssl_PEM_write_SSL_SESSION +#define SSL_cache_hit qcloudssl_SSL_cache_hit +#define X509_policy_tree_free qcloudssl_X509_policy_tree_free +#define BASIC_CONSTRAINTS_it qcloudssl_BASIC_CONSTRAINTS_it +#define BIO_set_flags qcloudssl_BIO_set_flags +#define X509_ATTRIBUTE_create_by_NID qcloudssl_X509_ATTRIBUTE_create_by_NID +#define BN_set_u64 qcloudssl_BN_set_u64 +#define BN_cmp_word qcloudssl_BN_cmp_word +#define EVP_BytesToKey qcloudssl_EVP_BytesToKey 
+#define SSL_write qcloudssl_SSL_write +#define X509_STORE_get_by_subject qcloudssl_X509_STORE_get_by_subject +#define ASN1_STRING_to_UTF8 qcloudssl_ASN1_STRING_to_UTF8 +#define BN_GENCB_set qcloudssl_BN_GENCB_set +#define i2d_DSA_SIG qcloudssl_i2d_DSA_SIG +#define ERR_add_error_dataf qcloudssl_ERR_add_error_dataf +#define EVP_aead_aes_256_cbc_sha1_tls qcloudssl_EVP_aead_aes_256_cbc_sha1_tls +#define s2i_ASN1_OCTET_STRING qcloudssl_s2i_ASN1_OCTET_STRING +#define CBS_is_valid_asn1_bitstring qcloudssl_CBS_is_valid_asn1_bitstring +#define ASN1_ENUMERATED_get qcloudssl_ASN1_ENUMERATED_get +#define bn_mul_comba4 qcloudssl_bn_mul_comba4 +#define ASN1_NULL_free qcloudssl_ASN1_NULL_free +#define SHA1_Final qcloudssl_SHA1_Final +#define i2d_ECPrivateKey qcloudssl_i2d_ECPrivateKey +#define RSA_sign_raw qcloudssl_RSA_sign_raw +#define EVP_get_digestbyobj qcloudssl_EVP_get_digestbyobj +#define CRYPTO_cbc128_decrypt qcloudssl_CRYPTO_cbc128_decrypt +#define SSL_set_msg_callback qcloudssl_SSL_set_msg_callback +#define v3_pkey_usage_period qcloudssl_v3_pkey_usage_period +#define SSL_use_RSAPrivateKey_file qcloudssl_SSL_use_RSAPrivateKey_file +#define BN_init qcloudssl_BN_init +#define PEM_write_bio_DSAparams qcloudssl_PEM_write_bio_DSAparams +#define SXNET_get_id_asc qcloudssl_SXNET_get_id_asc +#define i2d_DIRECTORYSTRING qcloudssl_i2d_DIRECTORYSTRING +#define CRYPTO_MUTEX_lock_read qcloudssl_CRYPTO_MUTEX_lock_read +#define ED25519_keypair qcloudssl_ED25519_keypair +#define BN_GENCB_call qcloudssl_BN_GENCB_call +#define GENERAL_NAME_get0_value qcloudssl_GENERAL_NAME_get0_value +#define HMAC_CTX_copy_ex qcloudssl_HMAC_CTX_copy_ex +#define EVP_tls_cbc_copy_mac qcloudssl_EVP_tls_cbc_copy_mac +#define X509_NAME_it qcloudssl_X509_NAME_it +#define X509_VERIFY_PARAM_get_flags qcloudssl_X509_VERIFY_PARAM_get_flags +#define SSL_send_fatal_alert qcloudssl_SSL_send_fatal_alert +#define BN_one qcloudssl_BN_one +#define ERR_clear_error qcloudssl_ERR_clear_error +#define BN_pseudo_rand 
qcloudssl_BN_pseudo_rand +#define EVP_DigestVerifyFinal qcloudssl_EVP_DigestVerifyFinal +#define X509V3_add_value_uchar qcloudssl_X509V3_add_value_uchar +#define DES_ecb3_encrypt qcloudssl_DES_ecb3_encrypt +#define EC_POINT_oct2point qcloudssl_EC_POINT_oct2point +#define X509_cmp_current_time qcloudssl_X509_cmp_current_time +#define ASN1_ANY_it qcloudssl_ASN1_ANY_it +#define SSL_get_client_CA_list qcloudssl_SSL_get_client_CA_list +#define X509_check_issued qcloudssl_X509_check_issued +#define BN_bn2cbb_padded qcloudssl_BN_bn2cbb_padded +#define ASN1_TYPE_new qcloudssl_ASN1_TYPE_new +#define v3_bcons qcloudssl_v3_bcons +#define ASN1_GENERALSTRING_it qcloudssl_ASN1_GENERALSTRING_it +#define SPAKE2_generate_msg qcloudssl_SPAKE2_generate_msg +#define SSL_CTX_set_default_passwd_cb qcloudssl_SSL_CTX_set_default_passwd_cb +#define d2i_PKCS8_PRIV_KEY_INFO qcloudssl_d2i_PKCS8_PRIV_KEY_INFO +#define SSL_CTX_get_keylog_callback qcloudssl_SSL_CTX_get_keylog_callback +#define EVP_aead_des_ede3_cbc_sha1_ssl3 qcloudssl_EVP_aead_des_ede3_cbc_sha1_ssl3 +#define PKEY_USAGE_PERIOD_new qcloudssl_PKEY_USAGE_PERIOD_new +#define ASN1_TIME_adj qcloudssl_ASN1_TIME_adj +#define EC_KEY_parse_parameters qcloudssl_EC_KEY_parse_parameters +#define i2v_GENERAL_NAMES qcloudssl_i2v_GENERAL_NAMES +#define X509V3_string_free qcloudssl_X509V3_string_free +#define ECDSA_sign_ex qcloudssl_ECDSA_sign_ex +#define ec_GFp_simple_group_init qcloudssl_ec_GFp_simple_group_init +#define EVP_des_ede3_cbc qcloudssl_EVP_des_ede3_cbc +#define X509_VERIFY_PARAM_set1_email qcloudssl_X509_VERIFY_PARAM_set1_email +#define MD4_Init qcloudssl_MD4_Init +#define OpenSSL_add_all_digests qcloudssl_OpenSSL_add_all_digests +#define AES_encrypt qcloudssl_AES_encrypt +#define i2d_X509_NAME_ENTRY qcloudssl_i2d_X509_NAME_ENTRY +#define X509_STORE_CTX_set_default qcloudssl_X509_STORE_CTX_set_default +#define DH_get_ex_new_index qcloudssl_DH_get_ex_new_index +#define ASN1_item_sign qcloudssl_ASN1_item_sign +#define bn_correct_top 
qcloudssl_bn_correct_top +#define EVP_PKEY_decrypt_init qcloudssl_EVP_PKEY_decrypt_init +#define OBJ_cmp qcloudssl_OBJ_cmp +#define SSL_get_ex_data qcloudssl_SSL_get_ex_data +#define BIO_callback_ctrl qcloudssl_BIO_callback_ctrl +#define BIO_new_bio_pair qcloudssl_BIO_new_bio_pair +#define X509_CRL_print_fp qcloudssl_X509_CRL_print_fp +#define ASN1_item_ex_d2i qcloudssl_ASN1_item_ex_d2i +#define SSL_CTX_set_trust qcloudssl_SSL_CTX_set_trust +#define i2d_ASN1_OCTET_STRING qcloudssl_i2d_ASN1_OCTET_STRING +#define SSL_get_privatekey qcloudssl_SSL_get_privatekey +#define BN_mod_add_quick qcloudssl_BN_mod_add_quick +#define x25519_ge_add qcloudssl_x25519_ge_add +#define SSL_CTX_set_cert_store qcloudssl_SSL_CTX_set_cert_store +#define X509_cmp_time qcloudssl_X509_cmp_time +#define EVP_DigestVerifyUpdate qcloudssl_EVP_DigestVerifyUpdate +#define SHA512_Update qcloudssl_SHA512_Update +#define ASN1_mbstring_copy qcloudssl_ASN1_mbstring_copy +#define ERR_peek_last_error_line_data qcloudssl_ERR_peek_last_error_line_data +#define DIST_POINT_it qcloudssl_DIST_POINT_it +#define DISPLAYTEXT_it qcloudssl_DISPLAYTEXT_it +#define ASN1_GENERALSTRING_free qcloudssl_ASN1_GENERALSTRING_free +#define i2d_X509_CRL_INFO qcloudssl_i2d_X509_CRL_INFO +#define SSL_CTX_sessions qcloudssl_SSL_CTX_sessions +#define CRYPTO_sysrand qcloudssl_CRYPTO_sysrand +#define ec_GFp_simple_make_affine qcloudssl_ec_GFp_simple_make_affine +#define ec_GFp_simple_set_compressed_coordinates qcloudssl_ec_GFp_simple_set_compressed_coordinates +#define CRL_DIST_POINTS_new qcloudssl_CRL_DIST_POINTS_new +#define CBS_get_u24_length_prefixed qcloudssl_CBS_get_u24_length_prefixed +#define ASN1_OBJECT_it qcloudssl_ASN1_OBJECT_it +#define SSL_CTX_cipher_in_group qcloudssl_SSL_CTX_cipher_in_group +#define CBB_zero qcloudssl_CBB_zero +#define SXNET_get_id_INTEGER qcloudssl_SXNET_get_id_INTEGER +#define ec_GFp_simple_point_copy qcloudssl_ec_GFp_simple_point_copy +#define X509_get_default_cert_area 
qcloudssl_X509_get_default_cert_area +#define i2c_ASN1_BIT_STRING qcloudssl_i2c_ASN1_BIT_STRING +#define asn1_set_choice_selector qcloudssl_asn1_set_choice_selector +#define X509V3_EXT_val_prn qcloudssl_X509V3_EXT_val_prn +#define RSA_public_key_to_bytes qcloudssl_RSA_public_key_to_bytes +#define MD4_Update qcloudssl_MD4_Update +#define BN_sqr qcloudssl_BN_sqr +#define X509_CRL_set_nextUpdate qcloudssl_X509_CRL_set_nextUpdate +#define X509_set_pubkey qcloudssl_X509_set_pubkey +#define EVP_AEAD_CTX_seal qcloudssl_EVP_AEAD_CTX_seal +#define X509_REQ_set_pubkey qcloudssl_X509_REQ_set_pubkey +#define SHA256 qcloudssl_SHA256 +#define SSL_CTX_use_PrivateKey_file qcloudssl_SSL_CTX_use_PrivateKey_file +#define v2i_GENERAL_NAMES qcloudssl_v2i_GENERAL_NAMES +#define i2d_SSL_SESSION qcloudssl_i2d_SSL_SESSION +#define X509_VERIFY_PARAM_set1_policies qcloudssl_X509_VERIFY_PARAM_set1_policies +#define SSL_session_reused qcloudssl_SSL_session_reused +#define CRYPTO_set_add_lock_callback qcloudssl_CRYPTO_set_add_lock_callback +#define X509_STORE_CTX_set_chain qcloudssl_X509_STORE_CTX_set_chain +#define PKCS8_pkey_get0 qcloudssl_PKCS8_pkey_get0 +#define EVP_PKEY_copy_parameters qcloudssl_EVP_PKEY_copy_parameters +#define EVP_PKEY_CTX_set_rsa_keygen_bits qcloudssl_EVP_PKEY_CTX_set_rsa_keygen_bits +#define sk_dup qcloudssl_sk_dup +#define policy_node_match qcloudssl_policy_node_match +#define ECDSA_SIG_new qcloudssl_ECDSA_SIG_new +#define EVP_MD_CTX_type qcloudssl_EVP_MD_CTX_type +#define SSL_CTX_sess_timeouts qcloudssl_SSL_CTX_sess_timeouts +#define EVP_tls_cbc_digest_record qcloudssl_EVP_tls_cbc_digest_record +#define USERNOTICE_free qcloudssl_USERNOTICE_free +#define CBB_init_fixed qcloudssl_CBB_init_fixed +#define BIO_ptr_ctrl qcloudssl_BIO_ptr_ctrl +#define SSL_set_max_cert_list qcloudssl_SSL_set_max_cert_list +#define SSL_get_write_sequence qcloudssl_SSL_get_write_sequence +#define AES_ofb128_encrypt qcloudssl_AES_ofb128_encrypt +#define SSL_get_psk_identity_hint 
qcloudssl_SSL_get_psk_identity_hint +#define BN_mod_lshift_quick qcloudssl_BN_mod_lshift_quick +#define RSA_padding_add_none qcloudssl_RSA_padding_add_none +#define CRYPTO_set_dynlock_create_callback qcloudssl_CRYPTO_set_dynlock_create_callback +#define EVP_AEAD_CTX_zero qcloudssl_EVP_AEAD_CTX_zero +#define SSL_CTX_add1_chain_cert qcloudssl_SSL_CTX_add1_chain_cert +#define BN_mod_exp_mont_word qcloudssl_BN_mod_exp_mont_word +#define EVP_parse_digest_algorithm qcloudssl_EVP_parse_digest_algorithm +#define RSA_check_key qcloudssl_RSA_check_key +#define X509_VERIFY_PARAM_inherit qcloudssl_X509_VERIFY_PARAM_inherit +#define bio_fd_should_retry qcloudssl_bio_fd_should_retry +#define v3_info qcloudssl_v3_info +#define X509_TRUST_set_default qcloudssl_X509_TRUST_set_default +#define X509V3_EXT_get_nid qcloudssl_X509V3_EXT_get_nid +#define DTLS_with_buffers_method qcloudssl_DTLS_with_buffers_method +#define ERR_peek_last_error_line qcloudssl_ERR_peek_last_error_line +#define RSA_new_method qcloudssl_RSA_new_method +#define d2i_DSAPrivateKey qcloudssl_d2i_DSAPrivateKey +#define BIO_snprintf qcloudssl_BIO_snprintf +#define EVP_PKEY_derive_set_peer qcloudssl_EVP_PKEY_derive_set_peer +#define RSA_new qcloudssl_RSA_new +#define EVP_PKEY_get0_DSA qcloudssl_EVP_PKEY_get0_DSA +#define EVP_PKEY2PKCS8 qcloudssl_EVP_PKEY2PKCS8 +#define SSL_CTX_sess_misses qcloudssl_SSL_CTX_sess_misses +#define PEM_write_PUBKEY qcloudssl_PEM_write_PUBKEY +#define sk_shift qcloudssl_sk_shift +#define x25519_ge_p1p1_to_p2 qcloudssl_x25519_ge_p1p1_to_p2 +#define X509_get0_extensions qcloudssl_X509_get0_extensions +#define i2d_EC_PUBKEY_bio qcloudssl_i2d_EC_PUBKEY_bio +#define BN_is_word qcloudssl_BN_is_word +#define SSL_is_init_finished qcloudssl_SSL_is_init_finished +#define EVP_aead_des_ede3_cbc_sha1_tls qcloudssl_EVP_aead_des_ede3_cbc_sha1_tls +#define BN_is_one qcloudssl_BN_is_one +#define SSL_state qcloudssl_SSL_state +#define SSL_get_curve_id qcloudssl_SSL_get_curve_id +#define 
ASN1_VISIBLESTRING_free qcloudssl_ASN1_VISIBLESTRING_free +#define BN_mask_bits qcloudssl_BN_mask_bits +#define ASN1_UTCTIME_set qcloudssl_ASN1_UTCTIME_set +#define rsa_default_sign_raw qcloudssl_rsa_default_sign_raw +#define ASN1_NULL_it qcloudssl_ASN1_NULL_it +#define EC_KEY_check_fips qcloudssl_EC_KEY_check_fips +#define GENERAL_NAME_free qcloudssl_GENERAL_NAME_free +#define CRYPTO_ctr128_encrypt qcloudssl_CRYPTO_ctr128_encrypt +#define CRYPTO_POLYVAL_init qcloudssl_CRYPTO_POLYVAL_init +#define CRYPTO_chacha_20 qcloudssl_CRYPTO_chacha_20 +#define BN_set_negative qcloudssl_BN_set_negative +#define policy_data_free qcloudssl_policy_data_free +#define X509_get_default_cert_dir qcloudssl_X509_get_default_cert_dir +#define SSLv23_client_method qcloudssl_SSLv23_client_method +#define ASN1_PRINTABLE_new qcloudssl_ASN1_PRINTABLE_new +#define DH_free qcloudssl_DH_free +#define SSL_CTX_set_tmp_rsa_callback qcloudssl_SSL_CTX_set_tmp_rsa_callback +#define SSL_CTX_sess_get_get_cb qcloudssl_SSL_CTX_sess_get_get_cb +#define DH_compute_key qcloudssl_DH_compute_key +#define OBJ_nid2cbb qcloudssl_OBJ_nid2cbb +#define CRYPTO_set_id_callback qcloudssl_CRYPTO_set_id_callback +#define ASN1_INTEGER_to_BN qcloudssl_ASN1_INTEGER_to_BN +#define X509_REQ_new qcloudssl_X509_REQ_new +#define d2i_PROXY_POLICY qcloudssl_d2i_PROXY_POLICY +#define SSL_CTX_need_tmp_RSA qcloudssl_SSL_CTX_need_tmp_RSA +#define PEM_read qcloudssl_PEM_read +#define X509_NAME_ENTRY_get_data qcloudssl_X509_NAME_ENTRY_get_data +#define ASN1_template_new qcloudssl_ASN1_template_new +#define X509_VERIFY_PARAM_set1_name qcloudssl_X509_VERIFY_PARAM_set1_name +#define OBJ_find_sigid_by_algs qcloudssl_OBJ_find_sigid_by_algs +#define RSA_verify_PKCS1_PSS_mgf1 qcloudssl_RSA_verify_PKCS1_PSS_mgf1 +#define X509_CERT_AUX_new qcloudssl_X509_CERT_AUX_new +#define ec_GFp_simple_group_get_curve qcloudssl_ec_GFp_simple_group_get_curve +#define SSL_CTX_use_PrivateKey_ASN1 qcloudssl_SSL_CTX_use_PrivateKey_ASN1 +#define 
SSL_max_seal_overhead qcloudssl_SSL_max_seal_overhead +#define BUF_MEM_reserve qcloudssl_BUF_MEM_reserve +#define d2i_NETSCAPE_SPKI qcloudssl_d2i_NETSCAPE_SPKI +#define i2d_PrivateKey_bio qcloudssl_i2d_PrivateKey_bio +#define ec_GFp_simple_group_set_curve qcloudssl_ec_GFp_simple_group_set_curve +#define SSL_CTX_use_RSAPrivateKey qcloudssl_SSL_CTX_use_RSAPrivateKey +#define SSLeay_version qcloudssl_SSLeay_version +#define X509_CERT_AUX_print qcloudssl_X509_CERT_AUX_print +#define X509_check_trust qcloudssl_X509_check_trust +#define v3_ns_ia5_list qcloudssl_v3_ns_ia5_list +#define EVP_AEAD_CTX_init_with_direction qcloudssl_EVP_AEAD_CTX_init_with_direction +#define i2d_PKCS8PrivateKey_bio qcloudssl_i2d_PKCS8PrivateKey_bio +#define X509_INFO_free qcloudssl_X509_INFO_free +#define X509v3_get_ext qcloudssl_X509v3_get_ext +#define POLICY_CONSTRAINTS_new qcloudssl_POLICY_CONSTRAINTS_new +#define BN_clear_bit qcloudssl_BN_clear_bit +#define i2d_RSA_PUBKEY qcloudssl_i2d_RSA_PUBKEY +#define bio_socket_nbio qcloudssl_bio_socket_nbio +#define X509_STORE_CTX_get_error_depth qcloudssl_X509_STORE_CTX_get_error_depth +#define NETSCAPE_SPKAC_free qcloudssl_NETSCAPE_SPKAC_free +#define ASN1_BIT_STRING_set qcloudssl_ASN1_BIT_STRING_set +#define MD5_Final qcloudssl_MD5_Final +#define i2d_POLICYINFO qcloudssl_i2d_POLICYINFO +#define RAND_seed qcloudssl_RAND_seed +#define SSL_get_server_tmp_key qcloudssl_SSL_get_server_tmp_key +#define d2i_X509_EXTENSIONS qcloudssl_d2i_X509_EXTENSIONS +#define ASN1_VISIBLESTRING_it qcloudssl_ASN1_VISIBLESTRING_it +#define EVP_PKEY_new_ed25519_public qcloudssl_EVP_PKEY_new_ed25519_public +#define RSA_private_encrypt qcloudssl_RSA_private_encrypt +#define CRYPTO_THREADID_set_numeric qcloudssl_CRYPTO_THREADID_set_numeric +#define x509_digest_verify_init qcloudssl_x509_digest_verify_init +#define SSL_SESSION_to_bytes_for_ticket qcloudssl_SSL_SESSION_to_bytes_for_ticket +#define CBS_strdup qcloudssl_CBS_strdup +#define SSL_SESSION_from_bytes 
qcloudssl_SSL_SESSION_from_bytes +#define EC_POINT_dup qcloudssl_EC_POINT_dup +#define i2d_X509_ATTRIBUTE qcloudssl_i2d_X509_ATTRIBUTE +#define EVP_CipherInit_ex qcloudssl_EVP_CipherInit_ex +#define NETSCAPE_SPKI_free qcloudssl_NETSCAPE_SPKI_free +#define BN_rand qcloudssl_BN_rand +#define i2d_USERNOTICE qcloudssl_i2d_USERNOTICE +#define SSL_CTX_set_session_cache_mode qcloudssl_SSL_CTX_set_session_cache_mode +#define SSL_get0_signed_cert_timestamp_list qcloudssl_SSL_get0_signed_cert_timestamp_list +#define GENERAL_NAME_print qcloudssl_GENERAL_NAME_print +#define SSL_CTX_get_info_callback qcloudssl_SSL_CTX_get_info_callback +#define d2i_PROXY_CERT_INFO_EXTENSION qcloudssl_d2i_PROXY_CERT_INFO_EXTENSION +#define EVP_AEAD_max_tag_len qcloudssl_EVP_AEAD_max_tag_len +#define X509V3_section_free qcloudssl_X509V3_section_free +#define PEM_write_bio_DSAPrivateKey qcloudssl_PEM_write_bio_DSAPrivateKey +#define SXNET_it qcloudssl_SXNET_it +#define SSL_load_error_strings qcloudssl_SSL_load_error_strings +#define d2i_RSA_PUBKEY qcloudssl_d2i_RSA_PUBKEY +#define SSL_set1_param qcloudssl_SSL_set1_param +#define NCONF_get_string qcloudssl_NCONF_get_string +#define SSL_CIPHER_get_digest_nid qcloudssl_SSL_CIPHER_get_digest_nid +#define v3_pci qcloudssl_v3_pci +#define BUF_strnlen qcloudssl_BUF_strnlen +#define SSL_SESSION_set1_id_context qcloudssl_SSL_SESSION_set1_id_context +#define EVP_AEAD_CTX_init qcloudssl_EVP_AEAD_CTX_init +#define ECDSA_do_sign_ex qcloudssl_ECDSA_do_sign_ex +#define ASN1_mbstring_ncopy qcloudssl_ASN1_mbstring_ncopy +#define CRYPTO_BUFFER_up_ref qcloudssl_CRYPTO_BUFFER_up_ref +#define EVP_CIPHER_CTX_init qcloudssl_EVP_CIPHER_CTX_init +#define BIO_pop qcloudssl_BIO_pop +#define OBJ_nid2sn qcloudssl_OBJ_nid2sn +#define X509_gmtime_adj qcloudssl_X509_gmtime_adj +#define BN_clear qcloudssl_BN_clear +#define ASN1_STRING_print_ex qcloudssl_ASN1_STRING_print_ex +#define CERTIFICATEPOLICIES_free qcloudssl_CERTIFICATEPOLICIES_free +#define SSL_get_tls_unique 
qcloudssl_SSL_get_tls_unique +#define EVP_marshal_private_key qcloudssl_EVP_marshal_private_key +#define EVP_PKEY_verify_recover_init qcloudssl_EVP_PKEY_verify_recover_init +#define BUF_strlcpy qcloudssl_BUF_strlcpy +#define RSA_parse_public_key_buggy qcloudssl_RSA_parse_public_key_buggy +#define ASN1_BMPSTRING_new qcloudssl_ASN1_BMPSTRING_new +#define X509_CRL_get_ext_d2i qcloudssl_X509_CRL_get_ext_d2i +#define SSL_CIPHER_get_bits qcloudssl_SSL_CIPHER_get_bits +#define SHA384_Final qcloudssl_SHA384_Final +#define BN_mod_lshift qcloudssl_BN_mod_lshift +#define SSL_set1_tls_channel_id qcloudssl_SSL_set1_tls_channel_id +#define CRL_DIST_POINTS_free qcloudssl_CRL_DIST_POINTS_free +#define CRYPTO_BUFFER_POOL_free qcloudssl_CRYPTO_BUFFER_POOL_free +#define X509_STORE_set_default_paths qcloudssl_X509_STORE_set_default_paths +#define EDIPARTYNAME_it qcloudssl_EDIPARTYNAME_it +#define X509_get1_email qcloudssl_X509_get1_email +#define SSL_CTX_use_certificate qcloudssl_SSL_CTX_use_certificate +#define X509_STORE_CTX_get0_current_crl qcloudssl_X509_STORE_CTX_get0_current_crl +#define SSL_CTX_sess_accept_good qcloudssl_SSL_CTX_sess_accept_good +#define ec_GFp_mont_group_copy qcloudssl_ec_GFp_mont_group_copy +#define SSL_set1_curves qcloudssl_SSL_set1_curves +#define SSL_CTX_set_ticket_aead_method qcloudssl_SSL_CTX_set_ticket_aead_method +#define X509_EXTENSION_set_data qcloudssl_X509_EXTENSION_set_data +#define BN_from_montgomery qcloudssl_BN_from_montgomery +#define RSA_public_key_from_bytes qcloudssl_RSA_public_key_from_bytes +#define PEM_write_PKCS8 qcloudssl_PEM_write_PKCS8 +#define EVP_PKEY_CTX_set_signature_md qcloudssl_EVP_PKEY_CTX_set_signature_md +#define X509_VERIFY_PARAM_table_cleanup qcloudssl_X509_VERIFY_PARAM_table_cleanup +#define BIO_new_fp qcloudssl_BIO_new_fp +#define ED25519_verify qcloudssl_ED25519_verify +#define i2c_ASN1_INTEGER qcloudssl_i2c_ASN1_INTEGER +#define X509_CRL_set_version qcloudssl_X509_CRL_set_version +#define SSL_set_verify_result 
qcloudssl_SSL_set_verify_result +#define X509_get_ext_count qcloudssl_X509_get_ext_count +#define X509V3_add_standard_extensions qcloudssl_X509V3_add_standard_extensions +#define SSL_get_ex_new_index qcloudssl_SSL_get_ex_new_index +#define EC_KEY_new_by_curve_name qcloudssl_EC_KEY_new_by_curve_name +#define SSL_library_init qcloudssl_SSL_library_init +#define asn1_refcount_set_one qcloudssl_asn1_refcount_set_one +#define EVP_PKEY_get0_DH qcloudssl_EVP_PKEY_get0_DH +#define SSL_get1_session qcloudssl_SSL_get1_session +#define EVP_des_ede_cbc qcloudssl_EVP_des_ede_cbc +#define SSL_set_client_CA_list qcloudssl_SSL_set_client_CA_list +#define CRYPTO_MUTEX_unlock_read qcloudssl_CRYPTO_MUTEX_unlock_read +#define ASN1_STRING_print qcloudssl_ASN1_STRING_print +#define SSL_CTX_set_quiet_shutdown qcloudssl_SSL_CTX_set_quiet_shutdown +#define i2d_RSAPrivateKey qcloudssl_i2d_RSAPrivateKey +#define i2d_X509_ALGOR qcloudssl_i2d_X509_ALGOR +#define X509_VERIFY_PARAM_set_time qcloudssl_X509_VERIFY_PARAM_set_time +#define X509_NAME_new qcloudssl_X509_NAME_new +#define BIO_get_fp qcloudssl_BIO_get_fp +#define i2d_CERTIFICATEPOLICIES qcloudssl_i2d_CERTIFICATEPOLICIES +#define SSL_get_finished qcloudssl_SSL_get_finished +#define SSL_COMP_add_compression_method qcloudssl_SSL_COMP_add_compression_method +#define ASN1_item_i2d_bio qcloudssl_ASN1_item_i2d_bio +#define EXTENDED_KEY_USAGE_it qcloudssl_EXTENDED_KEY_USAGE_it +#define GENERAL_NAMES_free qcloudssl_GENERAL_NAMES_free +#define AES_ecb_encrypt qcloudssl_AES_ecb_encrypt +#define X509_set_issuer_name qcloudssl_X509_set_issuer_name +#define X509_EXTENSION_free qcloudssl_X509_EXTENSION_free +#define ASN1_STRING_set_default_mask qcloudssl_ASN1_STRING_set_default_mask +#define ASN1_TYPE_set qcloudssl_ASN1_TYPE_set +#define SSL_CTX_set_cert_cb qcloudssl_SSL_CTX_set_cert_cb +#define SSL_get_key_block_len qcloudssl_SSL_get_key_block_len +#define RSA_private_decrypt qcloudssl_RSA_private_decrypt +#define ASN1_UTCTIME_it 
qcloudssl_ASN1_UTCTIME_it +#define ASN1_BIT_STRING_get_bit qcloudssl_ASN1_BIT_STRING_get_bit +#define a2i_GENERAL_NAME qcloudssl_a2i_GENERAL_NAME +#define CBS_mem_equal qcloudssl_CBS_mem_equal +#define X509_LOOKUP_by_alias qcloudssl_X509_LOOKUP_by_alias +#define X509_REQ_to_X509 qcloudssl_X509_REQ_to_X509 +#define ec_group_get_order_mont qcloudssl_ec_group_get_order_mont +#define d2i_PKCS12_bio qcloudssl_d2i_PKCS12_bio +#define PEM_read_bio_DHparams qcloudssl_PEM_read_bio_DHparams +#define SSL_accept qcloudssl_SSL_accept +#define ASN1_GENERALIZEDTIME_new qcloudssl_ASN1_GENERALIZEDTIME_new +#define lh_new qcloudssl_lh_new +#define ASN1_item_verify qcloudssl_ASN1_item_verify +#define X509_CRL_get_ext_by_NID qcloudssl_X509_CRL_get_ext_by_NID +#define v3_inhibit_anyp qcloudssl_v3_inhibit_anyp +#define rsa_default_size qcloudssl_rsa_default_size +#define SSL_CTX_clear_options qcloudssl_SSL_CTX_clear_options +#define X509_STORE_CTX_get0_parent_ctx qcloudssl_X509_STORE_CTX_get0_parent_ctx +#define ASN1_generate_v3 qcloudssl_ASN1_generate_v3 +#define d2i_AUTHORITY_INFO_ACCESS qcloudssl_d2i_AUTHORITY_INFO_ACCESS +#define RSA_padding_add_PKCS1_PSS_mgf1 qcloudssl_RSA_padding_add_PKCS1_PSS_mgf1 +#define RAND_enable_fork_unsafe_buffering qcloudssl_RAND_enable_fork_unsafe_buffering +#define policy_cache_find_data qcloudssl_policy_cache_find_data +#define EVP_EncodeInit qcloudssl_EVP_EncodeInit +#define SSL_SESSION_get_ex_new_index qcloudssl_SSL_SESSION_get_ex_new_index +#define d2i_X509_ALGOR qcloudssl_d2i_X509_ALGOR +#define ASN1_tag2str qcloudssl_ASN1_tag2str +#define X509_STORE_CTX_get0_store qcloudssl_X509_STORE_CTX_get0_store +#define BN_mod_word qcloudssl_BN_mod_word +#define x25519_ge_scalarmult qcloudssl_x25519_ge_scalarmult +#define SSL_get_fd qcloudssl_SSL_get_fd +#define i2d_X509_EXTENSIONS qcloudssl_i2d_X509_EXTENSIONS +#define X509v3_get_ext_count qcloudssl_X509v3_get_ext_count +#define EC_POINT_is_on_curve qcloudssl_EC_POINT_is_on_curve +#define 
x25519_ge_scalarmult_small_precomp qcloudssl_x25519_ge_scalarmult_small_precomp +#define BN_CTX_get qcloudssl_BN_CTX_get +#define SSL_set_min_proto_version qcloudssl_SSL_set_min_proto_version +#define X509at_get_attr qcloudssl_X509at_get_attr +#define SSL_get_read_sequence qcloudssl_SSL_get_read_sequence +#define X509_VERIFY_PARAM_set1_host qcloudssl_X509_VERIFY_PARAM_set1_host +#define X509_PURPOSE_get_id qcloudssl_X509_PURPOSE_get_id +#define ec_GFp_simple_point_set_to_infinity qcloudssl_ec_GFp_simple_point_set_to_infinity +#define RAND_SSLeay qcloudssl_RAND_SSLeay +#define X509_REQ_set_extension_nids qcloudssl_X509_REQ_set_extension_nids +#define BIO_clear_retry_flags qcloudssl_BIO_clear_retry_flags +#define X509_REQ_get_extension_nids qcloudssl_X509_REQ_get_extension_nids +#define X509_TRUST_get0_name qcloudssl_X509_TRUST_get0_name +#define CRYPTO_num_locks qcloudssl_CRYPTO_num_locks +#define BN_get_word qcloudssl_BN_get_word +#define ASN1_GENERALIZEDTIME_check qcloudssl_ASN1_GENERALIZEDTIME_check +#define SSL_CTX_flush_sessions qcloudssl_SSL_CTX_flush_sessions +#define X509_CRL_set_issuer_name qcloudssl_X509_CRL_set_issuer_name +#define X509_ALGOR_set0 qcloudssl_X509_ALGOR_set0 +#define X509_issuer_and_serial_hash qcloudssl_X509_issuer_and_serial_hash +#define SSL_CTX_set_keylog_callback qcloudssl_SSL_CTX_set_keylog_callback +#define SHA224_Final qcloudssl_SHA224_Final +#define BN_mod_inverse_odd qcloudssl_BN_mod_inverse_odd +#define X509_NAME_free qcloudssl_X509_NAME_free +#define SSL_SESSION_free qcloudssl_SSL_SESSION_free +#define s2i_ASN1_INTEGER qcloudssl_s2i_ASN1_INTEGER +#define CRYPTO_BUFFER_init_CBS qcloudssl_CRYPTO_BUFFER_init_CBS +#define SXNET_add_id_asc qcloudssl_SXNET_add_id_asc +#define X509_VERIFY_PARAM_add0_table qcloudssl_X509_VERIFY_PARAM_add0_table +#define EVP_PKEY_verify_init qcloudssl_EVP_PKEY_verify_init +#define rsa_default_private_transform qcloudssl_rsa_default_private_transform +#define HMAC_Init qcloudssl_HMAC_Init +#define 
X509_NAME_get_entry qcloudssl_X509_NAME_get_entry +#define EVP_aead_aes_256_gcm_siv qcloudssl_EVP_aead_aes_256_gcm_siv +#define SSL_CTX_use_psk_identity_hint qcloudssl_SSL_CTX_use_psk_identity_hint +#define OTHERNAME_free qcloudssl_OTHERNAME_free +#define ERR_reason_error_string qcloudssl_ERR_reason_error_string +#define CRYPTO_set_dynlock_destroy_callback qcloudssl_CRYPTO_set_dynlock_destroy_callback +#define rand_fork_unsafe_buffering_enabled qcloudssl_rand_fork_unsafe_buffering_enabled +#define TLSv1_1_method qcloudssl_TLSv1_1_method +#define CBB_data qcloudssl_CBB_data +#define X509_CRL_get_meth_data qcloudssl_X509_CRL_get_meth_data +#define X509at_get_attr_by_NID qcloudssl_X509at_get_attr_by_NID +#define X509V3_get_value_int qcloudssl_X509V3_get_value_int +#define X509_ALGOR_it qcloudssl_X509_ALGOR_it +#define EVP_PKEY_get1_DSA qcloudssl_EVP_PKEY_get1_DSA +#define SSL_get0_session_id_context qcloudssl_SSL_get0_session_id_context +#define v3_cpols qcloudssl_v3_cpols +#define EVP_DigestFinal qcloudssl_EVP_DigestFinal +#define SSL_CTX_set_verify_depth qcloudssl_SSL_CTX_set_verify_depth +#define X509v3_add_ext qcloudssl_X509v3_add_ext +#define ENGINE_get_RSA_method qcloudssl_ENGINE_get_RSA_method +#define CRYPTO_BUFFER_new_from_CBS qcloudssl_CRYPTO_BUFFER_new_from_CBS +#define AUTHORITY_KEYID_it qcloudssl_AUTHORITY_KEYID_it +#define X509_ATTRIBUTE_set1_data qcloudssl_X509_ATTRIBUTE_set1_data +#define X509_LOOKUP_by_issuer_serial qcloudssl_X509_LOOKUP_by_issuer_serial +#define SSL_CTX_use_RSAPrivateKey_file qcloudssl_SSL_CTX_use_RSAPrivateKey_file +#define X509_CRL_set_lastUpdate qcloudssl_X509_CRL_set_lastUpdate +#define EVP_CIPHER_CTX_nid qcloudssl_EVP_CIPHER_CTX_nid +#define GENERAL_NAME_dup qcloudssl_GENERAL_NAME_dup +#define PKCS5_pbe2_decrypt_init qcloudssl_PKCS5_pbe2_decrypt_init +#define X509_CRL_up_ref qcloudssl_X509_CRL_up_ref +#define sk_pop qcloudssl_sk_pop +#define EC_POINT_free qcloudssl_EC_POINT_free +#define SSL_get_quiet_shutdown 
qcloudssl_SSL_get_quiet_shutdown +#define CRYPTO_gcm128_finish qcloudssl_CRYPTO_gcm128_finish +#define CBS_get_any_ber_asn1_element qcloudssl_CBS_get_any_ber_asn1_element +#define asn1_get_choice_selector qcloudssl_asn1_get_choice_selector +#define ECDSA_SIG_parse qcloudssl_ECDSA_SIG_parse +#define ASN1_INTEGER_get qcloudssl_ASN1_INTEGER_get +#define i2d_ASN1_BOOLEAN qcloudssl_i2d_ASN1_BOOLEAN +#define EC_POINT_invert qcloudssl_EC_POINT_invert +#define ASN1_ENUMERATED_free qcloudssl_ASN1_ENUMERATED_free +#define EVP_enc_null qcloudssl_EVP_enc_null +#define NOTICEREF_free qcloudssl_NOTICEREF_free +#define X509_CRL_set_default_method qcloudssl_X509_CRL_set_default_method +#define i2d_SXNET qcloudssl_i2d_SXNET +#define X509_REQ_add1_attr qcloudssl_X509_REQ_add1_attr +#define SSL_CTX_sess_accept qcloudssl_SSL_CTX_sess_accept +#define CRYPTO_get_dynlock_destroy_callback qcloudssl_CRYPTO_get_dynlock_destroy_callback +#define CRYPTO_new_ex_data qcloudssl_CRYPTO_new_ex_data +#define ASN1_STRING_TABLE_cleanup qcloudssl_ASN1_STRING_TABLE_cleanup +#define OTHERNAME_cmp qcloudssl_OTHERNAME_cmp +#define i2d_NETSCAPE_SPKAC qcloudssl_i2d_NETSCAPE_SPKAC +#define PEM_write_bio_X509_REQ qcloudssl_PEM_write_bio_X509_REQ +#define sk_zero qcloudssl_sk_zero +#define RSA_private_key_to_bytes qcloudssl_RSA_private_key_to_bytes +#define POLICY_MAPPING_it qcloudssl_POLICY_MAPPING_it +#define EVP_DigestFinal_ex qcloudssl_EVP_DigestFinal_ex +#define ISSUING_DIST_POINT_it qcloudssl_ISSUING_DIST_POINT_it +#define RSA_padding_check_PKCS1_OAEP_mgf1 qcloudssl_RSA_padding_check_PKCS1_OAEP_mgf1 +#define d2i_PrivateKey_bio qcloudssl_d2i_PrivateKey_bio +#define X509_STORE_set0_additional_untrusted qcloudssl_X509_STORE_set0_additional_untrusted +#define CMAC_Update qcloudssl_CMAC_Update +#define ASN1_item_i2d_fp qcloudssl_ASN1_item_i2d_fp +#define ASN1_OBJECT_new qcloudssl_ASN1_OBJECT_new +#define SSL_set_mode qcloudssl_SSL_set_mode +#define OBJ_obj2txt qcloudssl_OBJ_obj2txt +#define 
CRYPTO_poly1305_init qcloudssl_CRYPTO_poly1305_init +#define i2d_X509_EXTENSION qcloudssl_i2d_X509_EXTENSION +#define d2i_X509_REVOKED qcloudssl_d2i_X509_REVOKED +#define SSL_get_shared_ciphers qcloudssl_SSL_get_shared_ciphers +#define POLICY_CONSTRAINTS_free qcloudssl_POLICY_CONSTRAINTS_free +#define SSL_get_error qcloudssl_SSL_get_error +#define ASN1_generate_nconf qcloudssl_ASN1_generate_nconf +#define DES_set_key_unchecked qcloudssl_DES_set_key_unchecked +#define X509_REQ_get_attr qcloudssl_X509_REQ_get_attr +#define ASN1_GENERALSTRING_new qcloudssl_ASN1_GENERALSTRING_new +#define X509_ALGORS_it qcloudssl_X509_ALGORS_it +#define EC_KEY_get_enc_flags qcloudssl_EC_KEY_get_enc_flags +#define X509_get_signature_nid qcloudssl_X509_get_signature_nid +#define X509_REVOKED_free qcloudssl_X509_REVOKED_free +#define EXTENDED_KEY_USAGE_new qcloudssl_EXTENDED_KEY_USAGE_new +#define sk_push qcloudssl_sk_push +#define X509_NAME_ENTRY_create_by_OBJ qcloudssl_X509_NAME_ENTRY_create_by_OBJ +#define X509_CRL_sign_ctx qcloudssl_X509_CRL_sign_ctx +#define ERR_peek_error qcloudssl_ERR_peek_error +#define EC_KEY_new qcloudssl_EC_KEY_new +#define CBS_get_any_asn1_element qcloudssl_CBS_get_any_asn1_element +#define SSL_get_secure_renegotiation_support qcloudssl_SSL_get_secure_renegotiation_support +#define RSA_padding_check_PKCS1_type_2 qcloudssl_RSA_padding_check_PKCS1_type_2 +#define X509_print qcloudssl_X509_print +#define DSA_marshal_public_key qcloudssl_DSA_marshal_public_key +#define X509_set_serialNumber qcloudssl_X509_set_serialNumber +#define EVP_PKEY_CTX_new qcloudssl_EVP_PKEY_CTX_new +#define SSL_get_current_compression qcloudssl_SSL_get_current_compression +#define PEM_read_PKCS8 qcloudssl_PEM_read_PKCS8 +#define ENGINE_set_ECDSA_method qcloudssl_ENGINE_set_ECDSA_method +#define ASN1_UTCTIME_check qcloudssl_ASN1_UTCTIME_check +#define X509_VERIFY_PARAM_get0_peername qcloudssl_X509_VERIFY_PARAM_get0_peername +#define d2i_X509_ATTRIBUTE qcloudssl_d2i_X509_ATTRIBUTE +#define 
EVP_PKEY_decrypt qcloudssl_EVP_PKEY_decrypt +#define X509_STORE_free qcloudssl_X509_STORE_free +#define DSA_marshal_private_key qcloudssl_DSA_marshal_private_key +#define SSL_CTX_get_ex_data qcloudssl_SSL_CTX_get_ex_data +#define PEM_write_RSA_PUBKEY qcloudssl_PEM_write_RSA_PUBKEY +#define ASN1_STRING_length_set qcloudssl_ASN1_STRING_length_set +#define EVP_tls_cbc_record_digest_supported qcloudssl_EVP_tls_cbc_record_digest_supported +#define CRYPTO_THREADID_current qcloudssl_CRYPTO_THREADID_current +#define SHA512_Init qcloudssl_SHA512_Init +#define v3_skey_id qcloudssl_v3_skey_id +#define SSL_extension_supported qcloudssl_SSL_extension_supported +#define SXNET_add_id_INTEGER qcloudssl_SXNET_add_id_INTEGER +#define PEM_write_bio_X509_AUX qcloudssl_PEM_write_bio_X509_AUX +#define CBB_add_space qcloudssl_CBB_add_space +#define HMAC_CTX_copy qcloudssl_HMAC_CTX_copy +#define ERR_peek_last_error qcloudssl_ERR_peek_last_error +#define ec_wNAF_mul qcloudssl_ec_wNAF_mul +#define d2i_X509_VAL qcloudssl_d2i_X509_VAL +#define SSL_CTX_get_verify_mode qcloudssl_SSL_CTX_get_verify_mode +#define SSL_get_ciphers qcloudssl_SSL_get_ciphers +#define SSL_CTX_set_grease_enabled qcloudssl_SSL_CTX_set_grease_enabled +#define SSL_CTX_sess_get_new_cb qcloudssl_SSL_CTX_sess_get_new_cb +#define EVP_AEAD_max_overhead qcloudssl_EVP_AEAD_max_overhead +#define DSA_parse_parameters qcloudssl_DSA_parse_parameters +#define SSL_CTX_set1_curves_list qcloudssl_SSL_CTX_set1_curves_list +#define EC_KEY_parse_private_key qcloudssl_EC_KEY_parse_private_key +#define sk_sort qcloudssl_sk_sort +#define SSL_CTX_set_purpose qcloudssl_SSL_CTX_set_purpose +#define BN_cmp qcloudssl_BN_cmp +#define X509_STORE_CTX_set_depth qcloudssl_X509_STORE_CTX_set_depth +#define PKCS5_PBKDF2_HMAC qcloudssl_PKCS5_PBKDF2_HMAC +#define lh_strhash qcloudssl_lh_strhash +#define PEM_read_bio_PrivateKey qcloudssl_PEM_read_bio_PrivateKey +#define SSL_set_srtp_profiles qcloudssl_SSL_set_srtp_profiles +#define OPENSSL_strdup 
qcloudssl_OPENSSL_strdup +#define X509_REVOKED_new qcloudssl_X509_REVOKED_new +#define OPENSSL_free qcloudssl_OPENSSL_free +#define RSA_padding_add_PKCS1_OAEP_mgf1 qcloudssl_RSA_padding_add_PKCS1_OAEP_mgf1 +#define SSL_enable_signed_cert_timestamps qcloudssl_SSL_enable_signed_cert_timestamps +#define BIO_s_fd qcloudssl_BIO_s_fd +#define X509at_delete_attr qcloudssl_X509at_delete_attr +#define CMAC_Reset qcloudssl_CMAC_Reset +#define SSL_CIPHER_get_min_version qcloudssl_SSL_CIPHER_get_min_version +#define d2i_PUBKEY_bio qcloudssl_d2i_PUBKEY_bio +#define CBS_get_u16 qcloudssl_CBS_get_u16 +#define BN_exp qcloudssl_BN_exp +#define X509_VERIFY_PARAM_lookup qcloudssl_X509_VERIFY_PARAM_lookup +#define X509_get_ext_by_NID qcloudssl_X509_get_ext_by_NID +#define SSL_shutdown qcloudssl_SSL_shutdown +#define BN_parse_asn1_unsigned_buggy qcloudssl_BN_parse_asn1_unsigned_buggy +#define BN_bn2mpi qcloudssl_BN_bn2mpi +#define EVP_CIPHER_mode qcloudssl_EVP_CIPHER_mode +#define SSL_get_verify_mode qcloudssl_SSL_get_verify_mode +#define DTLSv1_client_method qcloudssl_DTLSv1_client_method +#define DH_set_ex_data qcloudssl_DH_set_ex_data +#define BN_mod_sub qcloudssl_BN_mod_sub +#define ASN1_ENUMERATED_new qcloudssl_ASN1_ENUMERATED_new +#define BIO_s_socket qcloudssl_BIO_s_socket +#define EVP_EncodeFinal qcloudssl_EVP_EncodeFinal +#define SXNETID_free qcloudssl_SXNETID_free +#define ASN1_INTEGER_cmp qcloudssl_ASN1_INTEGER_cmp +#define X509_ocspid_print qcloudssl_X509_ocspid_print +#define X509_get0_pubkey_bitstr qcloudssl_X509_get0_pubkey_bitstr +#define BN_generate_prime_ex qcloudssl_BN_generate_prime_ex +#define EVP_MD_flags qcloudssl_EVP_MD_flags +#define BN_bn2dec qcloudssl_BN_bn2dec +#define PEM_read_EC_PUBKEY qcloudssl_PEM_read_EC_PUBKEY +#define SSL_early_data_accepted qcloudssl_SSL_early_data_accepted +#define d2i_ECPrivateKey_fp qcloudssl_d2i_ECPrivateKey_fp +#define ASN1_i2d_fp qcloudssl_ASN1_i2d_fp +#define ASN1_UTCTIME_new qcloudssl_ASN1_UTCTIME_new +#define 
X509_NAME_add_entry qcloudssl_X509_NAME_add_entry +#define BN_add_word qcloudssl_BN_add_word +#define EVP_aead_aes_128_cbc_sha256_tls qcloudssl_EVP_aead_aes_128_cbc_sha256_tls +#define X509_OBJECT_free_contents qcloudssl_X509_OBJECT_free_contents +#define SSLv3_method qcloudssl_SSLv3_method +#define DH_size qcloudssl_DH_size +#define DSA_do_check_signature qcloudssl_DSA_do_check_signature +#define SSL_CTX_set_read_ahead qcloudssl_SSL_CTX_set_read_ahead +#define X509_dup qcloudssl_X509_dup +#define PROXY_CERT_INFO_EXTENSION_new qcloudssl_PROXY_CERT_INFO_EXTENSION_new +#define OPENSSL_strcasecmp qcloudssl_OPENSSL_strcasecmp +#define i2d_ASN1_SET_ANY qcloudssl_i2d_ASN1_SET_ANY +#define X509_PURPOSE_get0 qcloudssl_X509_PURPOSE_get0 +#define SSL_set0_rbio qcloudssl_SSL_set0_rbio +#define X509_pubkey_digest qcloudssl_X509_pubkey_digest +#define SSL_set_tmp_dh qcloudssl_SSL_set_tmp_dh +#define OPENSSL_load_builtin_modules qcloudssl_OPENSSL_load_builtin_modules +#define i2d_PROXY_CERT_INFO_EXTENSION qcloudssl_i2d_PROXY_CERT_INFO_EXTENSION +#define kOpenSSLReasonValues qcloudssl_kOpenSSLReasonValues +#define X509_to_X509_REQ qcloudssl_X509_to_X509_REQ +#define EVP_AEAD_CTX_get_iv qcloudssl_EVP_AEAD_CTX_get_iv +#define rsa_pkey_meth qcloudssl_rsa_pkey_meth +#define SSL_get_tls_channel_id qcloudssl_SSL_get_tls_channel_id +#define ASN1_PRINTABLE_type qcloudssl_ASN1_PRINTABLE_type +#define X509_CRL_delete_ext qcloudssl_X509_CRL_delete_ext +#define ASN1_TIME_print qcloudssl_ASN1_TIME_print +#define X509_keyid_set1 qcloudssl_X509_keyid_set1 +#define EVP_CIPHER_block_size qcloudssl_EVP_CIPHER_block_size +#define X509_PURPOSE_get_by_id qcloudssl_X509_PURPOSE_get_by_id +#define X509_get_ext_by_critical qcloudssl_X509_get_ext_by_critical +#define BN_mod_exp qcloudssl_BN_mod_exp +#define SSL_select_next_proto qcloudssl_SSL_select_next_proto +#define OpenSSL_add_all_algorithms qcloudssl_OpenSSL_add_all_algorithms +#define MD5_Init qcloudssl_MD5_Init +#define BIO_set_conn_port 
qcloudssl_BIO_set_conn_port +#define d2i_RSAPrivateKey_bio qcloudssl_d2i_RSAPrivateKey_bio +#define X509_ATTRIBUTE_create qcloudssl_X509_ATTRIBUTE_create +#define X509_OBJECT_up_ref_count qcloudssl_X509_OBJECT_up_ref_count +#define X509_TRUST_set qcloudssl_X509_TRUST_set +#define SSL_CTX_set_private_key_method qcloudssl_SSL_CTX_set_private_key_method +#define SSL_set_session_id_context qcloudssl_SSL_set_session_id_context +#define rsa_less_than_words qcloudssl_rsa_less_than_words +#define v3_freshest_crl qcloudssl_v3_freshest_crl +#define BN_mod_exp2_mont qcloudssl_BN_mod_exp2_mont +#define SSL_CTX_set_verify qcloudssl_SSL_CTX_set_verify +#define ec_GFp_simple_group_copy qcloudssl_ec_GFp_simple_group_copy +#define BIO_should_retry qcloudssl_BIO_should_retry +#define EVP_CIPHER_CTX_set_app_data qcloudssl_EVP_CIPHER_CTX_set_app_data +#define SSL_set_tlsext_host_name qcloudssl_SSL_set_tlsext_host_name +#define SSL_CTX_set_tmp_dh_callback qcloudssl_SSL_CTX_set_tmp_dh_callback +#define PEM_write_bio_EC_PUBKEY qcloudssl_PEM_write_bio_EC_PUBKEY +#define SSL_num_renegotiations qcloudssl_SSL_num_renegotiations +#define x509_print_rsa_pss_params qcloudssl_x509_print_rsa_pss_params +#define X509_check_akid qcloudssl_X509_check_akid +#define BN_rshift qcloudssl_BN_rshift +#define i2d_RSA_PUBKEY_bio qcloudssl_i2d_RSA_PUBKEY_bio +#define ASN1_UTF8STRING_new qcloudssl_ASN1_UTF8STRING_new +#define ec_GFp_simple_point_clear_finish qcloudssl_ec_GFp_simple_point_clear_finish +#define RAND_cleanup qcloudssl_RAND_cleanup +#define x25519_ge_sub qcloudssl_x25519_ge_sub +#define ERR_set_mark qcloudssl_ERR_set_mark +#define SSL_CTX_set_ed25519_enabled qcloudssl_SSL_CTX_set_ed25519_enabled +#define X509_CRL_METHOD_new qcloudssl_X509_CRL_METHOD_new +#define ASN1_template_i2d qcloudssl_ASN1_template_i2d +#define ENGINE_free qcloudssl_ENGINE_free +#define AUTHORITY_INFO_ACCESS_it qcloudssl_AUTHORITY_INFO_ACCESS_it +#define X509_REQ_add_extensions qcloudssl_X509_REQ_add_extensions +#define 
X509_PUBKEY_get0_param qcloudssl_X509_PUBKEY_get0_param +#define X509_CRL_get_ext qcloudssl_X509_CRL_get_ext +#define X509_get_ext qcloudssl_X509_get_ext +#define i2d_ASN1_INTEGER qcloudssl_i2d_ASN1_INTEGER +#define CRYPTO_gcm128_init qcloudssl_CRYPTO_gcm128_init +#define CBS_get_u32 qcloudssl_CBS_get_u32 +#define X509_STORE_CTX_set_verify_cb qcloudssl_X509_STORE_CTX_set_verify_cb +#define ASN1_object_size qcloudssl_ASN1_object_size +#define ASN1_STRING_dup qcloudssl_ASN1_STRING_dup +#define EC_POINT_copy qcloudssl_EC_POINT_copy +#define ASN1_T61STRING_new qcloudssl_ASN1_T61STRING_new +#define RSA_padding_check_PKCS1_type_1 qcloudssl_RSA_padding_check_PKCS1_type_1 +#define CRYPTO_STATIC_MUTEX_unlock_read qcloudssl_CRYPTO_STATIC_MUTEX_unlock_read +#define CBS_asn1_bitstring_has_bit qcloudssl_CBS_asn1_bitstring_has_bit +#define X509_policy_level_get0_node qcloudssl_X509_policy_level_get0_node +#define OBJ_ln2nid qcloudssl_OBJ_ln2nid +#define SSL_CTX_set_tmp_dh qcloudssl_SSL_CTX_set_tmp_dh +#define PEM_write_bio_SSL_SESSION qcloudssl_PEM_write_bio_SSL_SESSION +#define EVP_EncodeBlock qcloudssl_EVP_EncodeBlock +#define X509_NAME_set qcloudssl_X509_NAME_set +#define bn_mul_comba8 qcloudssl_bn_mul_comba8 +#define EC_POINT_add qcloudssl_EC_POINT_add +#define ASN1_TBOOLEAN_it qcloudssl_ASN1_TBOOLEAN_it +#define SSL_CTX_get_session_cache_mode qcloudssl_SSL_CTX_get_session_cache_mode +#define X509_ATTRIBUTE_get0_type qcloudssl_X509_ATTRIBUTE_get0_type +#define EC_KEY_copy qcloudssl_EC_KEY_copy +#define EVP_PKEY_size qcloudssl_EVP_PKEY_size +#define BN_num_bytes qcloudssl_BN_num_bytes +#define BN_mod_sub_quick qcloudssl_BN_mod_sub_quick +#define OBJ_nid2obj qcloudssl_OBJ_nid2obj +#define i2d_X509_REQ_fp qcloudssl_i2d_X509_REQ_fp +#define d2i_DHparams qcloudssl_d2i_DHparams +#define EC_KEY_set_enc_flags qcloudssl_EC_KEY_set_enc_flags +#define d2i_DIST_POINT_NAME qcloudssl_d2i_DIST_POINT_NAME +#define SSL_CTX_add_extra_chain_cert qcloudssl_SSL_CTX_add_extra_chain_cert +#define 
EVP_CIPHER_CTX_set_key_length qcloudssl_EVP_CIPHER_CTX_set_key_length +#define SSL_in_false_start qcloudssl_SSL_in_false_start +#define EVP_des_ede qcloudssl_EVP_des_ede +#define PEM_read_SSL_SESSION qcloudssl_PEM_read_SSL_SESSION +#define SSL_CTX_set_alpn_select_cb qcloudssl_SSL_CTX_set_alpn_select_cb +#define ec_GFp_simple_group_finish qcloudssl_ec_GFp_simple_group_finish +#define EC_KEY_set_private_key qcloudssl_EC_KEY_set_private_key +#define CRYPTO_BUFFER_data qcloudssl_CRYPTO_BUFFER_data +#define CRYPTO_gcm128_encrypt_ctr32 qcloudssl_CRYPTO_gcm128_encrypt_ctr32 +#define SSL_get_ex_data_X509_STORE_CTX_idx qcloudssl_SSL_get_ex_data_X509_STORE_CTX_idx +#define BIO_get_mem_ptr qcloudssl_BIO_get_mem_ptr +#define DH_get_ex_data qcloudssl_DH_get_ex_data +#define PEM_ASN1_write qcloudssl_PEM_ASN1_write +#define v3_policy_constraints qcloudssl_v3_policy_constraints +#define ASN1_TIME_to_generalizedtime qcloudssl_ASN1_TIME_to_generalizedtime +#define EC_KEY_set_asn1_flag qcloudssl_EC_KEY_set_asn1_flag +#define EVP_PKEY_CTX_set_rsa_oaep_md qcloudssl_EVP_PKEY_CTX_set_rsa_oaep_md +#define X509_CRL_digest qcloudssl_X509_CRL_digest +#define SSL_CTX_set_session_psk_dhe_timeout qcloudssl_SSL_CTX_set_session_psk_dhe_timeout +#define X509_PURPOSE_cleanup qcloudssl_X509_PURPOSE_cleanup +#define d2i_ASN1_PRINTABLE qcloudssl_d2i_ASN1_PRINTABLE +#define ENGINE_register_all_complete qcloudssl_ENGINE_register_all_complete +#define EC_KEY_set_public_key_affine_coordinates qcloudssl_EC_KEY_set_public_key_affine_coordinates +#define d2i_NETSCAPE_SPKAC qcloudssl_d2i_NETSCAPE_SPKAC +#define X509_sign qcloudssl_X509_sign +#define X509_set_notAfter qcloudssl_X509_set_notAfter +#define i2d_PublicKey qcloudssl_i2d_PublicKey +#define OBJ_obj2nid qcloudssl_OBJ_obj2nid +#define tree_find_sk qcloudssl_tree_find_sk +#define EVP_parse_private_key qcloudssl_EVP_parse_private_key +#define EVP_VerifyInit_ex qcloudssl_EVP_VerifyInit_ex +#define PEM_read_bio_X509_AUX qcloudssl_PEM_read_bio_X509_AUX 
+#define SHA1_Transform qcloudssl_SHA1_Transform +#define AES_decrypt qcloudssl_AES_decrypt +#define i2d_PKEY_USAGE_PERIOD qcloudssl_i2d_PKEY_USAGE_PERIOD +#define a2i_IPADDRESS qcloudssl_a2i_IPADDRESS +#define ASN1_IA5STRING_it qcloudssl_ASN1_IA5STRING_it +#define X509_REQ_check_private_key qcloudssl_X509_REQ_check_private_key +#define i2d_ASN1_PRINTABLESTRING qcloudssl_i2d_ASN1_PRINTABLESTRING +#define d2i_X509_EXTENSION qcloudssl_d2i_X509_EXTENSION +#define PKCS12_get_key_and_certs qcloudssl_PKCS12_get_key_and_certs +#define i2d_X509_REQ_INFO qcloudssl_i2d_X509_REQ_INFO +#define BN_zero qcloudssl_BN_zero +#define EVP_get_digestbynid qcloudssl_EVP_get_digestbynid +#define DSA_parse_public_key qcloudssl_DSA_parse_public_key +#define i2a_ASN1_STRING qcloudssl_i2a_ASN1_STRING +#define X509_STORE_get1_crls qcloudssl_X509_STORE_get1_crls +#define SSL_CTX_set_tlsext_ticket_keys qcloudssl_SSL_CTX_set_tlsext_ticket_keys +#define BIO_write qcloudssl_BIO_write +#define BN_BLINDING_invert qcloudssl_BN_BLINDING_invert +#define ECDSA_SIG_from_bytes qcloudssl_ECDSA_SIG_from_bytes +#define SSL_state_string qcloudssl_SSL_state_string +#define DH_new qcloudssl_DH_new +#define X509_VERIFY_PARAM_set1 qcloudssl_X509_VERIFY_PARAM_set1 +#define d2i_X509_REQ qcloudssl_d2i_X509_REQ +#define x25519_ge_frombytes_vartime qcloudssl_x25519_ge_frombytes_vartime +#define X509_find_by_subject qcloudssl_X509_find_by_subject +#define ASN1_item_ex_new qcloudssl_ASN1_item_ex_new +#define NETSCAPE_SPKAC_it qcloudssl_NETSCAPE_SPKAC_it +#define X509_NAME_ENTRY_get_object qcloudssl_X509_NAME_ENTRY_get_object +#define i2d_ASN1_T61STRING qcloudssl_i2d_ASN1_T61STRING +#define PEM_read_DSAPrivateKey qcloudssl_PEM_read_DSAPrivateKey +#define OPENSSL_add_all_algorithms_conf qcloudssl_OPENSSL_add_all_algorithms_conf +#define ASN1_BIT_STRING_it qcloudssl_ASN1_BIT_STRING_it +#define PEM_read_bio_SSL_SESSION qcloudssl_PEM_read_bio_SSL_SESSION +#define CRYPTO_gcm128_decrypt_ctr32 
qcloudssl_CRYPTO_gcm128_decrypt_ctr32 +#define SHA384 qcloudssl_SHA384 +#define SSL_set_quiet_shutdown qcloudssl_SSL_set_quiet_shutdown +#define OPENSSL_hash32 qcloudssl_OPENSSL_hash32 +#define CBS_init qcloudssl_CBS_init +#define asn1_enc_free qcloudssl_asn1_enc_free +#define X509V3_EXT_CRL_add_nconf qcloudssl_X509V3_EXT_CRL_add_nconf +#define BN_marshal_asn1 qcloudssl_BN_marshal_asn1 +#define RSA_up_ref qcloudssl_RSA_up_ref +#define SSL_CTX_sess_connect_renegotiate qcloudssl_SSL_CTX_sess_connect_renegotiate +#define CRYPTO_set_locking_callback qcloudssl_CRYPTO_set_locking_callback +#define PEM_write_PKCS8PrivateKey qcloudssl_PEM_write_PKCS8PrivateKey +#define BIO_eof qcloudssl_BIO_eof +#define NCONF_load qcloudssl_NCONF_load +#define ECDSA_do_sign qcloudssl_ECDSA_do_sign +#define X509_load_crl_file qcloudssl_X509_load_crl_file +#define level_find_node qcloudssl_level_find_node +#define EVP_CIPHER_CTX_mode qcloudssl_EVP_CIPHER_CTX_mode +#define CBS_get_asn1 qcloudssl_CBS_get_asn1 +#define SSL_CTX_set_session_id_context qcloudssl_SSL_CTX_set_session_id_context +#define CRYPTO_memcmp qcloudssl_CRYPTO_memcmp +#define DIST_POINT_NAME_it qcloudssl_DIST_POINT_NAME_it +#define X509_STORE_add_lookup qcloudssl_X509_STORE_add_lookup +#define X509V3_EXT_free qcloudssl_X509V3_EXT_free +#define SSL_CTX_set_cert_verify_callback qcloudssl_SSL_CTX_set_cert_verify_callback +#define SSL_CTX_set_default_passwd_cb_userdata qcloudssl_SSL_CTX_set_default_passwd_cb_userdata +#define i2d_OTHERNAME qcloudssl_i2d_OTHERNAME +#define X509_find_by_issuer_and_serial qcloudssl_X509_find_by_issuer_and_serial +#define SSL_CTX_set_tls_channel_id_enabled qcloudssl_SSL_CTX_set_tls_channel_id_enabled +#define sk_delete_ptr qcloudssl_sk_delete_ptr +#define EVP_DigestUpdate qcloudssl_EVP_DigestUpdate +#define i2d_X509_PUBKEY qcloudssl_i2d_X509_PUBKEY +#define ED25519_keypair_from_seed qcloudssl_ED25519_keypair_from_seed +#define ASN1_STRING_get_default_mask qcloudssl_ASN1_STRING_get_default_mask 
+#define EC_POINT_new qcloudssl_EC_POINT_new +#define SXNET_free qcloudssl_SXNET_free +#define BIO_gets qcloudssl_BIO_gets +#define SSL_get_current_expansion qcloudssl_SSL_get_current_expansion +#define OPENSSL_gmtime qcloudssl_OPENSSL_gmtime +#define i2t_ASN1_OBJECT qcloudssl_i2t_ASN1_OBJECT +#define BN_mod_pow2 qcloudssl_BN_mod_pow2 +#define SSL_CTX_clear_chain_certs qcloudssl_SSL_CTX_clear_chain_certs +#define POLICYQUALINFO_new qcloudssl_POLICYQUALINFO_new +#define SSL_get0_ocsp_response qcloudssl_SSL_get0_ocsp_response +#define X509_STORE_set_flags qcloudssl_X509_STORE_set_flags +#define DH_num_bits qcloudssl_DH_num_bits +#define ec_asn1_meth qcloudssl_ec_asn1_meth +#define X509_NAME_ENTRY_create_by_NID qcloudssl_X509_NAME_ENTRY_create_by_NID +#define SSL_CTX_set_next_proto_select_cb qcloudssl_SSL_CTX_set_next_proto_select_cb +#define v3_crl_num qcloudssl_v3_crl_num +#define BN_is_bit_set qcloudssl_BN_is_bit_set +#define X509_PURPOSE_get_trust qcloudssl_X509_PURPOSE_get_trust +#define CBS_get_bytes qcloudssl_CBS_get_bytes +#define i2d_X509_fp qcloudssl_i2d_X509_fp +#define bn_mod_inverse_prime qcloudssl_bn_mod_inverse_prime +#define SSL_get_verify_depth qcloudssl_SSL_get_verify_depth +#define X509_REQ_extension_nid qcloudssl_X509_REQ_extension_nid +#define EVP_CIPHER_CTX_set_padding qcloudssl_EVP_CIPHER_CTX_set_padding +#define DH_check qcloudssl_DH_check +#define ACCESS_DESCRIPTION_new qcloudssl_ACCESS_DESCRIPTION_new +#define BN_to_ASN1_ENUMERATED qcloudssl_BN_to_ASN1_ENUMERATED +#define RAND_pseudo_bytes qcloudssl_RAND_pseudo_bytes +#define EVP_CIPHER_CTX_ctrl qcloudssl_EVP_CIPHER_CTX_ctrl +#define X509_sign_ctx qcloudssl_X509_sign_ctx +#define EVP_aead_aes_256_cbc_sha256_tls qcloudssl_EVP_aead_aes_256_cbc_sha256_tls +#define SSL_set_custom_verify qcloudssl_SSL_set_custom_verify +#define ASN1_item_d2i_bio qcloudssl_ASN1_item_d2i_bio +#define X509_STORE_CTX_init qcloudssl_X509_STORE_CTX_init +#define X509_REQ_verify qcloudssl_X509_REQ_verify +#define 
RSA_get_ex_data qcloudssl_RSA_get_ex_data +#define SSL_CIPHER_get_cipher_nid qcloudssl_SSL_CIPHER_get_cipher_nid +#define sk_is_sorted qcloudssl_sk_is_sorted +#define BIO_ctrl qcloudssl_BIO_ctrl +#define i2d_DSAPublicKey qcloudssl_i2d_DSAPublicKey +#define ASN1_item_dup qcloudssl_ASN1_item_dup +#define BN_set_word qcloudssl_BN_set_word +#define DSA_SIG_new qcloudssl_DSA_SIG_new +#define SSL_connect qcloudssl_SSL_connect +#define X509_LOOKUP_by_subject qcloudssl_X509_LOOKUP_by_subject +#define ECDSA_SIG_free qcloudssl_ECDSA_SIG_free +#define RSA_verify_raw qcloudssl_RSA_verify_raw +#define i2d_X509_VAL qcloudssl_i2d_X509_VAL +#define SSL_SESSION_get_version qcloudssl_SSL_SESSION_get_version +#define md4_block_data_order qcloudssl_md4_block_data_order +#define ASN1_UNIVERSALSTRING_free qcloudssl_ASN1_UNIVERSALSTRING_free +#define EC_KEY_get_ex_new_index qcloudssl_EC_KEY_get_ex_new_index +#define SSL_use_certificate_ASN1 qcloudssl_SSL_use_certificate_ASN1 +#define X509V3_add1_i2d qcloudssl_X509V3_add1_i2d +#define EVP_sha224 qcloudssl_EVP_sha224 +#define ec_GFp_simple_cmp qcloudssl_ec_GFp_simple_cmp +#define X509_LOOKUP_new qcloudssl_X509_LOOKUP_new +#define SHA512_Final qcloudssl_SHA512_Final +#define SSL_get_cipher_list qcloudssl_SSL_get_cipher_list +#define PEM_write_bio_X509 qcloudssl_PEM_write_bio_X509 +#define NCONF_load_bio qcloudssl_NCONF_load_bio +#define X509_issuer_name_hash_old qcloudssl_X509_issuer_name_hash_old +#define SSL_CTX_remove_session qcloudssl_SSL_CTX_remove_session +#define X509_STORE_CTX_get_current_cert qcloudssl_X509_STORE_CTX_get_current_cert +#define X509_check_host qcloudssl_X509_check_host +#define PEM_read_DHparams qcloudssl_PEM_read_DHparams +#define EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv qcloudssl_EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv +#define asn1_generalizedtime_to_tm qcloudssl_asn1_generalizedtime_to_tm +#define BN_pseudo_rand_range qcloudssl_BN_pseudo_rand_range +#define OPENSSL_realloc qcloudssl_OPENSSL_realloc +#define 
PEM_read_RSAPrivateKey qcloudssl_PEM_read_RSAPrivateKey +#define EVP_DecodeFinal qcloudssl_EVP_DecodeFinal +#define d2i_PrivateKey_fp qcloudssl_d2i_PrivateKey_fp +#define EVP_MD_CTX_block_size qcloudssl_EVP_MD_CTX_block_size +#define CBB_add_u16 qcloudssl_CBB_add_u16 +#define SSLv3_server_method qcloudssl_SSLv3_server_method +#define RAND_add qcloudssl_RAND_add +#define BN_ucmp qcloudssl_BN_ucmp +#define X509_EXTENSION_create_by_OBJ qcloudssl_X509_EXTENSION_create_by_OBJ +#define EVP_CIPHER_CTX_new qcloudssl_EVP_CIPHER_CTX_new +#define CBS_peek_asn1_tag qcloudssl_CBS_peek_asn1_tag +#define EVP_rc2_cbc qcloudssl_EVP_rc2_cbc +#define BIO_reset qcloudssl_BIO_reset +#define SSL_get_wbio qcloudssl_SSL_get_wbio +#define EVP_parse_public_key qcloudssl_EVP_parse_public_key +#define GENERAL_NAMES_new qcloudssl_GENERAL_NAMES_new +#define SSL_pending qcloudssl_SSL_pending +#define CRYPTO_ofb128_encrypt qcloudssl_CRYPTO_ofb128_encrypt +#define asn1_enc_save qcloudssl_asn1_enc_save +#define SSL_set0_wbio qcloudssl_SSL_set0_wbio +#define PEM_write_bio_RSAPrivateKey qcloudssl_PEM_write_bio_RSAPrivateKey +#define policy_node_free qcloudssl_policy_node_free +#define SSL_get_cipher_by_value qcloudssl_SSL_get_cipher_by_value +#define SSL_CTX_clear_mode qcloudssl_SSL_CTX_clear_mode +#define SSL_set_private_key_digest_prefs qcloudssl_SSL_set_private_key_digest_prefs +#define X509_supported_extension qcloudssl_X509_supported_extension +#define d2i_RSA_PUBKEY_bio qcloudssl_d2i_RSA_PUBKEY_bio +#define CRYPTO_BUFFER_new qcloudssl_CRYPTO_BUFFER_new +#define EC_POINT_get_affine_coordinates_GFp qcloudssl_EC_POINT_get_affine_coordinates_GFp +#define SSL_set_tmp_dh_callback qcloudssl_SSL_set_tmp_dh_callback +#define X509_NAME_ENTRY_it qcloudssl_X509_NAME_ENTRY_it +#define SHA1_Init qcloudssl_SHA1_Init +#define SSL_CIPHER_get_name qcloudssl_SSL_CIPHER_get_name +#define PEM_proc_type qcloudssl_PEM_proc_type +#define CBS_get_optional_asn1 qcloudssl_CBS_get_optional_asn1 +#define EVP_DigestInit_ex 
qcloudssl_EVP_DigestInit_ex +#define v2i_ASN1_BIT_STRING qcloudssl_v2i_ASN1_BIT_STRING +#define CRYPTO_gcm128_decrypt qcloudssl_CRYPTO_gcm128_decrypt +#define SHA1_Update qcloudssl_SHA1_Update +#define X509_LOOKUP_ctrl qcloudssl_X509_LOOKUP_ctrl +#define EVP_PKEY_is_opaque qcloudssl_EVP_PKEY_is_opaque +#define BIO_should_read qcloudssl_BIO_should_read +#define DTLS_server_method qcloudssl_DTLS_server_method +#define ASN1_TIME_new qcloudssl_ASN1_TIME_new +#define ERR_get_error_line_data qcloudssl_ERR_get_error_line_data +#define i2d_ACCESS_DESCRIPTION qcloudssl_i2d_ACCESS_DESCRIPTION +#define SSL_CTX_set_current_time_cb qcloudssl_SSL_CTX_set_current_time_cb +#define d2i_RSAPrivateKey_fp qcloudssl_d2i_RSAPrivateKey_fp +#define ECDSA_sign qcloudssl_ECDSA_sign +#define CBS_get_u8 qcloudssl_CBS_get_u8 +#define DSA_free qcloudssl_DSA_free +#define BIO_number_written qcloudssl_BIO_number_written +#define X509V3_add_value_bool_nf qcloudssl_X509V3_add_value_bool_nf +#define x509_rsa_ctx_to_pss qcloudssl_x509_rsa_ctx_to_pss +#define DIRECTORYSTRING_free qcloudssl_DIRECTORYSTRING_free +#define X509_keyid_get0 qcloudssl_X509_keyid_get0 +#define SSL_get_servername qcloudssl_SSL_get_servername +#define SSL_set_trust qcloudssl_SSL_set_trust +#define X509_VERIFY_PARAM_new qcloudssl_X509_VERIFY_PARAM_new +#define SXNET_new qcloudssl_SXNET_new +#define EC_GROUP_dup qcloudssl_EC_GROUP_dup +#define SSL_CTX_get_read_ahead qcloudssl_SSL_CTX_get_read_ahead +#define EVP_add_cipher_alias qcloudssl_EVP_add_cipher_alias +#define CBS_get_u8_length_prefixed qcloudssl_CBS_get_u8_length_prefixed +#define X509V3_EXT_get qcloudssl_X509V3_EXT_get +#define BN_num_bits_word qcloudssl_BN_num_bits_word +#define SSL_get_peer_signature_algorithm qcloudssl_SSL_get_peer_signature_algorithm +#define BIO_set_mem_eof_return qcloudssl_BIO_set_mem_eof_return +#define X509_trust_clear qcloudssl_X509_trust_clear +#define d2i_X509_REQ_fp qcloudssl_d2i_X509_REQ_fp +#define BIO_flush qcloudssl_BIO_flush +#define 
d2i_EC_PUBKEY qcloudssl_d2i_EC_PUBKEY +#define bn_mont_n0 qcloudssl_bn_mont_n0 +#define X509_VERIFY_PARAM_get_count qcloudssl_X509_VERIFY_PARAM_get_count +#define RAND_file_name qcloudssl_RAND_file_name +#define BIO_printf qcloudssl_BIO_printf +#define SSL_get0_certificate_types qcloudssl_SSL_get0_certificate_types +#define bn_jacobi qcloudssl_bn_jacobi +#define DSA_SIG_free qcloudssl_DSA_SIG_free +#define SPAKE2_CTX_free qcloudssl_SPAKE2_CTX_free +#define i2d_ASN1_UTCTIME qcloudssl_i2d_ASN1_UTCTIME +#define sk_delete qcloudssl_sk_delete +#define SSL_CIPHER_description qcloudssl_SSL_CIPHER_description +#define SSL_peek qcloudssl_SSL_peek +#define ASN1_IA5STRING_free qcloudssl_ASN1_IA5STRING_free +#define CRYPTO_POLYVAL_update_blocks qcloudssl_CRYPTO_POLYVAL_update_blocks +#define CRYPTO_MUTEX_lock_write qcloudssl_CRYPTO_MUTEX_lock_write +#define EC_POINT_is_at_infinity qcloudssl_EC_POINT_is_at_infinity +#define X509_STORE_CTX_set_time qcloudssl_X509_STORE_CTX_set_time +#define X509_ATTRIBUTE_set1_object qcloudssl_X509_ATTRIBUTE_set1_object +#define ENGINE_new qcloudssl_ENGINE_new +#define SSL_CTX_get_client_CA_list qcloudssl_SSL_CTX_get_client_CA_list +#define HMAC_CTX_cleanup qcloudssl_HMAC_CTX_cleanup +#define DSA_SIG_parse qcloudssl_DSA_SIG_parse +#define CBS_stow qcloudssl_CBS_stow +#define X509_STORE_CTX_get1_issuer qcloudssl_X509_STORE_CTX_get1_issuer +#define ASN1_GENERALIZEDTIME_print qcloudssl_ASN1_GENERALIZEDTIME_print +#define RSA_verify_pss_mgf1 qcloudssl_RSA_verify_pss_mgf1 +#define ERR_lib_error_string qcloudssl_ERR_lib_error_string +#define HMAC_Init_ex qcloudssl_HMAC_Init_ex +#define v3_crl_reason qcloudssl_v3_crl_reason +#define CRYPTO_poly1305_finish qcloudssl_CRYPTO_poly1305_finish +#define ASN1_UTF8STRING_free qcloudssl_ASN1_UTF8STRING_free +#define X509_policy_node_get0_qualifiers qcloudssl_X509_policy_node_get0_qualifiers +#define EVP_rc4 qcloudssl_EVP_rc4 +#define EVP_CIPHER_CTX_cleanup qcloudssl_EVP_CIPHER_CTX_cleanup +#define 
CRL_DIST_POINTS_it qcloudssl_CRL_DIST_POINTS_it +#define ASN1_GENERALIZEDTIME_set_string qcloudssl_ASN1_GENERALIZEDTIME_set_string +#define POLICYQUALINFO_it qcloudssl_POLICYQUALINFO_it +#define CRYPTO_BUFFER_POOL_new qcloudssl_CRYPTO_BUFFER_POOL_new +#define X509_get_default_cert_dir_env qcloudssl_X509_get_default_cert_dir_env +#define SSL_set1_verify_cert_store qcloudssl_SSL_set1_verify_cert_store +#define EVP_tls_cbc_remove_padding qcloudssl_EVP_tls_cbc_remove_padding +#define X509_PUBKEY_new qcloudssl_X509_PUBKEY_new +#define SSL_CTX_set_tlsext_servername_arg qcloudssl_SSL_CTX_set_tlsext_servername_arg +#define i2d_X509_CRL_fp qcloudssl_i2d_X509_CRL_fp +#define EVP_DecodeBlock qcloudssl_EVP_DecodeBlock +#define TLSv1_2_server_method qcloudssl_TLSv1_2_server_method +#define CBB_add_u32 qcloudssl_CBB_add_u32 +#define SSL_get_peer_full_cert_chain qcloudssl_SSL_get_peer_full_cert_chain +#define SSL_CIPHER_get_id qcloudssl_SSL_CIPHER_get_id +#define SSL_CTX_set_tmp_ecdh qcloudssl_SSL_CTX_set_tmp_ecdh +#define BIO_get_retry_flags qcloudssl_BIO_get_retry_flags +#define EC_KEY_parse_curve_name qcloudssl_EC_KEY_parse_curve_name +#define X509_VERIFY_PARAM_set_trust qcloudssl_X509_VERIFY_PARAM_set_trust +#define SSL_CTX_sess_set_get_cb qcloudssl_SSL_CTX_sess_set_get_cb +#define NAME_CONSTRAINTS_it qcloudssl_NAME_CONSTRAINTS_it +#define ASN1_BIT_STRING_new qcloudssl_ASN1_BIT_STRING_new +#define SSL_CTX_get_ex_new_index qcloudssl_SSL_CTX_get_ex_new_index +#define SSL_set_signing_algorithm_prefs qcloudssl_SSL_set_signing_algorithm_prefs +#define EC_GFp_nistp256_method qcloudssl_EC_GFp_nistp256_method +#define lh_delete qcloudssl_lh_delete +#define ec_GFp_simple_is_at_infinity qcloudssl_ec_GFp_simple_is_at_infinity +#define EVP_PKEY_new_ed25519_private qcloudssl_EVP_PKEY_new_ed25519_private +#define X509_chain_check_suiteb qcloudssl_X509_chain_check_suiteb +#define SSL_get0_server_requested_CAs qcloudssl_SSL_get0_server_requested_CAs +#define EVP_aead_null_sha1_ssl3 
qcloudssl_EVP_aead_null_sha1_ssl3 +#define i2d_ECPrivateKey_bio qcloudssl_i2d_ECPrivateKey_bio +#define ECDSA_SIG_marshal qcloudssl_ECDSA_SIG_marshal +#define X509_NAME_INTERNAL_it qcloudssl_X509_NAME_INTERNAL_it +#define i2d_DHparams qcloudssl_i2d_DHparams +#define CBB_add_u8 qcloudssl_CBB_add_u8 +#define X509_NAME_get_index_by_NID qcloudssl_X509_NAME_get_index_by_NID +#define DH_up_ref qcloudssl_DH_up_ref +#define BN_clear_free qcloudssl_BN_clear_free +#define SSL_CTX_set1_chain qcloudssl_SSL_CTX_set1_chain +#define DHparams_dup qcloudssl_DHparams_dup +#define SSL_CTX_add0_chain_cert qcloudssl_SSL_CTX_add0_chain_cert +#define RSA_parse_public_key qcloudssl_RSA_parse_public_key +#define SSL_CTX_set_srtp_profiles qcloudssl_SSL_CTX_set_srtp_profiles +#define SSL_set_info_callback qcloudssl_SSL_set_info_callback +#define X509_CRL_get0_by_cert qcloudssl_X509_CRL_get0_by_cert +#define NETSCAPE_SPKI_get_pubkey qcloudssl_NETSCAPE_SPKI_get_pubkey +#define EVP_PKEY_set1_RSA qcloudssl_EVP_PKEY_set1_RSA +#define X509_VERIFY_PARAM_add0_policy qcloudssl_X509_VERIFY_PARAM_add0_policy +#define EC_POINT_dbl qcloudssl_EC_POINT_dbl +#define i2d_DSAparams qcloudssl_i2d_DSAparams +#define DIRECTORYSTRING_it qcloudssl_DIRECTORYSTRING_it +#define i2d_NOTICEREF qcloudssl_i2d_NOTICEREF +#define v3_name_constraints qcloudssl_v3_name_constraints +#define a2i_ipadd qcloudssl_a2i_ipadd +#define EC_KEY_get_ex_data qcloudssl_EC_KEY_get_ex_data +#define ASN1_UTCTIME_set_string qcloudssl_ASN1_UTCTIME_set_string +#define SSL_SESSION_get_timeout qcloudssl_SSL_SESSION_get_timeout +#define BIO_ctrl_get_write_guarantee qcloudssl_BIO_ctrl_get_write_guarantee +#define CBB_discard_child qcloudssl_CBB_discard_child +#define EVP_PKEY_CTX_get_rsa_oaep_md qcloudssl_EVP_PKEY_CTX_get_rsa_oaep_md +#define X509_signature_print qcloudssl_X509_signature_print +#define BN_mpi2bn qcloudssl_BN_mpi2bn +#define i2d_AUTHORITY_INFO_ACCESS qcloudssl_i2d_AUTHORITY_INFO_ACCESS +#define BIO_new_mem_buf 
qcloudssl_BIO_new_mem_buf +#define d2i_X509_PUBKEY qcloudssl_d2i_X509_PUBKEY +#define SSL_clear_options qcloudssl_SSL_clear_options +#define X509_ALGOR_dup qcloudssl_X509_ALGOR_dup +#define X509_REVOKED_get_ext qcloudssl_X509_REVOKED_get_ext +#define EVP_PKEY_CTX_get0_rsa_oaep_label qcloudssl_EVP_PKEY_CTX_get0_rsa_oaep_label +#define SSL_CIPHER_get_version qcloudssl_SSL_CIPHER_get_version +#define SSL_CTX_sess_set_remove_cb qcloudssl_SSL_CTX_sess_set_remove_cb +#define EC_KEY_get_conv_form qcloudssl_EC_KEY_get_conv_form +#define X509_CRL_INFO_free qcloudssl_X509_CRL_INFO_free +#define EVP_DecryptInit_ex qcloudssl_EVP_DecryptInit_ex +#define X509_STORE_CTX_get_ex_new_index qcloudssl_X509_STORE_CTX_get_ex_new_index +#define EC_GFp_mont_method qcloudssl_EC_GFp_mont_method +#define EVP_CIPHER_nid qcloudssl_EVP_CIPHER_nid +#define X509_NAME_print qcloudssl_X509_NAME_print +#define ASN1_dup qcloudssl_ASN1_dup +#define SSL_use_RSAPrivateKey qcloudssl_SSL_use_RSAPrivateKey +#define a2i_IPADDRESS_NC qcloudssl_a2i_IPADDRESS_NC +#define EC_GROUP_set_point_conversion_form qcloudssl_EC_GROUP_set_point_conversion_form +#define SSL_CTX_use_RSAPrivateKey_ASN1 qcloudssl_SSL_CTX_use_RSAPrivateKey_ASN1 +#define i2d_X509_REQ qcloudssl_i2d_X509_REQ +#define X509_STORE_add_crl qcloudssl_X509_STORE_add_crl +#define CRYPTO_MUTEX_cleanup qcloudssl_CRYPTO_MUTEX_cleanup +#define X509_PUBKEY_get qcloudssl_X509_PUBKEY_get +#define X509_policy_node_get0_parent qcloudssl_X509_policy_node_get0_parent +#define SSL_in_early_data qcloudssl_SSL_in_early_data +#define d2i_ACCESS_DESCRIPTION qcloudssl_d2i_ACCESS_DESCRIPTION +#define SSL_set_renegotiate_mode qcloudssl_SSL_set_renegotiate_mode +#define POLICY_MAPPING_free qcloudssl_POLICY_MAPPING_free +#define ec_GFp_simple_get_Jprojective_coordinates_GFp qcloudssl_ec_GFp_simple_get_Jprojective_coordinates_GFp +#define EC_POINT_set_affine_coordinates_GFp qcloudssl_EC_POINT_set_affine_coordinates_GFp +#define EVP_DecodeBase64 qcloudssl_EVP_DecodeBase64 
+#define EVP_add_digest qcloudssl_EVP_add_digest +#define X509_get_subject_name qcloudssl_X509_get_subject_name +#define NAME_CONSTRAINTS_new qcloudssl_NAME_CONSTRAINTS_new +#define ASN1_T61STRING_it qcloudssl_ASN1_T61STRING_it +#define EVP_aes_192_gcm qcloudssl_EVP_aes_192_gcm +#define EVP_PKEY_free qcloudssl_EVP_PKEY_free +#define X509_PKEY_free qcloudssl_X509_PKEY_free +#define ASN1_STRING_copy qcloudssl_ASN1_STRING_copy +#define X509_STORE_CTX_zero qcloudssl_X509_STORE_CTX_zero +#define PEM_write_bio qcloudssl_PEM_write_bio +#define X509_check_private_key qcloudssl_X509_check_private_key +#define SSL_CTX_set_ocsp_response qcloudssl_SSL_CTX_set_ocsp_response +#define PROXY_POLICY_it qcloudssl_PROXY_POLICY_it +#define CRYPTO_cleanup_all_ex_data qcloudssl_CRYPTO_cleanup_all_ex_data +#define EVP_get_digestbyname qcloudssl_EVP_get_digestbyname +#define d2i_ASN1_BOOLEAN qcloudssl_d2i_ASN1_BOOLEAN +#define EVP_AEAD_CTX_seal_scatter qcloudssl_EVP_AEAD_CTX_seal_scatter +#define X509at_get0_data_by_OBJ qcloudssl_X509at_get0_data_by_OBJ +#define X509_ATTRIBUTE_free qcloudssl_X509_ATTRIBUTE_free +#define SSL_set_accept_state qcloudssl_SSL_set_accept_state +#define SSL_CTX_set_strict_cipher_list qcloudssl_SSL_CTX_set_strict_cipher_list +#define CRYPTO_library_init qcloudssl_CRYPTO_library_init +#define SSL_CTX_use_certificate_chain_file qcloudssl_SSL_CTX_use_certificate_chain_file +#define SSL_CTX_set_min_proto_version qcloudssl_SSL_CTX_set_min_proto_version +#define RSA_verify qcloudssl_RSA_verify +#define EC_KEY_set_public_key qcloudssl_EC_KEY_set_public_key +#define ASN1_SEQUENCE_ANY_it qcloudssl_ASN1_SEQUENCE_ANY_it +#define BIO_new_fd qcloudssl_BIO_new_fd +#define X509V3_EXT_REQ_add_nconf qcloudssl_X509V3_EXT_REQ_add_nconf +#define X509_policy_level_node_count qcloudssl_X509_policy_level_node_count +#define EVP_PKEY_get1_EC_KEY qcloudssl_EVP_PKEY_get1_EC_KEY +#define i2d_ASN1_NULL qcloudssl_i2d_ASN1_NULL +#define ASN1_TIME_it qcloudssl_ASN1_TIME_it +#define 
NAME_CONSTRAINTS_check qcloudssl_NAME_CONSTRAINTS_check +#define CRYPTO_STATIC_MUTEX_unlock_write qcloudssl_CRYPTO_STATIC_MUTEX_unlock_write +#define EVP_PKEY_get0_EC_KEY qcloudssl_EVP_PKEY_get0_EC_KEY +#define BN_add qcloudssl_BN_add +#define SSL_get_peer_certificate qcloudssl_SSL_get_peer_certificate +#define EVP_aead_aes_256_gcm qcloudssl_EVP_aead_aes_256_gcm +#define i2d_X509_NAME qcloudssl_i2d_X509_NAME +#define EVP_MD_CTX_cleanup qcloudssl_EVP_MD_CTX_cleanup +#define OTHERNAME_new qcloudssl_OTHERNAME_new +#define X509_set_ex_data qcloudssl_X509_set_ex_data +#define i2d_DISPLAYTEXT qcloudssl_i2d_DISPLAYTEXT +#define SSL_CTX_set_next_protos_advertised_cb qcloudssl_SSL_CTX_set_next_protos_advertised_cb +#define DTLSv1_get_timeout qcloudssl_DTLSv1_get_timeout +#define EVP_PKEY_verify qcloudssl_EVP_PKEY_verify +#define SSL_CIPHER_get_kx_nid qcloudssl_SSL_CIPHER_get_kx_nid +#define PEM_write_bio_PKCS8_PRIV_KEY_INFO qcloudssl_PEM_write_bio_PKCS8_PRIV_KEY_INFO +#define BIO_get_fd qcloudssl_BIO_get_fd +#define ASN1_STRING_set qcloudssl_ASN1_STRING_set +#define RSAPublicKey_dup qcloudssl_RSAPublicKey_dup +#define X509_NAME_get_index_by_OBJ qcloudssl_X509_NAME_get_index_by_OBJ +#define BIO_set_retry_write qcloudssl_BIO_set_retry_write +#define X509_STORE_CTX_cleanup qcloudssl_X509_STORE_CTX_cleanup +#define policy_cache_set qcloudssl_policy_cache_set +#define EVP_SignInit qcloudssl_EVP_SignInit +#define PROXY_POLICY_new qcloudssl_PROXY_POLICY_new +#define d2i_X509_CRL_INFO qcloudssl_d2i_X509_CRL_INFO +#define SSL_CTX_get_options qcloudssl_SSL_CTX_get_options +#define CBB_add_u16_length_prefixed qcloudssl_CBB_add_u16_length_prefixed +#define X509_NAME_ENTRY_free qcloudssl_X509_NAME_ENTRY_free +#define SSL_get_verify_callback qcloudssl_SSL_get_verify_callback +#define X509_VERIFY_PARAM_set1_ip_asc qcloudssl_X509_VERIFY_PARAM_set1_ip_asc +#define SSL_set_strict_cipher_list qcloudssl_SSL_set_strict_cipher_list +#define CRYPTO_MUTEX_init qcloudssl_CRYPTO_MUTEX_init +#define 
DTLSv1_method qcloudssl_DTLSv1_method +#define d2i_ASN1_UINTEGER qcloudssl_d2i_ASN1_UINTEGER +#define ERR_put_error qcloudssl_ERR_put_error +#define EVP_aead_null_sha1_tls qcloudssl_EVP_aead_null_sha1_tls +#define X509_set_version qcloudssl_X509_set_version +#define SSL_CTX_get_timeout qcloudssl_SSL_CTX_get_timeout +#define BN_mod_inverse qcloudssl_BN_mod_inverse +#define SSL_CTX_get0_chain_certs qcloudssl_SSL_CTX_get0_chain_certs +#define OBJ_nid2ln qcloudssl_OBJ_nid2ln +#define v2i_GENERAL_NAME qcloudssl_v2i_GENERAL_NAME +#define EVP_aes_128_gcm qcloudssl_EVP_aes_128_gcm +#define ec_group_new qcloudssl_ec_group_new +#define X509_VERIFY_PARAM_add1_host qcloudssl_X509_VERIFY_PARAM_add1_host +#define EC_KEY_marshal_curve_name qcloudssl_EC_KEY_marshal_curve_name +#define PEM_read_bio_PUBKEY qcloudssl_PEM_read_bio_PUBKEY +#define bn_sqr_words qcloudssl_bn_sqr_words +#define PEM_read_bio_X509_REQ qcloudssl_PEM_read_bio_X509_REQ +#define TLS_method qcloudssl_TLS_method +#define CONF_parse_list qcloudssl_CONF_parse_list +#define EVP_MD_CTX_copy_ex qcloudssl_EVP_MD_CTX_copy_ex +#define EVP_AEAD_CTX_open qcloudssl_EVP_AEAD_CTX_open +#define SSL_CIPHER_get_max_version qcloudssl_SSL_CIPHER_get_max_version +#define i2d_ASN1_TYPE qcloudssl_i2d_ASN1_TYPE +#define SSL_CTX_get_tlsext_ticket_keys qcloudssl_SSL_CTX_get_tlsext_ticket_keys +#define EC_KEY_new_method qcloudssl_EC_KEY_new_method +#define asn1_ex_i2c qcloudssl_asn1_ex_i2c +#define i2d_PrivateKey qcloudssl_i2d_PrivateKey +#define NOTICEREF_new qcloudssl_NOTICEREF_new +#define DSA_marshal_parameters qcloudssl_DSA_marshal_parameters +#define ASN1_STRING_length qcloudssl_ASN1_STRING_length +#define PEM_write_DSAPrivateKey qcloudssl_PEM_write_DSAPrivateKey +#define HMAC_CTX_init qcloudssl_HMAC_CTX_init +#define BN_to_ASN1_INTEGER qcloudssl_BN_to_ASN1_INTEGER +#define EVP_AEAD_CTX_open_gather qcloudssl_EVP_AEAD_CTX_open_gather +#define i2d_POLICYQUALINFO qcloudssl_i2d_POLICYQUALINFO +#define BN_is_prime_fasttest_ex 
qcloudssl_BN_is_prime_fasttest_ex +#define hex_to_string qcloudssl_hex_to_string +#define BIO_f_ssl qcloudssl_BIO_f_ssl +#define PEM_write_bio_PrivateKey qcloudssl_PEM_write_bio_PrivateKey +#define OPENSSL_no_config qcloudssl_OPENSSL_no_config +#define X509_delete_ext qcloudssl_X509_delete_ext +#define AES_wrap_key qcloudssl_AES_wrap_key +#define X509_CRL_dup qcloudssl_X509_CRL_dup +#define i2s_ASN1_OCTET_STRING qcloudssl_i2s_ASN1_OCTET_STRING +#define EC_KEY_check_key qcloudssl_EC_KEY_check_key +#define SSL_CIPHER_is_block_cipher qcloudssl_SSL_CIPHER_is_block_cipher +#define OBJ_dup qcloudssl_OBJ_dup +#define SSL_magic_pending_session_ptr qcloudssl_SSL_magic_pending_session_ptr +#define PEM_write_bio_DSA_PUBKEY qcloudssl_PEM_write_bio_DSA_PUBKEY +#define d2i_PrivateKey qcloudssl_d2i_PrivateKey +#define ASN1_put_eoc qcloudssl_ASN1_put_eoc +#define i2d_PKCS8PrivateKey_nid_fp qcloudssl_i2d_PKCS8PrivateKey_nid_fp +#define ERR_get_next_error_library qcloudssl_ERR_get_next_error_library +#define BN_print_fp qcloudssl_BN_print_fp +#define X509_CRL_METHOD_free qcloudssl_X509_CRL_METHOD_free +#define v3_key_usage qcloudssl_v3_key_usage +#define i2d_PKCS8PrivateKey_fp qcloudssl_i2d_PKCS8PrivateKey_fp +#define X509_cmp qcloudssl_X509_cmp +#define X509_get_ex_new_index qcloudssl_X509_get_ex_new_index +#define SSL_is_server qcloudssl_SSL_is_server +#define EVP_aes_256_gcm qcloudssl_EVP_aes_256_gcm +#define EVP_MD_CTX_size qcloudssl_EVP_MD_CTX_size +#define SSL_CTX_add_client_custom_ext qcloudssl_SSL_CTX_add_client_custom_ext +#define EVP_PKEY_CTX_ctrl qcloudssl_EVP_PKEY_CTX_ctrl +#define DH_parse_parameters qcloudssl_DH_parse_parameters +#define BIO_hexdump qcloudssl_BIO_hexdump +#define d2i_EC_PUBKEY_bio qcloudssl_d2i_EC_PUBKEY_bio +#define CRYPTO_get_lock_name qcloudssl_CRYPTO_get_lock_name +#define SSL_CTX_sess_hits qcloudssl_SSL_CTX_sess_hits +#define i2d_ASN1_OBJECT qcloudssl_i2d_ASN1_OBJECT +#define X509_STORE_new qcloudssl_X509_STORE_new +#define i2d_DSA_PUBKEY_fp 
qcloudssl_i2d_DSA_PUBKEY_fp +#define i2d_PKCS8PrivateKey_nid_bio qcloudssl_i2d_PKCS8PrivateKey_nid_bio +#define i2d_ASN1_GENERALSTRING qcloudssl_i2d_ASN1_GENERALSTRING +#define ECDSA_sign_setup qcloudssl_ECDSA_sign_setup +#define EVP_PKEY_sign qcloudssl_EVP_PKEY_sign +#define ECDSA_verify qcloudssl_ECDSA_verify +#define X509_PURPOSE_get0_sname qcloudssl_X509_PURPOSE_get0_sname +#define d2i_SSL_SESSION qcloudssl_d2i_SSL_SESSION +#define asn1_get_field_ptr qcloudssl_asn1_get_field_ptr +#define SSL_CTX_set1_tls_channel_id qcloudssl_SSL_CTX_set1_tls_channel_id +#define X509_STORE_CTX_purpose_inherit qcloudssl_X509_STORE_CTX_purpose_inherit +#define BN_is_pow2 qcloudssl_BN_is_pow2 +#define EC_GROUP_set_generator qcloudssl_EC_GROUP_set_generator +#define X509_set_notBefore qcloudssl_X509_set_notBefore +#define ERR_clear_system_error qcloudssl_ERR_clear_system_error +#define EVP_DigestSignFinal qcloudssl_EVP_DigestSignFinal +#define PEM_write_bio_PKCS8PrivateKey_nid qcloudssl_PEM_write_bio_PKCS8PrivateKey_nid +#define d2i_X509_CRL_fp qcloudssl_d2i_X509_CRL_fp +#define SSL_set_cert_cb qcloudssl_SSL_set_cert_cb +#define ERR_func_error_string qcloudssl_ERR_func_error_string +#define RSA_generate_key_ex qcloudssl_RSA_generate_key_ex +#define X509_OBJECT_retrieve_match qcloudssl_X509_OBJECT_retrieve_match +#define ASN1_TIME_set qcloudssl_ASN1_TIME_set +#define RSA_public_decrypt qcloudssl_RSA_public_decrypt +#define BN_to_montgomery qcloudssl_BN_to_montgomery +#define RSA_blinding_on qcloudssl_RSA_blinding_on +#define d2i_ASN1_SET_ANY qcloudssl_d2i_ASN1_SET_ANY +#define BN_mul_word qcloudssl_BN_mul_word +#define PEM_write_X509_CRL qcloudssl_PEM_write_X509_CRL +#define MD5_Update qcloudssl_MD5_Update +#define CBB_add_bytes qcloudssl_CBB_add_bytes +#define SSL_CIPHER_is_aead qcloudssl_SSL_CIPHER_is_aead +#define BIO_new qcloudssl_BIO_new +#define CBB_did_write qcloudssl_CBB_did_write +#define SSL_CTX_get0_param qcloudssl_SSL_CTX_get0_param +#define BUF_strdup 
qcloudssl_BUF_strdup +#define i2d_SXNETID qcloudssl_i2d_SXNETID +#define X509_get_ext_d2i qcloudssl_X509_get_ext_d2i +#define SSL_SESSION_set_ex_data qcloudssl_SSL_SESSION_set_ex_data +#define SSL_CTX_sess_get_remove_cb qcloudssl_SSL_CTX_sess_get_remove_cb +#define EVP_DecryptFinal_ex qcloudssl_EVP_DecryptFinal_ex +#define ASN1_get_object qcloudssl_ASN1_get_object +#define BN_uadd qcloudssl_BN_uadd +#define AUTHORITY_INFO_ACCESS_free qcloudssl_AUTHORITY_INFO_ACCESS_free +#define BN_is_prime_ex qcloudssl_BN_is_prime_ex +#define X509_CRL_add_ext qcloudssl_X509_CRL_add_ext +#define PEM_write_ECPrivateKey qcloudssl_PEM_write_ECPrivateKey +#define X509_POLICY_NODE_print qcloudssl_X509_POLICY_NODE_print +#define BN_is_odd qcloudssl_BN_is_odd +#define SSL_use_RSAPrivateKey_ASN1 qcloudssl_SSL_use_RSAPrivateKey_ASN1 +#define X509_CRL_INFO_it qcloudssl_X509_CRL_INFO_it +#define BN_bn2bin qcloudssl_BN_bn2bin +#define i2d_ASN1_TIME qcloudssl_i2d_ASN1_TIME +#define CRYPTO_free_ex_data qcloudssl_CRYPTO_free_ex_data +#define X509_reject_clear qcloudssl_X509_reject_clear +#define CRYPTO_refcount_dec_and_test_zero qcloudssl_CRYPTO_refcount_dec_and_test_zero +#define X509V3_EXT_add_alias qcloudssl_X509V3_EXT_add_alias +#define EC_KEY_set_conv_form qcloudssl_EC_KEY_set_conv_form +#define X509V3_EXT_add qcloudssl_X509V3_EXT_add +#define BUF_MEM_grow qcloudssl_BUF_MEM_grow +#define PEM_write_RSAPrivateKey qcloudssl_PEM_write_RSAPrivateKey +#define BUF_strlcat qcloudssl_BUF_strlcat +#define PKCS12_PBE_add qcloudssl_PKCS12_PBE_add +#define v2i_GENERAL_NAME_ex qcloudssl_v2i_GENERAL_NAME_ex +#define kOpenSSLReasonStringData qcloudssl_kOpenSSLReasonStringData +#define METHOD_ref qcloudssl_METHOD_ref +#define SSL_CTX_get_verify_callback qcloudssl_SSL_CTX_get_verify_callback +#define X509_load_cert_file qcloudssl_X509_load_cert_file +#define EVP_PKEY_CTX_free qcloudssl_EVP_PKEY_CTX_free +#define SSL_early_callback_ctx_extension_get qcloudssl_SSL_early_callback_ctx_extension_get +#define 
X509_REQ_delete_attr qcloudssl_X509_REQ_delete_attr +#define PEM_read_RSA_PUBKEY qcloudssl_PEM_read_RSA_PUBKEY +#define X509_new qcloudssl_X509_new +#define policy_node_cmp_new qcloudssl_policy_node_cmp_new +#define bn_mod_inverse_secret_prime qcloudssl_bn_mod_inverse_secret_prime +#define SSL_set0_chain qcloudssl_SSL_set0_chain +#define RSA_marshal_private_key qcloudssl_RSA_marshal_private_key +#define X509_REVOKED_get_ext_d2i qcloudssl_X509_REVOKED_get_ext_d2i +#define X509_CRL_get_ext_count qcloudssl_X509_CRL_get_ext_count +#define SXNETID_it qcloudssl_SXNETID_it +#define i2d_ASN1_VISIBLESTRING qcloudssl_i2d_ASN1_VISIBLESTRING +#define BN_get_rfc3526_prime_1536 qcloudssl_BN_get_rfc3526_prime_1536 +#define SSL_CTX_set_mode qcloudssl_SSL_CTX_set_mode +#define X509_EXTENSION_it qcloudssl_X509_EXTENSION_it +#define ECDH_compute_key qcloudssl_ECDH_compute_key +#define SSL_set_tlsext_use_srtp qcloudssl_SSL_set_tlsext_use_srtp +#define EVP_des_ecb qcloudssl_EVP_des_ecb +#define RSA_PSS_PARAMS_free qcloudssl_RSA_PSS_PARAMS_free +#define x25519_ge_p3_to_cached qcloudssl_x25519_ge_p3_to_cached +#define CBS_get_optional_asn1_uint64 qcloudssl_CBS_get_optional_asn1_uint64 +#define NCONF_new qcloudssl_NCONF_new +#define MD4_Final qcloudssl_MD4_Final +#define CBB_finish_i2d qcloudssl_CBB_finish_i2d +#define ASN1_GENERALIZEDTIME_adj qcloudssl_ASN1_GENERALIZEDTIME_adj +#define SSL_SESSION_up_ref qcloudssl_SSL_SESSION_up_ref +#define OBJ_cbs2nid qcloudssl_OBJ_cbs2nid +#define X509_REQ_INFO_new qcloudssl_X509_REQ_INFO_new +#define POLICYINFO_new qcloudssl_POLICYINFO_new +#define BN_lshift qcloudssl_BN_lshift +#define SSL_set0_client_CAs qcloudssl_SSL_set0_client_CAs +#define DSA_get_ex_data qcloudssl_DSA_get_ex_data +#define SSL_get_mode qcloudssl_SSL_get_mode +#define BUF_MEM_new qcloudssl_BUF_MEM_new +#define TLSv1_1_client_method qcloudssl_TLSv1_1_client_method +#define X509V3_EXT_add_nconf_sk qcloudssl_X509V3_EXT_add_nconf_sk +#define EVP_MD_size qcloudssl_EVP_MD_size +#define 
PEM_write_bio_RSA_PUBKEY qcloudssl_PEM_write_bio_RSA_PUBKEY +#define d2i_ASN1_SEQUENCE_ANY qcloudssl_d2i_ASN1_SEQUENCE_ANY +#define ec_GFp_simple_dbl qcloudssl_ec_GFp_simple_dbl +#define EC_GROUP_get_order qcloudssl_EC_GROUP_get_order +#define X509V3_set_ctx qcloudssl_X509V3_set_ctx +#define ASN1_primitive_free qcloudssl_ASN1_primitive_free +#define EVP_CIPHER_iv_length qcloudssl_EVP_CIPHER_iv_length +#define PKCS8_parse_encrypted_private_key qcloudssl_PKCS8_parse_encrypted_private_key +#define RAND_bytes qcloudssl_RAND_bytes +#define PEM_write_bio_X509_REQ_NEW qcloudssl_PEM_write_bio_X509_REQ_NEW +#define X509_OBJECT_idx_by_subject qcloudssl_X509_OBJECT_idx_by_subject +#define X509V3_EXT_print qcloudssl_X509V3_EXT_print +#define d2i_ASN1_INTEGER qcloudssl_d2i_ASN1_INTEGER +#define CRYPTO_get_locking_callback qcloudssl_CRYPTO_get_locking_callback +#define BN_BLINDING_free qcloudssl_BN_BLINDING_free +#define BIO_set_mem_buf qcloudssl_BIO_set_mem_buf +#define ASN1_item_i2d qcloudssl_ASN1_item_i2d +#define i2d_X509_REQ_bio qcloudssl_i2d_X509_REQ_bio +#define EVP_PKEY_CTX_get0_pkey qcloudssl_EVP_PKEY_CTX_get0_pkey +#define SSL_get0_peer_certificates qcloudssl_SSL_get0_peer_certificates +#define X509_REQ_INFO_free qcloudssl_X509_REQ_INFO_free +#define SSL_CTX_sess_connect_good qcloudssl_SSL_CTX_sess_connect_good +#define SSL_generate_key_block qcloudssl_SSL_generate_key_block +#define BIO_should_io_special qcloudssl_BIO_should_io_special +#define SSL_get0_param qcloudssl_SSL_get0_param +#define TLSv1_2_method qcloudssl_TLSv1_2_method +#define ASN1_put_object qcloudssl_ASN1_put_object +#define SSL_CTX_add_session qcloudssl_SSL_CTX_add_session +#define X509_NAME_digest qcloudssl_X509_NAME_digest +#define ec_GFp_simple_group_get_degree qcloudssl_ec_GFp_simple_group_get_degree +#define NETSCAPE_SPKAC_new qcloudssl_NETSCAPE_SPKAC_new +#define DES_ecb_encrypt qcloudssl_DES_ecb_encrypt +#define ASN1_item_ex_free qcloudssl_ASN1_item_ex_free +#define BIO_s_file 
qcloudssl_BIO_s_file +#define i2a_ASN1_OBJECT qcloudssl_i2a_ASN1_OBJECT +#define BN_dec2bn qcloudssl_BN_dec2bn +#define DSA_SIG_marshal qcloudssl_DSA_SIG_marshal +#define POLICYQUALINFO_free qcloudssl_POLICYQUALINFO_free +#define EVP_sha256 qcloudssl_EVP_sha256 +#define SSL_CTX_set_early_data_enabled qcloudssl_SSL_CTX_set_early_data_enabled +#define SSL_use_PrivateKey_ASN1 qcloudssl_SSL_use_PrivateKey_ASN1 +#define i2d_DSA_PUBKEY qcloudssl_i2d_DSA_PUBKEY +#define DSA_parse_private_key qcloudssl_DSA_parse_private_key +#define SSL_enable_tls_channel_id qcloudssl_SSL_enable_tls_channel_id +#define RAND_status qcloudssl_RAND_status +#define asn1_enc_restore qcloudssl_asn1_enc_restore +#define HMAC qcloudssl_HMAC +#define X509_NAME_add_entry_by_txt qcloudssl_X509_NAME_add_entry_by_txt +#define lh_num_items qcloudssl_lh_num_items +#define CRYPTO_get_dynlock_lock_callback qcloudssl_CRYPTO_get_dynlock_lock_callback +#define x25519_ge_scalarmult_base qcloudssl_x25519_ge_scalarmult_base +#define EC_KEY_is_opaque qcloudssl_EC_KEY_is_opaque +#define NAME_CONSTRAINTS_free qcloudssl_NAME_CONSTRAINTS_free +#define bn_mod_exp_base_2_vartime qcloudssl_bn_mod_exp_base_2_vartime +#define SSL_SESSION_to_bytes qcloudssl_SSL_SESSION_to_bytes +#define BIO_vsnprintf qcloudssl_BIO_vsnprintf +#define ASN1_IA5STRING_new qcloudssl_ASN1_IA5STRING_new +#define d2i_PKCS8PrivateKey_fp qcloudssl_d2i_PKCS8PrivateKey_fp +#define X509_ATTRIBUTE_dup qcloudssl_X509_ATTRIBUTE_dup +#define NETSCAPE_SPKI_verify qcloudssl_NETSCAPE_SPKI_verify +#define X509_NAME_print_ex qcloudssl_X509_NAME_print_ex +#define v3_crld qcloudssl_v3_crld +#define DIST_POINT_free qcloudssl_DIST_POINT_free +#define o2i_ECPublicKey qcloudssl_o2i_ECPublicKey +#define POLICYINFO_it qcloudssl_POLICYINFO_it +#define BIO_should_write qcloudssl_BIO_should_write +#define X509_verify qcloudssl_X509_verify +#define SSL_set_tmp_rsa qcloudssl_SSL_set_tmp_rsa +#define SSL_CTX_use_certificate_file qcloudssl_SSL_CTX_use_certificate_file 
+#define EVP_get_cipherbyname qcloudssl_EVP_get_cipherbyname +#define EC_POINTs_make_affine qcloudssl_EC_POINTs_make_affine +#define SSL_CTX_set_tlsext_servername_callback qcloudssl_SSL_CTX_set_tlsext_servername_callback +#define EC_KEY_free qcloudssl_EC_KEY_free +#define X509V3_EXT_cleanup qcloudssl_X509V3_EXT_cleanup +#define SSL_version qcloudssl_SSL_version +#define d2i_ASN1_PRINTABLESTRING qcloudssl_d2i_ASN1_PRINTABLESTRING +#define d2i_ASN1_GENERALSTRING qcloudssl_d2i_ASN1_GENERALSTRING +#define EVP_DecodeUpdate qcloudssl_EVP_DecodeUpdate +#define X509_STORE_set_lookup_crls_cb qcloudssl_X509_STORE_set_lookup_crls_cb +#define EVP_aead_aes_256_cbc_sha1_tls_implicit_iv qcloudssl_EVP_aead_aes_256_cbc_sha1_tls_implicit_iv +#define asn1_utctime_to_tm qcloudssl_asn1_utctime_to_tm +#define X509_REVOKED_add_ext qcloudssl_X509_REVOKED_add_ext +#define X509v3_delete_ext qcloudssl_X509v3_delete_ext +#define d2i_DSA_PUBKEY qcloudssl_d2i_DSA_PUBKEY +#define EC_GROUP_free qcloudssl_EC_GROUP_free +#define SSL_SESSION_get_time qcloudssl_SSL_SESSION_get_time +#define SSL_add_client_CA qcloudssl_SSL_add_client_CA +#define SXNET_get_id_ulong qcloudssl_SXNET_get_id_ulong +#define GENERAL_NAME_new qcloudssl_GENERAL_NAME_new +#define EC_POINT_clear_free qcloudssl_EC_POINT_clear_free +#define ASN1_template_d2i qcloudssl_ASN1_template_d2i +#define PKCS8_PRIV_KEY_INFO_it qcloudssl_PKCS8_PRIV_KEY_INFO_it +#define SSL_do_handshake qcloudssl_SSL_do_handshake +#define EVP_PBE_scrypt qcloudssl_EVP_PBE_scrypt +#define RC4 qcloudssl_RC4 +#define ASN1_UTCTIME_cmp_time_t qcloudssl_ASN1_UTCTIME_cmp_time_t +#define DSA_size qcloudssl_DSA_size +#define bn_mul_add_words qcloudssl_bn_mul_add_words +#define SSL_get_pending_cipher qcloudssl_SSL_get_pending_cipher +#define X509_ALGOR_cmp qcloudssl_X509_ALGOR_cmp +#define X509_ALGOR_get0 qcloudssl_X509_ALGOR_get0 +#define AES_unwrap_key qcloudssl_AES_unwrap_key +#define i2d_PUBKEY_bio qcloudssl_i2d_PUBKEY_bio +#define BN_MONT_CTX_new 
qcloudssl_BN_MONT_CTX_new +#define RSA_private_transform qcloudssl_RSA_private_transform +#define OPENSSL_strnlen qcloudssl_OPENSSL_strnlen +#define CRYPTO_cfb128_1_encrypt qcloudssl_CRYPTO_cfb128_1_encrypt +#define PEM_read_X509_CRL qcloudssl_PEM_read_X509_CRL +#define EC_GROUP_new_curve_GFp qcloudssl_EC_GROUP_new_curve_GFp +#define X509_verify_cert qcloudssl_X509_verify_cert +#define d2i_DIST_POINT qcloudssl_d2i_DIST_POINT +#define SSL_CTX_get0_certificate qcloudssl_SSL_CTX_get0_certificate +#define d2i_RSAPrivateKey qcloudssl_d2i_RSAPrivateKey +#define X509_check_ip_asc qcloudssl_X509_check_ip_asc +#define PEM_write_EC_PUBKEY qcloudssl_PEM_write_EC_PUBKEY +#define PEM_read_bio_RSA_PUBKEY qcloudssl_PEM_read_bio_RSA_PUBKEY +#define i2d_RSAPrivateKey_bio qcloudssl_i2d_RSAPrivateKey_bio +#define EVP_EncryptInit_ex qcloudssl_EVP_EncryptInit_ex +#define x509_digest_sign_algorithm qcloudssl_x509_digest_sign_algorithm +#define BIO_set_fp qcloudssl_BIO_set_fp +#define SSL_CTX_set_options qcloudssl_SSL_CTX_set_options +#define EC_POINT_make_affine qcloudssl_EC_POINT_make_affine +#define i2d_PKCS8_PRIV_KEY_INFO_bio qcloudssl_i2d_PKCS8_PRIV_KEY_INFO_bio +#define X509_ATTRIBUTE_new qcloudssl_X509_ATTRIBUTE_new +#define X509at_add1_attr_by_OBJ qcloudssl_X509at_add1_attr_by_OBJ +#define DSA_check_signature qcloudssl_DSA_check_signature +#define X509_NAME_ENTRY_set_data qcloudssl_X509_NAME_ENTRY_set_data +#define OPENSSL_realloc_clean qcloudssl_OPENSSL_realloc_clean +#define X509_STORE_CTX_set_flags qcloudssl_X509_STORE_CTX_set_flags +#define POLICY_CONSTRAINTS_it qcloudssl_POLICY_CONSTRAINTS_it +#define X509_NAME_ENTRY_new qcloudssl_X509_NAME_ENTRY_new +#define ASN1_d2i_fp qcloudssl_ASN1_d2i_fp +#define X509_email_free qcloudssl_X509_email_free +#define d2i_CRL_DIST_POINTS qcloudssl_d2i_CRL_DIST_POINTS +#define X509_REQ_get_attr_by_NID qcloudssl_X509_REQ_get_attr_by_NID +#define BIO_do_connect qcloudssl_BIO_do_connect +#define EC_POINT_point2cbb qcloudssl_EC_POINT_point2cbb 
+#define SSL_SESSION_get0_peer qcloudssl_SSL_SESSION_get0_peer +#define X509_ATTRIBUTE_SET_it qcloudssl_X509_ATTRIBUTE_SET_it +#define EC_GROUP_get0_generator qcloudssl_EC_GROUP_get0_generator +#define TLS_client_method qcloudssl_TLS_client_method +#define SSL_get_ticket_age_skew qcloudssl_SSL_get_ticket_age_skew +#define EVP_PKEY_keygen_init qcloudssl_EVP_PKEY_keygen_init +#define DSA_do_verify qcloudssl_DSA_do_verify +#define TLSv1_method qcloudssl_TLSv1_method +#define EVP_DigestSignInit qcloudssl_EVP_DigestSignInit +#define ASN1_OBJECT_create qcloudssl_ASN1_OBJECT_create +#define EC_GFp_nistp224_method qcloudssl_EC_GFp_nistp224_method +#define EVP_DecodedLength qcloudssl_EVP_DecodedLength +#define EC_KEY_get0_public_key qcloudssl_EC_KEY_get0_public_key +#define BN_le2bn qcloudssl_BN_le2bn +#define d2i_POLICYQUALINFO qcloudssl_d2i_POLICYQUALINFO +#define SHA256_Transform qcloudssl_SHA256_Transform +#define SSL_set_verify qcloudssl_SSL_set_verify +#define SSL_get_session qcloudssl_SSL_get_session +#define ASN1_FBOOLEAN_it qcloudssl_ASN1_FBOOLEAN_it +#define EVP_PKEY_set1_EC_KEY qcloudssl_EVP_PKEY_set1_EC_KEY +#define d2i_ASN1_VISIBLESTRING qcloudssl_d2i_ASN1_VISIBLESTRING +#define PEM_ASN1_read_bio qcloudssl_PEM_ASN1_read_bio +#define md5_block_data_order qcloudssl_md5_block_data_order +#define BN_mod_mul qcloudssl_BN_mod_mul +#define d2i_DSA_SIG qcloudssl_d2i_DSA_SIG +#define SSL_set_fd qcloudssl_SSL_set_fd +#define ASN1_PRINTABLE_free qcloudssl_ASN1_PRINTABLE_free +#define X509_policy_tree_get0_level qcloudssl_X509_policy_tree_get0_level +#define ASN1_STRING_free qcloudssl_ASN1_STRING_free +#define X509_REQ_get_attr_count qcloudssl_X509_REQ_get_attr_count +#define SSL_get_version qcloudssl_SSL_get_version +#define SSL_CTX_sess_number qcloudssl_SSL_CTX_sess_number +#define RSA_default_method qcloudssl_RSA_default_method +#define X509_policy_tree_get0_policies qcloudssl_X509_policy_tree_get0_policies +#define BN_lshift1 qcloudssl_BN_lshift1 +#define 
X509_TRUST_add qcloudssl_X509_TRUST_add +#define X509_CRL_set_meth_data qcloudssl_X509_CRL_set_meth_data +#define TLS_with_buffers_method qcloudssl_TLS_with_buffers_method +#define d2i_OTHERNAME qcloudssl_d2i_OTHERNAME +#define string_to_hex qcloudssl_string_to_hex +#define SSL_CTX_sess_connect qcloudssl_SSL_CTX_sess_connect +#define v3_delta_crl qcloudssl_v3_delta_crl +#define X509_REVOKED_get_ext_by_critical qcloudssl_X509_REVOKED_get_ext_by_critical +#define d2i_DSAPrivateKey_bio qcloudssl_d2i_DSAPrivateKey_bio +#define bn_sub_words qcloudssl_bn_sub_words +#define i2d_RSAPublicKey_bio qcloudssl_i2d_RSAPublicKey_bio +#define EVP_marshal_public_key qcloudssl_EVP_marshal_public_key +#define bssl qcloudssl diff --git a/src/ios/BoringSSL.xcframework/ios-arm64_armv7/BoringSSL.framework/Info.plist b/src/ios/BoringSSL.xcframework/ios-arm64_armv7/BoringSSL.framework/Info.plist new file mode 100644 index 0000000..cc1a45b Binary files /dev/null and b/src/ios/BoringSSL.xcframework/ios-arm64_armv7/BoringSSL.framework/Info.plist differ diff --git a/src/ios/BoringSSL.xcframework/ios-arm64_armv7/BoringSSL.framework/Modules/module.modulemap b/src/ios/BoringSSL.xcframework/ios-arm64_armv7/BoringSSL.framework/Modules/module.modulemap new file mode 100644 index 0000000..228224b --- /dev/null +++ b/src/ios/BoringSSL.xcframework/ios-arm64_armv7/BoringSSL.framework/Modules/module.modulemap @@ -0,0 +1,6 @@ +framework module BoringSSL { + umbrella header "BoringSSL.h" + + export * + module * { export * } +} diff --git a/src/ios/CordovaEventKit.h b/src/ios/CordovaEventKit.h new file mode 100644 index 0000000..f743900 --- /dev/null +++ b/src/ios/CordovaEventKit.h @@ -0,0 +1,22 @@ +// +// CordovaEventKit.h +// shuto-cne +// +// Created by 范大德 on 2022/3/18. 
+// +#import + +#ifndef CordovaEventKit_h +#define CordovaEventKit_h + + +#endif /* CordovaEventKit_h */ +@interface CordovaEventKit +{} ++ (void)init: (CDVPlugin*)plugin; + ++ (void) fireEvent:(NSString*)event obj:(NSDictionary*) obj; + ++ (void) fireEvent:(NSString*) event msg:(NSString*) msg; + +@end diff --git a/src/ios/CordovaEventKit.m b/src/ios/CordovaEventKit.m new file mode 100644 index 0000000..4e09595 --- /dev/null +++ b/src/ios/CordovaEventKit.m @@ -0,0 +1,71 @@ +// +// CordovaEventKit.m +// 触发Cordova事件 +// +// Created by 范大德 on 2022/3/18. +// +#import + +#import +#import "CordovaEventKit.h" +@interface CordovaEventKit() +{} +@end +@implementation CordovaEventKit + +static CDVPlugin* cdvPlugin; + ++ (void)init: (CDVPlugin*)plugin{ + cdvPlugin = plugin; +} + ++ (void) fireEvent:(NSString*)event obj:(NSDictionary*) obj{ + NSString* jsonData = [self toJSON:obj]; + [CordovaEventKit fireEvent:event msg:jsonData]; +} + ++ (void) fireEvent:(NSString*) event msg:(NSString*) msg{ + NSLog(@"TRTC - CordovaEventKit::fireEvent --- event:%@,msg:%@",event,msg); + + if (cdvPlugin == nil || event == nil || msg == nil) { + NSLog(@"TRTC - CordovaEventKit::fireEvent --- cdvPlugin%@,event:%@,msg:%@",cdvPlugin,event,msg); + return; + } + event = [event stringByReplacingOccurrencesOfString:@"\\" withString:@"_"]; + + NSString* js = [[NSString alloc] initWithFormat:@"window.cordova.plugin.trtc.fireEvent('%@',%@)", event, msg]; + dispatch_async(dispatch_get_main_queue(), ^{ + [cdvPlugin.commandDelegate evalJs:js]; + }); +} + ++(NSString*) toJSON:(NSDictionary*)obj{ + NSError *error = nil; + NSData *jsonData = nil; + NSMutableDictionary *dict = [NSMutableDictionary dictionary]; + [obj enumerateKeysAndObjectsUsingBlock:^(id _Nonnull key, id _Nonnull obj, BOOL * _Nonnull stop) { + NSString *keyString = nil; + NSString *valueString = nil; + if ([key isKindOfClass:[NSString class]]) { + keyString = key; + }else{ + keyString = [NSString stringWithFormat:@"%@",key]; + } + + if ([obj 
isKindOfClass:[NSString class]]) { + valueString = obj; + }else{ + valueString = [NSString stringWithFormat:@"%@",obj]; + } + + [dict setObject:valueString forKey:keyString]; + }]; + jsonData = [NSJSONSerialization dataWithJSONObject:dict options:NSJSONWritingPrettyPrinted error:&error]; + if ([jsonData length] == 0 || error != nil) { + return nil; + } + NSString *jsonString = [[NSString alloc] initWithData:jsonData encoding:NSUTF8StringEncoding]; + return jsonString; +} + +@end diff --git a/src/ios/Events.h b/src/ios/Events.h new file mode 100644 index 0000000..da5ffd9 --- /dev/null +++ b/src/ios/Events.h @@ -0,0 +1,22 @@ +// +// Events.h +// shuto-cne +// +// Created by 范大德 on 2022/3/17. +// + +#ifndef Events_h +#define Events_h + + +#endif /* Events_h */ + +#import "Listener.h" + +@interface Events +{} ++ (void)fireEvent: (NSString*)event; ++ (void)fireEvent: (NSString*)event extra:(NSDictionary*)extra; ++ (void)addListener: (NSString*)event listener:(Listener*)listener; ++ (void)removeListener: (NSString*)event listener:(Listener*)listener; +@end diff --git a/src/ios/Events.m b/src/ios/Events.m new file mode 100644 index 0000000..577e683 --- /dev/null +++ b/src/ios/Events.m @@ -0,0 +1,62 @@ +// +// Events.m +// 插件中事件流转 +// +// Created by 范大德 on 2022/3/17. 
+// + +#import +#import "Events.h" +@interface Events() +{} +@end +@implementation Events + +static NSString* PREFIX = @"com.tencent.trtc.event"; +static NSDictionary *events = nil; + ++ (void)fireEvent: (NSString*)event{ + [Events fireEvent: nil]; +} ++ (void)fireEvent: (NSString*)event extra:(NSDictionary*)extra{ + [Events init]; + NSLog(@"TRTC - Events::fireEvent --- event:%@,extra:%@",event,extra); + NSMutableArray* listeners = [self getEventListener:event]; + if(listeners != nil && listeners.count > 0 ){ + for (Listener* listener in listeners) { + NSLog(@"TRTC - Events::fireEvent --- event:%@,listener:%@",event,listener); + [listener on:extra]; + } + } +} ++ (void)addListener: (NSString*)event listener:(Listener*)listener{ + [Events init]; + NSLog(@"TRTC - Events::addListener --- event:%@,listener:%@",event,listener); + NSMutableArray* listeners = [self getEventListener:event]; + if(![listeners containsObject:listener]){ + [listeners addObject:listener]; + } +} ++ (void)removeListener: (NSString*)event listener:(Listener*)listener{ + [Events init]; + NSLog(@"TRTC - Events::removeListener --- event:%@,listener:%@",event,listener); + NSMutableArray* listeners = [self getEventListener:event]; + if([listeners containsObject:listener]){ + [listeners removeObject:listener]; + } +} + ++ (void)init{ + if( events == nil){ + events = [NSDictionary new]; + } +} ++ (NSMutableArray*) getEventListener: (NSString*)event{ + NSString* key = [[NSString alloc] initWithFormat:@"%@%@", PREFIX, event ]; + if([events objectForKey:key] == nil){ + NSMutableArray* listeners = [NSMutableArray array]; + [events setValue:listeners forKey:key]; + } + return [events objectForKey:key]; +} +@end diff --git a/src/ios/FFmpeg.xcframework/Info.plist b/src/ios/FFmpeg.xcframework/Info.plist new file mode 100644 index 0000000..6e2fcd7 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/Info.plist @@ -0,0 +1,26 @@ + + + + + AvailableLibraries + + + LibraryIdentifier + ios-arm64_armv7 + LibraryPath + 
FFmpeg.framework + SupportedArchitectures + + arm64 + armv7 + + SupportedPlatform + ios + + + CFBundlePackageType + XFWK + XCFrameworkFormatVersion + 1.0 + + diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/FFmpeg b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/FFmpeg new file mode 100644 index 0000000..c6ec992 Binary files /dev/null and b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/FFmpeg differ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/avcodec.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/avcodec.h new file mode 100644 index 0000000..38bf1c0 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/avcodec.h @@ -0,0 +1,6333 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVCODEC_H +#define AVCODEC_AVCODEC_H + +/** + * @file + * @ingroup libavc + * Libavcodec external API header + */ + +#include +#include "libavutil/samplefmt.h" +#include "libavutil/attributes.h" +#include "libavutil/avutil.h" +#include "libavutil/buffer.h" +#include "libavutil/cpu.h" +#include "libavutil/channel_layout.h" +#include "libavutil/dict.h" +#include "libavutil/frame.h" +#include "libavutil/log.h" +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" + +#include "version.h" + +/** + * @defgroup libavc libavcodec + * Encoding/Decoding Library + * + * @{ + * + * @defgroup lavc_decoding Decoding + * @{ + * @} + * + * @defgroup lavc_encoding Encoding + * @{ + * @} + * + * @defgroup lavc_codec Codecs + * @{ + * @defgroup lavc_codec_native Native Codecs + * @{ + * @} + * @defgroup lavc_codec_wrappers External library wrappers + * @{ + * @} + * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge + * @{ + * @} + * @} + * @defgroup lavc_internal Internal + * @{ + * @} + * @} + */ + +/** + * @ingroup libavc + * @defgroup lavc_encdec send/receive encoding and decoding API overview + * @{ + * + * The avcodec_send_packet()/avcodec_receive_frame()/avcodec_send_frame()/ + * avcodec_receive_packet() functions provide an encode/decode API, which + * decouples input and output. + * + * The API is very similar for encoding/decoding and audio/video, and works as + * follows: + * - Set up and open the AVCodecContext as usual. + * - Send valid input: + * - For decoding, call avcodec_send_packet() to give the decoder raw + * compressed data in an AVPacket. + * - For encoding, call avcodec_send_frame() to give the encoder an AVFrame + * containing uncompressed audio or video. 
+ * In both cases, it is recommended that AVPackets and AVFrames are + * refcounted, or libavcodec might have to copy the input data. (libavformat + * always returns refcounted AVPackets, and av_frame_get_buffer() allocates + * refcounted AVFrames.) + * - Receive output in a loop. Periodically call one of the avcodec_receive_*() + * functions and process their output: + * - For decoding, call avcodec_receive_frame(). On success, it will return + * an AVFrame containing uncompressed audio or video data. + * - For encoding, call avcodec_receive_packet(). On success, it will return + * an AVPacket with a compressed frame. + * Repeat this call until it returns AVERROR(EAGAIN) or an error. The + * AVERROR(EAGAIN) return value means that new input data is required to + * return new output. In this case, continue with sending input. For each + * input frame/packet, the codec will typically return 1 output frame/packet, + * but it can also be 0 or more than 1. + * + * At the beginning of decoding or encoding, the codec might accept multiple + * input frames/packets without returning a frame, until its internal buffers + * are filled. This situation is handled transparently if you follow the steps + * outlined above. + * + * In theory, sending input can result in EAGAIN - this should happen only if + * not all output was received. You can use this to structure alternative decode + * or encode loops other than the one suggested above. For example, you could + * try sending new input on each iteration, and try to receive output if that + * returns EAGAIN. + * + * End of stream situations. These require "flushing" (aka draining) the codec, + * as the codec might buffer multiple frames or packets internally for + * performance or out of necessity (consider B-frames). + * This is handled as follows: + * - Instead of valid input, send NULL to the avcodec_send_packet() (decoding) + * or avcodec_send_frame() (encoding) functions. This will enter draining + * mode. 
+ * - Call avcodec_receive_frame() (decoding) or avcodec_receive_packet() + * (encoding) in a loop until AVERROR_EOF is returned. The functions will + * not return AVERROR(EAGAIN), unless you forgot to enter draining mode. + * - Before decoding can be resumed again, the codec has to be reset with + * avcodec_flush_buffers(). + * + * Using the API as outlined above is highly recommended. But it is also + * possible to call functions outside of this rigid schema. For example, you can + * call avcodec_send_packet() repeatedly without calling + * avcodec_receive_frame(). In this case, avcodec_send_packet() will succeed + * until the codec's internal buffer has been filled up (which is typically of + * size 1 per output frame, after initial input), and then reject input with + * AVERROR(EAGAIN). Once it starts rejecting input, you have no choice but to + * read at least some output. + * + * Not all codecs will follow a rigid and predictable dataflow; the only + * guarantee is that an AVERROR(EAGAIN) return value on a send/receive call on + * one end implies that a receive/send call on the other end will succeed, or + * at least will not fail with AVERROR(EAGAIN). In general, no codec will + * permit unlimited buffering of input or output. + * + * This API replaces the following legacy functions: + * - avcodec_decode_video2() and avcodec_decode_audio4(): + * Use avcodec_send_packet() to feed input to the decoder, then use + * avcodec_receive_frame() to receive decoded frames after each packet. + * Unlike with the old video decoding API, multiple frames might result from + * a packet. For audio, splitting the input packet into frames by partially + * decoding packets becomes transparent to the API user. You never need to + * feed an AVPacket to the API twice (unless it is rejected with AVERROR(EAGAIN) - then + * no data was read from the packet). + * Additionally, sending a flush/draining packet is required only once. 
+ * - avcodec_encode_video2()/avcodec_encode_audio2(): + * Use avcodec_send_frame() to feed input to the encoder, then use + * avcodec_receive_packet() to receive encoded packets. + * Providing user-allocated buffers for avcodec_receive_packet() is not + * possible. + * - The new API does not handle subtitles yet. + * + * Mixing new and old function calls on the same AVCodecContext is not allowed, + * and will result in undefined behavior. + * + * Some codecs might require using the new API; using the old API will return + * an error when calling it. All codecs support the new API. + * + * A codec is not allowed to return AVERROR(EAGAIN) for both sending and receiving. This + * would be an invalid state, which could put the codec user into an endless + * loop. The API has no concept of time either: it cannot happen that trying to + * do avcodec_send_packet() results in AVERROR(EAGAIN), but a repeated call 1 second + * later accepts the packet (with no other receive/flush API calls involved). + * The API is a strict state machine, and the passage of time is not supposed + * to influence it. Some timing-dependent behavior might still be deemed + * acceptable in certain cases. But it must never result in both send/receive + * returning EAGAIN at the same time at any point. It must also absolutely be + * avoided that the current state is "unstable" and can "flip-flop" between + * the send/receive APIs allowing progress. For example, it's not allowed that + * the codec randomly decides that it actually wants to consume a packet now + * instead of returning a frame, after it just returned AVERROR(EAGAIN) on an + * avcodec_send_packet() call. + * @} + */ + +/** + * @defgroup lavc_core Core functions/structures. + * @ingroup libavc + * + * Basic definitions, functions for querying libavcodec capabilities, + * allocating core structures, etc. + * @{ + */ + + +/** + * Identify the syntax and semantics of the bitstream. 
+ * The principle is roughly: + * Two decoders with the same ID can decode the same streams. + * Two encoders with the same ID can encode compatible streams. + * There may be slight deviations from the principle due to implementation + * details. + * + * If you add a codec ID to this list, add it so that + * 1. no value of an existing codec ID changes (that would break ABI), + * 2. it is as close as possible to similar codecs + * + * After adding new codec IDs, do not forget to add an entry to the codec + * descriptor list and bump libavcodec minor version. + */ +enum AVCodecID { + AV_CODEC_ID_NONE, + + /* video codecs */ + AV_CODEC_ID_MPEG1VIDEO, + AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding +#if FF_API_XVMC + AV_CODEC_ID_MPEG2VIDEO_XVMC, +#endif /* FF_API_XVMC */ + AV_CODEC_ID_H261, + AV_CODEC_ID_H263, + AV_CODEC_ID_RV10, + AV_CODEC_ID_RV20, + AV_CODEC_ID_MJPEG, + AV_CODEC_ID_MJPEGB, + AV_CODEC_ID_LJPEG, + AV_CODEC_ID_SP5X, + AV_CODEC_ID_JPEGLS, + AV_CODEC_ID_MPEG4, + AV_CODEC_ID_RAWVIDEO, + AV_CODEC_ID_MSMPEG4V1, + AV_CODEC_ID_MSMPEG4V2, + AV_CODEC_ID_MSMPEG4V3, + AV_CODEC_ID_WMV1, + AV_CODEC_ID_WMV2, + AV_CODEC_ID_H263P, + AV_CODEC_ID_H263I, + AV_CODEC_ID_FLV1, + AV_CODEC_ID_SVQ1, + AV_CODEC_ID_SVQ3, + AV_CODEC_ID_DVVIDEO, + AV_CODEC_ID_HUFFYUV, + AV_CODEC_ID_CYUV, + AV_CODEC_ID_H264, + AV_CODEC_ID_INDEO3, + AV_CODEC_ID_VP3, + AV_CODEC_ID_THEORA, + AV_CODEC_ID_ASV1, + AV_CODEC_ID_ASV2, + AV_CODEC_ID_FFV1, + AV_CODEC_ID_4XM, + AV_CODEC_ID_VCR1, + AV_CODEC_ID_CLJR, + AV_CODEC_ID_MDEC, + AV_CODEC_ID_ROQ, + AV_CODEC_ID_INTERPLAY_VIDEO, + AV_CODEC_ID_XAN_WC3, + AV_CODEC_ID_XAN_WC4, + AV_CODEC_ID_RPZA, + AV_CODEC_ID_CINEPAK, + AV_CODEC_ID_WS_VQA, + AV_CODEC_ID_MSRLE, + AV_CODEC_ID_MSVIDEO1, + AV_CODEC_ID_IDCIN, + AV_CODEC_ID_8BPS, + AV_CODEC_ID_SMC, + AV_CODEC_ID_FLIC, + AV_CODEC_ID_TRUEMOTION1, + AV_CODEC_ID_VMDVIDEO, + AV_CODEC_ID_MSZH, + AV_CODEC_ID_ZLIB, + AV_CODEC_ID_QTRLE, + AV_CODEC_ID_TSCC, + AV_CODEC_ID_ULTI, + AV_CODEC_ID_QDRAW, + 
AV_CODEC_ID_VIXL, + AV_CODEC_ID_QPEG, + AV_CODEC_ID_PNG, + AV_CODEC_ID_PPM, + AV_CODEC_ID_PBM, + AV_CODEC_ID_PGM, + AV_CODEC_ID_PGMYUV, + AV_CODEC_ID_PAM, + AV_CODEC_ID_FFVHUFF, + AV_CODEC_ID_RV30, + AV_CODEC_ID_RV40, + AV_CODEC_ID_VC1, + AV_CODEC_ID_WMV3, + AV_CODEC_ID_LOCO, + AV_CODEC_ID_WNV1, + AV_CODEC_ID_AASC, + AV_CODEC_ID_INDEO2, + AV_CODEC_ID_FRAPS, + AV_CODEC_ID_TRUEMOTION2, + AV_CODEC_ID_BMP, + AV_CODEC_ID_CSCD, + AV_CODEC_ID_MMVIDEO, + AV_CODEC_ID_ZMBV, + AV_CODEC_ID_AVS, + AV_CODEC_ID_SMACKVIDEO, + AV_CODEC_ID_NUV, + AV_CODEC_ID_KMVC, + AV_CODEC_ID_FLASHSV, + AV_CODEC_ID_CAVS, + AV_CODEC_ID_JPEG2000, + AV_CODEC_ID_VMNC, + AV_CODEC_ID_VP5, + AV_CODEC_ID_VP6, + AV_CODEC_ID_VP6F, + AV_CODEC_ID_TARGA, + AV_CODEC_ID_DSICINVIDEO, + AV_CODEC_ID_TIERTEXSEQVIDEO, + AV_CODEC_ID_TIFF, + AV_CODEC_ID_GIF, + AV_CODEC_ID_DXA, + AV_CODEC_ID_DNXHD, + AV_CODEC_ID_THP, + AV_CODEC_ID_SGI, + AV_CODEC_ID_C93, + AV_CODEC_ID_BETHSOFTVID, + AV_CODEC_ID_PTX, + AV_CODEC_ID_TXD, + AV_CODEC_ID_VP6A, + AV_CODEC_ID_AMV, + AV_CODEC_ID_VB, + AV_CODEC_ID_PCX, + AV_CODEC_ID_SUNRAST, + AV_CODEC_ID_INDEO4, + AV_CODEC_ID_INDEO5, + AV_CODEC_ID_MIMIC, + AV_CODEC_ID_RL2, + AV_CODEC_ID_ESCAPE124, + AV_CODEC_ID_DIRAC, + AV_CODEC_ID_BFI, + AV_CODEC_ID_CMV, + AV_CODEC_ID_MOTIONPIXELS, + AV_CODEC_ID_TGV, + AV_CODEC_ID_TGQ, + AV_CODEC_ID_TQI, + AV_CODEC_ID_AURA, + AV_CODEC_ID_AURA2, + AV_CODEC_ID_V210X, + AV_CODEC_ID_TMV, + AV_CODEC_ID_V210, + AV_CODEC_ID_DPX, + AV_CODEC_ID_MAD, + AV_CODEC_ID_FRWU, + AV_CODEC_ID_FLASHSV2, + AV_CODEC_ID_CDGRAPHICS, + AV_CODEC_ID_R210, + AV_CODEC_ID_ANM, + AV_CODEC_ID_BINKVIDEO, + AV_CODEC_ID_IFF_ILBM, +#define AV_CODEC_ID_IFF_BYTERUN1 AV_CODEC_ID_IFF_ILBM + AV_CODEC_ID_KGV1, + AV_CODEC_ID_YOP, + AV_CODEC_ID_VP8, + AV_CODEC_ID_PICTOR, + AV_CODEC_ID_ANSI, + AV_CODEC_ID_A64_MULTI, + AV_CODEC_ID_A64_MULTI5, + AV_CODEC_ID_R10K, + AV_CODEC_ID_MXPEG, + AV_CODEC_ID_LAGARITH, + AV_CODEC_ID_PRORES, + AV_CODEC_ID_JV, + AV_CODEC_ID_DFA, + AV_CODEC_ID_WMV3IMAGE, + 
AV_CODEC_ID_VC1IMAGE, + AV_CODEC_ID_UTVIDEO, + AV_CODEC_ID_BMV_VIDEO, + AV_CODEC_ID_VBLE, + AV_CODEC_ID_DXTORY, + AV_CODEC_ID_V410, + AV_CODEC_ID_XWD, + AV_CODEC_ID_CDXL, + AV_CODEC_ID_XBM, + AV_CODEC_ID_ZEROCODEC, + AV_CODEC_ID_MSS1, + AV_CODEC_ID_MSA1, + AV_CODEC_ID_TSCC2, + AV_CODEC_ID_MTS2, + AV_CODEC_ID_CLLC, + AV_CODEC_ID_MSS2, + AV_CODEC_ID_VP9, + AV_CODEC_ID_AIC, + AV_CODEC_ID_ESCAPE130, + AV_CODEC_ID_G2M, + AV_CODEC_ID_WEBP, + AV_CODEC_ID_HNM4_VIDEO, + AV_CODEC_ID_HEVC, +#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC + AV_CODEC_ID_FIC, + AV_CODEC_ID_ALIAS_PIX, + AV_CODEC_ID_BRENDER_PIX, + AV_CODEC_ID_PAF_VIDEO, + AV_CODEC_ID_EXR, + AV_CODEC_ID_VP7, + AV_CODEC_ID_SANM, + AV_CODEC_ID_SGIRLE, + AV_CODEC_ID_MVC1, + AV_CODEC_ID_MVC2, + AV_CODEC_ID_HQX, + AV_CODEC_ID_TDSC, + AV_CODEC_ID_HQ_HQA, + AV_CODEC_ID_HAP, + AV_CODEC_ID_DDS, + AV_CODEC_ID_DXV, + AV_CODEC_ID_SCREENPRESSO, + AV_CODEC_ID_RSCC, + + AV_CODEC_ID_Y41P = 0x8000, + AV_CODEC_ID_AVRP, + AV_CODEC_ID_012V, + AV_CODEC_ID_AVUI, + AV_CODEC_ID_AYUV, + AV_CODEC_ID_TARGA_Y216, + AV_CODEC_ID_V308, + AV_CODEC_ID_V408, + AV_CODEC_ID_YUV4, + AV_CODEC_ID_AVRN, + AV_CODEC_ID_CPIA, + AV_CODEC_ID_XFACE, + AV_CODEC_ID_SNOW, + AV_CODEC_ID_SMVJPEG, + AV_CODEC_ID_APNG, + AV_CODEC_ID_DAALA, + AV_CODEC_ID_CFHD, + AV_CODEC_ID_TRUEMOTION2RT, + AV_CODEC_ID_M101, + AV_CODEC_ID_MAGICYUV, + AV_CODEC_ID_SHEERVIDEO, + AV_CODEC_ID_YLC, + AV_CODEC_ID_PSD, + AV_CODEC_ID_PIXLET, + AV_CODEC_ID_SPEEDHQ, + AV_CODEC_ID_FMVC, + AV_CODEC_ID_SCPR, + AV_CODEC_ID_CLEARVIDEO, + AV_CODEC_ID_XPM, + AV_CODEC_ID_AV1, + + /* various PCM "codecs" */ + AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs + AV_CODEC_ID_PCM_S16LE = 0x10000, + AV_CODEC_ID_PCM_S16BE, + AV_CODEC_ID_PCM_U16LE, + AV_CODEC_ID_PCM_U16BE, + AV_CODEC_ID_PCM_S8, + AV_CODEC_ID_PCM_U8, + AV_CODEC_ID_PCM_MULAW, + AV_CODEC_ID_PCM_ALAW, + AV_CODEC_ID_PCM_S32LE, + AV_CODEC_ID_PCM_S32BE, + AV_CODEC_ID_PCM_U32LE, + AV_CODEC_ID_PCM_U32BE, + 
AV_CODEC_ID_PCM_S24LE, + AV_CODEC_ID_PCM_S24BE, + AV_CODEC_ID_PCM_U24LE, + AV_CODEC_ID_PCM_U24BE, + AV_CODEC_ID_PCM_S24DAUD, + AV_CODEC_ID_PCM_ZORK, + AV_CODEC_ID_PCM_S16LE_PLANAR, + AV_CODEC_ID_PCM_DVD, + AV_CODEC_ID_PCM_F32BE, + AV_CODEC_ID_PCM_F32LE, + AV_CODEC_ID_PCM_F64BE, + AV_CODEC_ID_PCM_F64LE, + AV_CODEC_ID_PCM_BLURAY, + AV_CODEC_ID_PCM_LXF, + AV_CODEC_ID_S302M, + AV_CODEC_ID_PCM_S8_PLANAR, + AV_CODEC_ID_PCM_S24LE_PLANAR, + AV_CODEC_ID_PCM_S32LE_PLANAR, + AV_CODEC_ID_PCM_S16BE_PLANAR, + + AV_CODEC_ID_PCM_S64LE = 0x10800, + AV_CODEC_ID_PCM_S64BE, + AV_CODEC_ID_PCM_F16LE, + AV_CODEC_ID_PCM_F24LE, + + /* various ADPCM codecs */ + AV_CODEC_ID_ADPCM_IMA_QT = 0x11000, + AV_CODEC_ID_ADPCM_IMA_WAV, + AV_CODEC_ID_ADPCM_IMA_DK3, + AV_CODEC_ID_ADPCM_IMA_DK4, + AV_CODEC_ID_ADPCM_IMA_WS, + AV_CODEC_ID_ADPCM_IMA_SMJPEG, + AV_CODEC_ID_ADPCM_MS, + AV_CODEC_ID_ADPCM_4XM, + AV_CODEC_ID_ADPCM_XA, + AV_CODEC_ID_ADPCM_ADX, + AV_CODEC_ID_ADPCM_EA, + AV_CODEC_ID_ADPCM_G726, + AV_CODEC_ID_ADPCM_CT, + AV_CODEC_ID_ADPCM_SWF, + AV_CODEC_ID_ADPCM_YAMAHA, + AV_CODEC_ID_ADPCM_SBPRO_4, + AV_CODEC_ID_ADPCM_SBPRO_3, + AV_CODEC_ID_ADPCM_SBPRO_2, + AV_CODEC_ID_ADPCM_THP, + AV_CODEC_ID_ADPCM_IMA_AMV, + AV_CODEC_ID_ADPCM_EA_R1, + AV_CODEC_ID_ADPCM_EA_R3, + AV_CODEC_ID_ADPCM_EA_R2, + AV_CODEC_ID_ADPCM_IMA_EA_SEAD, + AV_CODEC_ID_ADPCM_IMA_EA_EACS, + AV_CODEC_ID_ADPCM_EA_XAS, + AV_CODEC_ID_ADPCM_EA_MAXIS_XA, + AV_CODEC_ID_ADPCM_IMA_ISS, + AV_CODEC_ID_ADPCM_G722, + AV_CODEC_ID_ADPCM_IMA_APC, + AV_CODEC_ID_ADPCM_VIMA, +#if FF_API_VIMA_DECODER + AV_CODEC_ID_VIMA = AV_CODEC_ID_ADPCM_VIMA, +#endif + + AV_CODEC_ID_ADPCM_AFC = 0x11800, + AV_CODEC_ID_ADPCM_IMA_OKI, + AV_CODEC_ID_ADPCM_DTK, + AV_CODEC_ID_ADPCM_IMA_RAD, + AV_CODEC_ID_ADPCM_G726LE, + AV_CODEC_ID_ADPCM_THP_LE, + AV_CODEC_ID_ADPCM_PSX, + AV_CODEC_ID_ADPCM_AICA, + AV_CODEC_ID_ADPCM_IMA_DAT4, + AV_CODEC_ID_ADPCM_MTAF, + + /* AMR */ + AV_CODEC_ID_AMR_NB = 0x12000, + AV_CODEC_ID_AMR_WB, + + /* RealAudio codecs*/ + AV_CODEC_ID_RA_144 = 0x13000, + 
AV_CODEC_ID_RA_288, + + /* various DPCM codecs */ + AV_CODEC_ID_ROQ_DPCM = 0x14000, + AV_CODEC_ID_INTERPLAY_DPCM, + AV_CODEC_ID_XAN_DPCM, + AV_CODEC_ID_SOL_DPCM, + + AV_CODEC_ID_SDX2_DPCM = 0x14800, + + /* audio codecs */ + AV_CODEC_ID_MP2 = 0x15000, + AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3 + AV_CODEC_ID_AAC, + AV_CODEC_ID_AC3, + AV_CODEC_ID_DTS, + AV_CODEC_ID_VORBIS, + AV_CODEC_ID_DVAUDIO, + AV_CODEC_ID_WMAV1, + AV_CODEC_ID_WMAV2, + AV_CODEC_ID_MACE3, + AV_CODEC_ID_MACE6, + AV_CODEC_ID_VMDAUDIO, + AV_CODEC_ID_FLAC, + AV_CODEC_ID_MP3ADU, + AV_CODEC_ID_MP3ON4, + AV_CODEC_ID_SHORTEN, + AV_CODEC_ID_ALAC, + AV_CODEC_ID_WESTWOOD_SND1, + AV_CODEC_ID_GSM, ///< as in Berlin toast format + AV_CODEC_ID_QDM2, + AV_CODEC_ID_COOK, + AV_CODEC_ID_TRUESPEECH, + AV_CODEC_ID_TTA, + AV_CODEC_ID_SMACKAUDIO, + AV_CODEC_ID_QCELP, + AV_CODEC_ID_WAVPACK, + AV_CODEC_ID_DSICINAUDIO, + AV_CODEC_ID_IMC, + AV_CODEC_ID_MUSEPACK7, + AV_CODEC_ID_MLP, + AV_CODEC_ID_GSM_MS, /* as found in WAV */ + AV_CODEC_ID_ATRAC3, +#if FF_API_VOXWARE + AV_CODEC_ID_VOXWARE, +#endif + AV_CODEC_ID_APE, + AV_CODEC_ID_NELLYMOSER, + AV_CODEC_ID_MUSEPACK8, + AV_CODEC_ID_SPEEX, + AV_CODEC_ID_WMAVOICE, + AV_CODEC_ID_WMAPRO, + AV_CODEC_ID_WMALOSSLESS, + AV_CODEC_ID_ATRAC3P, + AV_CODEC_ID_EAC3, + AV_CODEC_ID_SIPR, + AV_CODEC_ID_MP1, + AV_CODEC_ID_TWINVQ, + AV_CODEC_ID_TRUEHD, + AV_CODEC_ID_MP4ALS, + AV_CODEC_ID_ATRAC1, + AV_CODEC_ID_BINKAUDIO_RDFT, + AV_CODEC_ID_BINKAUDIO_DCT, + AV_CODEC_ID_AAC_LATM, + AV_CODEC_ID_QDMC, + AV_CODEC_ID_CELT, + AV_CODEC_ID_G723_1, + AV_CODEC_ID_G729, + AV_CODEC_ID_8SVX_EXP, + AV_CODEC_ID_8SVX_FIB, + AV_CODEC_ID_BMV_AUDIO, + AV_CODEC_ID_RALF, + AV_CODEC_ID_IAC, + AV_CODEC_ID_ILBC, + AV_CODEC_ID_OPUS, + AV_CODEC_ID_COMFORT_NOISE, + AV_CODEC_ID_TAK, + AV_CODEC_ID_METASOUND, + AV_CODEC_ID_PAF_AUDIO, + AV_CODEC_ID_ON2AVC, + AV_CODEC_ID_DSS_SP, + + AV_CODEC_ID_FFWAVESYNTH = 0x15800, + AV_CODEC_ID_SONIC, + AV_CODEC_ID_SONIC_LS, + AV_CODEC_ID_EVRC, + 
AV_CODEC_ID_SMV, + AV_CODEC_ID_DSD_LSBF, + AV_CODEC_ID_DSD_MSBF, + AV_CODEC_ID_DSD_LSBF_PLANAR, + AV_CODEC_ID_DSD_MSBF_PLANAR, + AV_CODEC_ID_4GV, + AV_CODEC_ID_INTERPLAY_ACM, + AV_CODEC_ID_XMA1, + AV_CODEC_ID_XMA2, + AV_CODEC_ID_DST, + AV_CODEC_ID_ATRAC3AL, + AV_CODEC_ID_ATRAC3PAL, + + /* subtitle codecs */ + AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. + AV_CODEC_ID_DVD_SUBTITLE = 0x17000, + AV_CODEC_ID_DVB_SUBTITLE, + AV_CODEC_ID_TEXT, ///< raw UTF-8 text + AV_CODEC_ID_XSUB, + AV_CODEC_ID_SSA, + AV_CODEC_ID_MOV_TEXT, + AV_CODEC_ID_HDMV_PGS_SUBTITLE, + AV_CODEC_ID_DVB_TELETEXT, + AV_CODEC_ID_SRT, + + AV_CODEC_ID_MICRODVD = 0x17800, + AV_CODEC_ID_EIA_608, + AV_CODEC_ID_JACOSUB, + AV_CODEC_ID_SAMI, + AV_CODEC_ID_REALTEXT, + AV_CODEC_ID_STL, + AV_CODEC_ID_SUBVIEWER1, + AV_CODEC_ID_SUBVIEWER, + AV_CODEC_ID_SUBRIP, + AV_CODEC_ID_WEBVTT, + AV_CODEC_ID_MPL2, + AV_CODEC_ID_VPLAYER, + AV_CODEC_ID_PJS, + AV_CODEC_ID_ASS, + AV_CODEC_ID_HDMV_TEXT_SUBTITLE, + + /* other specific kind of codecs (generally used for attachments) */ + AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs. + AV_CODEC_ID_TTF = 0x18000, + + AV_CODEC_ID_SCTE_35, ///< Contain timestamp estimated through PCR of program stream. + AV_CODEC_ID_BINTEXT = 0x18800, + AV_CODEC_ID_XBIN, + AV_CODEC_ID_IDF, + AV_CODEC_ID_OTF, + AV_CODEC_ID_SMPTE_KLV, + AV_CODEC_ID_DVD_NAV, + AV_CODEC_ID_TIMED_ID3, + AV_CODEC_ID_BIN_DATA, + + + AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it + + AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS + * stream (only used by libavformat) */ + AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems + * stream (only used by libavformat) */ + AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information. 
+ AV_CODEC_ID_WRAPPED_AVFRAME = 0x21001, ///< Passthrough codec, AVFrames wrapped in AVPacket +}; + +/** + * This struct describes the properties of a single codec described by an + * AVCodecID. + * @see avcodec_descriptor_get() + */ +typedef struct AVCodecDescriptor { + enum AVCodecID id; + enum AVMediaType type; + /** + * Name of the codec described by this descriptor. It is non-empty and + * unique for each codec descriptor. It should contain alphanumeric + * characters and '_' only. + */ + const char *name; + /** + * A more descriptive name for this codec. May be NULL. + */ + const char *long_name; + /** + * Codec properties, a combination of AV_CODEC_PROP_* flags. + */ + int props; + /** + * MIME type(s) associated with the codec. + * May be NULL; if not, a NULL-terminated array of MIME types. + * The first item is always non-NULL and is the preferred MIME type. + */ + const char *const *mime_types; + /** + * If non-NULL, an array of profiles recognized for this codec. + * Terminated with FF_PROFILE_UNKNOWN. + */ + const struct AVProfile *profiles; +} AVCodecDescriptor; + +/** + * Codec uses only intra compression. + * Video codecs only. + */ +#define AV_CODEC_PROP_INTRA_ONLY (1 << 0) +/** + * Codec supports lossy compression. Audio and video codecs only. + * @note a codec may support both lossy and lossless + * compression modes + */ +#define AV_CODEC_PROP_LOSSY (1 << 1) +/** + * Codec supports lossless compression. Audio and video codecs only. + */ +#define AV_CODEC_PROP_LOSSLESS (1 << 2) +/** + * Codec supports frame reordering. That is, the coded order (the order in which + * the encoded packets are output by the encoders / stored / input to the + * decoders) may be different from the presentation order of the corresponding + * frames. + * + * For codecs that do not have this property set, PTS and DTS should always be + * equal. 
+ */ +#define AV_CODEC_PROP_REORDER (1 << 3) +/** + * Subtitle codec is bitmap based + * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field. + */ +#define AV_CODEC_PROP_BITMAP_SUB (1 << 16) +/** + * Subtitle codec is text based. + * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field. + */ +#define AV_CODEC_PROP_TEXT_SUB (1 << 17) + +/** + * @ingroup lavc_decoding + * Required number of additionally allocated bytes at the end of the input bitstream for decoding. + * This is mainly needed because some optimized bitstream readers read + * 32 or 64 bit at once and could read over the end.
+ * Note: If the first 23 bits of the additional bytes are not 0, then damaged + * MPEG bitstreams could cause overread and segfault. + */ +#define AV_INPUT_BUFFER_PADDING_SIZE 32 + +/** + * @ingroup lavc_encoding + * minimum encoding buffer size + * Used to avoid some checks during header writing. + */ +#define AV_INPUT_BUFFER_MIN_SIZE 16384 + +#if FF_API_WITHOUT_PREFIX +/** + * @deprecated use AV_INPUT_BUFFER_PADDING_SIZE instead + */ +#define FF_INPUT_BUFFER_PADDING_SIZE 32 + +/** + * @deprecated use AV_INPUT_BUFFER_MIN_SIZE instead + */ +#define FF_MIN_BUFFER_SIZE 16384 +#endif /* FF_API_WITHOUT_PREFIX */ + +/** + * @ingroup lavc_encoding + * motion estimation type. + * @deprecated use codec private option instead + */ +#if FF_API_MOTION_EST +enum Motion_Est_ID { + ME_ZERO = 1, ///< no search, that is use 0,0 vector whenever one is needed + ME_FULL, + ME_LOG, + ME_PHODS, + ME_EPZS, ///< enhanced predictive zonal search + ME_X1, ///< reserved for experiments + ME_HEX, ///< hexagon based search + ME_UMH, ///< uneven multi-hexagon search + ME_TESA, ///< transformed exhaustive search algorithm + ME_ITER=50, ///< iterative search +}; +#endif + +/** + * @ingroup lavc_decoding + */ +enum AVDiscard{ + /* We leave some space between them for extensions (drop some + * keyframes for intra-only or drop just some bidir frames). 
*/ + AVDISCARD_NONE =-16, ///< discard nothing + AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi + AVDISCARD_NONREF = 8, ///< discard all non reference + AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames + AVDISCARD_NONINTRA= 24, ///< discard all non intra frames + AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes + AVDISCARD_ALL = 48, ///< discard all +}; + +enum AVAudioServiceType { + AV_AUDIO_SERVICE_TYPE_MAIN = 0, + AV_AUDIO_SERVICE_TYPE_EFFECTS = 1, + AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2, + AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3, + AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4, + AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5, + AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6, + AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7, + AV_AUDIO_SERVICE_TYPE_KARAOKE = 8, + AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI +}; + +/** + * @ingroup lavc_encoding + */ +typedef struct RcOverride{ + int start_frame; + int end_frame; + int qscale; // If this is 0 then quality_factor will be used instead. + float quality_factor; +} RcOverride; + +#if FF_API_MAX_BFRAMES +/** + * @deprecated there is no libavcodec-wide limit on the number of B-frames + */ +#define FF_MAX_B_FRAMES 16 +#endif + +/* encoding support + These flags can be passed in AVCodecContext.flags before initialization. + Note: Not everything is supported yet. +*/ + +/** + * Allow decoders to produce frames with data planes that are not aligned + * to CPU requirements (e.g. due to cropping). + */ +#define AV_CODEC_FLAG_UNALIGNED (1 << 0) +/** + * Use fixed qscale. + */ +#define AV_CODEC_FLAG_QSCALE (1 << 1) +/** + * 4 MV per MB allowed / advanced prediction for H.263. + */ +#define AV_CODEC_FLAG_4MV (1 << 2) +/** + * Output even those frames that might be corrupted. + */ +#define AV_CODEC_FLAG_OUTPUT_CORRUPT (1 << 3) +/** + * Use qpel MC. + */ +#define AV_CODEC_FLAG_QPEL (1 << 4) +/** + * Use internal 2pass ratecontrol in first pass mode. 
+ */ +#define AV_CODEC_FLAG_PASS1 (1 << 9) +/** + * Use internal 2pass ratecontrol in second pass mode. + */ +#define AV_CODEC_FLAG_PASS2 (1 << 10) +/** + * loop filter. + */ +#define AV_CODEC_FLAG_LOOP_FILTER (1 << 11) +/** + * Only decode/encode grayscale. + */ +#define AV_CODEC_FLAG_GRAY (1 << 13) +/** + * error[?] variables will be set during encoding. + */ +#define AV_CODEC_FLAG_PSNR (1 << 15) +/** + * Input bitstream might be truncated at a random location + * instead of only at frame boundaries. + */ +#define AV_CODEC_FLAG_TRUNCATED (1 << 16) +/** + * Use interlaced DCT. + */ +#define AV_CODEC_FLAG_INTERLACED_DCT (1 << 18) +/** + * Force low delay. + */ +#define AV_CODEC_FLAG_LOW_DELAY (1 << 19) +/** + * Place global headers in extradata instead of every keyframe. + */ +#define AV_CODEC_FLAG_GLOBAL_HEADER (1 << 22) +/** + * Use only bitexact stuff (except (I)DCT). + */ +#define AV_CODEC_FLAG_BITEXACT (1 << 23) +/* Fx : Flag for H.263+ extra options */ +/** + * H.263 advanced intra coding / MPEG-4 AC prediction + */ +#define AV_CODEC_FLAG_AC_PRED (1 << 24) +/** + * interlaced motion estimation + */ +#define AV_CODEC_FLAG_INTERLACED_ME (1 << 29) +#define AV_CODEC_FLAG_CLOSED_GOP (1U << 31) + +/** + * Allow non spec compliant speedup tricks. + */ +#define AV_CODEC_FLAG2_FAST (1 << 0) +/** + * Skip bitstream encoding. + */ +#define AV_CODEC_FLAG2_NO_OUTPUT (1 << 2) +/** + * Place global headers at every keyframe instead of in extradata. + */ +#define AV_CODEC_FLAG2_LOCAL_HEADER (1 << 3) + +/** + * timecode is in drop frame format. DEPRECATED!!!! + */ +#define AV_CODEC_FLAG2_DROP_FRAME_TIMECODE (1 << 13) + +/** + * Input bitstream might be truncated at a packet boundaries + * instead of only at frame boundaries. + */ +#define AV_CODEC_FLAG2_CHUNKS (1 << 15) +/** + * Discard cropping information from SPS. 
+ */ +#define AV_CODEC_FLAG2_IGNORE_CROP (1 << 16) + +/** + * Show all frames before the first keyframe + */ +#define AV_CODEC_FLAG2_SHOW_ALL (1 << 22) +/** + * Export motion vectors through frame side data + */ +#define AV_CODEC_FLAG2_EXPORT_MVS (1 << 28) +/** + * Do not skip samples and export skip information as frame side data + */ +#define AV_CODEC_FLAG2_SKIP_MANUAL (1 << 29) +/** + * Do not reset ASS ReadOrder field on flush (subtitles decoding) + */ +#define AV_CODEC_FLAG2_RO_FLUSH_NOOP (1 << 30) + +/* Unsupported options : + * Syntax Arithmetic coding (SAC) + * Reference Picture Selection + * Independent Segment Decoding */ +/* /Fx */ +/* codec capabilities */ + +/** + * Decoder can use draw_horiz_band callback. + */ +#define AV_CODEC_CAP_DRAW_HORIZ_BAND (1 << 0) +/** + * Codec uses get_buffer() for allocating buffers and supports custom allocators. + * If not set, it might not use get_buffer() at all or use operations that + * assume the buffer was allocated by avcodec_default_get_buffer. + */ +#define AV_CODEC_CAP_DR1 (1 << 1) +#define AV_CODEC_CAP_TRUNCATED (1 << 3) +/** + * Encoder or decoder requires flushing with NULL input at the end in order to + * give the complete and correct output. + * + * NOTE: If this flag is not set, the codec is guaranteed to never be fed with + * with NULL data. The user can still send NULL data to the public encode + * or decode function, but libavcodec will not pass it along to the codec + * unless this flag is set. + * + * Decoders: + * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to get the delayed data until the decoder no longer + * returns frames. + * + * Encoders: + * The encoder needs to be fed with NULL data at the end of encoding until the + * encoder no longer returns data. + * + * NOTE: For encoders implementing the AVCodec.encode2() function, setting this + * flag also means that the encoder must set the pts and duration for + * each output packet. 
If this flag is not set, the pts and duration will + * be determined by libavcodec from the input frame. + */ +#define AV_CODEC_CAP_DELAY (1 << 5) +/** + * Codec can be fed a final frame with a smaller size. + * This can be used to prevent truncation of the last audio samples. + */ +#define AV_CODEC_CAP_SMALL_LAST_FRAME (1 << 6) + +#if FF_API_CAP_VDPAU +/** + * Codec can export data for HW decoding (VDPAU). + */ +#define AV_CODEC_CAP_HWACCEL_VDPAU (1 << 7) +#endif + +/** + * Codec can output multiple frames per AVPacket + * Normally demuxers return one frame at a time, demuxers which do not do + * are connected to a parser to split what they return into proper frames. + * This flag is reserved to the very rare category of codecs which have a + * bitstream that cannot be split into frames without timeconsuming + * operations like full decoding. Demuxers carrying such bitstreams thus + * may return multiple frames in a packet. This has many disadvantages like + * prohibiting stream copy in many cases thus it should only be considered + * as a last resort. + */ +#define AV_CODEC_CAP_SUBFRAMES (1 << 8) +/** + * Codec is experimental and is thus avoided in favor of non experimental + * encoders + */ +#define AV_CODEC_CAP_EXPERIMENTAL (1 << 9) +/** + * Codec should fill in channel configuration and samplerate instead of container + */ +#define AV_CODEC_CAP_CHANNEL_CONF (1 << 10) +/** + * Codec supports frame-level multithreading. + */ +#define AV_CODEC_CAP_FRAME_THREADS (1 << 12) +/** + * Codec supports slice-based (or partition-based) multithreading. + */ +#define AV_CODEC_CAP_SLICE_THREADS (1 << 13) +/** + * Codec supports changed parameters at any point. + */ +#define AV_CODEC_CAP_PARAM_CHANGE (1 << 14) +/** + * Codec supports avctx->thread_count == 0 (auto). + */ +#define AV_CODEC_CAP_AUTO_THREADS (1 << 15) +/** + * Audio encoder supports receiving a different number of samples in each call. 
+ */ +#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE (1 << 16) +/** + * Decoder is not a preferred choice for probing. + * This indicates that the decoder is not a good choice for probing. + * It could for example be an expensive to spin up hardware decoder, + * or it could simply not provide a lot of useful information about + * the stream. + * A decoder marked with this flag should only be used as last resort + * choice for probing. + */ +#define AV_CODEC_CAP_AVOID_PROBING (1 << 17) +/** + * Codec is intra only. + */ +#define AV_CODEC_CAP_INTRA_ONLY 0x40000000 +/** + * Codec is lossless. + */ +#define AV_CODEC_CAP_LOSSLESS 0x80000000 + + +#if FF_API_WITHOUT_PREFIX +/** + * Allow decoders to produce frames with data planes that are not aligned + * to CPU requirements (e.g. due to cropping). + */ +#define CODEC_FLAG_UNALIGNED AV_CODEC_FLAG_UNALIGNED +#define CODEC_FLAG_QSCALE AV_CODEC_FLAG_QSCALE +#define CODEC_FLAG_4MV AV_CODEC_FLAG_4MV +#define CODEC_FLAG_OUTPUT_CORRUPT AV_CODEC_FLAG_OUTPUT_CORRUPT +#define CODEC_FLAG_QPEL AV_CODEC_FLAG_QPEL +#if FF_API_GMC +/** + * @deprecated use the "gmc" private option of the libxvid encoder + */ +#define CODEC_FLAG_GMC 0x0020 ///< Use GMC. +#endif +#if FF_API_MV0 +/** + * @deprecated use the flag "mv0" in the "mpv_flags" private option of the + * mpegvideo encoders + */ +#define CODEC_FLAG_MV0 0x0040 +#endif +#if FF_API_INPUT_PRESERVED +/** + * @deprecated passing reference-counted frames to the encoders replaces this + * flag + */ +#define CODEC_FLAG_INPUT_PRESERVED 0x0100 +#endif +#define CODEC_FLAG_PASS1 AV_CODEC_FLAG_PASS1 +#define CODEC_FLAG_PASS2 AV_CODEC_FLAG_PASS2 +#define CODEC_FLAG_GRAY AV_CODEC_FLAG_GRAY +#if FF_API_EMU_EDGE +/** + * @deprecated edges are not used/required anymore. I.e. this flag is now always + * set. 
+ */ +#define CODEC_FLAG_EMU_EDGE 0x4000 +#endif +#define CODEC_FLAG_PSNR AV_CODEC_FLAG_PSNR +#define CODEC_FLAG_TRUNCATED AV_CODEC_FLAG_TRUNCATED + +#if FF_API_NORMALIZE_AQP +/** + * @deprecated use the flag "naq" in the "mpv_flags" private option of the + * mpegvideo encoders + */ +#define CODEC_FLAG_NORMALIZE_AQP 0x00020000 +#endif +#define CODEC_FLAG_INTERLACED_DCT AV_CODEC_FLAG_INTERLACED_DCT +#define CODEC_FLAG_LOW_DELAY AV_CODEC_FLAG_LOW_DELAY +#define CODEC_FLAG_GLOBAL_HEADER AV_CODEC_FLAG_GLOBAL_HEADER +#define CODEC_FLAG_BITEXACT AV_CODEC_FLAG_BITEXACT +#define CODEC_FLAG_AC_PRED AV_CODEC_FLAG_AC_PRED +#define CODEC_FLAG_LOOP_FILTER AV_CODEC_FLAG_LOOP_FILTER +#define CODEC_FLAG_INTERLACED_ME AV_CODEC_FLAG_INTERLACED_ME +#define CODEC_FLAG_CLOSED_GOP AV_CODEC_FLAG_CLOSED_GOP +#define CODEC_FLAG2_FAST AV_CODEC_FLAG2_FAST +#define CODEC_FLAG2_NO_OUTPUT AV_CODEC_FLAG2_NO_OUTPUT +#define CODEC_FLAG2_LOCAL_HEADER AV_CODEC_FLAG2_LOCAL_HEADER +#define CODEC_FLAG2_DROP_FRAME_TIMECODE AV_CODEC_FLAG2_DROP_FRAME_TIMECODE +#define CODEC_FLAG2_IGNORE_CROP AV_CODEC_FLAG2_IGNORE_CROP + +#define CODEC_FLAG2_CHUNKS AV_CODEC_FLAG2_CHUNKS +#define CODEC_FLAG2_SHOW_ALL AV_CODEC_FLAG2_SHOW_ALL +#define CODEC_FLAG2_EXPORT_MVS AV_CODEC_FLAG2_EXPORT_MVS +#define CODEC_FLAG2_SKIP_MANUAL AV_CODEC_FLAG2_SKIP_MANUAL + +/* Unsupported options : + * Syntax Arithmetic coding (SAC) + * Reference Picture Selection + * Independent Segment Decoding */ +/* /Fx */ +/* codec capabilities */ + +#define CODEC_CAP_DRAW_HORIZ_BAND AV_CODEC_CAP_DRAW_HORIZ_BAND ///< Decoder can use draw_horiz_band callback. +/** + * Codec uses get_buffer() for allocating buffers and supports custom allocators. + * If not set, it might not use get_buffer() at all or use operations that + * assume the buffer was allocated by avcodec_default_get_buffer. + */ +#define CODEC_CAP_DR1 AV_CODEC_CAP_DR1 +#define CODEC_CAP_TRUNCATED AV_CODEC_CAP_TRUNCATED +#if FF_API_XVMC +/* Codec can export data for HW decoding. 
This flag indicates that + * the codec would call get_format() with list that might contain HW accelerated + * pixel formats (XvMC, VDPAU, VAAPI, etc). The application can pick any of them + * including raw image format. + * The application can use the passed context to determine bitstream version, + * chroma format, resolution etc. + */ +#define CODEC_CAP_HWACCEL 0x0010 +#endif /* FF_API_XVMC */ +/** + * Encoder or decoder requires flushing with NULL input at the end in order to + * give the complete and correct output. + * + * NOTE: If this flag is not set, the codec is guaranteed to never be fed with + * with NULL data. The user can still send NULL data to the public encode + * or decode function, but libavcodec will not pass it along to the codec + * unless this flag is set. + * + * Decoders: + * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to get the delayed data until the decoder no longer + * returns frames. + * + * Encoders: + * The encoder needs to be fed with NULL data at the end of encoding until the + * encoder no longer returns data. + * + * NOTE: For encoders implementing the AVCodec.encode2() function, setting this + * flag also means that the encoder must set the pts and duration for + * each output packet. If this flag is not set, the pts and duration will + * be determined by libavcodec from the input frame. + */ +#define CODEC_CAP_DELAY AV_CODEC_CAP_DELAY +/** + * Codec can be fed a final frame with a smaller size. + * This can be used to prevent truncation of the last audio samples. + */ +#define CODEC_CAP_SMALL_LAST_FRAME AV_CODEC_CAP_SMALL_LAST_FRAME +#if FF_API_CAP_VDPAU +/** + * Codec can export data for HW decoding (VDPAU). 
+ */ +#define CODEC_CAP_HWACCEL_VDPAU AV_CODEC_CAP_HWACCEL_VDPAU +#endif +/** + * Codec can output multiple frames per AVPacket + * Normally demuxers return one frame at a time, demuxers which do not do + * are connected to a parser to split what they return into proper frames. + * This flag is reserved to the very rare category of codecs which have a + * bitstream that cannot be split into frames without timeconsuming + * operations like full decoding. Demuxers carrying such bitstreams thus + * may return multiple frames in a packet. This has many disadvantages like + * prohibiting stream copy in many cases thus it should only be considered + * as a last resort. + */ +#define CODEC_CAP_SUBFRAMES AV_CODEC_CAP_SUBFRAMES +/** + * Codec is experimental and is thus avoided in favor of non experimental + * encoders + */ +#define CODEC_CAP_EXPERIMENTAL AV_CODEC_CAP_EXPERIMENTAL +/** + * Codec should fill in channel configuration and samplerate instead of container + */ +#define CODEC_CAP_CHANNEL_CONF AV_CODEC_CAP_CHANNEL_CONF +#if FF_API_NEG_LINESIZES +/** + * @deprecated no codecs use this capability + */ +#define CODEC_CAP_NEG_LINESIZES 0x0800 +#endif +/** + * Codec supports frame-level multithreading. + */ +#define CODEC_CAP_FRAME_THREADS AV_CODEC_CAP_FRAME_THREADS +/** + * Codec supports slice-based (or partition-based) multithreading. + */ +#define CODEC_CAP_SLICE_THREADS AV_CODEC_CAP_SLICE_THREADS +/** + * Codec supports changed parameters at any point. + */ +#define CODEC_CAP_PARAM_CHANGE AV_CODEC_CAP_PARAM_CHANGE +/** + * Codec supports avctx->thread_count == 0 (auto). + */ +#define CODEC_CAP_AUTO_THREADS AV_CODEC_CAP_AUTO_THREADS +/** + * Audio encoder supports receiving a different number of samples in each call. + */ +#define CODEC_CAP_VARIABLE_FRAME_SIZE AV_CODEC_CAP_VARIABLE_FRAME_SIZE +/** + * Codec is intra only. + */ +#define CODEC_CAP_INTRA_ONLY AV_CODEC_CAP_INTRA_ONLY +/** + * Codec is lossless. 
+ */ +#define CODEC_CAP_LOSSLESS AV_CODEC_CAP_LOSSLESS + +/** + * HWAccel is experimental and is thus avoided in favor of non experimental + * codecs + */ +#define HWACCEL_CODEC_CAP_EXPERIMENTAL 0x0200 +#endif /* FF_API_WITHOUT_PREFIX */ + +#if FF_API_MB_TYPE +//The following defines may change, don't expect compatibility if you use them. +#define MB_TYPE_INTRA4x4 0x0001 +#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific +#define MB_TYPE_INTRA_PCM 0x0004 //FIXME H.264-specific +#define MB_TYPE_16x16 0x0008 +#define MB_TYPE_16x8 0x0010 +#define MB_TYPE_8x16 0x0020 +#define MB_TYPE_8x8 0x0040 +#define MB_TYPE_INTERLACED 0x0080 +#define MB_TYPE_DIRECT2 0x0100 //FIXME +#define MB_TYPE_ACPRED 0x0200 +#define MB_TYPE_GMC 0x0400 +#define MB_TYPE_SKIP 0x0800 +#define MB_TYPE_P0L0 0x1000 +#define MB_TYPE_P1L0 0x2000 +#define MB_TYPE_P0L1 0x4000 +#define MB_TYPE_P1L1 0x8000 +#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0) +#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1) +#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1) +#define MB_TYPE_QUANT 0x00010000 +#define MB_TYPE_CBP 0x00020000 +// Note bits 24-31 are reserved for codec specific use (H.264 ref0, MPEG-1 0mv, ...) +#endif + +/** + * Pan Scan area. + * This specifies the area which should be displayed. + * Note there may be multiple such areas for one frame. + */ +typedef struct AVPanScan{ + /** + * id + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int id; + + /** + * width and height in 1/16 pel + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int width; + int height; + + /** + * position of the top left corner in 1/16 pel for up to 3 fields/frames + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int16_t position[3][2]; +}AVPanScan; + +/** + * This structure describes the bitrate properties of an encoded bitstream. It + * roughly corresponds to a subset the VBV parameters for MPEG-2 or HRD + * parameters for H.264/HEVC. 
+ */ +typedef struct AVCPBProperties { + /** + * Maximum bitrate of the stream, in bits per second. + * Zero if unknown or unspecified. + */ + int max_bitrate; + /** + * Minimum bitrate of the stream, in bits per second. + * Zero if unknown or unspecified. + */ + int min_bitrate; + /** + * Average bitrate of the stream, in bits per second. + * Zero if unknown or unspecified. + */ + int avg_bitrate; + + /** + * The size of the buffer to which the ratecontrol is applied, in bits. + * Zero if unknown or unspecified. + */ + int buffer_size; + + /** + * The delay between the time the packet this structure is associated with + * is received and the time when it should be decoded, in periods of a 27MHz + * clock. + * + * UINT64_MAX when unknown or unspecified. + */ + uint64_t vbv_delay; +} AVCPBProperties; + +#if FF_API_QSCALE_TYPE +#define FF_QSCALE_TYPE_MPEG1 0 +#define FF_QSCALE_TYPE_MPEG2 1 +#define FF_QSCALE_TYPE_H264 2 +#define FF_QSCALE_TYPE_VP56 3 +#endif + +/** + * The decoder will keep a reference to the frame and may reuse it later. + */ +#define AV_GET_BUFFER_FLAG_REF (1 << 0) + +/** + * @defgroup lavc_packet AVPacket + * + * Types and functions for working with AVPacket. + * @{ + */ +enum AVPacketSideDataType { + /** + * An AV_PKT_DATA_PALETTE side data packet contains exactly AVPALETTE_SIZE + * bytes worth of palette. This side data signals that a new palette is + * present. + */ + AV_PKT_DATA_PALETTE, + + /** + * The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format + * that the extradata buffer was changed and the receiving side should + * act upon it appropriately. The new extradata is embedded in the side + * data buffer and should be immediately used for processing the current + * frame or packet. 
+ */ + AV_PKT_DATA_NEW_EXTRADATA, + + /** + * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows: + * @code + * u32le param_flags + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) + * s32le channel_count + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) + * u64le channel_layout + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) + * s32le sample_rate + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) + * s32le width + * s32le height + * @endcode + */ + AV_PKT_DATA_PARAM_CHANGE, + + /** + * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of + * structures with info about macroblocks relevant to splitting the + * packet into smaller packets on macroblock edges (e.g. as for RFC 2190). + * That is, it does not necessarily contain info about all macroblocks, + * as long as the distance between macroblocks in the info is smaller + * than the target payload size. + * Each MB info structure is 12 bytes, and is laid out as follows: + * @code + * u32le bit offset from the start of the packet + * u8 current quantizer at the start of the macroblock + * u8 GOB number + * u16le macroblock address within the GOB + * u8 horizontal MV predictor + * u8 vertical MV predictor + * u8 horizontal MV predictor for block number 3 + * u8 vertical MV predictor for block number 3 + * @endcode + */ + AV_PKT_DATA_H263_MB_INFO, + + /** + * This side data should be associated with an audio stream and contains + * ReplayGain information in form of the AVReplayGain struct. + */ + AV_PKT_DATA_REPLAYGAIN, + + /** + * This side data contains a 3x3 transformation matrix describing an affine + * transformation that needs to be applied to the decoded video frames for + * correct presentation. + * + * See libavutil/display.h for a detailed description of the data. 
+ */ + AV_PKT_DATA_DISPLAYMATRIX, + + /** + * This side data should be associated with a video stream and contains + * Stereoscopic 3D information in form of the AVStereo3D struct. + */ + AV_PKT_DATA_STEREO3D, + + /** + * This side data should be associated with an audio stream and corresponds + * to enum AVAudioServiceType. + */ + AV_PKT_DATA_AUDIO_SERVICE_TYPE, + + /** + * This side data contains quality related information from the encoder. + * @code + * u32le quality factor of the compressed frame. Allowed range is between 1 (good) and FF_LAMBDA_MAX (bad). + * u8 picture type + * u8 error count + * u16 reserved + * u64le[error count] sum of squared differences between encoder in and output + * @endcode + */ + AV_PKT_DATA_QUALITY_STATS, + + /** + * This side data contains an integer value representing the stream index + * of a "fallback" track. A fallback track indicates an alternate + * track to use when the current track can not be decoded for some reason. + * e.g. no decoder available for codec. + */ + AV_PKT_DATA_FALLBACK_TRACK, + + /** + * This side data corresponds to the AVCPBProperties struct. + */ + AV_PKT_DATA_CPB_PROPERTIES, + + /** + * Recommmends skipping the specified number of samples + * @code + * u32le number of samples to skip from start of this packet + * u32le number of samples to skip from end of this packet + * u8 reason for start skip + * u8 reason for end skip (0=padding silence, 1=convergence) + * @endcode + */ + AV_PKT_DATA_SKIP_SAMPLES=70, + + /** + * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that + * the packet may contain "dual mono" audio specific to Japanese DTV + * and if it is true, recommends only the selected channel to be used. + * @code + * u8 selected channels (0=mail/left, 1=sub/right, 2=both) + * @endcode + */ + AV_PKT_DATA_JP_DUALMONO, + + /** + * A list of zero terminated key/value strings. There is no end marker for + * the list, so it is required to rely on the side data size to stop. 
+ */ + AV_PKT_DATA_STRINGS_METADATA, + + /** + * Subtitle event position + * @code + * u32le x1 + * u32le y1 + * u32le x2 + * u32le y2 + * @endcode + */ + AV_PKT_DATA_SUBTITLE_POSITION, + + /** + * Data found in BlockAdditional element of matroska container. There is + * no end marker for the data, so it is required to rely on the side data + * size to recognize the end. 8 byte id (as found in BlockAddId) followed + * by data. + */ + AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, + + /** + * The optional first identifier line of a WebVTT cue. + */ + AV_PKT_DATA_WEBVTT_IDENTIFIER, + + /** + * The optional settings (rendering instructions) that immediately + * follow the timestamp specifier of a WebVTT cue. + */ + AV_PKT_DATA_WEBVTT_SETTINGS, + + /** + * A list of zero terminated key/value strings. There is no end marker for + * the list, so it is required to rely on the side data size to stop. This + * side data includes updated metadata which appeared in the stream. + */ + AV_PKT_DATA_METADATA_UPDATE, + + /** + * MPEGTS stream ID, this is required to pass the stream ID + * information from the demuxer to the corresponding muxer. + */ + AV_PKT_DATA_MPEGTS_STREAM_ID, + + /** + * Mastering display metadata (based on SMPTE-2086:2014). This metadata + * should be associated with a video stream and containts data in the form + * of the AVMasteringDisplayMetadata struct. + */ + AV_PKT_DATA_MASTERING_DISPLAY_METADATA, + + /** + * This side data should be associated with a video stream and corresponds + * to the AVSphericalMapping structure. + */ + AV_PKT_DATA_SPHERICAL, +}; + +#define AV_PKT_DATA_QUALITY_FACTOR AV_PKT_DATA_QUALITY_STATS //DEPRECATED + +typedef struct AVPacketSideData { + uint8_t *data; + int size; + enum AVPacketSideDataType type; +} AVPacketSideData; + +/** + * This structure stores compressed data. It is typically exported by demuxers + * and then passed as input to decoders, or received as output from encoders and + * then passed to muxers. 
+ * + * For video, it should typically contain one compressed frame. For audio it may + * contain several compressed frames. Encoders are allowed to output empty + * packets, with no compressed data, containing only side data + * (e.g. to update some stream parameters at the end of encoding). + * + * AVPacket is one of the few structs in FFmpeg, whose size is a part of public + * ABI. Thus it may be allocated on stack and no new fields can be added to it + * without libavcodec and libavformat major bump. + * + * The semantics of data ownership depends on the buf field. + * If it is set, the packet data is dynamically allocated and is + * valid indefinitely until a call to av_packet_unref() reduces the + * reference count to 0. + * + * If the buf field is not set av_packet_ref() would make a copy instead + * of increasing the reference count. + * + * The side data is always allocated with av_malloc(), copied by + * av_packet_ref() and freed by av_packet_unref(). + * + * @see av_packet_ref + * @see av_packet_unref + */ +typedef struct AVPacket { + /** + * A reference to the reference-counted buffer where the packet data is + * stored. + * May be NULL, then the packet data is not reference-counted. + */ + AVBufferRef *buf; + /** + * Presentation timestamp in AVStream->time_base units; the time at which + * the decompressed packet will be presented to the user. + * Can be AV_NOPTS_VALUE if it is not stored in the file. + * pts MUST be larger or equal to dts as presentation cannot happen before + * decompression, unless one wants to view hex dumps. Some formats misuse + * the terms dts and pts/cts to mean something different. Such timestamps + * must be converted to true pts/dts before they are stored in AVPacket. + */ + int64_t pts; + /** + * Decompression timestamp in AVStream->time_base units; the time at which + * the packet is decompressed. + * Can be AV_NOPTS_VALUE if it is not stored in the file. 
+ */ + int64_t dts; + uint8_t *data; + int size; + int stream_index; + /** + * A combination of AV_PKT_FLAG values + */ + int flags; + /** + * Additional packet data that can be provided by the container. + * Packet can contain several types of side information. + */ + AVPacketSideData *side_data; + int side_data_elems; + + /** + * Duration of this packet in AVStream->time_base units, 0 if unknown. + * Equals next_pts - this_pts in presentation order. + */ + int64_t duration; + + int64_t pos; ///< byte position in stream, -1 if unknown + +#if FF_API_CONVERGENCE_DURATION + /** + * @deprecated Same as the duration field, but as int64_t. This was required + * for Matroska subtitles, whose duration values could overflow when the + * duration field was still an int. + */ + attribute_deprecated + int64_t convergence_duration; +#endif +} AVPacket; +#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe +#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted +/** + * Flag is used to discard packets which are required to maintain valid + * decoder state but are not required for output and should be dropped + * after decoding. + **/ +#define AV_PKT_FLAG_DISCARD 0x0004 +#define AV_PKT_FLAG_NEW_SEG 0x8000 ///< The packet is the first packet from a source in concat + +enum AVSideDataParamChangeFlags { + AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001, + AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002, + AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004, + AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008, +}; +/** + * @} + */ + +struct AVCodecInternal; + +enum AVFieldOrder { + AV_FIELD_UNKNOWN, + AV_FIELD_PROGRESSIVE, + AV_FIELD_TT, //< Top coded_first, top displayed first + AV_FIELD_BB, //< Bottom coded first, bottom displayed first + AV_FIELD_TB, //< Top coded first, bottom displayed first + AV_FIELD_BT, //< Bottom coded first, top displayed first +}; + +/** + * main external API structure. 
+ * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * You can use AVOptions (av_opt* / av_set/get*()) to access these fields from user + * applications. + * The name string for AVOptions options matches the associated command line + * parameter name and can be found in libavcodec/options_table.h + * The AVOption/command line parameter names differ in some cases from the C + * structure field names for historic reasons or brevity. + * sizeof(AVCodecContext) must not be used outside libav*. + */ +typedef struct AVCodecContext { + /** + * information on struct for av_log + * - set by avcodec_alloc_context3 + */ + const AVClass *av_class; + int log_level_offset; + + enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */ + const struct AVCodec *codec; +#if FF_API_CODEC_NAME + /** + * @deprecated this field is not used for anything in libavcodec + */ + attribute_deprecated + char codec_name[32]; +#endif + enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */ + + /** + * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'). + * This is used to work around some encoder bugs. + * A demuxer should set this to what is stored in the field used to identify the codec. + * If there are multiple such fields in a container then the demuxer should choose the one + * which maximizes the information about the used codec. + * If the codec tag field in a container is larger than 32 bits then the demuxer should + * remap the longer ID to 32 bits with a table or other structure. Alternatively a new + * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated + * first. + * - encoding: Set by user, if not then the default based on codec_id will be used. + * - decoding: Set by user, will be converted to uppercase by libavcodec during init. 
+ */ + unsigned int codec_tag; + +#if FF_API_STREAM_CODEC_TAG + /** + * @deprecated this field is unused + */ + attribute_deprecated + unsigned int stream_codec_tag; +#endif + + void *priv_data; + + /** + * Private context used for internal data. + * + * Unlike priv_data, this is not codec-specific. It is used in general + * libavcodec functions. + */ + struct AVCodecInternal *internal; + + /** + * Private data of the user, can be used to carry app specific stuff. + * - encoding: Set by user. + * - decoding: Set by user. + */ + void *opaque; + + /** + * the average bitrate + * - encoding: Set by user; unused for constant quantizer encoding. + * - decoding: Set by user, may be overwritten by libavcodec + * if this info is available in the stream + */ + int64_t bit_rate; + + /** + * number of bits the bitstream is allowed to diverge from the reference. + * the reference can be CBR (for CBR pass1) or VBR (for pass2) + * - encoding: Set by user; unused for constant quantizer encoding. + * - decoding: unused + */ + int bit_rate_tolerance; + + /** + * Global quality for codecs which cannot change it per frame. + * This should be proportional to MPEG-1/2/4 qscale. + * - encoding: Set by user. + * - decoding: unused + */ + int global_quality; + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int compression_level; +#define FF_COMPRESSION_DEFAULT -1 + + /** + * AV_CODEC_FLAG_*. + * - encoding: Set by user. + * - decoding: Set by user. + */ + int flags; + + /** + * AV_CODEC_FLAG2_* + * - encoding: Set by user. + * - decoding: Set by user. + */ + int flags2; + + /** + * some codecs need / can use extradata like Huffman tables. + * MJPEG: Huffman tables + * rv10: additional flags + * MPEG-4: global headers (they can be in the bitstream or here) + * The allocated memory should be AV_INPUT_BUFFER_PADDING_SIZE bytes larger + * than extradata_size to avoid problems if it is read with the bitstream reader. 
+ * The bytewise contents of extradata must not depend on the architecture or CPU endianness. + * - encoding: Set/allocated/freed by libavcodec. + * - decoding: Set/allocated/freed by user. + */ + uint8_t *extradata; + int extradata_size; + + /** + * This is the fundamental unit of time (in seconds) in terms + * of which frame timestamps are represented. For fixed-fps content, + * timebase should be 1/framerate and timestamp increments should be + * identically 1. + * This often, but not always is the inverse of the frame rate or field rate + * for video. 1/time_base is not the average frame rate if the frame rate is not + * constant. + * + * Like containers, elementary streams also can store timestamps, 1/time_base + * is the unit in which these timestamps are specified. + * As example of such codec time base see ISO/IEC 14496-2:2001(E) + * vop_time_increment_resolution and fixed_vop_rate + * (fixed_vop_rate == 0 implies that it is different from the framerate) + * + * - encoding: MUST be set by user. + * - decoding: the use of this field for decoding is deprecated. + * Use framerate instead. + */ + AVRational time_base; + + /** + * For some codecs, the time base is closer to the field rate than the frame rate. + * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration + * if no telecine is used ... + * + * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2. + */ + int ticks_per_frame; + + /** + * Codec delay. + * + * Encoding: Number of frames delay there will be from the encoder input to + * the decoder output. (we assume the decoder matches the spec) + * Decoding: Number of frames delay in addition to what a standard decoder + * as specified in the spec would produce. + * + * Video: + * Number of frames the decoded output will be delayed relative to the + * encoded input. + * + * Audio: + * For encoding, this field is unused (see initial_padding). 
+ * + * For decoding, this is the number of samples the decoder needs to + * output before the decoder's output is valid. When seeking, you should + * start decoding this many samples prior to your desired seek point. + * + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int delay; + + + /* video only */ + /** + * picture width / height. + * + * @note Those fields may not match the values of the last + * AVFrame output by avcodec_decode_video2 due frame + * reordering. + * + * - encoding: MUST be set by user. + * - decoding: May be set by the user before opening the decoder if known e.g. + * from the container. Some decoders will require the dimensions + * to be set by the caller. During decoding, the decoder may + * overwrite those values as required while parsing the data. + */ + int width, height; + + /** + * Bitstream width / height, may be different from width/height e.g. when + * the decoded frame is cropped before being output or lowres is enabled. + * + * @note Those field may not match the value of the last + * AVFrame output by avcodec_receive_frame() due frame + * reordering. + * + * - encoding: unused + * - decoding: May be set by the user before opening the decoder if known + * e.g. from the container. During decoding, the decoder may + * overwrite those values as required while parsing the data. + */ + int coded_width, coded_height; + +#if FF_API_ASPECT_EXTENDED +#define FF_ASPECT_EXTENDED 15 +#endif + + /** + * the number of pictures in a group of pictures, or 0 for intra_only + * - encoding: Set by user. + * - decoding: unused + */ + int gop_size; + + /** + * Pixel format, see AV_PIX_FMT_xxx. + * May be set by the demuxer if known from headers. + * May be overridden by the decoder if it knows better. + * + * @note This field may not match the value of the last + * AVFrame output by avcodec_receive_frame() due frame + * reordering. + * + * - encoding: Set by user. 
+ * - decoding: Set by user if known, overridden by libavcodec while + * parsing the data. + */ + enum AVPixelFormat pix_fmt; + +#if FF_API_MOTION_EST + /** + * This option does nothing + * @deprecated use codec private options instead + */ + attribute_deprecated int me_method; +#endif + + /** + * If non NULL, 'draw_horiz_band' is called by the libavcodec + * decoder to draw a horizontal band. It improves cache usage. Not + * all codecs can do that. You must check the codec capabilities + * beforehand. + * When multithreading is used, it may be called from multiple threads + * at the same time; threads might draw different parts of the same AVFrame, + * or multiple AVFrames, and there is no guarantee that slices will be drawn + * in order. + * The function is also used by hardware acceleration APIs. + * It is called at least once during frame decoding to pass + * the data needed for hardware render. + * In that mode instead of pixel data, AVFrame points to + * a structure specific to the acceleration API. The application + * reads the structure and can change some fields to indicate progress + * or mark state. + * - encoding: unused + * - decoding: Set by user. + * @param height the height of the slice + * @param y the y position of the slice + * @param type 1->top field, 2->bottom field, 3->frame + * @param offset offset into the AVFrame.data from which the slice should be read + */ + void (*draw_horiz_band)(struct AVCodecContext *s, + const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], + int y, int type, int height); + + /** + * callback to negotiate the pixelFormat + * @param fmt is the list of formats which are supported by the codec, + * it is terminated by -1 as 0 is a valid format, the formats are ordered by quality. + * The first is always the native one. + * @note The callback may be called again immediately if initialization for + * the selected (hardware-accelerated) pixel format failed. 
+ * @warning Behavior is undefined if the callback returns a value not + * in the fmt list of formats. + * @return the chosen format + * - encoding: unused + * - decoding: Set by user, if not set the native format will be chosen. + */ + enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt); + + /** + * maximum number of B-frames between non-B-frames + * Note: The output will be delayed by max_b_frames+1 relative to the input. + * - encoding: Set by user. + * - decoding: unused + */ + int max_b_frames; + + /** + * qscale factor between IP and B-frames + * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset). + * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). + * - encoding: Set by user. + * - decoding: unused + */ + float b_quant_factor; + +#if FF_API_RC_STRATEGY + /** @deprecated use codec private option instead */ + attribute_deprecated int rc_strategy; +#define FF_RC_STRATEGY_XVID 1 +#endif + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int b_frame_strategy; +#endif + + /** + * qscale offset between IP and B-frames + * - encoding: Set by user. + * - decoding: unused + */ + float b_quant_offset; + + /** + * Size of the frame reordering buffer in the decoder. + * For MPEG-2 it is 1 IPB or 0 low delay IP. + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int has_b_frames; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int mpeg_quant; +#endif + + /** + * qscale factor between P- and I-frames + * If > 0 then the last P-frame quantizer will be used (q = lastp_q * factor + offset). + * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). + * - encoding: Set by user. + * - decoding: unused + */ + float i_quant_factor; + + /** + * qscale offset between P and I-frames + * - encoding: Set by user. 
+ * - decoding: unused + */ + float i_quant_offset; + + /** + * luminance masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float lumi_masking; + + /** + * temporary complexity masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float temporal_cplx_masking; + + /** + * spatial complexity masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float spatial_cplx_masking; + + /** + * p block masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float p_masking; + + /** + * darkness masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float dark_masking; + + /** + * slice count + * - encoding: Set by libavcodec. + * - decoding: Set by user (or 0). + */ + int slice_count; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int prediction_method; +#define FF_PRED_LEFT 0 +#define FF_PRED_PLANE 1 +#define FF_PRED_MEDIAN 2 +#endif + + /** + * slice offsets in the frame in bytes + * - encoding: Set/allocated by libavcodec. + * - decoding: Set/allocated by user (or NULL). + */ + int *slice_offset; + + /** + * sample aspect ratio (0 if unknown) + * That is the width of a pixel divided by the height of the pixel. + * Numerator and denominator must be relatively prime and smaller than 256 for some video standards. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + AVRational sample_aspect_ratio; + + /** + * motion estimation comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_cmp; + /** + * subpixel motion estimation comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_sub_cmp; + /** + * macroblock comparison function (not supported yet) + * - encoding: Set by user. + * - decoding: unused + */ + int mb_cmp; + /** + * interlaced DCT comparison function + * - encoding: Set by user. 
+ * - decoding: unused + */ + int ildct_cmp; +#define FF_CMP_SAD 0 +#define FF_CMP_SSE 1 +#define FF_CMP_SATD 2 +#define FF_CMP_DCT 3 +#define FF_CMP_PSNR 4 +#define FF_CMP_BIT 5 +#define FF_CMP_RD 6 +#define FF_CMP_ZERO 7 +#define FF_CMP_VSAD 8 +#define FF_CMP_VSSE 9 +#define FF_CMP_NSSE 10 +#define FF_CMP_W53 11 +#define FF_CMP_W97 12 +#define FF_CMP_DCTMAX 13 +#define FF_CMP_DCT264 14 +#define FF_CMP_MEDIAN_SAD 15 +#define FF_CMP_CHROMA 256 + + /** + * ME diamond size & shape + * - encoding: Set by user. + * - decoding: unused + */ + int dia_size; + + /** + * amount of previous MV predictors (2a+1 x 2a+1 square) + * - encoding: Set by user. + * - decoding: unused + */ + int last_predictor_count; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int pre_me; +#endif + + /** + * motion estimation prepass comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_pre_cmp; + + /** + * ME prepass diamond size & shape + * - encoding: Set by user. + * - decoding: unused + */ + int pre_dia_size; + + /** + * subpel ME quality + * - encoding: Set by user. + * - decoding: unused + */ + int me_subpel_quality; + +#if FF_API_AFD + /** + * DTG active format information (additional aspect ratio + * information only used in DVB MPEG-2 transport streams) + * 0 if not set. + * + * - encoding: unused + * - decoding: Set by decoder. + * @deprecated Deprecated in favor of AVSideData + */ + attribute_deprecated int dtg_active_format; +#define FF_DTG_AFD_SAME 8 +#define FF_DTG_AFD_4_3 9 +#define FF_DTG_AFD_16_9 10 +#define FF_DTG_AFD_14_9 11 +#define FF_DTG_AFD_4_3_SP_14_9 13 +#define FF_DTG_AFD_16_9_SP_14_9 14 +#define FF_DTG_AFD_SP_4_3 15 +#endif /* FF_API_AFD */ + + /** + * maximum motion estimation search range in subpel units + * If 0 then no limit. + * + * - encoding: Set by user. 
+ * - decoding: unused + */ + int me_range; + +#if FF_API_QUANT_BIAS + /** + * @deprecated use encoder private option instead + */ + attribute_deprecated int intra_quant_bias; +#define FF_DEFAULT_QUANT_BIAS 999999 + + /** + * @deprecated use encoder private option instead + */ + attribute_deprecated int inter_quant_bias; +#endif + + /** + * slice flags + * - encoding: unused + * - decoding: Set by user. + */ + int slice_flags; +#define SLICE_FLAG_CODED_ORDER 0x0001 ///< draw_horiz_band() is called in coded order instead of display +#define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG-2 field pics) +#define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1) + +#if FF_API_XVMC + /** + * XVideo Motion Acceleration + * - encoding: forbidden + * - decoding: set by decoder + * @deprecated XvMC doesn't need it anymore. + */ + attribute_deprecated int xvmc_acceleration; +#endif /* FF_API_XVMC */ + + /** + * macroblock decision mode + * - encoding: Set by user. + * - decoding: unused + */ + int mb_decision; +#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp +#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits +#define FF_MB_DECISION_RD 2 ///< rate distortion + + /** + * custom intra quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: Set by libavcodec. + */ + uint16_t *intra_matrix; + + /** + * custom inter quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: Set by libavcodec. 
+ */ + uint16_t *inter_matrix; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int scenechange_threshold; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int noise_reduction; +#endif + +#if FF_API_MPV_OPT + /** + * @deprecated this field is unused + */ + attribute_deprecated + int me_threshold; + + /** + * @deprecated this field is unused + */ + attribute_deprecated + int mb_threshold; +#endif + + /** + * precision of the intra DC coefficient - 8 + * - encoding: Set by user. + * - decoding: Set by libavcodec + */ + int intra_dc_precision; + + /** + * Number of macroblock rows at the top which are skipped. + * - encoding: unused + * - decoding: Set by user. + */ + int skip_top; + + /** + * Number of macroblock rows at the bottom which are skipped. + * - encoding: unused + * - decoding: Set by user. + */ + int skip_bottom; + +#if FF_API_MPV_OPT + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + float border_masking; +#endif + + /** + * minimum MB Lagrange multiplier + * - encoding: Set by user. + * - decoding: unused + */ + int mb_lmin; + + /** + * maximum MB Lagrange multiplier + * - encoding: Set by user. + * - decoding: unused + */ + int mb_lmax; + +#if FF_API_PRIVATE_OPT + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + int me_penalty_compensation; +#endif + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int bidir_refine; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int brd_scale; +#endif + + /** + * minimum GOP size + * - encoding: Set by user. + * - decoding: unused + */ + int keyint_min; + + /** + * number of reference frames + * - encoding: Set by user. + * - decoding: Set by lavc. 
+ */ + int refs; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int chromaoffset; +#endif + +#if FF_API_UNUSED_MEMBERS + /** + * Multiplied by qscale for each frame and added to scene_change_score. + * - encoding: Set by user. + * - decoding: unused + */ + attribute_deprecated int scenechange_factor; +#endif + + /** + * Note: Value depends upon the compare function used for fullpel ME. + * - encoding: Set by user. + * - decoding: unused + */ + int mv0_threshold; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int b_sensitivity; +#endif + + /** + * Chromaticity coordinates of the source primaries. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorPrimaries color_primaries; + + /** + * Color Transfer Characteristic. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorTransferCharacteristic color_trc; + + /** + * YUV colorspace type. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorSpace colorspace; + + /** + * MPEG vs JPEG YUV range. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorRange color_range; + + /** + * This defines the location of chroma samples. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVChromaLocation chroma_sample_location; + + /** + * Number of slices. + * Indicates number of picture subdivisions. Used for parallelized + * decoding. + * - encoding: Set by user + * - decoding: unused + */ + int slices; + + /** Field order + * - encoding: set by libavcodec + * - decoding: Set by user. + */ + enum AVFieldOrder field_order; + + /* audio only */ + int sample_rate; ///< samples per second + int channels; ///< number of audio channels + + /** + * audio sample format + * - encoding: Set by user. + * - decoding: Set by libavcodec. 
+ */ + enum AVSampleFormat sample_fmt; ///< sample format + + /* The following data should not be initialized. */ + /** + * Number of samples per channel in an audio frame. + * + * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame + * except the last must contain exactly frame_size samples per channel. + * May be 0 when the codec has AV_CODEC_CAP_VARIABLE_FRAME_SIZE set, then the + * frame size is not restricted. + * - decoding: may be set by some decoders to indicate constant frame size + */ + int frame_size; + + /** + * Frame counter, set by libavcodec. + * + * - decoding: total number of frames returned from the decoder so far. + * - encoding: total number of frames passed to the encoder so far. + * + * @note the counter is not incremented if encoding/decoding resulted in + * an error. + */ + int frame_number; + + /** + * number of bytes per packet if constant and known or 0 + * Used by some WAV based audio codecs. + */ + int block_align; + + /** + * Audio cutoff bandwidth (0 means "automatic") + * - encoding: Set by user. + * - decoding: unused + */ + int cutoff; + + /** + * Audio channel layout. + * - encoding: set by user. + * - decoding: set by user, may be overwritten by libavcodec. + */ + uint64_t channel_layout; + + /** + * Request decoder to use this channel layout if it can (0 for default) + * - encoding: unused + * - decoding: Set by user. + */ + uint64_t request_channel_layout; + + /** + * Type of service that the audio stream conveys. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + enum AVAudioServiceType audio_service_type; + + /** + * desired sample format + * - encoding: Not used. + * - decoding: Set by user. + * Decoder will decode to this format if it can. + */ + enum AVSampleFormat request_sample_fmt; + + /** + * This callback is called at the beginning of each frame to get data + * buffer(s) for it. 
There may be one contiguous buffer for all the data or + * there may be a buffer per each data plane or anything in between. What + * this means is, you may set however many entries in buf[] you feel necessary. + * Each buffer must be reference-counted using the AVBuffer API (see description + * of buf[] below). + * + * The following fields will be set in the frame before this callback is + * called: + * - format + * - width, height (video only) + * - sample_rate, channel_layout, nb_samples (audio only) + * Their values may differ from the corresponding values in + * AVCodecContext. This callback must use the frame values, not the codec + * context values, to calculate the required buffer size. + * + * This callback must fill the following fields in the frame: + * - data[] + * - linesize[] + * - extended_data: + * * if the data is planar audio with more than 8 channels, then this + * callback must allocate and fill extended_data to contain all pointers + * to all data planes. data[] must hold as many pointers as it can. + * extended_data must be allocated with av_malloc() and will be freed in + * av_frame_unref(). + * * otherwise extended_data must point to data + * - buf[] must contain one or more pointers to AVBufferRef structures. Each of + * the frame's data and extended_data pointers must be contained in these. That + * is, one AVBufferRef for each allocated chunk of memory, not necessarily one + * AVBufferRef per data[] entry. See: av_buffer_create(), av_buffer_alloc(), + * and av_buffer_ref(). + * - extended_buf and nb_extended_buf must be allocated with av_malloc() by + * this callback and filled with the extra buffers if there are more + * buffers than buf[] can hold. extended_buf will be freed in + * av_frame_unref(). + * + * If AV_CODEC_CAP_DR1 is not set then get_buffer2() must call + * avcodec_default_get_buffer2() instead of providing buffers allocated by + * some other means. 
+ * + * Each data plane must be aligned to the maximum required by the target + * CPU. + * + * @see avcodec_default_get_buffer2() + * + * Video: + * + * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused + * (read and/or written to if it is writable) later by libavcodec. + * + * avcodec_align_dimensions2() should be used to find the required width and + * height, as they normally need to be rounded up to the next multiple of 16. + * + * Some decoders do not support linesizes changing between frames. + * + * If frame multithreading is used and thread_safe_callbacks is set, + * this callback may be called from a different thread, but not from more + * than one at once. Does not need to be reentrant. + * + * @see avcodec_align_dimensions2() + * + * Audio: + * + * Decoders request a buffer of a particular size by setting + * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may, + * however, utilize only part of the buffer by setting AVFrame.nb_samples + * to a smaller value in the output frame. + * + * As a convenience, av_samples_get_buffer_size() and + * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2() + * functions to find the required data size and to fill data pointers and + * linesize. In AVFrame.linesize, only linesize[0] may be set for audio + * since all planes must be the same size. + * + * @see av_samples_get_buffer_size(), av_samples_fill_arrays() + * + * - encoding: unused + * - decoding: Set by libavcodec, user can override. + */ + int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags); + + /** + * If non-zero, the decoded audio and video frames returned from + * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted + * and are valid indefinitely. The caller must free them with + * av_frame_unref() when they are not needed anymore. + * Otherwise, the decoded frames must not be freed by the caller and are + * only valid until the next decode call. 
+ * + * This is always automatically enabled if avcodec_receive_frame() is used. + * + * - encoding: unused + * - decoding: set by the caller before avcodec_open2(). + */ + int refcounted_frames; + + /* - encoding parameters */ + float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0) + float qblur; ///< amount of qscale smoothing over time (0.0-1.0) + + /** + * minimum quantizer + * - encoding: Set by user. + * - decoding: unused + */ + int qmin; + + /** + * maximum quantizer + * - encoding: Set by user. + * - decoding: unused + */ + int qmax; + + /** + * maximum quantizer difference between frames + * - encoding: Set by user. + * - decoding: unused + */ + int max_qdiff; + +#if FF_API_MPV_OPT + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + float rc_qsquish; + + attribute_deprecated + float rc_qmod_amp; + attribute_deprecated + int rc_qmod_freq; +#endif + + /** + * decoder bitstream buffer size + * - encoding: Set by user. + * - decoding: unused + */ + int rc_buffer_size; + + /** + * ratecontrol override, see RcOverride + * - encoding: Allocated/set/freed by user. + * - decoding: unused + */ + int rc_override_count; + RcOverride *rc_override; + +#if FF_API_MPV_OPT + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + const char *rc_eq; +#endif + + /** + * maximum bitrate + * - encoding: Set by user. + * - decoding: Set by user, may be overwritten by libavcodec. + */ + int64_t rc_max_rate; + + /** + * minimum bitrate + * - encoding: Set by user. + * - decoding: unused + */ + int64_t rc_min_rate; + +#if FF_API_MPV_OPT + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + float rc_buffer_aggressivity; + + attribute_deprecated + float rc_initial_cplx; +#endif + + /** + * Ratecontrol attempt to use, at maximum, of what can be used without an underflow. + * - encoding: Set by user. + * - decoding: unused. 
+ */ + float rc_max_available_vbv_use; + + /** + * Ratecontrol attempt to use, at least, times the amount needed to prevent a vbv overflow. + * - encoding: Set by user. + * - decoding: unused. + */ + float rc_min_vbv_overflow_use; + + /** + * Number of bits which should be loaded into the rc buffer before decoding starts. + * - encoding: Set by user. + * - decoding: unused + */ + int rc_initial_buffer_occupancy; + +#if FF_API_CODER_TYPE +#define FF_CODER_TYPE_VLC 0 +#define FF_CODER_TYPE_AC 1 +#define FF_CODER_TYPE_RAW 2 +#define FF_CODER_TYPE_RLE 3 +#if FF_API_UNUSED_MEMBERS +#define FF_CODER_TYPE_DEFLATE 4 +#endif /* FF_API_UNUSED_MEMBERS */ + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + int coder_type; +#endif /* FF_API_CODER_TYPE */ + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int context_model; +#endif + +#if FF_API_MPV_OPT + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + int lmin; + + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + int lmax; +#endif + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int frame_skip_threshold; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int frame_skip_factor; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int frame_skip_exp; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int frame_skip_cmp; +#endif /* FF_API_PRIVATE_OPT */ + + /** + * trellis RD quantization + * - encoding: Set by user. 
+ * - decoding: unused + */ + int trellis; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int min_prediction_order; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int max_prediction_order; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int64_t timecode_frame_start; +#endif + +#if FF_API_RTP_CALLBACK + /** + * @deprecated unused + */ + /* The RTP callback: This function is called */ + /* every time the encoder has a packet to send. */ + /* It depends on the encoder if the data starts */ + /* with a Start Code (it should). H.263 does. */ + /* mb_nb contains the number of macroblocks */ + /* encoded in the RTP payload. */ + attribute_deprecated + void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb); +#endif + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int rtp_payload_size; /* The size of the RTP payload: the coder will */ + /* do its best to deliver a chunk with size */ + /* below rtp_payload_size, the chunk will start */ + /* with a start code on some codecs like H.263. */ + /* This doesn't take account of any particular */ + /* headers inside the transmitted RTP payload. */ +#endif + +#if FF_API_STAT_BITS + /* statistics, used for 2-pass encoding */ + attribute_deprecated + int mv_bits; + attribute_deprecated + int header_bits; + attribute_deprecated + int i_tex_bits; + attribute_deprecated + int p_tex_bits; + attribute_deprecated + int i_count; + attribute_deprecated + int p_count; + attribute_deprecated + int skip_count; + attribute_deprecated + int misc_bits; + + /** @deprecated this field is unused */ + attribute_deprecated + int frame_bits; +#endif + + /** + * pass1 encoding statistics output buffer + * - encoding: Set by libavcodec. 
+ * - decoding: unused + */ + char *stats_out; + + /** + * pass2 encoding statistics input buffer + * Concatenated stuff from stats_out of pass1 should be placed here. + * - encoding: Allocated/set/freed by user. + * - decoding: unused + */ + char *stats_in; + + /** + * Work around bugs in encoders which sometimes cannot be detected automatically. + * - encoding: Set by user + * - decoding: Set by user + */ + int workaround_bugs; +#define FF_BUG_AUTODETECT 1 ///< autodetection +#if FF_API_OLD_MSMPEG4 +#define FF_BUG_OLD_MSMPEG4 2 +#endif +#define FF_BUG_XVID_ILACE 4 +#define FF_BUG_UMP4 8 +#define FF_BUG_NO_PADDING 16 +#define FF_BUG_AMV 32 +#if FF_API_AC_VLC +#define FF_BUG_AC_VLC 0 ///< Will be removed, libavcodec can now handle these non-compliant files by default. +#endif +#define FF_BUG_QPEL_CHROMA 64 +#define FF_BUG_STD_QPEL 128 +#define FF_BUG_QPEL_CHROMA2 256 +#define FF_BUG_DIRECT_BLOCKSIZE 512 +#define FF_BUG_EDGE 1024 +#define FF_BUG_HPEL_CHROMA 2048 +#define FF_BUG_DC_CLIP 4096 +#define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders. +#define FF_BUG_TRUNCATED 16384 +#define FF_BUG_IEDGE 32768 + + /** + * strictly follow the standard (MPEG-4, ...). + * - encoding: Set by user. + * - decoding: Set by user. + * Setting this to STRICT or higher means the encoder and decoder will + * generally do stupid things, whereas setting it to unofficial or lower + * will mean the encoder might produce output that is not supported by all + * spec-compliant decoders. Decoders don't differentiate between normal, + * unofficial and experimental (that is, they always try to decode things + * when they can) unless they are explicitly asked to behave stupidly + * (=strictly conform to the specs) + */ + int strict_std_compliance; +#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software. 
+#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences. +#define FF_COMPLIANCE_NORMAL 0 +#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions +#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things. + + /** + * error concealment flags + * - encoding: unused + * - decoding: Set by user. + */ + int error_concealment; +#define FF_EC_GUESS_MVS 1 +#define FF_EC_DEBLOCK 2 +#define FF_EC_FAVOR_INTER 256 + + /** + * debug + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug; +#define FF_DEBUG_PICT_INFO 1 +#define FF_DEBUG_RC 2 +#define FF_DEBUG_BITSTREAM 4 +#define FF_DEBUG_MB_TYPE 8 +#define FF_DEBUG_QP 16 +#if FF_API_DEBUG_MV +/** + * @deprecated this option does nothing + */ +#define FF_DEBUG_MV 32 +#endif +#define FF_DEBUG_DCT_COEFF 0x00000040 +#define FF_DEBUG_SKIP 0x00000080 +#define FF_DEBUG_STARTCODE 0x00000100 +#if FF_API_UNUSED_MEMBERS +#define FF_DEBUG_PTS 0x00000200 +#endif /* FF_API_UNUSED_MEMBERS */ +#define FF_DEBUG_ER 0x00000400 +#define FF_DEBUG_MMCO 0x00000800 +#define FF_DEBUG_BUGS 0x00001000 +#if FF_API_DEBUG_MV +#define FF_DEBUG_VIS_QP 0x00002000 +#define FF_DEBUG_VIS_MB_TYPE 0x00004000 +#endif +#define FF_DEBUG_BUFFERS 0x00008000 +#define FF_DEBUG_THREADS 0x00010000 +#define FF_DEBUG_GREEN_MD 0x00800000 +#define FF_DEBUG_NOMC 0x01000000 + +#if FF_API_DEBUG_MV + /** + * debug + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug_mv; +#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 // visualize forward predicted MVs of P-frames +#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 // visualize forward predicted MVs of B-frames +#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 // visualize backward predicted MVs of B-frames +#endif + + /** + * Error recognition; may misdetect some more or less valid parts as errors. + * - encoding: unused + * - decoding: Set by user. 
+ */ + int err_recognition; + +/** + * Verify checksums embedded in the bitstream (could be of either encoded or + * decoded data, depending on the codec) and print an error message on mismatch. + * If AV_EF_EXPLODE is also set, a mismatching checksum will result in the + * decoder returning an error. + */ +#define AV_EF_CRCCHECK (1<<0) +#define AV_EF_BITSTREAM (1<<1) ///< detect bitstream specification deviations +#define AV_EF_BUFFER (1<<2) ///< detect improper bitstream length +#define AV_EF_EXPLODE (1<<3) ///< abort decoding on minor error detection + +#define AV_EF_IGNORE_ERR (1<<15) ///< ignore errors and continue +#define AV_EF_CAREFUL (1<<16) ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors +#define AV_EF_COMPLIANT (1<<17) ///< consider all spec non compliances as errors +#define AV_EF_AGGRESSIVE (1<<18) ///< consider things that a sane encoder should not do as an error + + + /** + * opaque 64-bit number (generally a PTS) that will be reordered and + * output in AVFrame.reordered_opaque + * - encoding: unused + * - decoding: Set by user. + */ + int64_t reordered_opaque; + + /** + * Hardware accelerator in use + * - encoding: unused. + * - decoding: Set by libavcodec + */ + struct AVHWAccel *hwaccel; + + /** + * Hardware accelerator context. + * For some hardware accelerators, a global context needs to be + * provided by the user. In that case, this holds display-dependent + * data FFmpeg cannot instantiate itself. Please refer to the + * FFmpeg HW accelerator documentation to know how to fill this + * is. e.g. for VA API, this is a struct vaapi_context. + * - encoding: unused + * - decoding: Set by user + */ + void *hwaccel_context; + + /** + * error + * - encoding: Set by libavcodec if flags & AV_CODEC_FLAG_PSNR. + * - decoding: unused + */ + uint64_t error[AV_NUM_DATA_POINTERS]; + + /** + * DCT algorithm, see FF_DCT_* below + * - encoding: Set by user. 
+ * - decoding: unused + */ + int dct_algo; +#define FF_DCT_AUTO 0 +#define FF_DCT_FASTINT 1 +#define FF_DCT_INT 2 +#define FF_DCT_MMX 3 +#define FF_DCT_ALTIVEC 5 +#define FF_DCT_FAAN 6 + + /** + * IDCT algorithm, see FF_IDCT_* below. + * - encoding: Set by user. + * - decoding: Set by user. + */ + int idct_algo; +#define FF_IDCT_AUTO 0 +#define FF_IDCT_INT 1 +#define FF_IDCT_SIMPLE 2 +#define FF_IDCT_SIMPLEMMX 3 +#define FF_IDCT_ARM 7 +#define FF_IDCT_ALTIVEC 8 +#if FF_API_ARCH_SH4 +#define FF_IDCT_SH4 9 +#endif +#define FF_IDCT_SIMPLEARM 10 +#if FF_API_UNUSED_MEMBERS +#define FF_IDCT_IPP 13 +#endif /* FF_API_UNUSED_MEMBERS */ +#define FF_IDCT_XVID 14 +#if FF_API_IDCT_XVIDMMX +#define FF_IDCT_XVIDMMX 14 +#endif /* FF_API_IDCT_XVIDMMX */ +#define FF_IDCT_SIMPLEARMV5TE 16 +#define FF_IDCT_SIMPLEARMV6 17 +#if FF_API_ARCH_SPARC +#define FF_IDCT_SIMPLEVIS 18 +#endif +#define FF_IDCT_FAAN 20 +#define FF_IDCT_SIMPLENEON 22 +#if FF_API_ARCH_ALPHA +#define FF_IDCT_SIMPLEALPHA 23 +#endif +#define FF_IDCT_SIMPLEAUTO 128 + + /** + * bits per sample/pixel from the demuxer (needed for huffyuv). + * - encoding: Set by libavcodec. + * - decoding: Set by user. + */ + int bits_per_coded_sample; + + /** + * Bits per sample/pixel of internal libavcodec pixel/sample format. + * - encoding: set by user. + * - decoding: set by libavcodec. + */ + int bits_per_raw_sample; + +#if FF_API_LOWRES + /** + * low resolution decoding, 1-> 1/2 size, 2->1/4 size + * - encoding: unused + * - decoding: Set by user. + */ + int lowres; +#endif + +#if FF_API_CODED_FRAME + /** + * the picture in the bitstream + * - encoding: Set by libavcodec. + * - decoding: unused + * + * @deprecated use the quality factor packet side data instead + */ + attribute_deprecated AVFrame *coded_frame; +#endif + + /** + * thread count + * is used to decide how many independent tasks should be passed to execute() + * - encoding: Set by user. + * - decoding: Set by user. 
+ */ + int thread_count; + + /** + * Which multithreading methods to use. + * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread, + * so clients which cannot provide future frames should not use it. + * + * - encoding: Set by user, otherwise the default is used. + * - decoding: Set by user, otherwise the default is used. + */ + int thread_type; +#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once +#define FF_THREAD_SLICE 2 ///< Decode more than one part of a single frame at once + + /** + * Which multithreading methods are in use by the codec. + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int active_thread_type; + + /** + * Set by the client if its custom get_buffer() callback can be called + * synchronously from another thread, which allows faster multithreaded decoding. + * draw_horiz_band() will be called from other threads regardless of this setting. + * Ignored if the default get_buffer() is used. + * - encoding: Set by user. + * - decoding: Set by user. + */ + int thread_safe_callbacks; + + /** + * The codec may call this to execute several independent things. + * It will return only after finishing all tasks. + * The user may replace this with some multithreaded implementation, + * the default implementation will execute the parts serially. + * @param count the number of things to execute + * - encoding: Set by libavcodec, user can override. + * - decoding: Set by libavcodec, user can override. + */ + int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size); + + /** + * The codec may call this to execute several independent things. + * It will return only after finishing all tasks. + * The user may replace this with some multithreaded implementation, + * the default implementation will execute the parts serially. + * Also see avcodec_thread_init and e.g. the --enable-pthread configure option. 
+ * @param c context passed also to func + * @param count the number of things to execute + * @param arg2 argument passed unchanged to func + * @param ret return values of executed functions, must have space for "count" values. May be NULL. + * @param func function that will be called count times, with jobnr from 0 to count-1. + * threadnr will be in the range 0 to c->thread_count-1 < MAX_THREADS and so that no + * two instances of func executing at the same time will have the same threadnr. + * @return always 0 currently, but code should handle a future improvement where when any call to func + * returns < 0 no further calls to func may be done and < 0 is returned. + * - encoding: Set by libavcodec, user can override. + * - decoding: Set by libavcodec, user can override. + */ + int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count); + + /** + * noise vs. sse weight for the nsse comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int nsse_weight; + + /** + * profile + * - encoding: Set by user. + * - decoding: Set by libavcodec. 
+ */ + int profile; +#define FF_PROFILE_UNKNOWN -99 +#define FF_PROFILE_RESERVED -100 + +#define FF_PROFILE_AAC_MAIN 0 +#define FF_PROFILE_AAC_LOW 1 +#define FF_PROFILE_AAC_SSR 2 +#define FF_PROFILE_AAC_LTP 3 +#define FF_PROFILE_AAC_HE 4 +#define FF_PROFILE_AAC_HE_V2 28 +#define FF_PROFILE_AAC_LD 22 +#define FF_PROFILE_AAC_ELD 38 +#define FF_PROFILE_MPEG2_AAC_LOW 128 +#define FF_PROFILE_MPEG2_AAC_HE 131 + +#define FF_PROFILE_DNXHD 0 +#define FF_PROFILE_DNXHR_LB 1 +#define FF_PROFILE_DNXHR_SQ 2 +#define FF_PROFILE_DNXHR_HQ 3 +#define FF_PROFILE_DNXHR_HQX 4 +#define FF_PROFILE_DNXHR_444 5 + +#define FF_PROFILE_DTS 20 +#define FF_PROFILE_DTS_ES 30 +#define FF_PROFILE_DTS_96_24 40 +#define FF_PROFILE_DTS_HD_HRA 50 +#define FF_PROFILE_DTS_HD_MA 60 +#define FF_PROFILE_DTS_EXPRESS 70 + +#define FF_PROFILE_MPEG2_422 0 +#define FF_PROFILE_MPEG2_HIGH 1 +#define FF_PROFILE_MPEG2_SS 2 +#define FF_PROFILE_MPEG2_SNR_SCALABLE 3 +#define FF_PROFILE_MPEG2_MAIN 4 +#define FF_PROFILE_MPEG2_SIMPLE 5 + +#define FF_PROFILE_H264_CONSTRAINED (1<<9) // 8+1; constraint_set1_flag +#define FF_PROFILE_H264_INTRA (1<<11) // 8+3; constraint_set3_flag + +#define FF_PROFILE_H264_BASELINE 66 +#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED) +#define FF_PROFILE_H264_MAIN 77 +#define FF_PROFILE_H264_EXTENDED 88 +#define FF_PROFILE_H264_HIGH 100 +#define FF_PROFILE_H264_HIGH_10 110 +#define FF_PROFILE_H264_HIGH_10_INTRA (110|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_MULTIVIEW_HIGH 118 +#define FF_PROFILE_H264_HIGH_422 122 +#define FF_PROFILE_H264_HIGH_422_INTRA (122|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_STEREO_HIGH 128 +#define FF_PROFILE_H264_HIGH_444 144 +#define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244 +#define FF_PROFILE_H264_HIGH_444_INTRA (244|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_CAVLC_444 44 + +#define FF_PROFILE_VC1_SIMPLE 0 +#define FF_PROFILE_VC1_MAIN 1 +#define FF_PROFILE_VC1_COMPLEX 2 +#define FF_PROFILE_VC1_ADVANCED 3 + +#define 
FF_PROFILE_MPEG4_SIMPLE 0 +#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1 +#define FF_PROFILE_MPEG4_CORE 2 +#define FF_PROFILE_MPEG4_MAIN 3 +#define FF_PROFILE_MPEG4_N_BIT 4 +#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5 +#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6 +#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7 +#define FF_PROFILE_MPEG4_HYBRID 8 +#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9 +#define FF_PROFILE_MPEG4_CORE_SCALABLE 10 +#define FF_PROFILE_MPEG4_ADVANCED_CODING 11 +#define FF_PROFILE_MPEG4_ADVANCED_CORE 12 +#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13 +#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14 +#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15 + +#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0 1 +#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1 2 +#define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION 32768 +#define FF_PROFILE_JPEG2000_DCINEMA_2K 3 +#define FF_PROFILE_JPEG2000_DCINEMA_4K 4 + +#define FF_PROFILE_VP9_0 0 +#define FF_PROFILE_VP9_1 1 +#define FF_PROFILE_VP9_2 2 +#define FF_PROFILE_VP9_3 3 + +#define FF_PROFILE_HEVC_MAIN 1 +#define FF_PROFILE_HEVC_MAIN_10 2 +#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3 +#define FF_PROFILE_HEVC_REXT 4 + + /** + * level + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int level; +#define FF_LEVEL_UNKNOWN -99 + + /** + * Skip loop filtering for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_loop_filter; + + /** + * Skip IDCT/dequantization for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_idct; + + /** + * Skip decoding for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_frame; + + /** + * Header containing style information for text subtitles. + * For SUBTITLE_ASS subtitle type, it should contain the whole ASS + * [Script Info] and [V4+ Styles] section, plus the [Events] line and + * the Format line following. 
It shouldn't include any Dialogue line. + * - encoding: Set/allocated/freed by user (before avcodec_open2()) + * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2()) + */ + uint8_t *subtitle_header; + int subtitle_header_size; + +#if FF_API_ERROR_RATE + /** + * @deprecated use the 'error_rate' private AVOption of the mpegvideo + * encoders + */ + attribute_deprecated + int error_rate; +#endif + +#if FF_API_VBV_DELAY + /** + * VBV delay coded in the last frame (in periods of a 27 MHz clock). + * Used for compliant TS muxing. + * - encoding: Set by libavcodec. + * - decoding: unused. + * @deprecated this value is now exported as a part of + * AV_PKT_DATA_CPB_PROPERTIES packet side data + */ + attribute_deprecated + uint64_t vbv_delay; +#endif + +#if FF_API_SIDEDATA_ONLY_PKT + /** + * Encoding only and set by default. Allow encoders to output packets + * that do not contain any encoded data, only side data. + * + * Some encoders need to output such packets, e.g. to update some stream + * parameters at the end of encoding. + * + * @deprecated this field disables the default behaviour and + * it is kept only for compatibility. + */ + attribute_deprecated + int side_data_only_packets; +#endif + + /** + * Audio only. The number of "priming" samples (padding) inserted by the + * encoder at the beginning of the audio. I.e. this number of leading + * decoded samples must be discarded by the caller to get the original audio + * without leading padding. + * + * - decoding: unused + * - encoding: Set by libavcodec. The timestamps on the output packets are + * adjusted by the encoder so that they always refer to the + * first sample of the data actually contained in the packet, + * including any added padding. E.g. if the timebase is + * 1/samplerate and the timestamp of the first input sample is + * 0, the timestamp of the first output packet will be + * -initial_padding. 
+ */ + int initial_padding; + + /** + * - decoding: For codecs that store a framerate value in the compressed + * bitstream, the decoder may export it here. { 0, 1} when + * unknown. + * - encoding: May be used to signal the framerate of CFR content to an + * encoder. + */ + AVRational framerate; + + /** + * Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx. + * - encoding: unused. + * - decoding: Set by libavcodec before calling get_format() + */ + enum AVPixelFormat sw_pix_fmt; + + /** + * Timebase in which pkt_dts/pts and AVPacket.dts/pts are. + * - encoding unused. + * - decoding set by user. + */ + AVRational pkt_timebase; + + /** + * AVCodecDescriptor + * - encoding: unused. + * - decoding: set by libavcodec. + */ + const AVCodecDescriptor *codec_descriptor; + +#if !FF_API_LOWRES + /** + * low resolution decoding, 1-> 1/2 size, 2->1/4 size + * - encoding: unused + * - decoding: Set by user. + */ + int lowres; +#endif + + /** + * Current statistics for PTS correction. + * - decoding: maintained and used by libavcodec, not intended to be used by user apps + * - encoding: unused + */ + int64_t pts_correction_num_faulty_pts; /// Number of incorrect PTS values so far + int64_t pts_correction_num_faulty_dts; /// Number of incorrect DTS values so far + int64_t pts_correction_last_pts; /// PTS of the last frame + int64_t pts_correction_last_dts; /// DTS of the last frame + + /** + * Character encoding of the input subtitles file. + * - decoding: set by user + * - encoding: unused + */ + char *sub_charenc; + + /** + * Subtitles character encoding mode. Formats or codecs might be adjusting + * this setting (if they are doing the conversion themselves for instance). 
+ * - decoding: set by libavcodec + * - encoding: unused + */ + int sub_charenc_mode; +#define FF_SUB_CHARENC_MODE_DO_NOTHING -1 ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance) +#define FF_SUB_CHARENC_MODE_AUTOMATIC 0 ///< libavcodec will select the mode itself +#define FF_SUB_CHARENC_MODE_PRE_DECODER 1 ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv + + /** + * Skip processing alpha if supported by codec. + * Note that if the format uses pre-multiplied alpha (common with VP6, + * and recommended due to better video quality/compression) + * the image will look as if alpha-blended onto a black background. + * However for formats that do not use pre-multiplied alpha + * there might be serious artefacts (though e.g. libswscale currently + * assumes pre-multiplied alpha anyway). + * + * - decoding: set by user + * - encoding: unused + */ + int skip_alpha; + + /** + * Number of samples to skip after a discontinuity + * - decoding: unused + * - encoding: set by libavcodec + */ + int seek_preroll; + +#if !FF_API_DEBUG_MV + /** + * debug motion vectors + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug_mv; +#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames +#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames +#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames +#endif + + /** + * custom intra quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: unused. + */ + uint16_t *chroma_intra_matrix; + + /** + * dump format separator. + * can be ", " or "\n " or anything else + * - encoding: Set by user. + * - decoding: Set by user. + */ + uint8_t *dump_separator; + + /** + * ',' separated list of allowed decoders. 
+ * If NULL then all are allowed + * - encoding: unused + * - decoding: set by user + */ + char *codec_whitelist; + + /* + * Properties of the stream that gets decoded + * - encoding: unused + * - decoding: set by libavcodec + */ + unsigned properties; +#define FF_CODEC_PROPERTY_LOSSLESS 0x00000001 +#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS 0x00000002 + + /** + * Additional data associated with the entire coded stream. + * + * - decoding: unused + * - encoding: may be set by libavcodec after avcodec_open2(). + */ + AVPacketSideData *coded_side_data; + int nb_coded_side_data; + + /** + * A reference to the AVHWFramesContext describing the input (for encoding) + * or output (decoding) frames. The reference is set by the caller and + * afterwards owned (and freed) by libavcodec - it should never be read by + * the caller after being set. + * + * - decoding: This field should be set by the caller from the get_format() + * callback. The previous reference (if any) will always be + * unreffed by libavcodec before the get_format() call. + * + * If the default get_buffer2() is used with a hwaccel pixel + * format, then this AVHWFramesContext will be used for + * allocating the frame buffers. + * + * - encoding: For hardware encoders configured to use a hwaccel pixel + * format, this field should be set by the caller to a reference + * to the AVHWFramesContext describing input frames. + * AVHWFramesContext.format must be equal to + * AVCodecContext.pix_fmt. + * + * This field should be set before avcodec_open2() is called. + */ + AVBufferRef *hw_frames_ctx; + + /** + * Control the form of AVSubtitle.rects[N]->ass + * - decoding: set by user + * - encoding: unused + */ + int sub_text_format; +#define FF_SUB_TEXT_FMT_ASS 0 +#if FF_API_ASS_TIMING +#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS 1 +#endif + + /** + * Audio only. The amount of padding (in samples) appended by the encoder to + * the end of the audio. I.e. 
this number of decoded samples must be + * discarded by the caller from the end of the stream to get the original + * audio without any trailing padding. + * + * - decoding: unused + * - encoding: unused + */ + int trailing_padding; + + /** + * The number of pixels per image to maximally accept. + * + * - decoding: set by user + * - encoding: set by user + */ + int64_t max_pixels; + + /** + * A reference to the AVHWDeviceContext describing the device which will + * be used by a hardware encoder/decoder. The reference is set by the + * caller and afterwards owned (and freed) by libavcodec. + * + * This should be used if either the codec device does not require + * hardware frames or any that are used are to be allocated internally by + * libavcodec. If the user wishes to supply any of the frames used as + * encoder input or decoder output then hw_frames_ctx should be used + * instead. When hw_frames_ctx is set in get_format() for a decoder, this + * field will be ignored while decoding the associated stream segment, but + * may again be used on a following one after another get_format() call. + * + * For both encoders and decoders this field should be set before + * avcodec_open2() is called and must not be written to thereafter. + * + * Note that some decoders may require this field to be set initially in + * order to support hw_frames_ctx at all - in that case, all frames + * contexts used must be created on the same device. + */ + AVBufferRef *hw_device_ctx; + + /** + * Bit set of AV_HWACCEL_FLAG_* flags, which affect hardware accelerated + * decoding (if active). 
+ * - encoding: unused + * - decoding: Set by user (either before avcodec_open2(), or in the + * AVCodecContext.get_format callback) + */ + int hwaccel_flags; +} AVCodecContext; + +AVRational av_codec_get_pkt_timebase (const AVCodecContext *avctx); +void av_codec_set_pkt_timebase (AVCodecContext *avctx, AVRational val); + +const AVCodecDescriptor *av_codec_get_codec_descriptor(const AVCodecContext *avctx); +void av_codec_set_codec_descriptor(AVCodecContext *avctx, const AVCodecDescriptor *desc); + +unsigned av_codec_get_codec_properties(const AVCodecContext *avctx); + +int av_codec_get_lowres(const AVCodecContext *avctx); +void av_codec_set_lowres(AVCodecContext *avctx, int val); + +int av_codec_get_seek_preroll(const AVCodecContext *avctx); +void av_codec_set_seek_preroll(AVCodecContext *avctx, int val); + +uint16_t *av_codec_get_chroma_intra_matrix(const AVCodecContext *avctx); +void av_codec_set_chroma_intra_matrix(AVCodecContext *avctx, uint16_t *val); + +/** + * AVProfile. + */ +typedef struct AVProfile { + int profile; + const char *name; ///< short name for the profile +} AVProfile; + +typedef struct AVCodecDefault AVCodecDefault; + +struct AVSubtitle; + +/** + * AVCodec. + */ +typedef struct AVCodec { + /** + * Name of the codec implementation. + * The name is globally unique among encoders and among decoders (but an + * encoder and a decoder can share the same name). + * This is the primary way to find a codec from the user perspective. + */ + const char *name; + /** + * Descriptive name for the codec, meant to be more human readable than name. + * You should use the NULL_IF_CONFIG_SMALL() macro to define it. + */ + const char *long_name; + enum AVMediaType type; + enum AVCodecID id; + /** + * Codec capabilities. 
+ * see AV_CODEC_CAP_* + */ + int capabilities; + const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0} + const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1 + const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0 + const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1 + const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0 + uint8_t max_lowres; ///< maximum value for lowres supported by the decoder + const AVClass *priv_class; ///< AVClass for the private context + const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN} + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavcodec and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + int priv_data_size; + struct AVCodec *next; + /** + * @name Frame-level threading support functions + * @{ + */ + /** + * If defined, called on thread contexts when they are created. + * If the codec allocates writable tables in init(), re-allocate them here. + * priv_data will be set to a copy of the original. + */ + int (*init_thread_copy)(AVCodecContext *); + /** + * Copy necessary context variables from a previous thread context to the current one. + * If not defined, the next thread will start automatically; otherwise, the codec + * must call ff_thread_finish_setup(). + * + * dst and src will (rarely) point to the same context, in which case memcpy should be skipped. 
+ */ + int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src); + /** @} */ + + /** + * Private codec-specific defaults. + */ + const AVCodecDefault *defaults; + + /** + * Initialize codec static data, called from avcodec_register(). + */ + void (*init_static_data)(struct AVCodec *codec); + + int (*init)(AVCodecContext *); + int (*encode_sub)(AVCodecContext *, uint8_t *buf, int buf_size, + const struct AVSubtitle *sub); + /** + * Encode data to an AVPacket. + * + * @param avctx codec context + * @param avpkt output AVPacket (may contain a user-provided buffer) + * @param[in] frame AVFrame containing the raw data to be encoded + * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a + * non-empty packet was returned in avpkt. + * @return 0 on success, negative error code on failure + */ + int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, + int *got_packet_ptr); + int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt); + int (*close)(AVCodecContext *); + /** + * Decode/encode API with decoupled packet/frame dataflow. The API is the + * same as the avcodec_ prefixed APIs (avcodec_send_frame() etc.), except + * that: + * - never called if the codec is closed or the wrong type, + * - AVPacket parameter change side data is applied right before calling + * AVCodec->send_packet, + * - if AV_CODEC_CAP_DELAY is not set, drain packets or frames are never sent, + * - only one drain packet is ever passed down (until the next flush()), + * - a drain AVPacket is always NULL (no need to check for avpkt->size). + */ + int (*send_frame)(AVCodecContext *avctx, const AVFrame *frame); + int (*send_packet)(AVCodecContext *avctx, const AVPacket *avpkt); + int (*receive_frame)(AVCodecContext *avctx, AVFrame *frame); + int (*receive_packet)(AVCodecContext *avctx, AVPacket *avpkt); + /** + * Flush buffers. 
+ * Will be called when seeking + */ + void (*flush)(AVCodecContext *); + /** + * Internal codec capabilities. + * See FF_CODEC_CAP_* in internal.h + */ + int caps_internal; +} AVCodec; + +int av_codec_get_max_lowres(const AVCodec *codec); + +struct MpegEncContext; + +/** + * @defgroup lavc_hwaccel AVHWAccel + * @{ + */ +typedef struct AVHWAccel { + /** + * Name of the hardware accelerated codec. + * The name is globally unique among encoders and among decoders (but an + * encoder and a decoder can share the same name). + */ + const char *name; + + /** + * Type of codec implemented by the hardware accelerator. + * + * See AVMEDIA_TYPE_xxx + */ + enum AVMediaType type; + + /** + * Codec implemented by the hardware accelerator. + * + * See AV_CODEC_ID_xxx + */ + enum AVCodecID id; + + /** + * Supported pixel format. + * + * Only hardware accelerated formats are supported here. + */ + enum AVPixelFormat pix_fmt; + + /** + * Hardware accelerated codec capabilities. + * see HWACCEL_CODEC_CAP_* + */ + int capabilities; + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavcodec and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + struct AVHWAccel *next; + + /** + * Allocate a custom buffer + */ + int (*alloc_frame)(AVCodecContext *avctx, AVFrame *frame); + + /** + * Called at the beginning of each frame or field picture. + * + * Meaningful frame information (codec specific) is guaranteed to + * be parsed at this point. This function is mandatory. + * + * Note that buf can be NULL along with buf_size set to 0. + * Otherwise, this means the whole frame is available at this point. 
+ * + * @param avctx the codec context + * @param buf the frame data buffer base + * @param buf_size the size of the frame in bytes + * @return zero if successful, a negative value otherwise + */ + int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); + + /** + * Callback for each slice. + * + * Meaningful slice information (codec specific) is guaranteed to + * be parsed at this point. This function is mandatory. + * The only exception is XvMC, that works on MB level. + * + * @param avctx the codec context + * @param buf the slice data buffer base + * @param buf_size the size of the slice in bytes + * @return zero if successful, a negative value otherwise + */ + int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); + + /** + * Called at the end of each frame or field picture. + * + * The whole picture is parsed at this point and can now be sent + * to the hardware accelerator. This function is mandatory. + * + * @param avctx the codec context + * @return zero if successful, a negative value otherwise + */ + int (*end_frame)(AVCodecContext *avctx); + + /** + * Size of per-frame hardware accelerator private data. + * + * Private data is allocated with av_mallocz() before + * AVCodecContext.get_buffer() and deallocated after + * AVCodecContext.release_buffer(). + */ + int frame_priv_data_size; + + /** + * Called for every Macroblock in a slice. + * + * XvMC uses it to replace the ff_mpv_decode_mb(). + * Instead of decoding to raw picture, MB parameters are + * stored in an array provided by the video driver. + * + * @param s the mpeg context + */ + void (*decode_mb)(struct MpegEncContext *s); + + /** + * Initialize the hwaccel private data. + * + * This will be called from ff_get_format(), after hwaccel and + * hwaccel_context are set and the hwaccel private data in AVCodecInternal + * is allocated. + */ + int (*init)(AVCodecContext *avctx); + + /** + * Uninitialize the hwaccel private data. 
+ * + * This will be called from get_format() or avcodec_close(), after hwaccel + * and hwaccel_context are already uninitialized. + */ + int (*uninit)(AVCodecContext *avctx); + + /** + * Size of the private data to allocate in + * AVCodecInternal.hwaccel_priv_data. + */ + int priv_data_size; + + /** + * Internal hwaccel capabilities. + */ + int caps_internal; +} AVHWAccel; + +/** + * Hardware acceleration should be used for decoding even if the codec level + * used is unknown or higher than the maximum supported level reported by the + * hardware driver. + * + * It's generally a good idea to pass this flag unless you have a specific + * reason not to, as hardware tends to under-report supported levels. + */ +#define AV_HWACCEL_FLAG_IGNORE_LEVEL (1 << 0) + +/** + * Hardware acceleration can output YUV pixel formats with a different chroma + * sampling than 4:2:0 and/or other than 8 bits per component. + */ +#define AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH (1 << 1) + +/** + * @} + */ + +#if FF_API_AVPICTURE +/** + * @defgroup lavc_picture AVPicture + * + * Functions for working with AVPicture + * @{ + */ + +/** + * Picture data structure. + * + * Up to four components can be stored into it, the last component is + * alpha. + * @deprecated use AVFrame or imgutils functions instead + */ +typedef struct AVPicture { + attribute_deprecated + uint8_t *data[AV_NUM_DATA_POINTERS]; ///< pointers to the image data planes + attribute_deprecated + int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line +} AVPicture; + +/** + * @} + */ +#endif + +enum AVSubtitleType { + SUBTITLE_NONE, + + SUBTITLE_BITMAP, ///< A bitmap, pict will be set + + /** + * Plain text, the text field must be set by the decoder and is + * authoritative. ass and pict fields may contain approximations. + */ + SUBTITLE_TEXT, + + /** + * Formatted text, the ass field must be set by the decoder and is + * authoritative. pict and text fields may contain approximations. 
+ */ + SUBTITLE_ASS, +}; + +#define AV_SUBTITLE_FLAG_FORCED 0x00000001 + +typedef struct AVSubtitleRect { + int x; ///< top left corner of pict, undefined when pict is not set + int y; ///< top left corner of pict, undefined when pict is not set + int w; ///< width of pict, undefined when pict is not set + int h; ///< height of pict, undefined when pict is not set + int nb_colors; ///< number of colors in pict, undefined when pict is not set + +#if FF_API_AVPICTURE + /** + * @deprecated unused + */ + attribute_deprecated + AVPicture pict; +#endif + /** + * data+linesize for the bitmap of this subtitle. + * Can be set for text/ass as well once they are rendered. + */ + uint8_t *data[4]; + int linesize[4]; + + enum AVSubtitleType type; + + char *text; ///< 0 terminated plain UTF-8 text + + /** + * 0 terminated ASS/SSA compatible event line. + * The presentation of this is unaffected by the other values in this + * struct. + */ + char *ass; + + int flags; +} AVSubtitleRect; + +typedef struct AVSubtitle { + uint16_t format; /* 0 = graphics */ + uint32_t start_display_time; /* relative to packet pts, in ms */ + uint32_t end_display_time; /* relative to packet pts, in ms */ + unsigned num_rects; + AVSubtitleRect **rects; + int64_t pts; ///< Same as packet pts, in AV_TIME_BASE +} AVSubtitle; + +/** + * This struct describes the properties of an encoded stream. + * + * sizeof(AVCodecParameters) is not a part of the public ABI, this struct must + * be allocated with avcodec_parameters_alloc() and freed with + * avcodec_parameters_free(). + */ +typedef struct AVCodecParameters { + /** + * General type of the encoded data. + */ + enum AVMediaType codec_type; + /** + * Specific type of the encoded data (the codec used). + */ + enum AVCodecID codec_id; + /** + * Additional information about the codec (corresponds to the AVI FOURCC). + */ + uint32_t codec_tag; + + /** + * Extra binary data needed for initializing the decoder, codec-dependent. 
+ * + * Must be allocated with av_malloc() and will be freed by + * avcodec_parameters_free(). The allocated size of extradata must be at + * least extradata_size + AV_INPUT_BUFFER_PADDING_SIZE, with the padding + * bytes zeroed. + */ + uint8_t *extradata; + /** + * Size of the extradata content in bytes. + */ + int extradata_size; + + /** + * - video: the pixel format, the value corresponds to enum AVPixelFormat. + * - audio: the sample format, the value corresponds to enum AVSampleFormat. + */ + int format; + + /** + * The average bitrate of the encoded data (in bits per second). + */ + int64_t bit_rate; + + /** + * The number of bits per sample in the codedwords. + * + * This is basically the bitrate per sample. It is mandatory for a bunch of + * formats to actually decode them. It's the number of bits for one sample in + * the actual coded bitstream. + * + * This could be for example 4 for ADPCM + * For PCM formats this matches bits_per_raw_sample + * Can be 0 + */ + int bits_per_coded_sample; + + /** + * This is the number of valid bits in each output sample. If the + * sample format has more bits, the least significant bits are additional + * padding bits, which are always 0. Use right shifts to reduce the sample + * to its actual size. For example, audio formats with 24 bit samples will + * have bits_per_raw_sample set to 24, and format set to AV_SAMPLE_FMT_S32. + * To get the original sample use "(int32_t)sample >> 8"." + * + * For ADPCM this might be 12 or 16 or similar + * Can be 0 + */ + int bits_per_raw_sample; + + /** + * Codec-specific bitstream restrictions that the stream conforms to. + */ + int profile; + int level; + + /** + * Video only. The dimensions of the video frame in pixels. + */ + int width; + int height; + + /** + * Video only. The aspect ratio (width / height) which a single pixel + * should have when displayed. 
+ * + * When the aspect ratio is unknown / undefined, the numerator should be + * set to 0 (the denominator may have any value). + */ + AVRational sample_aspect_ratio; + + /** + * Video only. The order of the fields in interlaced video. + */ + enum AVFieldOrder field_order; + + /** + * Video only. Additional colorspace characteristics. + */ + enum AVColorRange color_range; + enum AVColorPrimaries color_primaries; + enum AVColorTransferCharacteristic color_trc; + enum AVColorSpace color_space; + enum AVChromaLocation chroma_location; + + /** + * Video only. Number of delayed frames. + */ + int video_delay; + + /** + * Audio only. The channel layout bitmask. May be 0 if the channel layout is + * unknown or unspecified, otherwise the number of bits set must be equal to + * the channels field. + */ + uint64_t channel_layout; + /** + * Audio only. The number of audio channels. + */ + int channels; + /** + * Audio only. The number of audio samples per second. + */ + int sample_rate; + /** + * Audio only. The number of bytes per coded audio frame, required by some + * formats. + * + * Corresponds to nBlockAlign in WAVEFORMATEX. + */ + int block_align; + /** + * Audio only. Audio frame size, if known. Required by some formats to be static. + */ + int frame_size; + + /** + * Audio only. The amount of padding (in samples) inserted by the encoder at + * the beginning of the audio. I.e. this number of leading decoded samples + * must be discarded by the caller to get the original audio without leading + * padding. + */ + int initial_padding; + /** + * Audio only. The amount of padding (in samples) appended by the encoder to + * the end of the audio. I.e. this number of decoded samples must be + * discarded by the caller from the end of the stream to get the original + * audio without any trailing padding. + */ + int trailing_padding; + /** + * Audio only. Number of samples to skip after a discontinuity. 
+ */ + int seek_preroll; +} AVCodecParameters; + +/** + * If c is NULL, returns the first registered codec, + * if c is non-NULL, returns the next registered codec after c, + * or NULL if c is the last one. + */ +AVCodec *av_codec_next(const AVCodec *c); + +/** + * Return the LIBAVCODEC_VERSION_INT constant. + */ +unsigned avcodec_version(void); + +/** + * Return the libavcodec build-time configuration. + */ +const char *avcodec_configuration(void); + +/** + * Return the libavcodec license. + */ +const char *avcodec_license(void); + +/** + * Register the codec codec and initialize libavcodec. + * + * @warning either this function or avcodec_register_all() must be called + * before any other libavcodec functions. + * + * @see avcodec_register_all() + */ +void avcodec_register(AVCodec *codec); + +/** + * Register all the codecs, parsers and bitstream filters which were enabled at + * configuration time. If you do not call this function you can select exactly + * which formats you want to support, by using the individual registration + * functions. + * + * @see avcodec_register + * @see av_register_codec_parser + * @see av_register_bitstream_filter + */ +void avcodec_register_all(void); + +/** + * Allocate an AVCodecContext and set its fields to default values. The + * resulting struct should be freed with avcodec_free_context(). + * + * @param codec if non-NULL, allocate private data and initialize defaults + * for the given codec. It is illegal to then call avcodec_open2() + * with a different codec. + * If NULL, then the codec-specific defaults won't be initialized, + * which may result in suboptimal default settings (this is + * important mainly for encoders, e.g. libx264). + * + * @return An AVCodecContext filled with default values or NULL on failure. + */ +AVCodecContext *avcodec_alloc_context3(const AVCodec *codec); + +/** + * Free the codec context and everything associated with it and write NULL to + * the provided pointer. 
+ */ +void avcodec_free_context(AVCodecContext **avctx); + +#if FF_API_GET_CONTEXT_DEFAULTS +/** + * @deprecated This function should not be used, as closing and opening a codec + * context multiple time is not supported. A new codec context should be + * allocated for each new use. + */ +int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec); +#endif + +/** + * Get the AVClass for AVCodecContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avcodec_get_class(void); + +#if FF_API_COPY_CONTEXT +/** + * Get the AVClass for AVFrame. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avcodec_get_frame_class(void); + +/** + * Get the AVClass for AVSubtitleRect. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avcodec_get_subtitle_rect_class(void); + +/** + * Copy the settings of the source AVCodecContext into the destination + * AVCodecContext. The resulting destination codec context will be + * unopened, i.e. you are required to call avcodec_open2() before you + * can use this AVCodecContext to decode/encode video/audio data. + * + * @param dest target codec context, should be initialized with + * avcodec_alloc_context3(NULL), but otherwise uninitialized + * @param src source codec context + * @return AVERROR() on error (e.g. memory allocation error), 0 on success + * + * @deprecated The semantics of this function are ill-defined and it should not + * be used. If you need to transfer the stream parameters from one codec context + * to another, use an intermediate AVCodecParameters instance and the + * avcodec_parameters_from_context() / avcodec_parameters_to_context() + * functions. 
+ */ +attribute_deprecated +int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src); +#endif + +/** + * Allocate a new AVCodecParameters and set its fields to default values + * (unknown/invalid/0). The returned struct must be freed with + * avcodec_parameters_free(). + */ +AVCodecParameters *avcodec_parameters_alloc(void); + +/** + * Free an AVCodecParameters instance and everything associated with it and + * write NULL to the supplied pointer. + */ +void avcodec_parameters_free(AVCodecParameters **par); + +/** + * Copy the contents of src to dst. Any allocated fields in dst are freed and + * replaced with newly allocated duplicates of the corresponding fields in src. + * + * @return >= 0 on success, a negative AVERROR code on failure. + */ +int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src); + +/** + * Fill the parameters struct based on the values from the supplied codec + * context. Any allocated fields in par are freed and replaced with duplicates + * of the corresponding fields in codec. + * + * @return >= 0 on success, a negative AVERROR code on failure + */ +int avcodec_parameters_from_context(AVCodecParameters *par, + const AVCodecContext *codec); + +/** + * Fill the codec context based on the values from the supplied codec + * parameters. Any allocated fields in codec that have a corresponding field in + * par are freed and replaced with duplicates of the corresponding field in par. + * Fields in codec that do not have a counterpart in par are not touched. + * + * @return >= 0 on success, a negative AVERROR code on failure. + */ +int avcodec_parameters_to_context(AVCodecContext *codec, + const AVCodecParameters *par); + +/** + * Initialize the AVCodecContext to use the given AVCodec. Prior to using this + * function the context has to be allocated with avcodec_alloc_context3(). 
+ * + * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(), + * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for + * retrieving a codec. + * + * @warning This function is not thread safe! + * + * @note Always call this function before using decoding routines (such as + * @ref avcodec_receive_frame()). + * + * @code + * avcodec_register_all(); + * av_dict_set(&opts, "b", "2.5M", 0); + * codec = avcodec_find_decoder(AV_CODEC_ID_H264); + * if (!codec) + * exit(1); + * + * context = avcodec_alloc_context3(codec); + * + * if (avcodec_open2(context, codec, opts) < 0) + * exit(1); + * @endcode + * + * @param avctx The context to initialize. + * @param codec The codec to open this context for. If a non-NULL codec has been + * previously passed to avcodec_alloc_context3() or + * for this context, then this parameter MUST be either NULL or + * equal to the previously passed codec. + * @param options A dictionary filled with AVCodecContext and codec-private options. + * On return this object will be filled with options that were not found. + * + * @return zero on success, a negative value on error + * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(), + * av_dict_set(), av_opt_find(). + */ +int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options); + +/** + * Close a given AVCodecContext and free all the data associated with it + * (but not the AVCodecContext itself). + * + * Calling this function on an AVCodecContext that hasn't been opened will free + * the codec-specific data allocated in avcodec_alloc_context3() with a non-NULL + * codec. Subsequent calls will do nothing. + * + * @note Do not use this function. Use avcodec_free_context() to destroy a + * codec context (either open or closed). Opening and closing a codec context + * multiple times is not supported anymore -- use multiple codec contexts + * instead. 
+ */ +int avcodec_close(AVCodecContext *avctx); + +/** + * Free all allocated data in the given subtitle struct. + * + * @param sub AVSubtitle to free. + */ +void avsubtitle_free(AVSubtitle *sub); + +/** + * @} + */ + +/** + * @addtogroup lavc_packet + * @{ + */ + +/** + * Allocate an AVPacket and set its fields to default values. The resulting + * struct must be freed using av_packet_free(). + * + * @return An AVPacket filled with default values or NULL on failure. + * + * @note this only allocates the AVPacket itself, not the data buffers. Those + * must be allocated through other means such as av_new_packet. + * + * @see av_new_packet + */ +AVPacket *av_packet_alloc(void); + +/** + * Create a new packet that references the same data as src. + * + * This is a shortcut for av_packet_alloc()+av_packet_ref(). + * + * @return newly created AVPacket on success, NULL on error. + * + * @see av_packet_alloc + * @see av_packet_ref + */ +AVPacket *av_packet_clone(const AVPacket *src); + +/** + * Free the packet, if the packet is reference counted, it will be + * unreferenced first. + * + * @param packet packet to be freed. The pointer will be set to NULL. + * @note passing NULL is a no-op. + */ +void av_packet_free(AVPacket **pkt); + +/** + * Initialize optional fields of a packet with default values. + * + * Note, this does not touch the data and size members, which have to be + * initialized separately. + * + * @param pkt packet + */ +void av_init_packet(AVPacket *pkt); + +/** + * Allocate the payload of a packet and initialize its fields with + * default values. 
+ * + * @param pkt packet + * @param size wanted payload size + * @return 0 if OK, AVERROR_xxx otherwise + */ +int av_new_packet(AVPacket *pkt, int size); + +/** + * Reduce packet size, correctly zeroing padding + * + * @param pkt packet + * @param size new size + */ +void av_shrink_packet(AVPacket *pkt, int size); + +/** + * Increase packet size, correctly zeroing padding + * + * @param pkt packet + * @param grow_by number of bytes by which to increase the size of the packet + */ +int av_grow_packet(AVPacket *pkt, int grow_by); + +/** + * Initialize a reference-counted packet from av_malloc()ed data. + * + * @param pkt packet to be initialized. This function will set the data, size, + * buf and destruct fields, all others are left untouched. + * @param data Data allocated by av_malloc() to be used as packet data. If this + * function returns successfully, the data is owned by the underlying AVBuffer. + * The caller may not access the data through other means. + * @param size size of data in bytes, without the padding. I.e. the full buffer + * size is assumed to be size + AV_INPUT_BUFFER_PADDING_SIZE. + * + * @return 0 on success, a negative AVERROR on error + */ +int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size); + +#if FF_API_AVPACKET_OLD_API +/** + * @warning This is a hack - the packet memory allocation stuff is broken. The + * packet is allocated if it was not really allocated. + * + * @deprecated Use av_packet_ref + */ +attribute_deprecated +int av_dup_packet(AVPacket *pkt); +/** + * Copy packet, including contents + * + * @return 0 on success, negative AVERROR on fail + */ +int av_copy_packet(AVPacket *dst, const AVPacket *src); + +/** + * Copy packet side data + * + * @return 0 on success, negative AVERROR on fail + */ +int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src); + +/** + * Free a packet. 
+ * + * @deprecated Use av_packet_unref + * + * @param pkt packet to free + */ +attribute_deprecated +void av_free_packet(AVPacket *pkt); +#endif +/** + * Allocate new information of a packet. + * + * @param pkt packet + * @param type side information type + * @param size side information size + * @return pointer to fresh allocated data or NULL otherwise + */ +uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int size); + +/** + * Wrap an existing array as a packet side data. + * + * @param pkt packet + * @param type side information type + * @param data the side data array. It must be allocated with the av_malloc() + * family of functions. The ownership of the data is transferred to + * pkt. + * @param size side information size + * @return a non-negative number on success, a negative AVERROR code on + * failure. On failure, the packet is unchanged and the data remains + * owned by the caller. + */ +int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + uint8_t *data, size_t size); + +/** + * Shrink the already allocated side data buffer + * + * @param pkt packet + * @param type side information type + * @param size new side information size + * @return 0 on success, < 0 on failure + */ +int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int size); + +/** + * Get side information from packet. 
+ * + * @param pkt packet + * @param type desired side information type + * @param size pointer for side information size to store (optional) + * @return pointer to data if present or NULL otherwise + */ +uint8_t* av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, + int *size); + +#if FF_API_MERGE_SD_API +attribute_deprecated +int av_packet_merge_side_data(AVPacket *pkt); + +attribute_deprecated +int av_packet_split_side_data(AVPacket *pkt); +#endif + +const char *av_packet_side_data_name(enum AVPacketSideDataType type); + +/** + * Pack a dictionary for use in side_data. + * + * @param dict The dictionary to pack. + * @param size pointer to store the size of the returned data + * @return pointer to data if successful, NULL otherwise + */ +uint8_t *av_packet_pack_dictionary(AVDictionary *dict, int *size); +/** + * Unpack a dictionary from side_data. + * + * @param data data from side_data + * @param size size of the data + * @param dict the metadata storage dictionary + * @return 0 on success, < 0 on failure + */ +int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict); + + +/** + * Convenience function to free all the side data stored. + * All the other fields stay untouched. + * + * @param pkt packet + */ +void av_packet_free_side_data(AVPacket *pkt); + +/** + * Setup a new reference to the data described by a given packet + * + * If src is reference-counted, setup dst as a new reference to the + * buffer in src. Otherwise allocate a new buffer in dst and copy the + * data from src into it. + * + * All the other fields are copied from src. + * + * @see av_packet_unref + * + * @param dst Destination packet + * @param src Source packet + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_packet_ref(AVPacket *dst, const AVPacket *src); + +/** + * Wipe the packet. + * + * Unreference the buffer referenced by the packet and reset the + * remaining packet fields to their default values. 
+ * + * @param pkt The packet to be unreferenced. + */ +void av_packet_unref(AVPacket *pkt); + +/** + * Move every field in src to dst and reset src. + * + * @see av_packet_unref + * + * @param src Source packet, will be reset + * @param dst Destination packet + */ +void av_packet_move_ref(AVPacket *dst, AVPacket *src); + +/** + * Copy only "properties" fields from src to dst. + * + * Properties for the purpose of this function are all the fields + * beside those related to the packet data (buf, data, size) + * + * @param dst Destination packet + * @param src Source packet + * + * @return 0 on success AVERROR on failure. + */ +int av_packet_copy_props(AVPacket *dst, const AVPacket *src); + +/** + * Convert valid timing fields (timestamps / durations) in a packet from one + * timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be + * ignored. + * + * @param pkt packet on which the conversion will be performed + * @param tb_src source timebase, in which the timing fields in pkt are + * expressed + * @param tb_dst destination timebase, to which the timing fields will be + * converted + */ +void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst); + +/** + * @} + */ + +/** + * @addtogroup lavc_decoding + * @{ + */ + +/** + * Find a registered decoder with a matching codec ID. + * + * @param id AVCodecID of the requested decoder + * @return A decoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_decoder(enum AVCodecID id); + +/** + * Find a registered decoder with the specified name. + * + * @param name name of the requested decoder + * @return A decoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_decoder_by_name(const char *name); + +/** + * The default callback for AVCodecContext.get_buffer2(). It is made public so + * it can be called by custom get_buffer2() implementations for decoders without + * AV_CODEC_CAP_DR1 set. 
+ */ +int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags); + +#if FF_API_EMU_EDGE +/** + * Return the amount of padding in pixels which the get_buffer callback must + * provide around the edge of the image for codecs which do not have the + * CODEC_FLAG_EMU_EDGE flag. + * + * @return Required padding in pixels. + * + * @deprecated CODEC_FLAG_EMU_EDGE is deprecated, so this function is no longer + * needed + */ +attribute_deprecated +unsigned avcodec_get_edge_width(void); +#endif + +/** + * Modify width and height values so that they will result in a memory + * buffer that is acceptable for the codec if you do not use any horizontal + * padding. + * + * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened. + */ +void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height); + +/** + * Modify width and height values so that they will result in a memory + * buffer that is acceptable for the codec if you also ensure that all + * line sizes are a multiple of the respective linesize_align[i]. + * + * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened. + */ +void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, + int linesize_align[AV_NUM_DATA_POINTERS]); + +/** + * Converts AVChromaLocation to swscale x/y chroma position. + * + * The positions represent the chroma (0,0) position in a coordinates system + * with luma (0,0) representing the origin and luma(1,1) representing 256,256 + * + * @param xpos horizontal chroma sample position + * @param ypos vertical chroma sample position + */ +int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos); + +/** + * Converts swscale x/y chroma position to AVChromaLocation. 
+ * + * The positions represent the chroma (0,0) position in a coordinates system + * with luma (0,0) representing the origin and luma(1,1) representing 256,256 + * + * @param xpos horizontal chroma sample position + * @param ypos vertical chroma sample position + */ +enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos); + +/** + * Decode the audio frame of size avpkt->size from avpkt->data into frame. + * + * Some decoders may support multiple frames in a single AVPacket. Such + * decoders would then just decode the first frame and the return value would be + * less than the packet size. In this case, avcodec_decode_audio4 has to be + * called again with an AVPacket containing the remaining data in order to + * decode the second frame, etc... Even if no frames are returned, the packet + * needs to be fed to the decoder with remaining data until it is completely + * consumed or an error occurs. + * + * Some decoders (those marked with AV_CODEC_CAP_DELAY) have a delay between input + * and output. This means that for some packets they will not immediately + * produce decoded output and need to be flushed at the end of decoding to get + * all the decoded data. Flushing is done by calling this function with packets + * with avpkt->data set to NULL and avpkt->size set to 0 until it stops + * returning samples. It is safe to flush even those decoders that are not + * marked with AV_CODEC_CAP_DELAY, then no samples will be returned. + * + * @warning The input buffer, avpkt->data must be AV_INPUT_BUFFER_PADDING_SIZE + * larger than the actual read bytes because some optimized bitstream + * readers read 32 or 64 bits at once and could read over the end. + * + * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() + * before packets may be fed to the decoder. + * + * @param avctx the codec context + * @param[out] frame The AVFrame in which to store decoded audio samples. 
+ * The decoder will allocate a buffer for the decoded frame by + * calling the AVCodecContext.get_buffer2() callback. + * When AVCodecContext.refcounted_frames is set to 1, the frame is + * reference counted and the returned reference belongs to the + * caller. The caller must release the frame using av_frame_unref() + * when the frame is no longer needed. The caller may safely write + * to the frame if av_frame_is_writable() returns 1. + * When AVCodecContext.refcounted_frames is set to 0, the returned + * reference belongs to the decoder and is valid only until the + * next call to this function or until closing or flushing the + * decoder. The caller may not write to it. + * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is + * non-zero. Note that this field being set to zero + * does not mean that an error has occurred. For + * decoders with AV_CODEC_CAP_DELAY set, no given decode + * call is guaranteed to produce a frame. + * @param[in] avpkt The input AVPacket containing the input buffer. + * At least avpkt->data and avpkt->size should be set. Some + * decoders might also require additional fields to be set. + * @return A negative error code is returned if an error occurred during + * decoding, otherwise the number of bytes consumed from the input + * AVPacket is returned. + * +* @deprecated Use avcodec_send_packet() and avcodec_receive_frame(). + */ +attribute_deprecated +int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, + int *got_frame_ptr, const AVPacket *avpkt); + +/** + * Decode the video frame of size avpkt->size from avpkt->data into picture. + * Some decoders may support multiple frames in a single AVPacket, such + * decoders would then just decode the first frame. + * + * @warning The input buffer must be AV_INPUT_BUFFER_PADDING_SIZE larger than + * the actual read bytes because some optimized bitstream readers read 32 or 64 + * bits at once and could read over the end. 
+ * + * @warning The end of the input buffer buf should be set to 0 to ensure that + * no overreading happens for damaged MPEG streams. + * + * @note Codecs which have the AV_CODEC_CAP_DELAY capability set have a delay + * between input and output, these need to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to return the remaining frames. + * + * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() + * before packets may be fed to the decoder. + * + * @param avctx the codec context + * @param[out] picture The AVFrame in which the decoded video frame will be stored. + * Use av_frame_alloc() to get an AVFrame. The codec will + * allocate memory for the actual bitmap by calling the + * AVCodecContext.get_buffer2() callback. + * When AVCodecContext.refcounted_frames is set to 1, the frame is + * reference counted and the returned reference belongs to the + * caller. The caller must release the frame using av_frame_unref() + * when the frame is no longer needed. The caller may safely write + * to the frame if av_frame_is_writable() returns 1. + * When AVCodecContext.refcounted_frames is set to 0, the returned + * reference belongs to the decoder and is valid only until the + * next call to this function or until closing or flushing the + * decoder. The caller may not write to it. + * + * @param[in] avpkt The input AVPacket containing the input buffer. + * You can create such packet with av_init_packet() and by then setting + * data and size, some decoders might in addition need other fields like + * flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least + * fields possible. + * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero. + * @return On error a negative value is returned, otherwise the number of bytes + * used or zero if no frame could be decompressed. + * + * @deprecated Use avcodec_send_packet() and avcodec_receive_frame(). 
+ */ +attribute_deprecated +int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, + int *got_picture_ptr, + const AVPacket *avpkt); + +/** + * Decode a subtitle message. + * Return a negative value on error, otherwise return the number of bytes used. + * If no subtitle could be decompressed, got_sub_ptr is zero. + * Otherwise, the subtitle is stored in *sub. + * Note that AV_CODEC_CAP_DR1 is not available for subtitle codecs. This is for + * simplicity, because the performance difference is expect to be negligible + * and reusing a get_buffer written for video codecs would probably perform badly + * due to a potentially very different allocation pattern. + * + * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input + * and output. This means that for some packets they will not immediately + * produce decoded output and need to be flushed at the end of decoding to get + * all the decoded data. Flushing is done by calling this function with packets + * with avpkt->data set to NULL and avpkt->size set to 0 until it stops + * returning subtitles. It is safe to flush even those decoders that are not + * marked with CODEC_CAP_DELAY, then no subtitles will be returned. + * + * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() + * before packets may be fed to the decoder. + * + * @param avctx the codec context + * @param[out] sub The Preallocated AVSubtitle in which the decoded subtitle will be stored, + * must be freed with avsubtitle_free if *got_sub_ptr is set. + * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero. + * @param[in] avpkt The input AVPacket containing the input buffer. + */ +int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, + int *got_sub_ptr, + AVPacket *avpkt); + +/** + * Supply raw packet data as input to a decoder. 
+ * + * Internally, this call will copy relevant AVCodecContext fields, which can + * influence decoding per-packet, and apply them when the packet is actually + * decoded. (For example AVCodecContext.skip_frame, which might direct the + * decoder to drop the frame contained by the packet sent with this function.) + * + * @warning The input buffer, avpkt->data must be AV_INPUT_BUFFER_PADDING_SIZE + * larger than the actual read bytes because some optimized bitstream + * readers read 32 or 64 bits at once and could read over the end. + * + * @warning Do not mix this API with the legacy API (like avcodec_decode_video2()) + * on the same AVCodecContext. It will return unexpected results now + * or in future libavcodec versions. + * + * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() + * before packets may be fed to the decoder. + * + * @param avctx codec context + * @param[in] avpkt The input AVPacket. Usually, this will be a single video + * frame, or several complete audio frames. + * Ownership of the packet remains with the caller, and the + * decoder will not write to the packet. The decoder may create + * a reference to the packet data (or copy it if the packet is + * not reference-counted). + * Unlike with older APIs, the packet is always fully consumed, + * and if it contains multiple frames (e.g. some audio codecs), + * will require you to call avcodec_receive_frame() multiple + * times afterwards before you can send a new packet. + * It can be NULL (or an AVPacket with data set to NULL and + * size set to 0); in this case, it is considered a flush + * packet, which signals the end of the stream. Sending the + * first flush packet will return success. Subsequent ones are + * unnecessary and will return AVERROR_EOF. If the decoder + * still has frames buffered, it will return them after sending + * a flush packet. 
+ * + * @return 0 on success, otherwise negative error code: + * AVERROR(EAGAIN): input is not accepted in the current state - user + * must read output with avcodec_receive_frame() (once + * all output is read, the packet should be resent, and + * the call will not fail with EAGAIN). + * AVERROR_EOF: the decoder has been flushed, and no new packets can + * be sent to it (also returned if more than 1 flush + * packet is sent) + * AVERROR(EINVAL): codec not opened, it is an encoder, or requires flush + * AVERROR(ENOMEM): failed to add packet to internal queue, or similar + * other errors: legitimate decoding errors + */ +int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt); + +/** + * Return decoded output data from a decoder. + * + * @param avctx codec context + * @param frame This will be set to a reference-counted video or audio + * frame (depending on the decoder type) allocated by the + * decoder. Note that the function will always call + * av_frame_unref(frame) before doing anything else. + * + * @return + * 0: success, a frame was returned + * AVERROR(EAGAIN): output is not available in this state - user must try + * to send new input + * AVERROR_EOF: the decoder has been fully flushed, and there will be + * no more output frames + * AVERROR(EINVAL): codec not opened, or it is an encoder + * other negative values: legitimate decoding errors + */ +int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame); + +/** + * Supply a raw video or audio frame to the encoder. Use avcodec_receive_packet() + * to retrieve buffered output packets. + * + * @param avctx codec context + * @param[in] frame AVFrame containing the raw audio or video frame to be encoded. + * Ownership of the frame remains with the caller, and the + * encoder will not write to the frame. The encoder may create + * a reference to the frame data (or copy it if the frame is + * not reference-counted). + * It can be NULL, in which case it is considered a flush + * packet. 
This signals the end of the stream. If the encoder + * still has packets buffered, it will return them after this + * call. Once flushing mode has been entered, additional flush + * packets are ignored, and sending frames will return + * AVERROR_EOF. + * + * For audio: + * If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame + * can have any number of samples. + * If it is not set, frame->nb_samples must be equal to + * avctx->frame_size for all frames except the last. + * The final frame may be smaller than avctx->frame_size. + * @return 0 on success, otherwise negative error code: + * AVERROR(EAGAIN): input is not accepted in the current state - user + * must read output with avcodec_receive_packet() (once + * all output is read, the packet should be resent, and + * the call will not fail with EAGAIN). + * AVERROR_EOF: the encoder has been flushed, and no new frames can + * be sent to it + * AVERROR(EINVAL): codec not opened, refcounted_frames not set, it is a + * decoder, or requires flush + * AVERROR(ENOMEM): failed to add packet to internal queue, or similar + * other errors: legitimate decoding errors + */ +int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame); + +/** + * Read encoded data from the encoder. + * + * @param avctx codec context + * @param avpkt This will be set to a reference-counted packet allocated by the + * encoder. Note that the function will always call + * av_frame_unref(frame) before doing anything else. 
+ * @return 0 on success, otherwise negative error code: + * AVERROR(EAGAIN): output is not available in the current state - user + * must try to send input + * AVERROR_EOF: the encoder has been fully flushed, and there will be + * no more output packets + * AVERROR(EINVAL): codec not opened, or it is an encoder + * other errors: legitimate decoding errors + */ +int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt); + + +/** + * @defgroup lavc_parsing Frame parsing + * @{ + */ + +enum AVPictureStructure { + AV_PICTURE_STRUCTURE_UNKNOWN, //< unknown + AV_PICTURE_STRUCTURE_TOP_FIELD, //< coded as top field + AV_PICTURE_STRUCTURE_BOTTOM_FIELD, //< coded as bottom field + AV_PICTURE_STRUCTURE_FRAME, //< coded as frame +}; + +typedef struct AVCodecParserContext { + void *priv_data; + struct AVCodecParser *parser; + int64_t frame_offset; /* offset of the current frame */ + int64_t cur_offset; /* current offset + (incremented by each av_parser_parse()) */ + int64_t next_frame_offset; /* offset of the next frame */ + /* video info */ + int pict_type; /* XXX: Put it back in AVCodecContext. */ + /** + * This field is used for proper frame duration computation in lavf. + * It signals, how much longer the frame duration of the current frame + * is compared to normal frame duration. + * + * frame_duration = (1 + repeat_pict) * time_base + * + * It is used by codecs like H.264 to display telecined material. + */ + int repeat_pict; /* XXX: Put it back in AVCodecContext. 
*/ + int64_t pts; /* pts of the current frame */ + int64_t dts; /* dts of the current frame */ + + /* private data */ + int64_t last_pts; + int64_t last_dts; + int fetch_timestamp; + +#define AV_PARSER_PTS_NB 4 + int cur_frame_start_index; + int64_t cur_frame_offset[AV_PARSER_PTS_NB]; + int64_t cur_frame_pts[AV_PARSER_PTS_NB]; + int64_t cur_frame_dts[AV_PARSER_PTS_NB]; + + int flags; +#define PARSER_FLAG_COMPLETE_FRAMES 0x0001 +#define PARSER_FLAG_ONCE 0x0002 +/// Set if the parser has a valid file offset +#define PARSER_FLAG_FETCHED_OFFSET 0x0004 +#define PARSER_FLAG_USE_CODEC_TS 0x1000 + + int64_t offset; ///< byte offset from starting packet start + int64_t cur_frame_end[AV_PARSER_PTS_NB]; + + /** + * Set by parser to 1 for key frames and 0 for non-key frames. + * It is initialized to -1, so if the parser doesn't set this flag, + * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames + * will be used. + */ + int key_frame; + +#if FF_API_CONVERGENCE_DURATION + /** + * @deprecated unused + */ + attribute_deprecated + int64_t convergence_duration; +#endif + + // Timestamp generation support: + /** + * Synchronization point for start of timestamp generation. + * + * Set to >0 for sync point, 0 for no sync point and <0 for undefined + * (default). + * + * For example, this corresponds to presence of H.264 buffering period + * SEI message. + */ + int dts_sync_point; + + /** + * Offset of the current timestamp against last timestamp sync point in + * units of AVCodecContext.time_base. + * + * Set to INT_MIN when dts_sync_point unused. Otherwise, it must + * contain a valid timestamp offset. + * + * Note that the timestamp of sync point has usually a nonzero + * dts_ref_dts_delta, which refers to the previous sync point. Offset of + * the next frame after timestamp sync point will be usually 1. + * + * For example, this corresponds to H.264 cpb_removal_delay. 
+ */ + int dts_ref_dts_delta; + + /** + * Presentation delay of current frame in units of AVCodecContext.time_base. + * + * Set to INT_MIN when dts_sync_point unused. Otherwise, it must + * contain valid non-negative timestamp delta (presentation time of a frame + * must not lie in the past). + * + * This delay represents the difference between decoding and presentation + * time of the frame. + * + * For example, this corresponds to H.264 dpb_output_delay. + */ + int pts_dts_delta; + + /** + * Position of the packet in file. + * + * Analogous to cur_frame_pts/dts + */ + int64_t cur_frame_pos[AV_PARSER_PTS_NB]; + + /** + * Byte position of currently parsed frame in stream. + */ + int64_t pos; + + /** + * Previous frame byte position. + */ + int64_t last_pos; + + /** + * Duration of the current frame. + * For audio, this is in units of 1 / AVCodecContext.sample_rate. + * For all other types, this is in units of AVCodecContext.time_base. + */ + int duration; + + enum AVFieldOrder field_order; + + /** + * Indicate whether a picture is coded as a frame, top field or bottom field. + * + * For example, H.264 field_pic_flag equal to 0 corresponds to + * AV_PICTURE_STRUCTURE_FRAME. An H.264 picture with field_pic_flag + * equal to 1 and bottom_field_flag equal to 0 corresponds to + * AV_PICTURE_STRUCTURE_TOP_FIELD. + */ + enum AVPictureStructure picture_structure; + + /** + * Picture number incremented in presentation or output order. + * This field may be reinitialized at the first picture of a new sequence. + * + * For example, this corresponds to H.264 PicOrderCnt. + */ + int output_picture_number; + + /** + * Dimensions of the decoded video intended for presentation. + */ + int width; + int height; + + /** + * Dimensions of the coded video. + */ + int coded_width; + int coded_height; + + /** + * The format of the coded data, corresponds to enum AVPixelFormat for video + * and for enum AVSampleFormat for audio. 
+ * + * Note that a decoder can have considerable freedom in how exactly it + * decodes the data, so the format reported here might be different from the + * one returned by a decoder. + */ + int format; +} AVCodecParserContext; + +typedef struct AVCodecParser { + int codec_ids[5]; /* several codec IDs are permitted */ + int priv_data_size; + int (*parser_init)(AVCodecParserContext *s); + /* This callback never returns an error, a negative value means that + * the frame start was in a previous packet. */ + int (*parser_parse)(AVCodecParserContext *s, + AVCodecContext *avctx, + const uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size); + void (*parser_close)(AVCodecParserContext *s); + int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size); + struct AVCodecParser *next; +} AVCodecParser; + +AVCodecParser *av_parser_next(const AVCodecParser *c); + +void av_register_codec_parser(AVCodecParser *parser); +AVCodecParserContext *av_parser_init(int codec_id); + +/** + * Parse a packet. + * + * @param s parser context. + * @param avctx codec context. + * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished. + * @param poutbuf_size set to size of parsed buffer or zero if not yet finished. + * @param buf input buffer. + * @param buf_size buffer size in bytes without the padding. I.e. the full buffer + size is assumed to be buf_size + AV_INPUT_BUFFER_PADDING_SIZE. + To signal EOF, this should be 0 (so that the last frame + can be output). + * @param pts input presentation timestamp. + * @param dts input decoding timestamp. + * @param pos input byte position in stream. + * @return the number of bytes of the input bitstream used. 
+ * + * Example: + * @code + * while(in_len){ + * len = av_parser_parse2(myparser, AVCodecContext, &data, &size, + * in_data, in_len, + * pts, dts, pos); + * in_data += len; + * in_len -= len; + * + * if(size) + * decode_frame(data, size); + * } + * @endcode + */ +int av_parser_parse2(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, + int64_t pts, int64_t dts, + int64_t pos); + +/** + * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed + * @deprecated use AVBitStreamFilter + */ +int av_parser_change(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); +void av_parser_close(AVCodecParserContext *s); + +/** + * @} + * @} + */ + +/** + * @addtogroup lavc_encoding + * @{ + */ + +/** + * Find a registered encoder with a matching codec ID. + * + * @param id AVCodecID of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_encoder(enum AVCodecID id); + +/** + * Find a registered encoder with the specified name. + * + * @param name name of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_encoder_by_name(const char *name); + +/** + * Encode a frame of audio. + * + * Takes input samples from frame and writes the next output packet, if + * available, to avpkt. The output packet does not necessarily contain data for + * the most recent frame, as encoders can delay, split, and combine input frames + * internally as needed. + * + * @param avctx codec context + * @param avpkt output AVPacket. + * The user can supply an output buffer by setting + * avpkt->data and avpkt->size prior to calling the + * function, but if the size of the user-provided data is not + * large enough, encoding will fail. 
If avpkt->data and + * avpkt->size are set, avpkt->destruct must also be set. All + * other AVPacket fields will be reset by the encoder using + * av_init_packet(). If avpkt->data is NULL, the encoder will + * allocate it. The encoder will set avpkt->size to the size + * of the output packet. + * + * If this function fails or produces no output, avpkt will be + * freed using av_packet_unref(). + * @param[in] frame AVFrame containing the raw audio data to be encoded. + * May be NULL when flushing an encoder that has the + * AV_CODEC_CAP_DELAY capability set. + * If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame + * can have any number of samples. + * If it is not set, frame->nb_samples must be equal to + * avctx->frame_size for all frames except the last. + * The final frame may be smaller than avctx->frame_size. + * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the + * output packet is non-empty, and to 0 if it is + * empty. If the function returns an error, the + * packet can be assumed to be invalid, and the + * value of got_packet_ptr is undefined and should + * not be used. + * @return 0 on success, negative error code on failure + * + * @deprecated use avcodec_send_frame()/avcodec_receive_packet() instead + */ +attribute_deprecated +int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr); + +/** + * Encode a frame of video. + * + * Takes input raw video data from frame and writes the next output packet, if + * available, to avpkt. The output packet does not necessarily contain data for + * the most recent frame, as encoders can delay and reorder input frames + * internally as needed. + * + * @param avctx codec context + * @param avpkt output AVPacket. + * The user can supply an output buffer by setting + * avpkt->data and avpkt->size prior to calling the + * function, but if the size of the user-provided data is not + * large enough, encoding will fail. 
All other AVPacket fields + * will be reset by the encoder using av_init_packet(). If + * avpkt->data is NULL, the encoder will allocate it. + * The encoder will set avpkt->size to the size of the + * output packet. The returned data (if any) belongs to the + * caller, he is responsible for freeing it. + * + * If this function fails or produces no output, avpkt will be + * freed using av_packet_unref(). + * @param[in] frame AVFrame containing the raw video data to be encoded. + * May be NULL when flushing an encoder that has the + * AV_CODEC_CAP_DELAY capability set. + * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the + * output packet is non-empty, and to 0 if it is + * empty. If the function returns an error, the + * packet can be assumed to be invalid, and the + * value of got_packet_ptr is undefined and should + * not be used. + * @return 0 on success, negative error code on failure + * + * @deprecated use avcodec_send_frame()/avcodec_receive_packet() instead + */ +attribute_deprecated +int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr); + +int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, + const AVSubtitle *sub); + + +/** + * @} + */ + +#if FF_API_AVCODEC_RESAMPLE +/** + * @defgroup lavc_resample Audio resampling + * @ingroup libavc + * @deprecated use libswresample instead + * + * @{ + */ +struct ReSampleContext; +struct AVResampleContext; + +typedef struct ReSampleContext ReSampleContext; + +/** + * Initialize audio resampling context. 
+ * + * @param output_channels number of output channels + * @param input_channels number of input channels + * @param output_rate output sample rate + * @param input_rate input sample rate + * @param sample_fmt_out requested output sample format + * @param sample_fmt_in input sample format + * @param filter_length length of each FIR filter in the filterbank relative to the cutoff frequency + * @param log2_phase_count log2 of the number of entries in the polyphase filterbank + * @param linear if 1 then the used FIR filter will be linearly interpolated + between the 2 closest, if 0 the closest will be used + * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate + * @return allocated ReSampleContext, NULL if error occurred + */ +attribute_deprecated +ReSampleContext *av_audio_resample_init(int output_channels, int input_channels, + int output_rate, int input_rate, + enum AVSampleFormat sample_fmt_out, + enum AVSampleFormat sample_fmt_in, + int filter_length, int log2_phase_count, + int linear, double cutoff); + +attribute_deprecated +int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples); + +/** + * Free resample context. + * + * @param s a non-NULL pointer to a resample context previously + * created with av_audio_resample_init() + */ +attribute_deprecated +void audio_resample_close(ReSampleContext *s); + + +/** + * Initialize an audio resampler. + * Note, if either rate is not an integer then simply scale both rates up so they are. 
+ * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq + * @param log2_phase_count log2 of the number of entries in the polyphase filterbank + * @param linear If 1 then the used FIR filter will be linearly interpolated + between the 2 closest, if 0 the closest will be used + * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate + */ +attribute_deprecated +struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff); + +/** + * Resample an array of samples using a previously configured context. + * @param src an array of unconsumed samples + * @param consumed the number of samples of src which have been consumed are returned here + * @param src_size the number of unconsumed samples available + * @param dst_size the amount of space in samples available in dst + * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context. + * @return the number of samples written in dst or -1 if an error occurred + */ +attribute_deprecated +int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx); + + +/** + * Compensate samplerate/timestamp drift. 
The compensation is done by changing + * the resampler parameters, so no audible clicks or similar distortions occur + * @param compensation_distance distance in output samples over which the compensation should be performed + * @param sample_delta number of output samples which should be output less + * + * example: av_resample_compensate(c, 10, 500) + * here instead of 510 samples only 500 samples would be output + * + * note, due to rounding the actual compensation might be slightly different, + * especially if the compensation_distance is large and the in_rate used during init is small + */ +attribute_deprecated +void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance); +attribute_deprecated +void av_resample_close(struct AVResampleContext *c); + +/** + * @} + */ +#endif + +#if FF_API_AVPICTURE +/** + * @addtogroup lavc_picture + * @{ + */ + +/** + * @deprecated unused + */ +attribute_deprecated +int avpicture_alloc(AVPicture *picture, enum AVPixelFormat pix_fmt, int width, int height); + +/** + * @deprecated unused + */ +attribute_deprecated +void avpicture_free(AVPicture *picture); + +/** + * @deprecated use av_image_fill_arrays() instead. + */ +attribute_deprecated +int avpicture_fill(AVPicture *picture, const uint8_t *ptr, + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * @deprecated use av_image_copy_to_buffer() instead. + */ +attribute_deprecated +int avpicture_layout(const AVPicture *src, enum AVPixelFormat pix_fmt, + int width, int height, + unsigned char *dest, int dest_size); + +/** + * @deprecated use av_image_get_buffer_size() instead. + */ +attribute_deprecated +int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height); + +/** + * @deprecated av_image_copy() instead. 
+ */ +attribute_deprecated +void av_picture_copy(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * @deprecated unused + */ +attribute_deprecated +int av_picture_crop(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int top_band, int left_band); + +/** + * @deprecated unused + */ +attribute_deprecated +int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum AVPixelFormat pix_fmt, + int padtop, int padbottom, int padleft, int padright, int *color); + +/** + * @} + */ +#endif + +/** + * @defgroup lavc_misc Utility functions + * @ingroup libavc + * + * Miscellaneous utility functions related to both encoding and decoding + * (or neither). + * @{ + */ + +/** + * @defgroup lavc_misc_pixfmt Pixel formats + * + * Functions for working with pixel formats. + * @{ + */ + +/** + * Utility function to access log2_chroma_w log2_chroma_h from + * the pixel format AVPixFmtDescriptor. + * + * This function asserts that pix_fmt is valid. See av_pix_fmt_get_chroma_sub_sample + * for one that returns a failure code and continues in case of invalid + * pix_fmts. + * + * @param[in] pix_fmt the pixel format + * @param[out] h_shift store log2_chroma_w + * @param[out] v_shift store log2_chroma_h + * + * @see av_pix_fmt_get_chroma_sub_sample + */ + +void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift); + +/** + * Return a value representing the fourCC code associated to the + * pixel format pix_fmt, or 0 if no associated fourCC code can be + * found. + */ +unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt); + +/** + * @deprecated see av_get_pix_fmt_loss() + */ +int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt, + int has_alpha); + +/** + * Find the best pixel format to convert to given a certain source pixel + * format. 
When converting from one pixel format to another, information loss + * may occur. For example, when converting from RGB24 to GRAY, the color + * information will be lost. Similarly, other losses occur when converting from + * some formats to other formats. avcodec_find_best_pix_fmt_of_2() searches which of + * the given pixel formats should be used to suffer the least amount of loss. + * The pixel formats from which it chooses one, are determined by the + * pix_fmt_list parameter. + * + * + * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to choose from + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur. + * @return The best pixel format to convert to or -1 if none was found. + */ +enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list, + enum AVPixelFormat src_pix_fmt, + int has_alpha, int *loss_ptr); + +/** + * @deprecated see av_find_best_pix_fmt_of_2() + */ +enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); + +attribute_deprecated +enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); + +enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt); + +/** + * @} + */ + +#if FF_API_SET_DIMENSIONS +/** + * @deprecated this function is not supposed to be used from outside of lavc + */ +attribute_deprecated +void avcodec_set_dimensions(AVCodecContext *s, int width, int height); +#endif + +#if FF_API_TAG_STRING +/** + * Put a string representing the codec tag codec_tag in buf. 
+ * + * @param buf buffer to place codec tag in + * @param buf_size size in bytes of buf + * @param codec_tag codec tag to assign + * @return the length of the string that would have been generated if + * enough space had been available, excluding the trailing null + * + * @deprecated see av_fourcc_make_string() and av_fourcc2str(). + */ +attribute_deprecated +size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag); +#endif + +void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); + +/** + * Return a name for the specified profile, if available. + * + * @param codec the codec that is searched for the given profile + * @param profile the profile value for which a name is requested + * @return A name for the profile if found, NULL otherwise. + */ +const char *av_get_profile_name(const AVCodec *codec, int profile); + +/** + * Return a name for the specified profile, if available. + * + * @param codec_id the ID of the codec to which the requested profile belongs + * @param profile the profile value for which a name is requested + * @return A name for the profile if found, NULL otherwise. + * + * @note unlike av_get_profile_name(), which searches a list of profiles + * supported by a specific decoder or encoder implementation, this + * function searches the list of profiles from the AVCodecDescriptor + */ +const char *avcodec_profile_name(enum AVCodecID codec_id, int profile); + +int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size); +int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count); +//FIXME func typedef + +/** + * Fill AVFrame audio data and linesize pointers. + * + * The buffer buf must be a preallocated buffer with a size big enough + * to contain the specified samples amount. The filled AVFrame data + * pointers will point to this buffer. 
+ * + * AVFrame extended_data channel pointers are allocated if necessary for + * planar audio. + * + * @param frame the AVFrame + * frame->nb_samples must be set prior to calling the + * function. This function fills in frame->data, + * frame->extended_data, frame->linesize[0]. + * @param nb_channels channel count + * @param sample_fmt sample format + * @param buf buffer to use for frame data + * @param buf_size size of buffer + * @param align plane size sample alignment (0 = default) + * @return >=0 on success, negative error code on failure + * @todo return the size in bytes required to store the samples in + * case of success, at the next libavutil bump + */ +int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, + enum AVSampleFormat sample_fmt, const uint8_t *buf, + int buf_size, int align); + +/** + * Reset the internal decoder state / flush internal buffers. Should be called + * e.g. when seeking or when switching to a different stream. + * + * @note when refcounted frames are not used (i.e. avctx->refcounted_frames is 0), + * this invalidates the frames previously returned from the decoder. When + * refcounted frames are used, the decoder just releases any references it might + * keep internally, but the caller's reference remains valid. + */ +void avcodec_flush_buffers(AVCodecContext *avctx); + +/** + * Return codec bits per sample. + * + * @param[in] codec_id the codec + * @return Number of bits per sample or zero if unknown for the given codec. + */ +int av_get_bits_per_sample(enum AVCodecID codec_id); + +/** + * Return the PCM codec associated with a sample format. + * @param be endianness, 0 for little, 1 for big, + * -1 (or anything else) for native + * @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE + */ +enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be); + +/** + * Return codec bits per sample. + * Only return non-zero if the bits per sample is exactly correct, not an + * approximation. 
+ * + * @param[in] codec_id the codec + * @return Number of bits per sample or zero if unknown for the given codec. + */ +int av_get_exact_bits_per_sample(enum AVCodecID codec_id); + +/** + * Return audio frame duration. + * + * @param avctx codec context + * @param frame_bytes size of the frame, or 0 if unknown + * @return frame duration, in samples, if known. 0 if not able to + * determine. + */ +int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes); + +/** + * This function is the same as av_get_audio_frame_duration(), except it works + * with AVCodecParameters instead of an AVCodecContext. + */ +int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes); + +#if FF_API_OLD_BSF +typedef struct AVBitStreamFilterContext { + void *priv_data; + const struct AVBitStreamFilter *filter; + AVCodecParserContext *parser; + struct AVBitStreamFilterContext *next; + /** + * Internal default arguments, used if NULL is passed to av_bitstream_filter_filter(). + * Not for access by library users. + */ + char *args; +} AVBitStreamFilterContext; +#endif + +typedef struct AVBSFInternal AVBSFInternal; + +/** + * The bitstream filter state. + * + * This struct must be allocated with av_bsf_alloc() and freed with + * av_bsf_free(). + * + * The fields in the struct will only be changed (by the caller or by the + * filter) as described in their documentation, and are to be considered + * immutable otherwise. + */ +typedef struct AVBSFContext { + /** + * A class for logging and AVOptions + */ + const AVClass *av_class; + + /** + * The bitstream filter this context is an instance of. + */ + const struct AVBitStreamFilter *filter; + + /** + * Opaque libavcodec internal data. Must not be touched by the caller in any + * way. + */ + AVBSFInternal *internal; + + /** + * Opaque filter-specific private data. If filter->priv_class is non-NULL, + * this is an AVOptions-enabled struct. + */ + void *priv_data; + + /** + * Parameters of the input stream. 
This field is allocated in + * av_bsf_alloc(), it needs to be filled by the caller before + * av_bsf_init(). + */ + AVCodecParameters *par_in; + + /** + * Parameters of the output stream. This field is allocated in + * av_bsf_alloc(), it is set by the filter in av_bsf_init(). + */ + AVCodecParameters *par_out; + + /** + * The timebase used for the timestamps of the input packets. Set by the + * caller before av_bsf_init(). + */ + AVRational time_base_in; + + /** + * The timebase used for the timestamps of the output packets. Set by the + * filter in av_bsf_init(). + */ + AVRational time_base_out; +} AVBSFContext; + +typedef struct AVBitStreamFilter { + const char *name; + + /** + * A list of codec ids supported by the filter, terminated by + * AV_CODEC_ID_NONE. + * May be NULL, in that case the bitstream filter works with any codec id. + */ + const enum AVCodecID *codec_ids; + + /** + * A class for the private data, used to declare bitstream filter private + * AVOptions. This field is NULL for bitstream filters that do not declare + * any options. + * + * If this field is non-NULL, the first member of the filter private data + * must be a pointer to AVClass, which will be set by libavcodec generic + * code to this class. + */ + const AVClass *priv_class; + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavcodec and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + int priv_data_size; + int (*init)(AVBSFContext *ctx); + int (*filter)(AVBSFContext *ctx, AVPacket *pkt); + void (*close)(AVBSFContext *ctx); +} AVBitStreamFilter; + +#if FF_API_OLD_BSF +/** + * Register a bitstream filter. 
+ * + * The filter will be accessible to the application code through + * av_bitstream_filter_next() or can be directly initialized with + * av_bitstream_filter_init(). + * + * @see avcodec_register_all() + */ +attribute_deprecated +void av_register_bitstream_filter(AVBitStreamFilter *bsf); + +/** + * Create and initialize a bitstream filter context given a bitstream + * filter name. + * + * The returned context must be freed with av_bitstream_filter_close(). + * + * @param name the name of the bitstream filter + * @return a bitstream filter context if a matching filter was found + * and successfully initialized, NULL otherwise + */ +attribute_deprecated +AVBitStreamFilterContext *av_bitstream_filter_init(const char *name); + +/** + * Filter bitstream. + * + * This function filters the buffer buf with size buf_size, and places the + * filtered buffer in the buffer pointed to by poutbuf. + * + * The output buffer must be freed by the caller. + * + * @param bsfc bitstream filter context created by av_bitstream_filter_init() + * @param avctx AVCodecContext accessed by the filter, may be NULL. + * If specified, this must point to the encoder context of the + * output stream the packet is sent to. + * @param args arguments which specify the filter configuration, may be NULL + * @param poutbuf pointer which is updated to point to the filtered buffer + * @param poutbuf_size pointer which is updated to the filtered buffer size in bytes + * @param buf buffer containing the data to filter + * @param buf_size size in bytes of buf + * @param keyframe set to non-zero if the buffer to filter corresponds to a key-frame packet data + * @return >= 0 in case of success, or a negative error code in case of failure + * + * If the return value is positive, an output buffer is allocated and + * is available in *poutbuf, and is distinct from the input buffer. 
+ * + * If the return value is 0, the output buffer is not allocated and + * should be considered identical to the input buffer, or in case + * *poutbuf was set it points to the input buffer (not necessarily to + * its starting address). A special case is if *poutbuf was set to NULL and + * *poutbuf_size was set to 0, which indicates the packet should be dropped. + */ +attribute_deprecated +int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, + AVCodecContext *avctx, const char *args, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); + +/** + * Release bitstream filter context. + * + * @param bsf the bitstream filter context created with + * av_bitstream_filter_init(), can be NULL + */ +attribute_deprecated +void av_bitstream_filter_close(AVBitStreamFilterContext *bsf); + +/** + * If f is NULL, return the first registered bitstream filter, + * if f is non-NULL, return the next registered bitstream filter + * after f, or NULL if f is the last one. + * + * This function can be used to iterate over all registered bitstream + * filters. + */ +attribute_deprecated +AVBitStreamFilter *av_bitstream_filter_next(const AVBitStreamFilter *f); +#endif + +/** + * @return a bitstream filter with the specified name or NULL if no such + * bitstream filter exists. + */ +const AVBitStreamFilter *av_bsf_get_by_name(const char *name); + +/** + * Iterate over all registered bitstream filters. + * + * @param opaque a pointer where libavcodec will store the iteration state. Must + * point to NULL to start the iteration. + * + * @return the next registered bitstream filter or NULL when the iteration is + * finished + */ +const AVBitStreamFilter *av_bsf_next(void **opaque); + +/** + * Allocate a context for a given bitstream filter. The caller must fill in the + * context parameters as described in the documentation and then call + * av_bsf_init() before sending any data to the filter. 
+ * + * @param filter the filter for which to allocate an instance. + * @param ctx a pointer into which the pointer to the newly-allocated context + * will be written. It must be freed with av_bsf_free() after the + * filtering is done. + * + * @return 0 on success, a negative AVERROR code on failure + */ +int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **ctx); + +/** + * Prepare the filter for use, after all the parameters and options have been + * set. + */ +int av_bsf_init(AVBSFContext *ctx); + +/** + * Submit a packet for filtering. + * + * After sending each packet, the filter must be completely drained by calling + * av_bsf_receive_packet() repeatedly until it returns AVERROR(EAGAIN) or + * AVERROR_EOF. + * + * @param pkt the packet to filter. pkt must contain some payload (i.e data or + * side data must be present in pkt). The bitstream filter will take ownership of + * the packet and reset the contents of pkt. pkt is not touched if an error occurs. + * This parameter may be NULL, which signals the end of the stream (i.e. no more + * packets will be sent). That will cause the filter to output any packets it + * may have buffered internally. + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt); + +/** + * Retrieve a filtered packet. + * + * @param[out] pkt this struct will be filled with the contents of the filtered + * packet. It is owned by the caller and must be freed using + * av_packet_unref() when it is no longer needed. + * This parameter should be "clean" (i.e. freshly allocated + * with av_packet_alloc() or unreffed with av_packet_unref()) + * when this function is called. If this function returns + * successfully, the contents of pkt will be completely + * overwritten by the returned data. On failure, pkt is not + * touched. + * + * @return 0 on success. AVERROR(EAGAIN) if more packets need to be sent to the + * filter (using av_bsf_send_packet()) to get more output. 
AVERROR_EOF if there + * will be no further output from the filter. Another negative AVERROR value if + * an error occurs. + * + * @note one input packet may result in several output packets, so after sending + * a packet with av_bsf_send_packet(), this function needs to be called + * repeatedly until it stops returning 0. It is also possible for a filter to + * output fewer packets than were sent to it, so this function may return + * AVERROR(EAGAIN) immediately after a successful av_bsf_send_packet() call. + */ +int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt); + +/** + * Free a bitstream filter context and everything associated with it; write NULL + * into the supplied pointer. + */ +void av_bsf_free(AVBSFContext **ctx); + +/** + * Get the AVClass for AVBSFContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *av_bsf_get_class(void); + +/** + * Structure for chain/list of bitstream filters. + * Empty list can be allocated by av_bsf_list_alloc(). + */ +typedef struct AVBSFList AVBSFList; + +/** + * Allocate empty list of bitstream filters. + * The list must be later freed by av_bsf_list_free() + * or finalized by av_bsf_list_finalize(). + * + * @return Pointer to @ref AVBSFList on success, NULL in case of failure + */ +AVBSFList *av_bsf_list_alloc(void); + +/** + * Free list of bitstream filters. + * + * @param lst Pointer to pointer returned by av_bsf_list_alloc() + */ +void av_bsf_list_free(AVBSFList **lst); + +/** + * Append bitstream filter to the list of bitstream filters. + * + * @param lst List to append to + * @param bsf Filter context to be appended + * + * @return >=0 on success, negative AVERROR in case of failure + */ +int av_bsf_list_append(AVBSFList *lst, AVBSFContext *bsf); + +/** + * Construct new bitstream filter context given it's name and options + * and append it to the list of bitstream filters. 
+ * + * @param lst List to append to + * @param bsf_name Name of the bitstream filter + * @param options Options for the bitstream filter, can be set to NULL + * + * @return >=0 on success, negative AVERROR in case of failure + */ +int av_bsf_list_append2(AVBSFList *lst, const char * bsf_name, AVDictionary **options); +/** + * Finalize list of bitstream filters. + * + * This function will transform @ref AVBSFList to single @ref AVBSFContext, + * so the whole chain of bitstream filters can be treated as single filter + * freshly allocated by av_bsf_alloc(). + * If the call is successful, @ref AVBSFList structure is freed and lst + * will be set to NULL. In case of failure, caller is responsible for + * freeing the structure by av_bsf_list_free() + * + * @param lst Filter list structure to be transformed + * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext structure + * representing the chain of bitstream filters + * + * @return >=0 on success, negative AVERROR in case of failure + */ +int av_bsf_list_finalize(AVBSFList **lst, AVBSFContext **bsf); + +/** + * Parse string describing list of bitstream filters and create single + * @ref AVBSFContext describing the whole chain of bitstream filters. + * Resulting @ref AVBSFContext can be treated as any other @ref AVBSFContext freshly + * allocated by av_bsf_alloc(). + * + * @param str String describing chain of bitstream filters in format + * `bsf1[=opt1=val1:opt2=val2][,bsf2]` + * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext structure + * representing the chain of bitstream filters + * + * @return >=0 on success, negative AVERROR in case of failure + */ +int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf); + +/** + * Get null/pass-through bitstream filter. 
+ * + * @param[out] bsf Pointer to be set to new instance of pass-through bitstream filter + * + * @return + */ +int av_bsf_get_null_filter(AVBSFContext **bsf); + +/* memory */ + +/** + * Same behaviour av_fast_malloc but the buffer has additional + * AV_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0. + * + * In addition the whole buffer will initially and after resizes + * be 0-initialized so that no uninitialized data will ever appear. + */ +void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Same behaviour av_fast_padded_malloc except that buffer will always + * be 0-initialized after call. + */ +void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size); + +/** + * Encode extradata length to a buffer. Used by xiph codecs. + * + * @param s buffer to write to; must be at least (v/255+1) bytes long + * @param v size of extradata in bytes + * @return number of bytes written to the buffer. + */ +unsigned int av_xiphlacing(unsigned char *s, unsigned int v); + +#if FF_API_MISSING_SAMPLE +/** + * Log a generic warning message about a missing feature. This function is + * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.) + * only, and would normally not be used by applications. + * @param[in] avc a pointer to an arbitrary struct of which the first field is + * a pointer to an AVClass struct + * @param[in] feature string containing the name of the missing feature + * @param[in] want_sample indicates if samples are wanted which exhibit this feature. + * If want_sample is non-zero, additional verbiage will be added to the log + * message which tells the user how to report samples to the development + * mailing list. + * @deprecated Use avpriv_report_missing_feature() instead. + */ +attribute_deprecated +void av_log_missing_feature(void *avc, const char *feature, int want_sample); + +/** + * Log a generic warning message asking for a sample. 
This function is + * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.) + * only, and would normally not be used by applications. + * @param[in] avc a pointer to an arbitrary struct of which the first field is + * a pointer to an AVClass struct + * @param[in] msg string containing an optional message, or NULL if no message + * @deprecated Use avpriv_request_sample() instead. + */ +attribute_deprecated +void av_log_ask_for_sample(void *avc, const char *msg, ...) av_printf_format(2, 3); +#endif /* FF_API_MISSING_SAMPLE */ + +/** + * Register the hardware accelerator hwaccel. + */ +void av_register_hwaccel(AVHWAccel *hwaccel); + +/** + * If hwaccel is NULL, returns the first registered hardware accelerator, + * if hwaccel is non-NULL, returns the next registered hardware accelerator + * after hwaccel, or NULL if hwaccel is the last one. + */ +AVHWAccel *av_hwaccel_next(const AVHWAccel *hwaccel); + + +/** + * Lock operation used by lockmgr + */ +enum AVLockOp { + AV_LOCK_CREATE, ///< Create a mutex + AV_LOCK_OBTAIN, ///< Lock the mutex + AV_LOCK_RELEASE, ///< Unlock the mutex + AV_LOCK_DESTROY, ///< Free mutex resources +}; + +/** + * Register a user provided lock manager supporting the operations + * specified by AVLockOp. The "mutex" argument to the function points + * to a (void *) where the lockmgr should store/get a pointer to a user + * allocated mutex. It is NULL upon AV_LOCK_CREATE and equal to the + * value left by the last call for all other ops. If the lock manager is + * unable to perform the op then it should leave the mutex in the same + * state as when it was called and return a non-zero value. However, + * when called with AV_LOCK_DESTROY the mutex will always be assumed to + * have been successfully destroyed. If av_lockmgr_register succeeds + * it will return a non-negative value, if it fails it will return a + * negative value and destroy all mutex and unregister all callbacks. 
+ * av_lockmgr_register is not thread-safe, it must be called from a + * single thread before any calls which make use of locking are used. + * + * @param cb User defined callback. av_lockmgr_register invokes calls + * to this callback and the previously registered callback. + * The callback will be used to create more than one mutex + * each of which must be backed by its own underlying locking + * mechanism (i.e. do not use a single static object to + * implement your lock manager). If cb is set to NULL the + * lockmgr will be unregistered. + */ +int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)); + +/** + * Get the type of the given codec. + */ +enum AVMediaType avcodec_get_type(enum AVCodecID codec_id); + +/** + * Get the name of a codec. + * @return a static string identifying the codec; never NULL + */ +const char *avcodec_get_name(enum AVCodecID id); + +/** + * @return a positive value if s is open (i.e. avcodec_open2() was called on it + * with no corresponding avcodec_close()), 0 otherwise. + */ +int avcodec_is_open(AVCodecContext *s); + +/** + * @return a non-zero number if codec is an encoder, zero otherwise + */ +int av_codec_is_encoder(const AVCodec *codec); + +/** + * @return a non-zero number if codec is a decoder, zero otherwise + */ +int av_codec_is_decoder(const AVCodec *codec); + +/** + * @return descriptor for given codec ID or NULL if no descriptor exists. + */ +const AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id); + +/** + * Iterate over all codec descriptors known to libavcodec. + * + * @param prev previous descriptor. NULL to get the first descriptor. + * + * @return next descriptor or NULL after the last descriptor + */ +const AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev); + +/** + * @return codec descriptor with the given name or NULL if no such descriptor + * exists. 
+ */ +const AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name); + +/** + * Allocate a CPB properties structure and initialize its fields to default + * values. + * + * @param size if non-NULL, the size of the allocated struct will be written + * here. This is useful for embedding it in side data. + * + * @return the newly allocated struct or NULL on failure + */ +AVCPBProperties *av_cpb_properties_alloc(size_t *size); + +/** + * @} + */ + +#endif /* AVCODEC_AVCODEC_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/avdct.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/avdct.h new file mode 100644 index 0000000..272422e --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/avdct.h @@ -0,0 +1,84 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVDCT_H +#define AVCODEC_AVDCT_H + +#include "libavutil/opt.h" + +/** + * AVDCT context. + * @note function pointers can be NULL if the specific features have been + * disabled at build time. + */ +typedef struct AVDCT { + const AVClass *av_class; + + void (*idct)(int16_t *block /* align 16 */); + + /** + * IDCT input permutation. 
+ * Several optimized IDCTs need a permutated input (relative to the + * normal order of the reference IDCT). + * This permutation must be performed before the idct_put/add. + * Note, normally this can be merged with the zigzag/alternate scan
+ * An example to avoid confusion: + * - (->decode coeffs -> zigzag reorder -> dequant -> reference IDCT -> ...) + * - (x -> reference DCT -> reference IDCT -> x) + * - (x -> reference DCT -> simple_mmx_perm = idct_permutation + * -> simple_idct_mmx -> x) + * - (-> decode coeffs -> zigzag reorder -> simple_mmx_perm -> dequant + * -> simple_idct_mmx -> ...) + */ + uint8_t idct_permutation[64]; + + void (*fdct)(int16_t *block /* align 16 */); + + + /** + * DCT algorithm. + * must use AVOptions to set this field. + */ + int dct_algo; + + /** + * IDCT algorithm. + * must use AVOptions to set this field. + */ + int idct_algo; + + void (*get_pixels)(int16_t *block /* align 16 */, + const uint8_t *pixels /* align 8 */, + ptrdiff_t line_size); + + int bits_per_sample; +} AVDCT; + +/** + * Allocates a AVDCT context. + * This needs to be initialized with avcodec_dct_init() after optionally + * configuring it with AVOptions. + * + * To free it use av_free() + */ +AVDCT *avcodec_dct_alloc(void); +int avcodec_dct_init(AVDCT *); + +const AVClass *avcodec_dct_get_class(void); + +#endif /* AVCODEC_AVDCT_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/avfft.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/avfft.h new file mode 100644 index 0000000..0c0f9b8 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/avfft.h @@ -0,0 +1,118 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVFFT_H +#define AVCODEC_AVFFT_H + +/** + * @file + * @ingroup lavc_fft + * FFT functions + */ + +/** + * @defgroup lavc_fft FFT functions + * @ingroup lavc_misc + * + * @{ + */ + +typedef float FFTSample; + +typedef struct FFTComplex { + FFTSample re, im; +} FFTComplex; + +typedef struct FFTContext FFTContext; + +/** + * Set up a complex FFT. + * @param nbits log2 of the length of the input array + * @param inverse if 0 perform the forward transform, if 1 perform the inverse + */ +FFTContext *av_fft_init(int nbits, int inverse); + +/** + * Do the permutation needed BEFORE calling ff_fft_calc(). + */ +void av_fft_permute(FFTContext *s, FFTComplex *z); + +/** + * Do a complex FFT with the parameters defined in av_fft_init(). The + * input data must be permuted before. No 1.0/sqrt(n) normalization is done. + */ +void av_fft_calc(FFTContext *s, FFTComplex *z); + +void av_fft_end(FFTContext *s); + +FFTContext *av_mdct_init(int nbits, int inverse, double scale); +void av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); +void av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input); +void av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); +void av_mdct_end(FFTContext *s); + +/* Real Discrete Fourier Transform */ + +enum RDFTransformType { + DFT_R2C, + IDFT_C2R, + IDFT_R2C, + DFT_C2R, +}; + +typedef struct RDFTContext RDFTContext; + +/** + * Set up a real FFT. 
+ * @param nbits log2 of the length of the input array + * @param trans the type of transform + */ +RDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans); +void av_rdft_calc(RDFTContext *s, FFTSample *data); +void av_rdft_end(RDFTContext *s); + +/* Discrete Cosine Transform */ + +typedef struct DCTContext DCTContext; + +enum DCTTransformType { + DCT_II = 0, + DCT_III, + DCT_I, + DST_I, +}; + +/** + * Set up DCT. + * + * @param nbits size of the input array: + * (1 << nbits) for DCT-II, DCT-III and DST-I + * (1 << nbits) + 1 for DCT-I + * @param type the type of transform + * + * @note the first element of the input of DST-I is ignored + */ +DCTContext *av_dct_init(int nbits, enum DCTTransformType type); +void av_dct_calc(DCTContext *s, FFTSample *data); +void av_dct_end (DCTContext *s); + +/** + * @} + */ + +#endif /* AVCODEC_AVFFT_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/d3d11va.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/d3d11va.h new file mode 100644 index 0000000..6816b6c --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/d3d11va.h @@ -0,0 +1,112 @@ +/* + * Direct3D11 HW acceleration + * + * copyright (c) 2009 Laurent Aimar + * copyright (c) 2015 Steve Lhomme + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_D3D11VA_H +#define AVCODEC_D3D11VA_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_d3d11va + * Public libavcodec D3D11VA header. + */ + +#if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602 +#undef _WIN32_WINNT +#define _WIN32_WINNT 0x0602 +#endif + +#include +#include + +/** + * @defgroup lavc_codec_hwaccel_d3d11va Direct3D11 + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for Direct3D11 and old UVD/UVD+ ATI video cards +#define FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO 2 ///< Work around for Direct3D11 and old Intel GPUs with ClearVideo interface + +/** + * This structure is used to provides the necessary configurations and data + * to the Direct3D11 FFmpeg HWAccel implementation. + * + * The application must make it available as AVCodecContext.hwaccel_context. + * + * Use av_d3d11va_alloc_context() exclusively to allocate an AVD3D11VAContext. + */ +typedef struct AVD3D11VAContext { + /** + * D3D11 decoder object + */ + ID3D11VideoDecoder *decoder; + + /** + * D3D11 VideoContext + */ + ID3D11VideoContext *video_context; + + /** + * D3D11 configuration used to create the decoder + */ + D3D11_VIDEO_DECODER_CONFIG *cfg; + + /** + * The number of surface in the surface array + */ + unsigned surface_count; + + /** + * The array of Direct3D surfaces used to create the decoder + */ + ID3D11VideoDecoderOutputView **surface; + + /** + * A bit field configuring the workarounds needed for using the decoder + */ + uint64_t workaround; + + /** + * Private to the FFmpeg AVHWAccel implementation + */ + unsigned report_id; + + /** + * Mutex to access video_context + */ + HANDLE context_mutex; +} AVD3D11VAContext; + +/** + * Allocate an AVD3D11VAContext. 
+ * + * @return Newly-allocated AVD3D11VAContext or NULL on failure. + */ +AVD3D11VAContext *av_d3d11va_alloc_context(void); + +/** + * @} + */ + +#endif /* AVCODEC_D3D11VA_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/dirac.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/dirac.h new file mode 100644 index 0000000..e6d9d34 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/dirac.h @@ -0,0 +1,131 @@ +/* + * Copyright (C) 2007 Marco Gerards + * Copyright (C) 2009 David Conrad + * Copyright (C) 2011 Jordi Ortiz + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_DIRAC_H +#define AVCODEC_DIRAC_H + +/** + * @file + * Interface to Dirac Decoder/Encoder + * @author Marco Gerards + * @author David Conrad + * @author Jordi Ortiz + */ + +#include "avcodec.h" + +/** + * The spec limits the number of wavelet decompositions to 4 for both + * level 1 (VC-2) and 128 (long-gop default). + * 5 decompositions is the maximum before >16-bit buffers are needed. + * Schroedinger allows this for DD 9,7 and 13,7 wavelets only, limiting + * the others to 4 decompositions (or 3 for the fidelity filter). 
+ * + * We use this instead of MAX_DECOMPOSITIONS to save some memory. + */ +#define MAX_DWT_LEVELS 5 + +/** + * Parse code values: + * + * Dirac Specification -> + * 9.6.1 Table 9.1 + * + * VC-2 Specification -> + * 10.4.1 Table 10.1 + */ + +enum DiracParseCodes { + DIRAC_PCODE_SEQ_HEADER = 0x00, + DIRAC_PCODE_END_SEQ = 0x10, + DIRAC_PCODE_AUX = 0x20, + DIRAC_PCODE_PAD = 0x30, + DIRAC_PCODE_PICTURE_CODED = 0x08, + DIRAC_PCODE_PICTURE_RAW = 0x48, + DIRAC_PCODE_PICTURE_LOW_DEL = 0xC8, + DIRAC_PCODE_PICTURE_HQ = 0xE8, + DIRAC_PCODE_INTER_NOREF_CO1 = 0x0A, + DIRAC_PCODE_INTER_NOREF_CO2 = 0x09, + DIRAC_PCODE_INTER_REF_CO1 = 0x0D, + DIRAC_PCODE_INTER_REF_CO2 = 0x0E, + DIRAC_PCODE_INTRA_REF_CO = 0x0C, + DIRAC_PCODE_INTRA_REF_RAW = 0x4C, + DIRAC_PCODE_INTRA_REF_PICT = 0xCC, + DIRAC_PCODE_MAGIC = 0x42424344, +}; + +typedef struct DiracVersionInfo { + int major; + int minor; +} DiracVersionInfo; + +typedef struct AVDiracSeqHeader { + unsigned width; + unsigned height; + uint8_t chroma_format; ///< 0: 444 1: 422 2: 420 + + uint8_t interlaced; + uint8_t top_field_first; + + uint8_t frame_rate_index; ///< index into dirac_frame_rate[] + uint8_t aspect_ratio_index; ///< index into dirac_aspect_ratio[] + + uint16_t clean_width; + uint16_t clean_height; + uint16_t clean_left_offset; + uint16_t clean_right_offset; + + uint8_t pixel_range_index; ///< index into dirac_pixel_range_presets[] + uint8_t color_spec_index; ///< index into dirac_color_spec_presets[] + + int profile; + int level; + + AVRational framerate; + AVRational sample_aspect_ratio; + + enum AVPixelFormat pix_fmt; + enum AVColorRange color_range; + enum AVColorPrimaries color_primaries; + enum AVColorTransferCharacteristic color_trc; + enum AVColorSpace colorspace; + + DiracVersionInfo version; + int bit_depth; +} AVDiracSeqHeader; + +/** + * Parse a Dirac sequence header. + * + * @param dsh this function will allocate and fill an AVDiracSeqHeader struct + * and write it into this pointer. 
The caller must free it with + * av_free(). + * @param buf the data buffer + * @param buf_size the size of the data buffer in bytes + * @param log_ctx if non-NULL, this function will log errors here + * @return 0 on success, a negative AVERROR code on failure + */ +int av_dirac_parse_sequence_header(AVDiracSeqHeader **dsh, + const uint8_t *buf, size_t buf_size, + void *log_ctx); + +#endif /* AVCODEC_DIRAC_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/dv_profile.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/dv_profile.h new file mode 100644 index 0000000..9380a66 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/dv_profile.h @@ -0,0 +1,83 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_DV_PROFILE_H +#define AVCODEC_DV_PROFILE_H + +#include + +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" +#include "avcodec.h" + +/* minimum number of bytes to read from a DV stream in order to + * determine the profile */ +#define DV_PROFILE_BYTES (6 * 80) /* 6 DIF blocks */ + + +/* + * AVDVProfile is used to express the differences between various + * DV flavors. 
For now it's primarily used for differentiating + * 525/60 and 625/50, but the plans are to use it for various + * DV specs as well (e.g. SMPTE314M vs. IEC 61834). + */ +typedef struct AVDVProfile { + int dsf; /* value of the dsf in the DV header */ + int video_stype; /* stype for VAUX source pack */ + int frame_size; /* total size of one frame in bytes */ + int difseg_size; /* number of DIF segments per DIF channel */ + int n_difchan; /* number of DIF channels per frame */ + AVRational time_base; /* 1/framerate */ + int ltc_divisor; /* FPS from the LTS standpoint */ + int height; /* picture height in pixels */ + int width; /* picture width in pixels */ + AVRational sar[2]; /* sample aspect ratios for 4:3 and 16:9 */ + enum AVPixelFormat pix_fmt; /* picture pixel format */ + int bpm; /* blocks per macroblock */ + const uint8_t *block_sizes; /* AC block sizes, in bits */ + int audio_stride; /* size of audio_shuffle table */ + int audio_min_samples[3]; /* min amount of audio samples */ + /* for 48kHz, 44.1kHz and 32kHz */ + int audio_samples_dist[5]; /* how many samples are supposed to be */ + /* in each frame in a 5 frames window */ + const uint8_t (*audio_shuffle)[9]; /* PCM shuffling table */ +} AVDVProfile; + +/** + * Get a DV profile for the provided compressed frame. + * + * @param sys the profile used for the previous frame, may be NULL + * @param frame the compressed data buffer + * @param buf_size size of the buffer in bytes + * @return the DV profile for the supplied data or NULL on failure + */ +const AVDVProfile *av_dv_frame_profile(const AVDVProfile *sys, + const uint8_t *frame, unsigned buf_size); + +/** + * Get a DV profile for the provided stream parameters. + */ +const AVDVProfile *av_dv_codec_profile(int width, int height, enum AVPixelFormat pix_fmt); + +/** + * Get a DV profile for the provided stream parameters. + * The frame rate is used as a best-effort parameter. 
+ */ +const AVDVProfile *av_dv_codec_profile2(int width, int height, enum AVPixelFormat pix_fmt, AVRational frame_rate); + +#endif /* AVCODEC_DV_PROFILE_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/dxva2.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/dxva2.h new file mode 100644 index 0000000..22c9399 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/dxva2.h @@ -0,0 +1,93 @@ +/* + * DXVA2 HW acceleration + * + * copyright (c) 2009 Laurent Aimar + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_DXVA2_H +#define AVCODEC_DXVA2_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_dxva2 + * Public libavcodec DXVA2 header. 
+ */ + +#if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602 +#undef _WIN32_WINNT +#define _WIN32_WINNT 0x0602 +#endif + +#include +#include +#include + +/** + * @defgroup lavc_codec_hwaccel_dxva2 DXVA2 + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards +#define FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO 2 ///< Work around for DXVA2 and old Intel GPUs with ClearVideo interface + +/** + * This structure is used to provides the necessary configurations and data + * to the DXVA2 FFmpeg HWAccel implementation. + * + * The application must make it available as AVCodecContext.hwaccel_context. + */ +struct dxva_context { + /** + * DXVA2 decoder object + */ + IDirectXVideoDecoder *decoder; + + /** + * DXVA2 configuration used to create the decoder + */ + const DXVA2_ConfigPictureDecode *cfg; + + /** + * The number of surface in the surface array + */ + unsigned surface_count; + + /** + * The array of Direct3D surfaces used to create the decoder + */ + LPDIRECT3DSURFACE9 *surface; + + /** + * A bit field configuring the workarounds needed for using the decoder + */ + uint64_t workaround; + + /** + * Private to the FFmpeg AVHWAccel implementation + */ + unsigned report_id; +}; + +/** + * @} + */ + +#endif /* AVCODEC_DXVA2_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/jni.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/jni.h new file mode 100644 index 0000000..dd99e92 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/jni.h @@ -0,0 +1,46 @@ +/* + * JNI public API functions + * + * Copyright (c) 2015-2016 Matthieu Bouron + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_JNI_H +#define AVCODEC_JNI_H + +/* + * Manually set a Java virtual machine which will be used to retrieve the JNI + * environment. Once a Java VM is set it cannot be changed afterwards, meaning + * you can call multiple times av_jni_set_java_vm with the same Java VM pointer + * however it will error out if you try to set a different Java VM. + * + * @param vm Java virtual machine + * @param log_ctx context used for logging, can be NULL + * @return 0 on success, < 0 otherwise + */ +int av_jni_set_java_vm(void *vm, void *log_ctx); + +/* + * Get the Java virtual machine which has been set with av_jni_set_java_vm. 
+ * + * @param vm Java virtual machine + * @return a pointer to the Java virtual machine + */ +void *av_jni_get_java_vm(void *log_ctx); + +#endif /* AVCODEC_JNI_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/mediacodec.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/mediacodec.h new file mode 100644 index 0000000..5606d24 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/mediacodec.h @@ -0,0 +1,88 @@ +/* + * Android MediaCodec public API + * + * Copyright (c) 2016 Matthieu Bouron + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_MEDIACODEC_H +#define AVCODEC_MEDIACODEC_H + +#include "libavcodec/avcodec.h" + +/** + * This structure holds a reference to a android/view/Surface object that will + * be used as output by the decoder. + * + */ +typedef struct AVMediaCodecContext { + + /** + * android/view/Surface object reference. + */ + void *surface; + +} AVMediaCodecContext; + +/** + * Allocate and initialize a MediaCodec context. + * + * When decoding with MediaCodec is finished, the caller must free the + * MediaCodec context with av_mediacodec_default_free. 
+ * + * @return a pointer to a newly allocated AVMediaCodecContext on success, NULL otherwise + */ +AVMediaCodecContext *av_mediacodec_alloc_context(void); + +/** + * Convenience function that sets up the MediaCodec context. + * + * @param avctx codec context + * @param ctx MediaCodec context to initialize + * @param surface reference to an android/view/Surface + * @return 0 on success, < 0 otherwise + */ +int av_mediacodec_default_init(AVCodecContext *avctx, AVMediaCodecContext *ctx, void *surface); + +/** + * This function must be called to free the MediaCodec context initialized with + * av_mediacodec_default_init(). + * + * @param avctx codec context + */ +void av_mediacodec_default_free(AVCodecContext *avctx); + +/** + * Opaque structure representing a MediaCodec buffer to render. + */ +typedef struct MediaCodecBuffer AVMediaCodecBuffer; + +/** + * Release a MediaCodec buffer and render it to the surface that is associated + * with the decoder. This function should only be called once on a given + * buffer, once released the underlying buffer returns to the codec, thus + * subsequent calls to this function will have no effect. + * + * @param buffer the buffer to render + * @param render 1 to release and render the buffer to the surface or 0 to + * discard the buffer + * @return 0 on success, < 0 otherwise + */ +int av_mediacodec_release_buffer(AVMediaCodecBuffer *buffer, int render); + +#endif /* AVCODEC_MEDIACODEC_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/qsv.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/qsv.h new file mode 100644 index 0000000..b77158e --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/qsv.h @@ -0,0 +1,107 @@ +/* + * Intel MediaSDK QSV public API + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_QSV_H +#define AVCODEC_QSV_H + +#include + +#include "libavutil/buffer.h" + +/** + * This struct is used for communicating QSV parameters between libavcodec and + * the caller. It is managed by the caller and must be assigned to + * AVCodecContext.hwaccel_context. + * - decoding: hwaccel_context must be set on return from the get_format() + * callback + * - encoding: hwaccel_context must be set before avcodec_open2() + */ +typedef struct AVQSVContext { + /** + * If non-NULL, the session to use for encoding or decoding. + * Otherwise, libavcodec will try to create an internal session. + */ + mfxSession session; + + /** + * The IO pattern to use. + */ + int iopattern; + + /** + * Extra buffers to pass to encoder or decoder initialization. + */ + mfxExtBuffer **ext_buffers; + int nb_ext_buffers; + + /** + * Encoding only. If this field is set to non-zero by the caller, libavcodec + * will create an mfxExtOpaqueSurfaceAlloc extended buffer and pass it to + * the encoder initialization. This only makes sense if iopattern is also + * set to MFX_IOPATTERN_IN_OPAQUE_MEMORY. + * + * The number of allocated opaque surfaces will be the sum of the number + * required by the encoder and the user-provided value nb_opaque_surfaces. 
+ * The array of the opaque surfaces will be exported to the caller through + * the opaque_surfaces field. + */ + int opaque_alloc; + + /** + * Encoding only, and only if opaque_alloc is set to non-zero. Before + * calling avcodec_open2(), the caller should set this field to the number + * of extra opaque surfaces to allocate beyond what is required by the + * encoder. + * + * On return from avcodec_open2(), this field will be set by libavcodec to + * the total number of allocated opaque surfaces. + */ + int nb_opaque_surfaces; + + /** + * Encoding only, and only if opaque_alloc is set to non-zero. On return + * from avcodec_open2(), this field will be used by libavcodec to export the + * array of the allocated opaque surfaces to the caller, so they can be + * passed to other parts of the pipeline. + * + * The buffer reference exported here is owned and managed by libavcodec, + * the callers should make their own reference with av_buffer_ref() and free + * it with av_buffer_unref() when it is no longer needed. + * + * The buffer data is an nb_opaque_surfaces-sized array of mfxFrameSurface1. + */ + AVBufferRef *opaque_surfaces; + + /** + * Encoding only, and only if opaque_alloc is set to non-zero. On return + * from avcodec_open2(), this field will be set to the surface type used in + * the opaque allocation request. + */ + int opaque_alloc_type; +} AVQSVContext; + +/** + * Allocate a new context. + * + * It must be freed by the caller with av_free(). 
+ */ +AVQSVContext *av_qsv_alloc_context(void); + +#endif /* AVCODEC_QSV_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/vaapi.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/vaapi.h new file mode 100644 index 0000000..bb28455 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/vaapi.h @@ -0,0 +1,195 @@ +/* + * Video Acceleration API (shared data between FFmpeg and the video player) + * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1 + * + * Copyright (C) 2008-2009 Splitted-Desktop Systems + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VAAPI_H +#define AVCODEC_VAAPI_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vaapi + * Public libavcodec VA API header. + */ + +#include +#include "libavutil/attributes.h" +#include "version.h" + +#if FF_API_STRUCT_VAAPI_CONTEXT + +/** + * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding + * @ingroup lavc_codec_hwaccel + * @{ + */ + +/** + * This structure is used to share data between the FFmpeg library and + * the client video application. + * This shall be zero-allocated and available as + * AVCodecContext.hwaccel_context. 
All user members can be set once + * during initialization or through each AVCodecContext.get_buffer() + * function call. In any case, they must be valid prior to calling + * decoding functions. + * + * Deprecated: use AVCodecContext.hw_frames_ctx instead. + */ +struct attribute_deprecated vaapi_context { + /** + * Window system dependent data + * + * - encoding: unused + * - decoding: Set by user + */ + void *display; + + /** + * Configuration ID + * + * - encoding: unused + * - decoding: Set by user + */ + uint32_t config_id; + + /** + * Context ID (video decode pipeline) + * + * - encoding: unused + * - decoding: Set by user + */ + uint32_t context_id; + +#if FF_API_VAAPI_CONTEXT + /** + * VAPictureParameterBuffer ID + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + uint32_t pic_param_buf_id; + + /** + * VAIQMatrixBuffer ID + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + uint32_t iq_matrix_buf_id; + + /** + * VABitPlaneBuffer ID (for VC-1 decoding) + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + uint32_t bitplane_buf_id; + + /** + * Slice parameter/data buffer IDs + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + uint32_t *slice_buf_ids; + + /** + * Number of effective slice buffer IDs to send to the HW + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + unsigned int n_slice_buf_ids; + + /** + * Size of pre-allocated slice_buf_ids + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + unsigned int slice_buf_ids_alloc; + + /** + * Pointer to VASliceParameterBuffers + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + void *slice_params; + + /** + * Size of a VASliceParameterBuffer element + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + unsigned 
int slice_param_size; + + /** + * Size of pre-allocated slice_params + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + unsigned int slice_params_alloc; + + /** + * Number of slices currently filled in + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + unsigned int slice_count; + + /** + * Pointer to slice data buffer base + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + const uint8_t *slice_data; + + /** + * Current size of slice data + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + uint32_t slice_data_size; +#endif +}; + +/* @} */ + +#endif /* FF_API_STRUCT_VAAPI_CONTEXT */ + +#endif /* AVCODEC_VAAPI_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/vda.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/vda.h new file mode 100644 index 0000000..bde14e3 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/vda.h @@ -0,0 +1,230 @@ +/* + * VDA HW acceleration + * + * copyright (c) 2011 Sebastien Zwickert + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VDA_H +#define AVCODEC_VDA_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vda + * Public libavcodec VDA header. + */ + +#include "libavcodec/avcodec.h" + +#include + +// emmintrin.h is unable to compile with -std=c99 -Werror=missing-prototypes +// http://openradar.appspot.com/8026390 +#undef __GNUC_STDC_INLINE__ + +#define Picture QuickdrawPicture +#include +#undef Picture + +#include "libavcodec/version.h" + +// extra flags not defined in VDADecoder.h +enum { + kVDADecodeInfo_Asynchronous = 1UL << 0, + kVDADecodeInfo_FrameDropped = 1UL << 1 +}; + +/** + * @defgroup lavc_codec_hwaccel_vda VDA + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +/** + * This structure is used to provide the necessary configurations and data + * to the VDA FFmpeg HWAccel implementation. + * + * The application must make it available as AVCodecContext.hwaccel_context. + */ +struct vda_context { + /** + * VDA decoder object. + * + * - encoding: unused + * - decoding: Set/Unset by libavcodec. + */ + VDADecoder decoder; + + /** + * The Core Video pixel buffer that contains the current image data. + * + * encoding: unused + * decoding: Set by libavcodec. Unset by user. + */ + CVPixelBufferRef cv_buffer; + + /** + * Use the hardware decoder in synchronous mode. + * + * encoding: unused + * decoding: Set by user. + */ + int use_sync_decoding; + + /** + * The frame width. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + int width; + + /** + * The frame height. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + int height; + + /** + * The frame format. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + int format; + + /** + * The pixel format for output image buffers. 
+ * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + OSType cv_pix_fmt_type; + + /** + * unused + */ + uint8_t *priv_bitstream; + + /** + * unused + */ + int priv_bitstream_size; + + /** + * unused + */ + int priv_allocated_size; + + /** + * Use av_buffer to manage buffer. + * When the flag is set, the CVPixelBuffers returned by the decoder will + * be released automatically, so you have to retain them if necessary. + * Not setting this flag may cause memory leak. + * + * encoding: unused + * decoding: Set by user. + */ + int use_ref_buffer; +}; + +/** Create the video decoder. */ +int ff_vda_create_decoder(struct vda_context *vda_ctx, + uint8_t *extradata, + int extradata_size); + +/** Destroy the video decoder. */ +int ff_vda_destroy_decoder(struct vda_context *vda_ctx); + +/** + * This struct holds all the information that needs to be passed + * between the caller and libavcodec for initializing VDA decoding. + * Its size is not a part of the public ABI, it must be allocated with + * av_vda_alloc_context() and freed with av_free(). + */ +typedef struct AVVDAContext { + /** + * VDA decoder object. Created and freed by the caller. + */ + VDADecoder decoder; + + /** + * The output callback that must be passed to VDADecoderCreate. + * Set by av_vda_alloc_context(). + */ + VDADecoderOutputCallback output_callback; + + /** + * CVPixelBuffer Format Type that VDA will use for decoded frames; set by + * the caller. + */ + OSType cv_pix_fmt_type; +} AVVDAContext; + +/** + * Allocate and initialize a VDA context. + * + * This function should be called from the get_format() callback when the caller + * selects the AV_PIX_FMT_VDA format. The caller must then create the decoder + * object (using the output callback provided by libavcodec) that will be used + * for VDA-accelerated decoding. + * + * When decoding with VDA is finished, the caller must destroy the decoder + * object and free the VDA context using av_free(). 
+ * + * @return the newly allocated context or NULL on failure + */ +AVVDAContext *av_vda_alloc_context(void); + +/** + * This is a convenience function that creates and sets up the VDA context using + * an internal implementation. + * + * @param avctx the corresponding codec context + * + * @return >= 0 on success, a negative AVERROR code on failure + */ +int av_vda_default_init(AVCodecContext *avctx); + +/** + * This is a convenience function that creates and sets up the VDA context using + * an internal implementation. + * + * @param avctx the corresponding codec context + * @param vdactx the VDA context to use + * + * @return >= 0 on success, a negative AVERROR code on failure + */ +int av_vda_default_init2(AVCodecContext *avctx, AVVDAContext *vdactx); + +/** + * This function must be called to free the VDA context initialized with + * av_vda_default_init(). + * + * @param avctx the corresponding codec context + */ +void av_vda_default_free(AVCodecContext *avctx); + +/** + * @} + */ + +#endif /* AVCODEC_VDA_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/vdpau.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/vdpau.h new file mode 100644 index 0000000..e85e4d9 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/vdpau.h @@ -0,0 +1,253 @@ +/* + * The Video Decode and Presentation API for UNIX (VDPAU) is used for + * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1. + * + * Copyright (C) 2008 NVIDIA + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VDPAU_H +#define AVCODEC_VDPAU_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vdpau + * Public libavcodec VDPAU header. + */ + + +/** + * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer + * @ingroup lavc_codec_hwaccel + * + * VDPAU hardware acceleration has two modules + * - VDPAU decoding + * - VDPAU presentation + * + * The VDPAU decoding module parses all headers using FFmpeg + * parsing mechanisms and uses VDPAU for the actual decoding. + * + * As per the current implementation, the actual decoding + * and rendering (API calls) are done as part of the VDPAU + * presentation (vo_vdpau.c) module. + * + * @{ + */ + +#include +#include +#include "libavutil/avconfig.h" +#include "libavutil/attributes.h" + +#include "avcodec.h" +#include "version.h" + +#if FF_API_BUFS_VDPAU +union AVVDPAUPictureInfo { + VdpPictureInfoH264 h264; + VdpPictureInfoMPEG1Or2 mpeg; + VdpPictureInfoVC1 vc1; + VdpPictureInfoMPEG4Part2 mpeg4; +}; +#endif + +struct AVCodecContext; +struct AVFrame; + +typedef int (*AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *, + const VdpPictureInfo *, uint32_t, + const VdpBitstreamBuffer *); + +/** + * This structure is used to share data between the libavcodec library and + * the client video application. + * The user shall allocate the structure via the av_alloc_vdpau_hwaccel + * function and make it available as + * AVCodecContext.hwaccel_context. 
Members can be set by the user once + * during initialization or through each AVCodecContext.get_buffer() + * function call. In any case, they must be valid prior to calling + * decoding functions. + * + * The size of this structure is not a part of the public ABI and must not + * be used outside of libavcodec. Use av_vdpau_alloc_context() to allocate an + * AVVDPAUContext. + */ +typedef struct AVVDPAUContext { + /** + * VDPAU decoder handle + * + * Set by user. + */ + VdpDecoder decoder; + + /** + * VDPAU decoder render callback + * + * Set by the user. + */ + VdpDecoderRender *render; + +#if FF_API_BUFS_VDPAU + /** + * VDPAU picture information + * + * Set by libavcodec. + */ + attribute_deprecated + union AVVDPAUPictureInfo info; + + /** + * Allocated size of the bitstream_buffers table. + * + * Set by libavcodec. + */ + attribute_deprecated + int bitstream_buffers_allocated; + + /** + * Useful bitstream buffers in the bitstream buffers table. + * + * Set by libavcodec. + */ + attribute_deprecated + int bitstream_buffers_used; + + /** + * Table of bitstream buffers. + * The user is responsible for freeing this buffer using av_freep(). + * + * Set by libavcodec. + */ + attribute_deprecated + VdpBitstreamBuffer *bitstream_buffers; +#endif + AVVDPAU_Render2 render2; +} AVVDPAUContext; + +/** + * @brief allocation function for AVVDPAUContext + * + * Allows extending the struct without breaking API/ABI + */ +AVVDPAUContext *av_alloc_vdpaucontext(void); + +AVVDPAU_Render2 av_vdpau_hwaccel_get_render2(const AVVDPAUContext *); +void av_vdpau_hwaccel_set_render2(AVVDPAUContext *, AVVDPAU_Render2); + +/** + * Associate a VDPAU device with a codec context for hardware acceleration. + * This function is meant to be called from the get_format() codec callback, + * or earlier. It can also be called after avcodec_flush_buffers() to change + * the underlying VDPAU device mid-stream (e.g. to recover from non-transparent + * display preemption). 
+ * + * @note get_format() must return AV_PIX_FMT_VDPAU if this function completes + * successfully. + * + * @param avctx decoding context whose get_format() callback is invoked + * @param device VDPAU device handle to use for hardware acceleration + * @param get_proc_address VDPAU device driver + * @param flags zero of more OR'd AV_HWACCEL_FLAG_* flags + * + * @return 0 on success, an AVERROR code on failure. + */ +int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device, + VdpGetProcAddress *get_proc_address, unsigned flags); + +/** + * Gets the parameters to create an adequate VDPAU video surface for the codec + * context using VDPAU hardware decoding acceleration. + * + * @note Behavior is undefined if the context was not successfully bound to a + * VDPAU device using av_vdpau_bind_context(). + * + * @param avctx the codec context being used for decoding the stream + * @param type storage space for the VDPAU video surface chroma type + * (or NULL to ignore) + * @param width storage space for the VDPAU video surface pixel width + * (or NULL to ignore) + * @param height storage space for the VDPAU video surface pixel height + * (or NULL to ignore) + * + * @return 0 on success, a negative AVERROR code on failure. + */ +int av_vdpau_get_surface_parameters(AVCodecContext *avctx, VdpChromaType *type, + uint32_t *width, uint32_t *height); + +/** + * Allocate an AVVDPAUContext. + * + * @return Newly-allocated AVVDPAUContext or NULL on failure. + */ +AVVDPAUContext *av_vdpau_alloc_context(void); + +#if FF_API_VDPAU_PROFILE +/** + * Get a decoder profile that should be used for initializing a VDPAU decoder. + * Should be called from the AVCodecContext.get_format() callback. + * + * @deprecated Use av_vdpau_bind_context() instead. + * + * @param avctx the codec context being used for decoding the stream + * @param profile a pointer into which the result will be written on success. + * The contents of profile are undefined if this function returns + * an error. 
+ * + * @return 0 on success (non-negative), a negative AVERROR on failure. + */ +attribute_deprecated +int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile); +#endif + +#if FF_API_CAP_VDPAU +/** @brief The videoSurface is used for rendering. */ +#define FF_VDPAU_STATE_USED_FOR_RENDER 1 + +/** + * @brief The videoSurface is needed for reference/prediction. + * The codec manipulates this. + */ +#define FF_VDPAU_STATE_USED_FOR_REFERENCE 2 + +/** + * @brief This structure is used as a callback between the FFmpeg + * decoder (vd_) and presentation (vo_) module. + * This is used for defining a video frame containing surface, + * picture parameter, bitstream information etc which are passed + * between the FFmpeg decoder and its clients. + */ +struct vdpau_render_state { + VdpVideoSurface surface; ///< Used as rendered surface, never changed. + + int state; ///< Holds FF_VDPAU_STATE_* values. + + /** picture parameter information for all supported codecs */ + union AVVDPAUPictureInfo info; + + /** Describe size/location of the compressed video data. + Set to 0 when freeing bitstream_buffers. */ + int bitstream_buffers_allocated; + int bitstream_buffers_used; + /** The user is responsible for freeing this buffer using av_freep(). */ + VdpBitstreamBuffer *bitstream_buffers; +}; +#endif + +/* @}*/ + +#endif /* AVCODEC_VDPAU_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/version.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/version.h new file mode 100644 index 0000000..51df9e0 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/version.h @@ -0,0 +1,243 @@ +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VERSION_H +#define AVCODEC_VERSION_H + +/** + * @file + * @ingroup libavc + * Libavcodec version macros. + */ + +#include "libavutil/version.h" + +#define LIBAVCODEC_VERSION_MAJOR 57 +#define LIBAVCODEC_VERSION_MINOR 89 +#define LIBAVCODEC_VERSION_MICRO 100 + +#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ + LIBAVCODEC_VERSION_MINOR, \ + LIBAVCODEC_VERSION_MICRO) +#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \ + LIBAVCODEC_VERSION_MINOR, \ + LIBAVCODEC_VERSION_MICRO) +#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT + +#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + * + * @note, when bumping the major version it is recommended to manually + * disable each FF_API_* in its own commit instead of disabling them all + * at once through the bump. This improves the git bisect-ability of the change. 
+ */ + +#ifndef FF_API_VIMA_DECODER +#define FF_API_VIMA_DECODER (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_AUDIO_CONVERT +#define FF_API_AUDIO_CONVERT (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_AVCODEC_RESAMPLE +#define FF_API_AVCODEC_RESAMPLE FF_API_AUDIO_CONVERT +#endif +#ifndef FF_API_GETCHROMA +#define FF_API_GETCHROMA (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_MISSING_SAMPLE +#define FF_API_MISSING_SAMPLE (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_LOWRES +#define FF_API_LOWRES (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_CAP_VDPAU +#define FF_API_CAP_VDPAU (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_BUFS_VDPAU +#define FF_API_BUFS_VDPAU (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_VOXWARE +#define FF_API_VOXWARE (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_SET_DIMENSIONS +#define FF_API_SET_DIMENSIONS (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_DEBUG_MV +#define FF_API_DEBUG_MV (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_AC_VLC +#define FF_API_AC_VLC (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_OLD_MSMPEG4 +#define FF_API_OLD_MSMPEG4 (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_ASPECT_EXTENDED +#define FF_API_ASPECT_EXTENDED (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_ARCH_ALPHA +#define FF_API_ARCH_ALPHA (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_XVMC +#define FF_API_XVMC (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_ERROR_RATE +#define FF_API_ERROR_RATE (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_QSCALE_TYPE +#define FF_API_QSCALE_TYPE (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_MB_TYPE +#define FF_API_MB_TYPE (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_MAX_BFRAMES +#define FF_API_MAX_BFRAMES (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_NEG_LINESIZES +#define FF_API_NEG_LINESIZES (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef 
FF_API_EMU_EDGE +#define FF_API_EMU_EDGE (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_ARCH_SH4 +#define FF_API_ARCH_SH4 (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_ARCH_SPARC +#define FF_API_ARCH_SPARC (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_UNUSED_MEMBERS +#define FF_API_UNUSED_MEMBERS (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_IDCT_XVIDMMX +#define FF_API_IDCT_XVIDMMX (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_INPUT_PRESERVED +#define FF_API_INPUT_PRESERVED (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_NORMALIZE_AQP +#define FF_API_NORMALIZE_AQP (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_GMC +#define FF_API_GMC (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_MV0 +#define FF_API_MV0 (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_CODEC_NAME +#define FF_API_CODEC_NAME (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_AFD +#define FF_API_AFD (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_VISMV +/* XXX: don't forget to drop the -vismv documentation */ +#define FF_API_VISMV (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_AUDIOENC_DELAY +#define FF_API_AUDIOENC_DELAY (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_VAAPI_CONTEXT +#define FF_API_VAAPI_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_MERGE_SD +#define FF_API_MERGE_SD (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_AVCTX_TIMEBASE +#define FF_API_AVCTX_TIMEBASE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_MPV_OPT +#define FF_API_MPV_OPT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_STREAM_CODEC_TAG +#define FF_API_STREAM_CODEC_TAG (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_QUANT_BIAS +#define FF_API_QUANT_BIAS (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_RC_STRATEGY +#define FF_API_RC_STRATEGY (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_CODED_FRAME +#define FF_API_CODED_FRAME 
(LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_MOTION_EST +#define FF_API_MOTION_EST (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_WITHOUT_PREFIX +#define FF_API_WITHOUT_PREFIX (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_SIDEDATA_ONLY_PKT +#define FF_API_SIDEDATA_ONLY_PKT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_VDPAU_PROFILE +#define FF_API_VDPAU_PROFILE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_CONVERGENCE_DURATION +#define FF_API_CONVERGENCE_DURATION (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_AVPICTURE +#define FF_API_AVPICTURE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_AVPACKET_OLD_API +#define FF_API_AVPACKET_OLD_API (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_RTP_CALLBACK +#define FF_API_RTP_CALLBACK (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_VBV_DELAY +#define FF_API_VBV_DELAY (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_CODER_TYPE +#define FF_API_CODER_TYPE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_STAT_BITS +#define FF_API_STAT_BITS (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_PRIVATE_OPT +#define FF_API_PRIVATE_OPT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_ASS_TIMING +#define FF_API_ASS_TIMING (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_OLD_BSF +#define FF_API_OLD_BSF (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_COPY_CONTEXT +#define FF_API_COPY_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_GET_CONTEXT_DEFAULTS +#define FF_API_GET_CONTEXT_DEFAULTS (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_NVENC_OLD_NAME +#define FF_API_NVENC_OLD_NAME (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_STRUCT_VAAPI_CONTEXT +#define FF_API_STRUCT_VAAPI_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_MERGE_SD_API +#define FF_API_MERGE_SD_API (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_TAG_STRING +#define FF_API_TAG_STRING 
(LIBAVCODEC_VERSION_MAJOR < 59) +#endif + + +#endif /* AVCODEC_VERSION_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/videotoolbox.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/videotoolbox.h new file mode 100644 index 0000000..af2db0d --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/videotoolbox.h @@ -0,0 +1,127 @@ +/* + * Videotoolbox hardware acceleration + * + * copyright (c) 2012 Sebastien Zwickert + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VIDEOTOOLBOX_H +#define AVCODEC_VIDEOTOOLBOX_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_videotoolbox + * Public libavcodec Videotoolbox header. + */ + +#include + +#define Picture QuickdrawPicture +#include +#undef Picture + +#include "libavcodec/avcodec.h" + +/** + * This struct holds all the information that needs to be passed + * between the caller and libavcodec for initializing Videotoolbox decoding. + * Its size is not a part of the public ABI, it must be allocated with + * av_videotoolbox_alloc_context() and freed with av_free(). + */ +typedef struct AVVideotoolboxContext { + /** + * Videotoolbox decompression session object. 
+ * Created and freed the caller. + */ + VTDecompressionSessionRef session; + + /** + * The output callback that must be passed to the session. + * Set by av_videottoolbox_default_init() + */ + VTDecompressionOutputCallback output_callback; + + /** + * CVPixelBuffer Format Type that Videotoolbox will use for decoded frames. + * set by the caller. If this is set to 0, then no specific format is + * requested from the decoder, and its native format is output. + */ + OSType cv_pix_fmt_type; + + /** + * CoreMedia Format Description that Videotoolbox will use to create the decompression session. + * Set by the caller. + */ + CMVideoFormatDescriptionRef cm_fmt_desc; + + /** + * CoreMedia codec type that Videotoolbox will use to create the decompression session. + * Set by the caller. + */ + int cm_codec_type; +} AVVideotoolboxContext; + +/** + * Allocate and initialize a Videotoolbox context. + * + * This function should be called from the get_format() callback when the caller + * selects the AV_PIX_FMT_VIDETOOLBOX format. The caller must then create + * the decoder object (using the output callback provided by libavcodec) that + * will be used for Videotoolbox-accelerated decoding. + * + * When decoding with Videotoolbox is finished, the caller must destroy the decoder + * object and free the Videotoolbox context using av_free(). + * + * @return the newly allocated context or NULL on failure + */ +AVVideotoolboxContext *av_videotoolbox_alloc_context(void); + +/** + * This is a convenience function that creates and sets up the Videotoolbox context using + * an internal implementation. + * + * @param avctx the corresponding codec context + * + * @return >= 0 on success, a negative AVERROR code on failure + */ +int av_videotoolbox_default_init(AVCodecContext *avctx); + +/** + * This is a convenience function that creates and sets up the Videotoolbox context using + * an internal implementation. 
+ * + * @param avctx the corresponding codec context + * @param vtctx the Videotoolbox context to use + * + * @return >= 0 on success, a negative AVERROR code on failure + */ +int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx); + +/** + * This function must be called to free the Videotoolbox context initialized with + * av_videotoolbox_default_init(). + * + * @param avctx the corresponding codec context + */ +void av_videotoolbox_default_free(AVCodecContext *avctx); + +/** + * @} + */ + +#endif /* AVCODEC_VIDEOTOOLBOX_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/vorbis_parser.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/vorbis_parser.h new file mode 100644 index 0000000..9205027 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/vorbis_parser.h @@ -0,0 +1,77 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * A public API for Vorbis parsing + * + * Determines the duration for each packet. 
+ */ + +#ifndef AVCODEC_VORBIS_PARSER_H +#define AVCODEC_VORBIS_PARSER_H + +#include + +typedef struct AVVorbisParseContext AVVorbisParseContext; + +/** + * Allocate and initialize the Vorbis parser using headers in the extradata. + * + * @param avctx codec context + * @param s Vorbis parser context + */ +AVVorbisParseContext *av_vorbis_parse_init(const uint8_t *extradata, + int extradata_size); + +/** + * Free the parser and everything associated with it. + */ +void av_vorbis_parse_free(AVVorbisParseContext **s); + +#define VORBIS_FLAG_HEADER 0x00000001 +#define VORBIS_FLAG_COMMENT 0x00000002 +#define VORBIS_FLAG_SETUP 0x00000004 + +/** + * Get the duration for a Vorbis packet. + * + * If @p flags is @c NULL, + * special frames are considered invalid. + * + * @param s Vorbis parser context + * @param buf buffer containing a Vorbis frame + * @param buf_size size of the buffer + * @param flags flags for special frames + */ +int av_vorbis_parse_frame_flags(AVVorbisParseContext *s, const uint8_t *buf, + int buf_size, int *flags); + +/** + * Get the duration for a Vorbis packet. + * + * @param s Vorbis parser context + * @param buf buffer containing a Vorbis frame + * @param buf_size size of the buffer + */ +int av_vorbis_parse_frame(AVVorbisParseContext *s, const uint8_t *buf, + int buf_size); + +void av_vorbis_parse_reset(AVVorbisParseContext *s); + +#endif /* AVCODEC_VORBIS_PARSER_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/xvmc.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/xvmc.h new file mode 100644 index 0000000..465ee78 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavcodec/xvmc.h @@ -0,0 +1,170 @@ +/* + * Copyright (C) 2003 Ivan Kalvachev + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_XVMC_H +#define AVCODEC_XVMC_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_xvmc + * Public libavcodec XvMC header. + */ + +#include + +#include "libavutil/attributes.h" +#include "version.h" +#include "avcodec.h" + +/** + * @defgroup lavc_codec_hwaccel_xvmc XvMC + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct + the number is 1337 speak for the letters IDCT MCo (motion compensation) */ + +struct attribute_deprecated xvmc_pix_fmt { + /** The field contains the special constant value AV_XVMC_ID. + It is used as a test that the application correctly uses the API, + and that there is no corruption caused by pixel routines. + - application - set during initialization + - libavcodec - unchanged + */ + int xvmc_id; + + /** Pointer to the block array allocated by XvMCCreateBlocks(). + The array has to be freed by XvMCDestroyBlocks(). + Each group of 64 values represents one data block of differential + pixel information (in MoCo mode) or coefficients for IDCT. 
+ - application - set the pointer during initialization + - libavcodec - fills coefficients/pixel data into the array + */ + short* data_blocks; + + /** Pointer to the macroblock description array allocated by + XvMCCreateMacroBlocks() and freed by XvMCDestroyMacroBlocks(). + - application - set the pointer during initialization + - libavcodec - fills description data into the array + */ + XvMCMacroBlock* mv_blocks; + + /** Number of macroblock descriptions that can be stored in the mv_blocks + array. + - application - set during initialization + - libavcodec - unchanged + */ + int allocated_mv_blocks; + + /** Number of blocks that can be stored at once in the data_blocks array. + - application - set during initialization + - libavcodec - unchanged + */ + int allocated_data_blocks; + + /** Indicate that the hardware would interpret data_blocks as IDCT + coefficients and perform IDCT on them. + - application - set during initialization + - libavcodec - unchanged + */ + int idct; + + /** In MoCo mode it indicates that intra macroblocks are assumed to be in + unsigned format; same as the XVMC_INTRA_UNSIGNED flag. + - application - set during initialization + - libavcodec - unchanged + */ + int unsigned_intra; + + /** Pointer to the surface allocated by XvMCCreateSurface(). + It has to be freed by XvMCDestroySurface() on application exit. + It identifies the frame and its state on the video hardware. + - application - set during initialization + - libavcodec - unchanged + */ + XvMCSurface* p_surface; + +/** Set by the decoder before calling ff_draw_horiz_band(), + needed by the XvMCRenderSurface function. 
*/ +//@{ + /** Pointer to the surface used as past reference + - application - unchanged + - libavcodec - set + */ + XvMCSurface* p_past_surface; + + /** Pointer to the surface used as future reference + - application - unchanged + - libavcodec - set + */ + XvMCSurface* p_future_surface; + + /** top/bottom field or frame + - application - unchanged + - libavcodec - set + */ + unsigned int picture_structure; + + /** XVMC_SECOND_FIELD - 1st or 2nd field in the sequence + - application - unchanged + - libavcodec - set + */ + unsigned int flags; +//}@ + + /** Number of macroblock descriptions in the mv_blocks array + that have already been passed to the hardware. + - application - zeroes it on get_buffer(). + A successful ff_draw_horiz_band() may increment it + with filled_mb_block_num or zero both. + - libavcodec - unchanged + */ + int start_mv_blocks_num; + + /** Number of new macroblock descriptions in the mv_blocks array (after + start_mv_blocks_num) that are filled by libavcodec and have to be + passed to the hardware. + - application - zeroes it on get_buffer() or after successful + ff_draw_horiz_band(). + - libavcodec - increment with one of each stored MB + */ + int filled_mv_blocks_num; + + /** Number of the next free data block; one data block consists of + 64 short values in the data_blocks array. + All blocks before this one have already been claimed by placing their + position into the corresponding block description structure field, + that are part of the mv_blocks array. + - application - zeroes it on get_buffer(). + A successful ff_draw_horiz_band() may zero it together + with start_mb_blocks_num. + - libavcodec - each decoded macroblock increases it by the number + of coded blocks it contains. 
+ */ + int next_free_data_block_num; +}; + +/** + * @} + */ + +#endif /* AVCODEC_XVMC_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavfilter/avfilter.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavfilter/avfilter.h new file mode 100644 index 0000000..60662c1 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavfilter/avfilter.h @@ -0,0 +1,1182 @@ +/* + * filter layer + * Copyright (c) 2007 Bobby Bingham + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AVFILTER_H +#define AVFILTER_AVFILTER_H + +/** + * @file + * @ingroup lavfi + * Main libavfilter public API header + */ + +/** + * @defgroup lavfi libavfilter + * Graph-based frame editing library. + * + * @{ + */ + +#include + +#include "libavutil/attributes.h" +#include "libavutil/avutil.h" +#include "libavutil/buffer.h" +#include "libavutil/dict.h" +#include "libavutil/frame.h" +#include "libavutil/log.h" +#include "libavutil/samplefmt.h" +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" + +#include "libavfilter/version.h" + +/** + * Return the LIBAVFILTER_VERSION_INT constant. 
+ */ +unsigned avfilter_version(void); + +/** + * Return the libavfilter build-time configuration. + */ +const char *avfilter_configuration(void); + +/** + * Return the libavfilter license. + */ +const char *avfilter_license(void); + +typedef struct AVFilterContext AVFilterContext; +typedef struct AVFilterLink AVFilterLink; +typedef struct AVFilterPad AVFilterPad; +typedef struct AVFilterFormats AVFilterFormats; + +/** + * Get the number of elements in a NULL-terminated array of AVFilterPads (e.g. + * AVFilter.inputs/outputs). + */ +int avfilter_pad_count(const AVFilterPad *pads); + +/** + * Get the name of an AVFilterPad. + * + * @param pads an array of AVFilterPads + * @param pad_idx index of the pad in the array it; is the caller's + * responsibility to ensure the index is valid + * + * @return name of the pad_idx'th pad in pads + */ +const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx); + +/** + * Get the type of an AVFilterPad. + * + * @param pads an array of AVFilterPads + * @param pad_idx index of the pad in the array; it is the caller's + * responsibility to ensure the index is valid + * + * @return type of the pad_idx'th pad in pads + */ +enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx); + +/** + * The number of the filter inputs is not determined just by AVFilter.inputs. + * The filter might add additional inputs during initialization depending on the + * options supplied to it. + */ +#define AVFILTER_FLAG_DYNAMIC_INPUTS (1 << 0) +/** + * The number of the filter outputs is not determined just by AVFilter.outputs. + * The filter might add additional outputs during initialization depending on + * the options supplied to it. + */ +#define AVFILTER_FLAG_DYNAMIC_OUTPUTS (1 << 1) +/** + * The filter supports multithreading by splitting frames into multiple parts + * and processing them concurrently. 
+ */ +#define AVFILTER_FLAG_SLICE_THREADS (1 << 2) +/** + * Some filters support a generic "enable" expression option that can be used + * to enable or disable a filter in the timeline. Filters supporting this + * option have this flag set. When the enable expression is false, the default + * no-op filter_frame() function is called in place of the filter_frame() + * callback defined on each input pad, thus the frame is passed unchanged to + * the next filters. + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC (1 << 16) +/** + * Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will + * have its filter_frame() callback(s) called as usual even when the enable + * expression is false. The filter will disable filtering within the + * filter_frame() callback(s) itself, for example executing code depending on + * the AVFilterContext->is_disabled value. + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL (1 << 17) +/** + * Handy mask to test whether the filter supports or no the timeline feature + * (internally or generically). + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE (AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL) + +/** + * Filter definition. This defines the pads a filter contains, and all the + * callback functions used to interact with the filter. + */ +typedef struct AVFilter { + /** + * Filter name. Must be non-NULL and unique among filters. + */ + const char *name; + + /** + * A description of the filter. May be NULL. + * + * You should use the NULL_IF_CONFIG_SMALL() macro to define it. + */ + const char *description; + + /** + * List of inputs, terminated by a zeroed element. + * + * NULL if there are no (static) inputs. Instances of filters with + * AVFILTER_FLAG_DYNAMIC_INPUTS set may have more inputs than present in + * this list. + */ + const AVFilterPad *inputs; + /** + * List of outputs, terminated by a zeroed element. + * + * NULL if there are no (static) outputs. 
Instances of filters with + * AVFILTER_FLAG_DYNAMIC_OUTPUTS set may have more outputs than present in + * this list. + */ + const AVFilterPad *outputs; + + /** + * A class for the private data, used to declare filter private AVOptions. + * This field is NULL for filters that do not declare any options. + * + * If this field is non-NULL, the first member of the filter private data + * must be a pointer to AVClass, which will be set by libavfilter generic + * code to this class. + */ + const AVClass *priv_class; + + /** + * A combination of AVFILTER_FLAG_* + */ + int flags; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavfilter and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + /** + * Filter initialization function. + * + * This callback will be called only once during the filter lifetime, after + * all the options have been set, but before links between filters are + * established and format negotiation is done. + * + * Basic filter initialization should be done here. Filters with dynamic + * inputs and/or outputs should create those inputs/outputs here based on + * provided options. No more changes to this filter's inputs/outputs can be + * done after this callback. + * + * This callback must not assume that the filter links exist or frame + * parameters are known. + * + * @ref AVFilter.uninit "uninit" is guaranteed to be called even if + * initialization fails, so this callback does not have to clean up on + * failure. + * + * @return 0 on success, a negative AVERROR on failure + */ + int (*init)(AVFilterContext *ctx); + + /** + * Should be set instead of @ref AVFilter.init "init" by the filters that + * want to pass a dictionary of AVOptions to nested contexts that are + * allocated during init. 
+ * + * On return, the options dict should be freed and replaced with one that + * contains all the options which could not be processed by this filter (or + * with NULL if all the options were processed). + * + * Otherwise the semantics is the same as for @ref AVFilter.init "init". + */ + int (*init_dict)(AVFilterContext *ctx, AVDictionary **options); + + /** + * Filter uninitialization function. + * + * Called only once right before the filter is freed. Should deallocate any + * memory held by the filter, release any buffer references, etc. It does + * not need to deallocate the AVFilterContext.priv memory itself. + * + * This callback may be called even if @ref AVFilter.init "init" was not + * called or failed, so it must be prepared to handle such a situation. + */ + void (*uninit)(AVFilterContext *ctx); + + /** + * Query formats supported by the filter on its inputs and outputs. + * + * This callback is called after the filter is initialized (so the inputs + * and outputs are fixed), shortly before the format negotiation. This + * callback may be called more than once. + * + * This callback must set AVFilterLink.out_formats on every input link and + * AVFilterLink.in_formats on every output link to a list of pixel/sample + * formats that the filter supports on that link. For audio links, this + * filter must also set @ref AVFilterLink.in_samplerates "in_samplerates" / + * @ref AVFilterLink.out_samplerates "out_samplerates" and + * @ref AVFilterLink.in_channel_layouts "in_channel_layouts" / + * @ref AVFilterLink.out_channel_layouts "out_channel_layouts" analogously. + * + * This callback may be NULL for filters with one input, in which case + * libavfilter assumes that it supports all input formats and preserves + * them on output. 
+ * + * @return zero on success, a negative value corresponding to an + * AVERROR code otherwise + */ + int (*query_formats)(AVFilterContext *); + + int priv_size; ///< size of private data to allocate for the filter + + int flags_internal; ///< Additional flags for avfilter internal use only. + + /** + * Used by the filter registration system. Must not be touched by any other + * code. + */ + struct AVFilter *next; + + /** + * Make the filter instance process a command. + * + * @param cmd the command to process, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param res a buffer with size res_size where the filter(s) can return a response. This must not change when the command is not supported. + * @param flags if AVFILTER_CMD_FLAG_FAST is set and the command would be + * time consuming then a filter should treat it like an unsupported command + * + * @returns >=0 on success otherwise an error code. + * AVERROR(ENOSYS) on unsupported commands + */ + int (*process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags); + + /** + * Filter initialization function, alternative to the init() + * callback. Args contains the user-supplied parameters, opaque is + * used for providing binary data. + */ + int (*init_opaque)(AVFilterContext *ctx, void *opaque); + + /** + * Filter activation function. + * + * Called when any processing is needed from the filter, instead of any + * filter_frame and request_frame on pads. + * + * The function must examine inlinks and outlinks and perform a single + * step of processing. If there is nothing to do, the function must do + * nothing and not return an error. If more steps are or may be + * possible, it must use ff_filter_set_ready() to schedule another + * activation. + */ + int (*activate)(AVFilterContext *ctx); +} AVFilter; + +/** + * Process multiple parts of the frame concurrently. 
+ */ +#define AVFILTER_THREAD_SLICE (1 << 0) + +typedef struct AVFilterInternal AVFilterInternal; + +/** An instance of a filter */ +struct AVFilterContext { + const AVClass *av_class; ///< needed for av_log() and filters common options + + const AVFilter *filter; ///< the AVFilter of which this is an instance + + char *name; ///< name of this filter instance + + AVFilterPad *input_pads; ///< array of input pads + AVFilterLink **inputs; ///< array of pointers to input links + unsigned nb_inputs; ///< number of input pads + + AVFilterPad *output_pads; ///< array of output pads + AVFilterLink **outputs; ///< array of pointers to output links + unsigned nb_outputs; ///< number of output pads + + void *priv; ///< private data for use by the filter + + struct AVFilterGraph *graph; ///< filtergraph this filter belongs to + + /** + * Type of multithreading being allowed/used. A combination of + * AVFILTER_THREAD_* flags. + * + * May be set by the caller before initializing the filter to forbid some + * or all kinds of multithreading for this filter. The default is allowing + * everything. + * + * When the filter is initialized, this field is combined using bit AND with + * AVFilterGraph.thread_type to get the final mask used for determining + * allowed threading types. I.e. a threading type needs to be set in both + * to be allowed. + * + * After the filter is initialized, libavfilter sets this field to the + * threading type that is actually used (0 for no multithreading). + */ + int thread_type; + + /** + * An opaque struct for libavfilter internal use. 
+ */ + AVFilterInternal *internal; + + struct AVFilterCommand *command_queue; + + char *enable_str; ///< enable expression string + void *enable; ///< parsed expression (AVExpr*) + double *var_values; ///< variable values for the enable expression + int is_disabled; ///< the enabled state from the last expression evaluation + + /** + * For filters which will create hardware frames, sets the device the + * filter should create them in. All other filters will ignore this field: + * in particular, a filter which consumes or processes hardware frames will + * instead use the hw_frames_ctx field in AVFilterLink to carry the + * hardware context information. + */ + AVBufferRef *hw_device_ctx; + + /** + * Max number of threads allowed in this filter instance. + * If <= 0, its value is ignored. + * Overrides global number of threads set per filter graph. + */ + int nb_threads; + + /** + * Ready status of the filter. + * A non-0 value means that the filter needs activating; + * a higher value suggests a more urgent activation. + */ + unsigned ready; +}; + +/** + * A link between two filters. This contains pointers to the source and + * destination filters between which this link exists, and the indexes of + * the pads involved. In addition, this link also contains the parameters + * which have been negotiated and agreed upon between the filter, such as + * image dimensions, format, etc. + * + * Applications must not normally access the link structure directly. + * Use the buffersrc and buffersink API instead. + * In the future, access to the header may be reserved for filters + * implementation. 
+ */ +struct AVFilterLink { + AVFilterContext *src; ///< source filter + AVFilterPad *srcpad; ///< output pad on the source filter + + AVFilterContext *dst; ///< dest filter + AVFilterPad *dstpad; ///< input pad on the dest filter + + enum AVMediaType type; ///< filter media type + + /* These parameters apply only to video */ + int w; ///< agreed upon image width + int h; ///< agreed upon image height + AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio + /* These parameters apply only to audio */ + uint64_t channel_layout; ///< channel layout of current buffer (see libavutil/channel_layout.h) + int sample_rate; ///< samples per second + + int format; ///< agreed upon media format + + /** + * Define the time base used by the PTS of the frames/samples + * which will pass through this link. + * During the configuration stage, each filter is supposed to + * change only the output timebase, while the timebase of the + * input link is assumed to be an unchangeable property. + */ + AVRational time_base; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavfilter and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + /** + * Lists of formats and channel layouts supported by the input and output + * filters respectively. These lists are used for negotiating the format + * to actually be used, which will be loaded into the format and + * channel_layout members, above, when chosen. + * + */ + AVFilterFormats *in_formats; + AVFilterFormats *out_formats; + + /** + * Lists of channel layouts and sample rates used for automatic + * negotiation. 
+ */ + AVFilterFormats *in_samplerates; + AVFilterFormats *out_samplerates; + struct AVFilterChannelLayouts *in_channel_layouts; + struct AVFilterChannelLayouts *out_channel_layouts; + + /** + * Audio only, the destination filter sets this to a non-zero value to + * request that buffers with the given number of samples should be sent to + * it. AVFilterPad.needs_fifo must also be set on the corresponding input + * pad. + * Last buffer before EOF will be padded with silence. + */ + int request_samples; + + /** stage of the initialization of the link properties (dimensions, etc) */ + enum { + AVLINK_UNINIT = 0, ///< not started + AVLINK_STARTINIT, ///< started, but incomplete + AVLINK_INIT ///< complete + } init_state; + + /** + * Graph the filter belongs to. + */ + struct AVFilterGraph *graph; + + /** + * Current timestamp of the link, as defined by the most recent + * frame(s), in link time_base units. + */ + int64_t current_pts; + + /** + * Current timestamp of the link, as defined by the most recent + * frame(s), in AV_TIME_BASE units. + */ + int64_t current_pts_us; + + /** + * Index in the age array. + */ + int age_index; + + /** + * Frame rate of the stream on the link, or 1/0 if unknown or variable; + * if left to 0/0, will be automatically copied from the first input + * of the source filter if it exists. + * + * Sources should set it to the best estimation of the real frame rate. + * If the source frame rate is unknown or variable, set this to 1/0. + * Filters should update it if necessary depending on their function. + * Sinks can use it to set a default output frame rate. + * It is similar to the r_frame_rate field in AVStream. + */ + AVRational frame_rate; + + /** + * Buffer partially filled with samples to achieve a fixed/minimum size. + */ + AVFrame *partial_buf; + + /** + * Size of the partial buffer to allocate. + * Must be between min_samples and max_samples. + */ + int partial_buf_size; + + /** + * Minimum number of samples to filter at once. 
If filter_frame() is + * called with fewer samples, it will accumulate them in partial_buf. + * This field and the related ones must not be changed after filtering + * has started. + * If 0, all related fields are ignored. + */ + int min_samples; + + /** + * Maximum number of samples to filter at once. If filter_frame() is + * called with more samples, it will split them. + */ + int max_samples; + + /** + * Number of channels. + */ + int channels; + + /** + * Link processing flags. + */ + unsigned flags; + + /** + * Number of past frames sent through the link. + */ + int64_t frame_count_in, frame_count_out; + + /** + * A pointer to a FFFramePool struct. + */ + void *frame_pool; + + /** + * True if a frame is currently wanted on the output of this filter. + * Set when ff_request_frame() is called by the output, + * cleared when a frame is filtered. + */ + int frame_wanted_out; + + /** + * For hwaccel pixel formats, this should be a reference to the + * AVHWFramesContext describing the frames. + */ + AVBufferRef *hw_frames_ctx; + +#ifndef FF_INTERNAL_FIELDS + + /** + * Internal structure members. + * The fields below this limit are internal for libavfilter's use + * and must in no way be accessed by applications. + */ + char reserved[0xF000]; + +#else /* FF_INTERNAL_FIELDS */ + + /** + * Queue of frames waiting to be filtered. + */ + FFFrameQueue fifo; + + /** + * If set, the source filter can not generate a frame as is. + * The goal is to avoid repeatedly calling the request_frame() method on + * the same link. + */ + int frame_blocked_in; + + /** + * Link input status. + * If not zero, all attempts of filter_frame will fail with the + * corresponding code. + */ + int status_in; + + /** + * Timestamp of the input status change. + */ + int64_t status_in_pts; + + /** + * Link output status. + * If not zero, all attempts of request_frame will fail with the + * corresponding code. 
+ */ + int status_out; + +#endif /* FF_INTERNAL_FIELDS */ + +}; + +/** + * Link two filters together. + * + * @param src the source filter + * @param srcpad index of the output pad on the source filter + * @param dst the destination filter + * @param dstpad index of the input pad on the destination filter + * @return zero on success + */ +int avfilter_link(AVFilterContext *src, unsigned srcpad, + AVFilterContext *dst, unsigned dstpad); + +/** + * Free the link in *link, and set its pointer to NULL. + */ +void avfilter_link_free(AVFilterLink **link); + +/** + * Get the number of channels of a link. + */ +int avfilter_link_get_channels(AVFilterLink *link); + +/** + * Set the closed field of a link. + * @deprecated applications are not supposed to mess with links, they should + * close the sinks. + */ +attribute_deprecated +void avfilter_link_set_closed(AVFilterLink *link, int closed); + +/** + * Negotiate the media format, dimensions, etc of all inputs to a filter. + * + * @param filter the filter to negotiate the properties for its inputs + * @return zero on successful negotiation + */ +int avfilter_config_links(AVFilterContext *filter); + +#define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically +#define AVFILTER_CMD_FLAG_FAST 2 ///< Only execute command when its fast (like a video out that supports contrast adjustment in hw) + +/** + * Make the filter instance process a command. + * It is recommended to use avfilter_graph_send_command(). + */ +int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags); + +/** Initialize the filter system. Register all builtin filters. */ +void avfilter_register_all(void); + +#if FF_API_OLD_FILTER_REGISTER +/** Uninitialize the filter system. Unregister all filters. */ +attribute_deprecated +void avfilter_uninit(void); +#endif + +/** + * Register a filter. 
This is only needed if you plan to use + * avfilter_get_by_name later to lookup the AVFilter structure by name. A + * filter can still by instantiated with avfilter_graph_alloc_filter even if it + * is not registered. + * + * @param filter the filter to register + * @return 0 if the registration was successful, a negative value + * otherwise + */ +int avfilter_register(AVFilter *filter); + +/** + * Get a filter definition matching the given name. + * + * @param name the filter name to find + * @return the filter definition, if any matching one is registered. + * NULL if none found. + */ +#if !FF_API_NOCONST_GET_NAME +const +#endif +AVFilter *avfilter_get_by_name(const char *name); + +/** + * Iterate over all registered filters. + * @return If prev is non-NULL, next registered filter after prev or NULL if + * prev is the last filter. If prev is NULL, return the first registered filter. + */ +const AVFilter *avfilter_next(const AVFilter *prev); + +#if FF_API_OLD_FILTER_REGISTER +/** + * If filter is NULL, returns a pointer to the first registered filter pointer, + * if filter is non-NULL, returns the next pointer after filter. + * If the returned pointer points to NULL, the last registered filter + * was already reached. + * @deprecated use avfilter_next() + */ +attribute_deprecated +AVFilter **av_filter_next(AVFilter **filter); +#endif + +#if FF_API_AVFILTER_OPEN +/** + * Create a filter instance. + * + * @param filter_ctx put here a pointer to the created filter context + * on success, NULL on failure + * @param filter the filter to create an instance of + * @param inst_name Name to give to the new instance. Can be NULL for none. + * @return >= 0 in case of success, a negative error code otherwise + * @deprecated use avfilter_graph_alloc_filter() instead + */ +attribute_deprecated +int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name); +#endif + + +#if FF_API_AVFILTER_INIT_FILTER +/** + * Initialize a filter. 
+ * + * @param filter the filter to initialize + * @param args A string of parameters to use when initializing the filter. + * The format and meaning of this string varies by filter. + * @param opaque Any extra non-string data needed by the filter. The meaning + * of this parameter varies by filter. + * @return zero on success + */ +attribute_deprecated +int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque); +#endif + +/** + * Initialize a filter with the supplied parameters. + * + * @param ctx uninitialized filter context to initialize + * @param args Options to initialize the filter with. This must be a + * ':'-separated list of options in the 'key=value' form. + * May be NULL if the options have been set directly using the + * AVOptions API or there are no options that need to be set. + * @return 0 on success, a negative AVERROR on failure + */ +int avfilter_init_str(AVFilterContext *ctx, const char *args); + +/** + * Initialize a filter with the supplied dictionary of options. + * + * @param ctx uninitialized filter context to initialize + * @param options An AVDictionary filled with options for this filter. On + * return this parameter will be destroyed and replaced with + * a dict containing options that were not found. This dictionary + * must be freed by the caller. + * May be NULL, then this function is equivalent to + * avfilter_init_str() with the second parameter set to NULL. + * @return 0 on success, a negative AVERROR on failure + * + * @note This function and avfilter_init_str() do essentially the same thing, + * the difference is in manner in which the options are passed. It is up to the + * calling code to choose whichever is more preferable. The two functions also + * behave differently when some of the provided options are not declared as + * supported by the filter. In such a case, avfilter_init_str() will fail, but + * this function will leave those extra options in the options AVDictionary and + * continue as usual. 
+ */ +int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options); + +/** + * Free a filter context. This will also remove the filter from its + * filtergraph's list of filters. + * + * @param filter the filter to free + */ +void avfilter_free(AVFilterContext *filter); + +/** + * Insert a filter in the middle of an existing link. + * + * @param link the link into which the filter should be inserted + * @param filt the filter to be inserted + * @param filt_srcpad_idx the input pad on the filter to connect + * @param filt_dstpad_idx the output pad on the filter to connect + * @return zero on success + */ +int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt, + unsigned filt_srcpad_idx, unsigned filt_dstpad_idx); + +/** + * @return AVClass for AVFilterContext. + * + * @see av_opt_find(). + */ +const AVClass *avfilter_get_class(void); + +typedef struct AVFilterGraphInternal AVFilterGraphInternal; + +/** + * A function pointer passed to the @ref AVFilterGraph.execute callback to be + * executed multiple times, possibly in parallel. + * + * @param ctx the filter context the job belongs to + * @param arg an opaque parameter passed through from @ref + * AVFilterGraph.execute + * @param jobnr the index of the job being executed + * @param nb_jobs the total number of jobs + * + * @return 0 on success, a negative AVERROR on error + */ +typedef int (avfilter_action_func)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs); + +/** + * A function executing multiple jobs, possibly in parallel. 
+ * + * @param ctx the filter context to which the jobs belong + * @param func the function to be called multiple times + * @param arg the argument to be passed to func + * @param ret a nb_jobs-sized array to be filled with return values from each + * invocation of func + * @param nb_jobs the number of jobs to execute + * + * @return 0 on success, a negative AVERROR on error + */ +typedef int (avfilter_execute_func)(AVFilterContext *ctx, avfilter_action_func *func, + void *arg, int *ret, int nb_jobs); + +typedef struct AVFilterGraph { + const AVClass *av_class; + AVFilterContext **filters; + unsigned nb_filters; + + char *scale_sws_opts; ///< sws options to use for the auto-inserted scale filters +#if FF_API_LAVR_OPTS + attribute_deprecated char *resample_lavr_opts; ///< libavresample options to use for the auto-inserted resample filters +#endif + + /** + * Type of multithreading allowed for filters in this graph. A combination + * of AVFILTER_THREAD_* flags. + * + * May be set by the caller at any point, the setting will apply to all + * filters initialized after that. The default is allowing everything. + * + * When a filter in this graph is initialized, this field is combined using + * bit AND with AVFilterContext.thread_type to get the final mask used for + * determining allowed threading types. I.e. a threading type needs to be + * set in both to be allowed. + */ + int thread_type; + + /** + * Maximum number of threads used by filters in this graph. May be set by + * the caller before adding any filters to the filtergraph. Zero (the + * default) means that the number of threads is determined automatically. + */ + int nb_threads; + + /** + * Opaque object for libavfilter internal use. + */ + AVFilterGraphInternal *internal; + + /** + * Opaque user data. May be set by the caller to an arbitrary value, e.g. to + * be used from callbacks like @ref AVFilterGraph.execute. + * Libavfilter will not touch this field in any way. 
+ */ + void *opaque; + + /** + * This callback may be set by the caller immediately after allocating the + * graph and before adding any filters to it, to provide a custom + * multithreading implementation. + * + * If set, filters with slice threading capability will call this callback + * to execute multiple jobs in parallel. + * + * If this field is left unset, libavfilter will use its internal + * implementation, which may or may not be multithreaded depending on the + * platform and build options. + */ + avfilter_execute_func *execute; + + char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions + + /** + * Private fields + * + * The following fields are for internal use only. + * Their type, offset, number and semantic can change without notice. + */ + + AVFilterLink **sink_links; + int sink_links_count; + + unsigned disable_auto_convert; +} AVFilterGraph; + +/** + * Allocate a filter graph. + * + * @return the allocated filter graph on success or NULL. + */ +AVFilterGraph *avfilter_graph_alloc(void); + +/** + * Create a new filter instance in a filter graph. + * + * @param graph graph in which the new filter will be used + * @param filter the filter to create an instance of + * @param name Name to give to the new instance (will be copied to + * AVFilterContext.name). This may be used by the caller to identify + * different filters, libavfilter itself assigns no semantics to + * this parameter. May be NULL. + * + * @return the context of the newly created filter instance (note that it is + * also retrievable directly through AVFilterGraph.filters or with + * avfilter_graph_get_filter()) on success or NULL on failure. + */ +AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph, + const AVFilter *filter, + const char *name); + +/** + * Get a filter instance identified by instance name from graph. + * + * @param graph filter graph to search through. 
+ * @param name filter instance name (should be unique in the graph). + * @return the pointer to the found filter instance or NULL if it + * cannot be found. + */ +AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, const char *name); + +#if FF_API_AVFILTER_OPEN +/** + * Add an existing filter instance to a filter graph. + * + * @param graphctx the filter graph + * @param filter the filter to be added + * + * @deprecated use avfilter_graph_alloc_filter() to allocate a filter in a + * filter graph + */ +attribute_deprecated +int avfilter_graph_add_filter(AVFilterGraph *graphctx, AVFilterContext *filter); +#endif + +/** + * Create and add a filter instance into an existing graph. + * The filter instance is created from the filter filt and inited + * with the parameters args and opaque. + * + * In case of success put in *filt_ctx the pointer to the created + * filter instance, otherwise set *filt_ctx to NULL. + * + * @param name the instance name to give to the created filter instance + * @param graph_ctx the filter graph + * @return a negative AVERROR error code in case of failure, a non + * negative value otherwise + */ +int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, + const char *name, const char *args, void *opaque, + AVFilterGraph *graph_ctx); + +/** + * Enable or disable automatic format conversion inside the graph. + * + * Note that format conversion can still happen inside explicitly inserted + * scale and aresample filters. + * + * @param flags any of the AVFILTER_AUTO_CONVERT_* constants + */ +void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags); + +enum { + AVFILTER_AUTO_CONVERT_ALL = 0, /**< all automatic conversions enabled */ + AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */ +}; + +/** + * Check validity and configure all the links and formats in the graph. 
+ * + * @param graphctx the filter graph + * @param log_ctx context used for logging + * @return >= 0 in case of success, a negative AVERROR code otherwise + */ +int avfilter_graph_config(AVFilterContext *filter); — wait
Similarly the outputs parameter describes + * outputs of the already existing filters, which are provided as + * inputs to the parsed filters. + * + * @param graph the filter graph where to link the parsed graph context + * @param filters string to be parsed + * @param inputs linked list to the inputs of the graph + * @param outputs linked list to the outputs of the graph + * @return zero on success, a negative AVERROR code on error + */ +int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, + AVFilterInOut *inputs, AVFilterInOut *outputs, + void *log_ctx); + +/** + * Add a graph described by a string to a graph. + * + * In the graph filters description, if the input label of the first + * filter is not specified, "in" is assumed; if the output label of + * the last filter is not specified, "out" is assumed. + * + * @param graph the filter graph where to link the parsed graph context + * @param filters string to be parsed + * @param inputs pointer to a linked list to the inputs of the graph, may be NULL. + * If non-NULL, *inputs is updated to contain the list of open inputs + * after the parsing, should be freed with avfilter_inout_free(). + * @param outputs pointer to a linked list to the outputs of the graph, may be NULL. + * If non-NULL, *outputs is updated to contain the list of open outputs + * after the parsing, should be freed with avfilter_inout_free(). + * @return non negative on success, a negative AVERROR code on error + */ +int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, + AVFilterInOut **inputs, AVFilterInOut **outputs, + void *log_ctx); + +/** + * Add a graph described by a string to a graph. + * + * @param[in] graph the filter graph where to link the parsed graph context + * @param[in] filters string to be parsed + * @param[out] inputs a linked list of all free (unlinked) inputs of the + * parsed graph will be returned here. It is to be freed + * by the caller using avfilter_inout_free(). 
+ * @param[out] outputs a linked list of all free (unlinked) outputs of the + * parsed graph will be returned here. It is to be freed by the + * caller using avfilter_inout_free(). + * @return zero on success, a negative AVERROR code on error + * + * @note This function returns the inputs and outputs that are left + * unlinked after parsing the graph and the caller then deals with + * them. + * @note This function makes no reference whatsoever to already + * existing parts of the graph and the inputs parameter will on return + * contain inputs of the newly parsed part of the graph. Analogously + * the outputs parameter will contain outputs of the newly created + * filters. + */ +int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters, + AVFilterInOut **inputs, + AVFilterInOut **outputs); + +/** + * Send a command to one or more filter instances. + * + * @param graph the filter graph + * @param target the filter(s) to which the command should be sent + * "all" sends to all filters + * otherwise it can be a filter or filter instance name + * which will send the command to all matching filters. + * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param res a buffer with size res_size where the filter(s) can return a response. + * + * @returns >=0 on success otherwise an error code. + * AVERROR(ENOSYS) on unsupported commands + */ +int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags); + +/** + * Queue a command for one or more filter instances. + * + * @param graph the filter graph + * @param target the filter(s) to which the command should be sent + * "all" sends to all filters + * otherwise it can be a filter or filter instance name + * which will send the command to all matching filters. 
+ * @param cmd the command to sent, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param ts time at which the command should be sent to the filter + * + * @note As this executes commands after this function returns, no return code + * from the filter is provided, also AVFILTER_CMD_FLAG_ONE is not supported. + */ +int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts); + + +/** + * Dump a graph into a human-readable string representation. + * + * @param graph the graph to dump + * @param options formatting options; currently ignored + * @return a string, or NULL in case of memory allocation failure; + * the string must be freed using av_free + */ +char *avfilter_graph_dump(AVFilterGraph *graph, const char *options); + +/** + * Request a frame on the oldest sink link. + * + * If the request returns AVERROR_EOF, try the next. + * + * Note that this function is not meant to be the sole scheduling mechanism + * of a filtergraph, only a convenience function to help drain a filtergraph + * in a balanced way under normal circumstances. + * + * Also note that AVERROR_EOF does not mean that frames did not arrive on + * some of the sinks during the process. + * When there are multiple sink links, in case the requested link + * returns an EOF, this may cause a filter to flush pending frames + * which are sent to another sink link, although unrequested. 
+ * + * @return the return value of ff_request_frame(), + * or AVERROR_EOF if all links returned AVERROR_EOF + */ +int avfilter_graph_request_oldest(AVFilterGraph *graph); + +/** + * @} + */ + +#endif /* AVFILTER_AVFILTER_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavfilter/avfiltergraph.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavfilter/avfiltergraph.h new file mode 100644 index 0000000..b31d581 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavfilter/avfiltergraph.h @@ -0,0 +1,28 @@ +/* + * Filter graphs + * copyright (c) 2007 Bobby Bingham + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AVFILTERGRAPH_H +#define AVFILTER_AVFILTERGRAPH_H + +#include "avfilter.h" +#include "libavutil/log.h" + +#endif /* AVFILTER_AVFILTERGRAPH_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavfilter/buffersink.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavfilter/buffersink.h new file mode 100644 index 0000000..f51fa7c --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavfilter/buffersink.h @@ -0,0 +1,165 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERSINK_H +#define AVFILTER_BUFFERSINK_H + +/** + * @file + * @ingroup lavfi_buffersink + * memory buffer sink API for audio and video + */ + +#include "avfilter.h" + +/** + * @defgroup lavfi_buffersink Buffer sink API + * @ingroup lavfi + * @{ + */ + +/** + * Get a frame with filtered data from sink and put it in frame. + * + * @param ctx pointer to a buffersink or abuffersink filter context. 
+ * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using av_frame_unref() / av_frame_free() + * @param flags a combination of AV_BUFFERSINK_FLAG_* flags + * + * @return >= 0 in for success, a negative AVERROR code for failure. + */ +int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags); + +/** + * Tell av_buffersink_get_buffer_ref() to read video/samples buffer + * reference, but not remove it from the buffer. This is useful if you + * need only to read a video/samples buffer, without to fetch it. + */ +#define AV_BUFFERSINK_FLAG_PEEK 1 + +/** + * Tell av_buffersink_get_buffer_ref() not to request a frame from its input. + * If a frame is already buffered, it is read (and removed from the buffer), + * but if no frame is present, return AVERROR(EAGAIN). + */ +#define AV_BUFFERSINK_FLAG_NO_REQUEST 2 + +/** + * Struct to use for initializing a buffersink context. + */ +typedef struct { + const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE +} AVBufferSinkParams; + +/** + * Create an AVBufferSinkParams structure. + * + * Must be freed with av_free(). + */ +AVBufferSinkParams *av_buffersink_params_alloc(void); + +/** + * Struct to use for initializing an abuffersink context. + */ +typedef struct { + const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE + const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1 + const int *channel_counts; ///< list of allowed channel counts, terminated by -1 + int all_channel_counts; ///< if not 0, accept any channel count or layout + int *sample_rates; ///< list of allowed sample rates, terminated by -1 +} AVABufferSinkParams; + +/** + * Create an AVABufferSinkParams structure. + * + * Must be freed with av_free(). 
+ */ +AVABufferSinkParams *av_abuffersink_params_alloc(void); + +/** + * Set the frame size for an audio buffer sink. + * + * All calls to av_buffersink_get_buffer_ref will return a buffer with + * exactly the specified number of samples, or AVERROR(EAGAIN) if there is + * not enough. The last buffer at EOF will be padded with 0. + */ +void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size); + +/** + * @defgroup lavfi_buffersink_accessors Buffer sink accessors + * Get the properties of the stream + * @{ + */ + +enum AVMediaType av_buffersink_get_type (const AVFilterContext *ctx); +AVRational av_buffersink_get_time_base (const AVFilterContext *ctx); +int av_buffersink_get_format (const AVFilterContext *ctx); + +AVRational av_buffersink_get_frame_rate (const AVFilterContext *ctx); +int av_buffersink_get_w (const AVFilterContext *ctx); +int av_buffersink_get_h (const AVFilterContext *ctx); +AVRational av_buffersink_get_sample_aspect_ratio (const AVFilterContext *ctx); + +int av_buffersink_get_channels (const AVFilterContext *ctx); +uint64_t av_buffersink_get_channel_layout (const AVFilterContext *ctx); +int av_buffersink_get_sample_rate (const AVFilterContext *ctx); + +AVBufferRef * av_buffersink_get_hw_frames_ctx (const AVFilterContext *ctx); + +/** @} */ + +/** + * Get a frame with filtered data from sink and put it in frame. + * + * @param ctx pointer to a context of a buffersink or abuffersink AVFilter. + * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using av_frame_unref() / av_frame_free() + * + * @return + * - >= 0 if a frame was successfully returned. + * - AVERROR(EAGAIN) if no frames are available at this point; more + * input frames must be added to the filtergraph to get more output. + * - AVERROR_EOF if there will be no more output frames on this sink. + * - A different negative AVERROR code in other failure cases. 
+ */ +int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame); + +/** + * Same as av_buffersink_get_frame(), but with the ability to specify the number + * of samples read. This function is less efficient than + * av_buffersink_get_frame(), because it copies the data around. + * + * @param ctx pointer to a context of the abuffersink AVFilter. + * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using av_frame_unref() / av_frame_free() + * frame will contain exactly nb_samples audio samples, except at + * the end of stream, when it can contain less than nb_samples. + * + * @return The return codes have the same meaning as for + * av_buffersink_get_samples(). + * + * @warning do not mix this function with av_buffersink_get_frame(). Use only one or + * the other with a single sink, not both. + */ +int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples); + +/** + * @} + */ + +#endif /* AVFILTER_BUFFERSINK_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavfilter/buffersrc.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavfilter/buffersrc.h new file mode 100644 index 0000000..e42c781 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavfilter/buffersrc.h @@ -0,0 +1,201 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERSRC_H +#define AVFILTER_BUFFERSRC_H + +/** + * @file + * @ingroup lavfi_buffersrc + * Memory buffer source API. + */ + +#include "avfilter.h" + +/** + * @defgroup lavfi_buffersrc Buffer source API + * @ingroup lavfi + * @{ + */ + +enum { + + /** + * Do not check for format changes. + */ + AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1, + + /** + * Immediately push the frame to the output. + */ + AV_BUFFERSRC_FLAG_PUSH = 4, + + /** + * Keep a reference to the frame. + * If the frame if reference-counted, create a new reference; otherwise + * copy the frame data. + */ + AV_BUFFERSRC_FLAG_KEEP_REF = 8, + +}; + +/** + * Get the number of failed requests. + * + * A failed request is when the request_frame method is called while no + * frame is present in the buffer. + * The number is reset when a frame is added. + */ +unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src); + +/** + * This structure contains the parameters describing the frames that will be + * passed to this filter. + * + * It should be allocated with av_buffersrc_parameters_alloc() and freed with + * av_free(). All the allocated fields in it remain owned by the caller. + */ +typedef struct AVBufferSrcParameters { + /** + * video: the pixel format, value corresponds to enum AVPixelFormat + * audio: the sample format, value corresponds to enum AVSampleFormat + */ + int format; + /** + * The timebase to be used for the timestamps on the input frames. + */ + AVRational time_base; + + /** + * Video only, the display dimensions of the input frames. + */ + int width, height; + + /** + * Video only, the sample (pixel) aspect ratio. + */ + AVRational sample_aspect_ratio; + + /** + * Video only, the frame rate of the input video. 
 This field must only be
+ * set to a non-zero value if input stream has a known constant framerate
+ * and should be left at its initial value if the framerate is variable or
+ * unknown.
+ */
+ AVRational frame_rate;
+
+ /**
+ * Video with a hwaccel pixel format only. This should be a reference to an
+ * AVHWFramesContext instance describing the input frames.
+ */
+ AVBufferRef *hw_frames_ctx;
+
+ /**
+ * Audio only, the audio sampling rate in samples per second.
+ */
+ int sample_rate;
+
+ /**
+ * Audio only, the audio channel layout
+ */
+ uint64_t channel_layout;
+} AVBufferSrcParameters;
+
+/**
+ * Allocate a new AVBufferSrcParameters instance. It should be freed by the
+ * caller with av_free().
+ */
+AVBufferSrcParameters *av_buffersrc_parameters_alloc(void);
+
+/**
+ * Initialize the buffersrc or abuffersrc filter with the provided parameters.
+ * This function may be called multiple times, the later calls override the
+ * previous ones. Some of the parameters may also be set through AVOptions, then
+ * whatever method is used last takes precedence.
+ *
+ * @param ctx an instance of the buffersrc or abuffersrc filter
+ * @param param the stream parameters. The frames later passed to this filter
+ * must conform to those parameters. All the allocated fields in
+ * param remain owned by the caller, libavfilter will make internal
+ * copies or references when necessary.
+ * @return 0 on success, a negative AVERROR code on failure.
+ */
+int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param);
+
+/**
+ * Add a frame to the buffer source.
+ *
+ * @param ctx an instance of the buffersrc filter
+ * @param frame frame to be added. If the frame is reference counted, this
+ * function will make a new reference to it. Otherwise the frame data will be
+ * copied.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ *
+ * This function is equivalent to av_buffersrc_add_frame_flags() with the
+ * AV_BUFFERSRC_FLAG_KEEP_REF flag.
+ */ +av_warn_unused_result +int av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame); + +/** + * Add a frame to the buffer source. + * + * @param ctx an instance of the buffersrc filter + * @param frame frame to be added. If the frame is reference counted, this + * function will take ownership of the reference(s) and reset the frame. + * Otherwise the frame data will be copied. If this function returns an error, + * the input frame is not touched. + * + * @return 0 on success, a negative AVERROR on error. + * + * @note the difference between this function and av_buffersrc_write_frame() is + * that av_buffersrc_write_frame() creates a new reference to the input frame, + * while this function takes ownership of the reference passed to it. + * + * This function is equivalent to av_buffersrc_add_frame_flags() without the + * AV_BUFFERSRC_FLAG_KEEP_REF flag. + */ +av_warn_unused_result +int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame); + +/** + * Add a frame to the buffer source. + * + * By default, if the frame is reference-counted, this function will take + * ownership of the reference(s) and reset the frame. This can be controlled + * using the flags. + * + * If this function returns an error, the input frame is not touched. 
+ * + * @param buffer_src pointer to a buffer source context + * @param frame a frame, or NULL to mark EOF + * @param flags a combination of AV_BUFFERSRC_FLAG_* + * @return >= 0 in case of success, a negative AVERROR code + * in case of failure + */ +av_warn_unused_result +int av_buffersrc_add_frame_flags(AVFilterContext *buffer_src, + AVFrame *frame, int flags); + + +/** + * @} + */ + +#endif /* AVFILTER_BUFFERSRC_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavfilter/version.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavfilter/version.h new file mode 100644 index 0000000..4cbd185 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavfilter/version.h @@ -0,0 +1,74 @@ +/* + * Version macros. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_VERSION_H +#define AVFILTER_VERSION_H + +/** + * @file + * @ingroup lavfi + * Libavfilter version macros + */ + +#include "libavutil/version.h" + +#define LIBAVFILTER_VERSION_MAJOR 6 +#define LIBAVFILTER_VERSION_MINOR 82 +#define LIBAVFILTER_VERSION_MICRO 100 + +#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ + LIBAVFILTER_VERSION_MINOR, \ + LIBAVFILTER_VERSION_MICRO) +#define LIBAVFILTER_VERSION AV_VERSION(LIBAVFILTER_VERSION_MAJOR, \ + LIBAVFILTER_VERSION_MINOR, \ + LIBAVFILTER_VERSION_MICRO) +#define LIBAVFILTER_BUILD LIBAVFILTER_VERSION_INT + +#define LIBAVFILTER_IDENT "Lavfi" AV_STRINGIFY(LIBAVFILTER_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. 
+ */ + +#ifndef FF_API_OLD_FILTER_OPTS +#define FF_API_OLD_FILTER_OPTS (LIBAVFILTER_VERSION_MAJOR < 7) +#endif +#ifndef FF_API_OLD_FILTER_OPTS_ERROR +#define FF_API_OLD_FILTER_OPTS_ERROR (LIBAVFILTER_VERSION_MAJOR < 7) +#endif +#ifndef FF_API_AVFILTER_OPEN +#define FF_API_AVFILTER_OPEN (LIBAVFILTER_VERSION_MAJOR < 7) +#endif +#ifndef FF_API_AVFILTER_INIT_FILTER +#define FF_API_AVFILTER_INIT_FILTER (LIBAVFILTER_VERSION_MAJOR < 7) +#endif +#ifndef FF_API_OLD_FILTER_REGISTER +#define FF_API_OLD_FILTER_REGISTER (LIBAVFILTER_VERSION_MAJOR < 7) +#endif +#ifndef FF_API_NOCONST_GET_NAME +#define FF_API_NOCONST_GET_NAME (LIBAVFILTER_VERSION_MAJOR < 7) +#endif +#ifndef FF_API_LAVR_OPTS +#define FF_API_LAVR_OPTS (LIBAVFILTER_VERSION_MAJOR < 7) +#endif + +#endif /* AVFILTER_VERSION_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/avc.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/avc.h new file mode 100644 index 0000000..c5e80ff --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/avc.h @@ -0,0 +1,37 @@ +/* + * AVC helper functions for muxers + * Copyright (c) 2008 Aurelien Jacobs + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
 + *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFORMAT_AVC_H
+#define AVFORMAT_AVC_H
+
+#include <stdint.h>
+#include "avio.h"
+
+int ff_avc_parse_nal_units(AVIOContext *s, const uint8_t *buf, int size);
+int ff_avc_parse_nal_units_buf(const uint8_t *buf_in, uint8_t **buf, int *size);
+int ff_isom_write_avcc(AVIOContext *pb, const uint8_t *data, int len);
+const uint8_t *ff_avc_find_startcode(const uint8_t *p, const uint8_t *end);
+int ff_avc_write_annexb_extradata(const uint8_t *in, uint8_t **buf, int *size);
+const uint8_t *ff_avc_mp4_find_startcode(const uint8_t *start,
+ const uint8_t *end,
+ int nal_length_size);
+
+#endif /* AVFORMAT_AVC_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/avformat.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/avformat.h new file mode 100644 index 0000000..6170b39 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/avformat.h @@ -0,0 +1,3031 @@ +/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_AVFORMAT_H +#define AVFORMAT_AVFORMAT_H + +/** + * @file + * @ingroup libavf + * Main libavformat public API header + */ + +/** + * @defgroup libavf libavformat + * I/O and Muxing/Demuxing Library + * + * Libavformat (lavf) is a library for dealing with various media container + * formats. Its main two purposes are demuxing - i.e. splitting a media file + * into component streams, and the reverse process of muxing - writing supplied + * data in a specified container format. It also has an @ref lavf_io + * "I/O module" which supports a number of protocols for accessing the data (e.g. + * file, tcp, http and others). Before using lavf, you need to call + * av_register_all() to register all compiled muxers, demuxers and protocols. + * Unless you are absolutely sure you won't use libavformat's network + * capabilities, you should also call avformat_network_init(). + * + * A supported input format is described by an AVInputFormat struct, conversely + * an output format is described by AVOutputFormat. You can iterate over all + * registered input/output formats using the av_iformat_next() / + * av_oformat_next() functions. The protocols layer is not part of the public + * API, so you can only get the names of supported protocols with the + * avio_enum_protocols() function. + * + * Main lavf structure used for both muxing and demuxing is AVFormatContext, + * which exports all information about the file being read or written. As with + * most Libavformat structures, its size is not part of public ABI, so it cannot be + * allocated on stack or directly with av_malloc(). To create an + * AVFormatContext, use avformat_alloc_context() (some functions, like + * avformat_open_input() might do that for you). 
+ * + * Most importantly an AVFormatContext contains: + * @li the @ref AVFormatContext.iformat "input" or @ref AVFormatContext.oformat + * "output" format. It is either autodetected or set by user for input; + * always set by user for output. + * @li an @ref AVFormatContext.streams "array" of AVStreams, which describe all + * elementary streams stored in the file. AVStreams are typically referred to + * using their index in this array. + * @li an @ref AVFormatContext.pb "I/O context". It is either opened by lavf or + * set by user for input, always set by user for output (unless you are dealing + * with an AVFMT_NOFILE format). + * + * @section lavf_options Passing options to (de)muxers + * It is possible to configure lavf muxers and demuxers using the @ref avoptions + * mechanism. Generic (format-independent) libavformat options are provided by + * AVFormatContext, they can be examined from a user program by calling + * av_opt_next() / av_opt_find() on an allocated AVFormatContext (or its AVClass + * from avformat_get_class()). Private (format-specific) options are provided by + * AVFormatContext.priv_data if and only if AVInputFormat.priv_class / + * AVOutputFormat.priv_class of the corresponding format struct is non-NULL. + * Further options may be provided by the @ref AVFormatContext.pb "I/O context", + * if its AVClass is non-NULL, and the protocols layer. See the discussion on + * nesting in @ref avoptions documentation to learn how to access those. + * + * @section urls + * URL strings in libavformat are made of a scheme/protocol, a ':', and a + * scheme specific string. URLs without a scheme and ':' used for local files + * are supported but deprecated. "file:" should be used for local files. + * + * It is important that the scheme string is not taken from untrusted + * sources without checks. 
+ * + * Note that some schemes/protocols are quite powerful, allowing access to + * both local and remote files, parts of them, concatenations of them, local + * audio and video devices and so on. + * + * @{ + * + * @defgroup lavf_decoding Demuxing + * @{ + * Demuxers read a media file and split it into chunks of data (@em packets). A + * @ref AVPacket "packet" contains one or more encoded frames which belongs to a + * single elementary stream. In the lavf API this process is represented by the + * avformat_open_input() function for opening a file, av_read_frame() for + * reading a single packet and finally avformat_close_input(), which does the + * cleanup. + * + * @section lavf_decoding_open Opening a media file + * The minimum information required to open a file is its URL, which + * is passed to avformat_open_input(), as in the following code: + * @code + * const char *url = "file:in.mp3"; + * AVFormatContext *s = NULL; + * int ret = avformat_open_input(&s, url, NULL, NULL); + * if (ret < 0) + * abort(); + * @endcode + * The above code attempts to allocate an AVFormatContext, open the + * specified file (autodetecting the format) and read the header, exporting the + * information stored there into s. Some formats do not have a header or do not + * store enough information there, so it is recommended that you call the + * avformat_find_stream_info() function which tries to read and decode a few + * frames to find missing information. + * + * In some cases you might want to preallocate an AVFormatContext yourself with + * avformat_alloc_context() and do some tweaking on it before passing it to + * avformat_open_input(). One such case is when you want to use custom functions + * for reading input data instead of lavf internal I/O layer. + * To do that, create your own AVIOContext with avio_alloc_context(), passing + * your reading callbacks to it. Then set the @em pb field of your + * AVFormatContext to newly created AVIOContext. 
+ * + * Since the format of the opened file is in general not known until after + * avformat_open_input() has returned, it is not possible to set demuxer private + * options on a preallocated context. Instead, the options should be passed to + * avformat_open_input() wrapped in an AVDictionary: + * @code + * AVDictionary *options = NULL; + * av_dict_set(&options, "video_size", "640x480", 0); + * av_dict_set(&options, "pixel_format", "rgb24", 0); + * + * if (avformat_open_input(&s, url, NULL, &options) < 0) + * abort(); + * av_dict_free(&options); + * @endcode + * This code passes the private options 'video_size' and 'pixel_format' to the + * demuxer. They would be necessary for e.g. the rawvideo demuxer, since it + * cannot know how to interpret raw video data otherwise. If the format turns + * out to be something different than raw video, those options will not be + * recognized by the demuxer and therefore will not be applied. Such unrecognized + * options are then returned in the options dictionary (recognized options are + * consumed). The calling program can handle such unrecognized options as it + * wishes, e.g. + * @code + * AVDictionaryEntry *e; + * if (e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX)) { + * fprintf(stderr, "Option %s not recognized by the demuxer.\n", e->key); + * abort(); + * } + * @endcode + * + * After you have finished reading the file, you must close it with + * avformat_close_input(). It will free everything associated with the file. + * + * @section lavf_decoding_read Reading from an opened file + * Reading data from an opened AVFormatContext is done by repeatedly calling + * av_read_frame() on it. Each call, if successful, will return an AVPacket + * containing encoded data for one AVStream, identified by + * AVPacket.stream_index. This packet may be passed straight into the libavcodec + * decoding functions avcodec_send_packet() or avcodec_decode_subtitle2() if the + * caller wishes to decode the data. 
+ * + * AVPacket.pts, AVPacket.dts and AVPacket.duration timing information will be + * set if known. They may also be unset (i.e. AV_NOPTS_VALUE for + * pts/dts, 0 for duration) if the stream does not provide them. The timing + * information will be in AVStream.time_base units, i.e. it has to be + * multiplied by the timebase to convert them to seconds. + * + * If AVPacket.buf is set on the returned packet, then the packet is + * allocated dynamically and the user may keep it indefinitely. + * Otherwise, if AVPacket.buf is NULL, the packet data is backed by a + * static storage somewhere inside the demuxer and the packet is only valid + * until the next av_read_frame() call or closing the file. If the caller + * requires a longer lifetime, av_dup_packet() will make an av_malloc()ed copy + * of it. + * In both cases, the packet must be freed with av_packet_unref() when it is no + * longer needed. + * + * @section lavf_decoding_seek Seeking + * @} + * + * @defgroup lavf_encoding Muxing + * @{ + * Muxers take encoded data in the form of @ref AVPacket "AVPackets" and write + * it into files or other output bytestreams in the specified container format. + * + * The main API functions for muxing are avformat_write_header() for writing the + * file header, av_write_frame() / av_interleaved_write_frame() for writing the + * packets and av_write_trailer() for finalizing the file. + * + * At the beginning of the muxing process, the caller must first call + * avformat_alloc_context() to create a muxing context. The caller then sets up + * the muxer by filling the various fields in this context: + * + * - The @ref AVFormatContext.oformat "oformat" field must be set to select the + * muxer that will be used. + * - Unless the format is of the AVFMT_NOFILE type, the @ref AVFormatContext.pb + * "pb" field must be set to an opened IO context, either returned from + * avio_open2() or a custom one. 
+ * - Unless the format is of the AVFMT_NOSTREAMS type, at least one stream must + * be created with the avformat_new_stream() function. The caller should fill + * the @ref AVStream.codecpar "stream codec parameters" information, such as the + * codec @ref AVCodecParameters.codec_type "type", @ref AVCodecParameters.codec_id + * "id" and other parameters (e.g. width / height, the pixel or sample format, + * etc.) as known. The @ref AVStream.time_base "stream timebase" should + * be set to the timebase that the caller desires to use for this stream (note + * that the timebase actually used by the muxer can be different, as will be + * described later). + * - It is advised to manually initialize only the relevant fields in + * AVCodecParameters, rather than using @ref avcodec_parameters_copy() during + * remuxing: there is no guarantee that the codec context values remain valid + * for both input and output format contexts. + * - The caller may fill in additional information, such as @ref + * AVFormatContext.metadata "global" or @ref AVStream.metadata "per-stream" + * metadata, @ref AVFormatContext.chapters "chapters", @ref + * AVFormatContext.programs "programs", etc. as described in the + * AVFormatContext documentation. Whether such information will actually be + * stored in the output depends on what the container format and the muxer + * support. + * + * When the muxing context is fully set up, the caller must call + * avformat_write_header() to initialize the muxer internals and write the file + * header. Whether anything actually is written to the IO context at this step + * depends on the muxer, but this function must always be called. Any muxer + * private options must be passed in the options parameter to this function. 
+ * + * The data is then sent to the muxer by repeatedly calling av_write_frame() or + * av_interleaved_write_frame() (consult those functions' documentation for + * discussion on the difference between them; only one of them may be used with + * a single muxing context, they should not be mixed). Do note that the timing + * information on the packets sent to the muxer must be in the corresponding + * AVStream's timebase. That timebase is set by the muxer (in the + * avformat_write_header() step) and may be different from the timebase + * requested by the caller. + * + * Once all the data has been written, the caller must call av_write_trailer() + * to flush any buffered packets and finalize the output file, then close the IO + * context (if any) and finally free the muxing context with + * avformat_free_context(). + * @} + * + * @defgroup lavf_io I/O Read/Write + * @{ + * @section lavf_io_dirlist Directory listing + * The directory listing API makes it possible to list files on remote servers. + * + * Some of possible use cases: + * - an "open file" dialog to choose files from a remote location, + * - a recursive media finder providing a player with an ability to play all + * files from a given directory. + * + * @subsection lavf_io_dirlist_open Opening a directory + * At first, a directory needs to be opened by calling avio_open_dir() + * supplied with a URL and, optionally, ::AVDictionary containing + * protocol-specific parameters. The function returns zero or positive + * integer and allocates AVIODirContext on success. + * + * @code + * AVIODirContext *ctx = NULL; + * if (avio_open_dir(&ctx, "smb://example.com/some_dir", NULL) < 0) { + * fprintf(stderr, "Cannot open directory.\n"); + * abort(); + * } + * @endcode + * + * This code tries to open a sample directory using smb protocol without + * any additional parameters. + * + * @subsection lavf_io_dirlist_read Reading entries + * Each directory's entry (i.e. 
 file, another directory, anything else
+ * within ::AVIODirEntryType) is represented by AVIODirEntry.
+ * Reading consecutive entries from an opened AVIODirContext is done by
+ * repeatedly calling avio_read_dir() on it. Each call returns zero or
+ * positive integer if successful. Reading can be stopped right after the
+ * NULL entry has been read -- it means there are no entries left to be
+ * read. The following code reads all entries from a directory associated
+ * with ctx and prints their names to standard output.
+ * @code
+ * AVIODirEntry *entry = NULL;
+ * for (;;) {
+ * if (avio_read_dir(ctx, &entry) < 0) {
+ * fprintf(stderr, "Cannot list directory.\n");
+ * abort();
+ * }
+ * if (!entry)
+ * break;
+ * printf("%s\n", entry->name);
+ * avio_free_directory_entry(&entry);
+ * }
+ * @endcode
+ * @}
+ *
+ * @defgroup lavf_codec Demuxers
+ * @{
+ * @defgroup lavf_codec_native Native Demuxers
+ * @{
+ * @}
+ * @defgroup lavf_codec_wrappers External library wrappers
+ * @{
+ * @}
+ * @}
+ * @defgroup lavf_protos I/O Protocols
+ * @{
+ * @}
+ * @defgroup lavf_internal Internal
+ * @{
+ * @}
+ * @}
+ */
+
+#include <time.h>
+#include <stdio.h> /* FILE */
+#include "libavcodec/avcodec.h"
+#include "libavutil/dict.h"
+#include "libavutil/log.h"
+
+#include "avio.h"
+#include "libavformat/version.h"
+
+struct AVFormatContext;
+
+struct AVDeviceInfoList;
+struct AVDeviceCapabilitiesQuery;
+
+/**
+ * @defgroup metadata_api Public Metadata API
+ * @{
+ * @ingroup libavf
+ * The metadata API allows libavformat to export metadata tags to a client
+ * application when demuxing. Conversely it allows a client application to
+ * set metadata when muxing.
+ *
+ * Metadata is exported or set as pairs of key/value strings in the 'metadata'
+ * fields of the AVFormatContext, AVStream, AVChapter and AVProgram structs
+ * using the @ref lavu_dict "AVDictionary" API. Like all strings in FFmpeg,
+ * metadata is assumed to be UTF-8 encoded Unicode.
Note that metadata + * exported by demuxers isn't checked to be valid UTF-8 in most cases. + * + * Important concepts to keep in mind: + * - Keys are unique; there can never be 2 tags with the same key. This is + * also meant semantically, i.e., a demuxer should not knowingly produce + * several keys that are literally different but semantically identical. + * E.g., key=Author5, key=Author6. In this example, all authors must be + * placed in the same tag. + * - Metadata is flat, not hierarchical; there are no subtags. If you + * want to store, e.g., the email address of the child of producer Alice + * and actor Bob, that could have key=alice_and_bobs_childs_email_address. + * - Several modifiers can be applied to the tag name. This is done by + * appending a dash character ('-') and the modifier name in the order + * they appear in the list below -- e.g. foo-eng-sort, not foo-sort-eng. + * - language -- a tag whose value is localized for a particular language + * is appended with the ISO 639-2/B 3-letter language code. + * For example: Author-ger=Michael, Author-eng=Mike + * The original/default language is in the unqualified "Author" tag. + * A demuxer should set a default if it sets any translated tag. + * - sorting -- a modified version of a tag that should be used for + * sorting will have '-sort' appended. E.g. artist="The Beatles", + * artist-sort="Beatles, The". + * - Some protocols and demuxers support metadata updates. After a successful + * call to av_read_packet(), AVFormatContext.event_flags or AVStream.event_flags + * will be updated to indicate if metadata changed. In order to detect metadata + * changes on a stream, you need to loop through all streams in the AVFormatContext + * and check their individual event_flags. + * + * - Demuxers attempt to export metadata in a generic format, however tags + * with no generic equivalents are left as they are stored in the container. 
+ * Follows a list of generic tag names: + * + @verbatim + album -- name of the set this work belongs to + album_artist -- main creator of the set/album, if different from artist. + e.g. "Various Artists" for compilation albums. + artist -- main creator of the work + comment -- any additional description of the file. + composer -- who composed the work, if different from artist. + copyright -- name of copyright holder. + creation_time-- date when the file was created, preferably in ISO 8601. + date -- date when the work was created, preferably in ISO 8601. + disc -- number of a subset, e.g. disc in a multi-disc collection. + encoder -- name/settings of the software/hardware that produced the file. + encoded_by -- person/group who created the file. + filename -- original name of the file. + genre -- . + language -- main language in which the work is performed, preferably + in ISO 639-2 format. Multiple languages can be specified by + separating them with commas. + performer -- artist who performed the work, if different from artist. + E.g for "Also sprach Zarathustra", artist would be "Richard + Strauss" and performer "London Philharmonic Orchestra". + publisher -- name of the label/publisher. + service_name -- name of the service in broadcasting (channel name). + service_provider -- name of the service provider in broadcasting. + title -- name of the work. + track -- number of this work in the set, can be in form current/total. + variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of + @endverbatim + * + * Look in the examples section for an application example how to use the Metadata API. + * + * @} + */ + +/* packet functions */ + + +/** + * Allocate and read the payload of a packet and initialize its + * fields with default values. 
+ * + * @param s associated IO context + * @param pkt packet + * @param size desired payload size + * @return >0 (read size) if OK, AVERROR_xxx otherwise + */ +int av_get_packet(AVIOContext *s, AVPacket *pkt, int size); + + +/** + * Read data and append it to the current content of the AVPacket. + * If pkt->size is 0 this is identical to av_get_packet. + * Note that this uses av_grow_packet and thus involves a realloc + * which is inefficient. Thus this function should only be used + * when there is no reasonable way to know (an upper bound of) + * the final size. + * + * @param s associated IO context + * @param pkt packet + * @param size amount of data to read + * @return >0 (read size) if OK, AVERROR_xxx otherwise, previous data + * will not be lost even if an error occurs. + */ +int av_append_packet(AVIOContext *s, AVPacket *pkt, int size); + +#if FF_API_LAVF_FRAC +/*************************************************/ +/* fractional numbers for exact pts handling */ + +/** + * The exact value of the fractional number is: 'val + num / den'. + * num is assumed to be 0 <= num < den. + */ +typedef struct AVFrac { + int64_t val, num, den; +} AVFrac; +#endif + +/*************************************************/ +/* input/output formats */ + +struct AVCodecTag; + +/** + * This structure contains the data a format has to probe a file. + */ +typedef struct AVProbeData { + const char *filename; + unsigned char *buf; /**< Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero. */ + int buf_size; /**< Size of buf except extra allocated bytes */ + const char *mime_type; /**< mime_type, when known. 
*/ +} AVProbeData; + +#define AVPROBE_SCORE_RETRY (AVPROBE_SCORE_MAX/4) +#define AVPROBE_SCORE_STREAM_RETRY (AVPROBE_SCORE_MAX/4-1) + +#define AVPROBE_SCORE_EXTENSION 50 ///< score for file extension +#define AVPROBE_SCORE_MIME 75 ///< score for file mime type +#define AVPROBE_SCORE_MAX 100 ///< maximum score + +#define AVPROBE_PADDING_SIZE 32 ///< extra allocated bytes at the end of the probe buffer + +/// Demuxer will use avio_open, no opened file should be provided by the caller. +#define AVFMT_NOFILE 0x0001 +#define AVFMT_NEEDNUMBER 0x0002 /**< Needs '%d' in filename. */ +#define AVFMT_SHOW_IDS 0x0008 /**< Show format stream IDs numbers. */ +#if FF_API_LAVF_FMT_RAWPICTURE +#define AVFMT_RAWPICTURE 0x0020 /**< Format wants AVPicture structure for + raw picture data. @deprecated Not used anymore */ +#endif +#define AVFMT_GLOBALHEADER 0x0040 /**< Format wants global header. */ +#define AVFMT_NOTIMESTAMPS 0x0080 /**< Format does not need / have any timestamps. */ +#define AVFMT_GENERIC_INDEX 0x0100 /**< Use generic index building code. */ +#define AVFMT_TS_DISCONT 0x0200 /**< Format allows timestamp discontinuities. Note, muxers always require valid (monotone) timestamps */ +#define AVFMT_VARIABLE_FPS 0x0400 /**< Format allows variable fps. */ +#define AVFMT_NODIMENSIONS 0x0800 /**< Format does not need width/height */ +#define AVFMT_NOSTREAMS 0x1000 /**< Format does not require any streams */ +#define AVFMT_NOBINSEARCH 0x2000 /**< Format does not allow to fall back on binary search via read_timestamp */ +#define AVFMT_NOGENSEARCH 0x4000 /**< Format does not allow to fall back on generic search */ +#define AVFMT_NO_BYTE_SEEK 0x8000 /**< Format does not allow seeking by bytes */ +#define AVFMT_ALLOW_FLUSH 0x10000 /**< Format allows flushing. If not set, the muxer will not receive a NULL packet in the write_packet function. 
*/ +#define AVFMT_TS_NONSTRICT 0x20000 /**< Format does not require strictly + increasing timestamps, but they must + still be monotonic */ +#define AVFMT_TS_NEGATIVE 0x40000 /**< Format allows muxing negative + timestamps. If not set the timestamp + will be shifted in av_write_frame and + av_interleaved_write_frame so they + start from 0. + The user or muxer can override this through + AVFormatContext.avoid_negative_ts + */ + +#define AVFMT_SEEK_TO_PTS 0x4000000 /**< Seeking is based on PTS */ + +/** + * @addtogroup lavf_encoding + * @{ + */ +typedef struct AVOutputFormat { + const char *name; + /** + * Descriptive name for the format, meant to be more human-readable + * than name. You should use the NULL_IF_CONFIG_SMALL() macro + * to define it. + */ + const char *long_name; + const char *mime_type; + const char *extensions; /**< comma-separated filename extensions */ + /* output support */ + enum AVCodecID audio_codec; /**< default audio codec */ + enum AVCodecID video_codec; /**< default video codec */ + enum AVCodecID subtitle_codec; /**< default subtitle codec */ + /** + * can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, + * AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, + * AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, + * AVFMT_TS_NONSTRICT, AVFMT_TS_NEGATIVE + */ + int flags; + + /** + * List of supported codec_id-codec_tag pairs, ordered by "better + * choice first". The arrays are all terminated by AV_CODEC_ID_NONE. + */ + const struct AVCodecTag * const *codec_tag; + + + const AVClass *priv_class; ///< AVClass for the private context + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. 
+ ***************************************************************** + */ + struct AVOutputFormat *next; + /** + * size of private data so that it can be allocated in the wrapper + */ + int priv_data_size; + + int (*write_header)(struct AVFormatContext *); + /** + * Write a packet. If AVFMT_ALLOW_FLUSH is set in flags, + * pkt can be NULL in order to flush data buffered in the muxer. + * When flushing, return 0 if there still is more data to flush, + * or 1 if everything was flushed and there is no more buffered + * data. + */ + int (*write_packet)(struct AVFormatContext *, AVPacket *pkt); + int (*write_trailer)(struct AVFormatContext *); + /** + * Currently only used to set pixel format if not YUV420P. + */ + int (*interleave_packet)(struct AVFormatContext *, AVPacket *out, + AVPacket *in, int flush); + /** + * Test if the given codec can be stored in this container. + * + * @return 1 if the codec is supported, 0 if it is not. + * A negative number if unknown. + * MKTAG('A', 'P', 'I', 'C') if the codec is only supported as AV_DISPOSITION_ATTACHED_PIC + */ + int (*query_codec)(enum AVCodecID id, int std_compliance); + + void (*get_output_timestamp)(struct AVFormatContext *s, int stream, + int64_t *dts, int64_t *wall); + /** + * Allows sending messages from application to device. + */ + int (*control_message)(struct AVFormatContext *s, int type, + void *data, size_t data_size); + + /** + * Write an uncoded AVFrame. + * + * See av_write_uncoded_frame() for details. + * + * The library will free *frame afterwards, but the muxer can prevent it + * by setting the pointer to NULL. + */ + int (*write_uncoded_frame)(struct AVFormatContext *, int stream_index, + AVFrame **frame, unsigned flags); + /** + * Returns device list with it properties. + * @see avdevice_list_devices() for more details. + */ + int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list); + /** + * Initialize device capabilities submodule. 
+ * @see avdevice_capabilities_create() for more details. + */ + int (*create_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps); + /** + * Free device capabilities submodule. + * @see avdevice_capabilities_free() for more details. + */ + int (*free_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps); + enum AVCodecID data_codec; /**< default data codec */ + /** + * Initialize format. May allocate data here, and set any AVFormatContext or + * AVStream parameters that need to be set before packets are sent. + * This method must not write output. + * + * Return 0 if streams were fully configured, 1 if not, negative AVERROR on failure + * + * Any allocations made here must be freed in deinit(). + */ + int (*init)(struct AVFormatContext *); + /** + * Deinitialize format. If present, this is called whenever the muxer is being + * destroyed, regardless of whether or not the header has been written. + * + * If a trailer is being written, this is called after write_trailer(). + * + * This is called if init() fails as well. + */ + void (*deinit)(struct AVFormatContext *); + /** + * Set up any necessary bitstream filtering and extract any extra data needed + * for the global header. + * Return 0 if more packets from this stream must be checked; 1 if not. + */ + int (*check_bitstream)(struct AVFormatContext *, const AVPacket *pkt); +} AVOutputFormat; +/** + * @} + */ + +/** + * @addtogroup lavf_decoding + * @{ + */ +typedef struct AVInputFormat { + /** + * A comma separated list of short names for the format. New names + * may be appended with a minor bump. + */ + const char *name; + + /** + * Descriptive name for the format, meant to be more human-readable + * than name. You should use the NULL_IF_CONFIG_SMALL() macro + * to define it. 
+ */ + const char *long_name; + + /** + * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, + * AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, + * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS. + */ + int flags; + + /** + * If extensions are defined, then no probe is done. You should + * usually not use extension format guessing because it is not + * reliable enough + */ + const char *extensions; + + const struct AVCodecTag * const *codec_tag; + + const AVClass *priv_class; ///< AVClass for the private context + + /** + * Comma-separated list of mime types. + * It is used check for matching mime types while probing. + * @see av_probe_input_format2 + */ + const char *mime_type; + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + struct AVInputFormat *next; + + /** + * Raw demuxers store their codec ID here. + */ + int raw_codec_id; + + /** + * Size of private data so that it can be allocated in the wrapper. + */ + int priv_data_size; + + /** + * Tell if a given file has a chance of being parsed as this format. + * The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes + * big so you do not have to check for that unless you need more. + */ + int (*read_probe)(AVProbeData *); + + /** + * Read the format header and initialize the AVFormatContext + * structure. Return 0 if OK. 'avformat_new_stream' should be + * called to create new streams. + */ + int (*read_header)(struct AVFormatContext *); + + /** + * Used by format which open further nested input. + */ + int (*read_header2)(struct AVFormatContext *, AVDictionary **options); + + /** + * Read one packet and put it in 'pkt'. pts and flags are also + * set. 
'avformat_new_stream' can be called only if the flag + * AVFMTCTX_NOHEADER is used and only in the calling thread (not in a + * background thread). + * @return 0 on success, < 0 on error. + * When returning an error, pkt must not have been allocated + * or must be freed before returning + */ + int (*read_packet)(struct AVFormatContext *, AVPacket *pkt); + + /** + * Close the stream. The AVFormatContext and AVStreams are not + * freed by this function + */ + int (*read_close)(struct AVFormatContext *); + + /** + * Seek to a given timestamp relative to the frames in + * stream component stream_index. + * @param stream_index Must not be -1. + * @param flags Selects which direction should be preferred if no exact + * match is available. + * @return >= 0 on success (but not necessarily the new offset) + */ + int (*read_seek)(struct AVFormatContext *, + int stream_index, int64_t timestamp, int flags); + + /** + * Get the next timestamp in stream[stream_index].time_base units. + * @return the timestamp or AV_NOPTS_VALUE if an error occurred + */ + int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index, + int64_t *pos, int64_t pos_limit); + + /** + * Start/resume playing - only meaningful if using a network-based format + * (RTSP). + */ + int (*read_play)(struct AVFormatContext *); + + /** + * Pause playing - only meaningful if using a network-based format + * (RTSP). + */ + int (*read_pause)(struct AVFormatContext *); + + /** + * Seek to timestamp ts. + * Seeking will be done so that the point from which all active streams + * can be presented successfully will be closest to ts and within min/max_ts. + * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. + */ + int (*read_seek2)(struct AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); + + /** + * Returns device list with it properties. + * @see avdevice_list_devices() for more details. 
+ */ + int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list); + + /** + * Initialize device capabilities submodule. + * @see avdevice_capabilities_create() for more details. + */ + int (*create_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps); + + /** + * Free device capabilities submodule. + * @see avdevice_capabilities_free() for more details. + */ + int (*free_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps); + + /** + * Change to another stream + */ + int (*read_sync)(struct AVFormatContext *, int stream, int64_t timestamp, int64_t *pos); +} AVInputFormat; +/** + * @} + */ + +enum AVStreamParseType { + AVSTREAM_PARSE_NONE, + AVSTREAM_PARSE_FULL, /**< full parsing and repack */ + AVSTREAM_PARSE_HEADERS, /**< Only parse headers, do not repack. */ + AVSTREAM_PARSE_TIMESTAMPS, /**< full parsing and interpolation of timestamps for frames not starting on a packet boundary */ + AVSTREAM_PARSE_FULL_ONCE, /**< full parsing and repack of the first frame only, only implemented for H.264 currently */ + AVSTREAM_PARSE_FULL_RAW=MKTAG(0,'R','A','W'), /**< full parsing and repack with timestamp and position generation by parser for raw + this assumes that each packet in the file contains no demuxer level headers and + just codec level data, otherwise position generation would fail */ +}; + +typedef struct AVIndexEntry { + int64_t pos; + int64_t timestamp; /**< + * Timestamp in AVStream.time_base units, preferably the time from which on correctly decoded frames are available + * when seeking to this entry. That means preferable PTS on keyframe based formats. + * But demuxers can choose to store a different timestamp, if it is more convenient for the implementation or nothing better + * is known + */ +#define AVINDEX_KEYFRAME 0x0001 +#define AVINDEX_DISCARD_FRAME 0x0002 /** + * Flag is used to indicate which frame should be discarded after decoding. 
+ */ + int flags:2; + int size:30; //Yeah, trying to keep the size of this small to reduce memory requirements (it is 24 vs. 32 bytes due to possible 8-byte alignment). + int min_distance; /**< Minimum distance between this and the previous keyframe, used to avoid unneeded searching. */ +} AVIndexEntry; + +#define AV_DISPOSITION_DEFAULT 0x0001 +#define AV_DISPOSITION_DUB 0x0002 +#define AV_DISPOSITION_ORIGINAL 0x0004 +#define AV_DISPOSITION_COMMENT 0x0008 +#define AV_DISPOSITION_LYRICS 0x0010 +#define AV_DISPOSITION_KARAOKE 0x0020 + +/** + * Track should be used during playback by default. + * Useful for subtitle track that should be displayed + * even when user did not explicitly ask for subtitles. + */ +#define AV_DISPOSITION_FORCED 0x0040 +#define AV_DISPOSITION_HEARING_IMPAIRED 0x0080 /**< stream for hearing impaired audiences */ +#define AV_DISPOSITION_VISUAL_IMPAIRED 0x0100 /**< stream for visual impaired audiences */ +#define AV_DISPOSITION_CLEAN_EFFECTS 0x0200 /**< stream without voice */ +/** + * The stream is stored in the file as an attached picture/"cover art" (e.g. + * APIC frame in ID3v2). The first (usually only) packet associated with it + * will be returned among the first few packets read from the file unless + * seeking takes place. It can also be accessed at any time in + * AVStream.attached_pic. + */ +#define AV_DISPOSITION_ATTACHED_PIC 0x0400 +/** + * The stream is sparse, and contains thumbnail images, often corresponding + * to chapter markers. Only ever used with AV_DISPOSITION_ATTACHED_PIC. + */ +#define AV_DISPOSITION_TIMED_THUMBNAILS 0x0800 + +typedef struct AVStreamInternal AVStreamInternal; + +/** + * To specify text track kind (different from subtitles default). + */ +#define AV_DISPOSITION_CAPTIONS 0x10000 +#define AV_DISPOSITION_DESCRIPTIONS 0x20000 +#define AV_DISPOSITION_METADATA 0x40000 + +/** + * Options for behavior on timestamp wrap detection. 
+ */ +#define AV_PTS_WRAP_IGNORE 0 ///< ignore the wrap +#define AV_PTS_WRAP_ADD_OFFSET 1 ///< add the format specific offset on wrap detection +#define AV_PTS_WRAP_SUB_OFFSET -1 ///< subtract the format specific offset on wrap detection + +/** + * Stream structure. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVStream) must not be used outside libav*. + */ +typedef struct AVStream { + int index; /**< stream index in AVFormatContext */ + /** + * Format-specific stream ID. + * decoding: set by libavformat + * encoding: set by the user, replaced by libavformat if left unset + */ + int id; +#if FF_API_LAVF_AVCTX + /** + * @deprecated use the codecpar struct instead + */ + attribute_deprecated + AVCodecContext *codec; +#endif + void *priv_data; + +#if FF_API_LAVF_FRAC + /** + * @deprecated this field is unused + */ + attribute_deprecated + struct AVFrac pts; +#endif + + /** + * This is the fundamental unit of time (in seconds) in terms + * of which frame timestamps are represented. + * + * decoding: set by libavformat + * encoding: May be set by the caller before avformat_write_header() to + * provide a hint to the muxer about the desired timebase. In + * avformat_write_header(), the muxer will overwrite this field + * with the timebase that will actually be used for the timestamps + * written into the file (which may or may not be related to the + * user-provided one, depending on the format). + */ + AVRational time_base; + + /** + * Decoding: pts of the first frame of the stream in presentation order, in stream time base. + * Only set this if you are absolutely 100% sure that the value you set + * it to really is the pts of the first frame. + * This may be undefined (AV_NOPTS_VALUE). + * @note The ASF header does NOT contain a correct start_time the ASF + * demuxer must NOT set this. 
+ */ + int64_t start_time; + + /** + * Decoding: duration of the stream, in stream time base. + * If a source file does not specify a duration, but does specify + * a bitrate, this value will be estimated from bitrate and file size. + * + * Encoding: May be set by the caller before avformat_write_header() to + * provide a hint to the muxer about the estimated duration. + */ + int64_t duration; + + int64_t nb_frames; ///< number of frames in this stream if known or 0 + + int disposition; /**< AV_DISPOSITION_* bit field */ + + enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed. + + /** + * sample aspect ratio (0 if unknown) + * - encoding: Set by user. + * - decoding: Set by libavformat. + */ + AVRational sample_aspect_ratio; + + AVDictionary *metadata; + + /** + * Average framerate + * + * - demuxing: May be set by libavformat when creating the stream or in + * avformat_find_stream_info(). + * - muxing: May be set by the caller before avformat_write_header(). + */ + AVRational avg_frame_rate; + + /** + * For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet + * will contain the attached picture. + * + * decoding: set by libavformat, must not be modified by the caller. + * encoding: unused + */ + AVPacket attached_pic; + + /** + * An array of side data that applies to the whole stream (i.e. the + * container does not allow it to change between packets). + * + * There may be no overlap between the side data in this array and side data + * in the packets. I.e. a given side data is either exported by the muxer + * (demuxing) / set by the caller (muxing) in this array, then it never + * appears in the packets, or the side data is exported / sent through + * the packets (always in the first packet where the value becomes known or + * changes), then it does not appear in this array. + * + * - demuxing: Set by libavformat when the stream is created. 
+ * - muxing: May be set by the caller before avformat_write_header(). + * + * Freed by libavformat in avformat_free_context(). + * + * @see av_format_inject_global_side_data() + */ + AVPacketSideData *side_data; + /** + * The number of elements in the AVStream.side_data array. + */ + int nb_side_data; + + /** + * Flags for the user to detect events happening on the stream. Flags must + * be cleared by the user once the event has been handled. + * A combination of AVSTREAM_EVENT_FLAG_*. + */ + int event_flags; +#define AVSTREAM_EVENT_FLAG_METADATA_UPDATED 0x0001 ///< The call resulted in updated metadata. + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * Internal note: be aware that physically removing these fields + * will break ABI. Replace removed fields with dummy fields, and + * add new fields to AVStreamInternal. + ***************************************************************** + */ + + /** + * Stream information used internally by avformat_find_stream_info() + */ +#define MAX_STD_TIMEBASES (30*12+30+3+6) + struct { + int64_t last_dts; + int64_t duration_gcd; + int duration_count; + int64_t rfps_duration_sum; + double (*duration_error)[2][MAX_STD_TIMEBASES]; + int64_t codec_info_duration; + int64_t codec_info_duration_fields; + + /** + * 0 -> decoder has not been searched for yet. + * >0 -> decoder found + * <0 -> decoder with codec_id == -found_decoder has not been found + */ + int found_decoder; + + int64_t last_duration; + + /** + * Those are used for average framerate estimation. + */ + int64_t fps_first_dts; + int fps_first_dts_idx; + int64_t fps_last_dts; + int fps_last_dts_idx; + + } *info; + + int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */ + + // Timestamp generation support: + /** + * Timestamp corresponding to the last dts sync point. 
+ * + * Initialized when AVCodecParserContext.dts_sync_point >= 0 and + * a DTS is received from the underlying container. Otherwise set to + * AV_NOPTS_VALUE by default. + */ + int64_t first_dts; + int64_t cur_dts; + int64_t last_IP_pts; + int last_IP_duration; + + /** + * Number of packets to buffer for codec probing + */ + int probe_packets; + + /** + * Number of frames that have been demuxed during avformat_find_stream_info() + */ + int codec_info_nb_frames; + + /* av_read_frame() support */ + enum AVStreamParseType need_parsing; + struct AVCodecParserContext *parser; + + /** + * last packet in packet_buffer for this stream when muxing. + */ + struct AVPacketList *last_in_packet_buffer; + AVProbeData probe_data; +#define MAX_REORDER_DELAY 16 + int64_t pts_buffer[MAX_REORDER_DELAY+1]; + + AVIndexEntry *index_entries; /**< Only used if the format does not + support seeking natively. */ + int nb_index_entries; + unsigned int index_entries_allocated_size; + + /** + * Real base framerate of the stream. + * This is the lowest framerate with which all timestamps can be + * represented accurately (it is the least common multiple of all + * framerates in the stream). Note, this value is just a guess! + * For example, if the time base is 1/90000 and all frames have either + * approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1. + * + * Code outside avformat should access this field using: + * av_stream_get/set_r_frame_rate(stream) + */ + AVRational r_frame_rate; + + /** + * Stream Identifier + * This is the MPEG-TS stream identifier +1 + * 0 means unknown + */ + int stream_identifier; + + int64_t interleaver_chunk_size; + int64_t interleaver_chunk_duration; + + /** + * stream probing state + * -1 -> probing finished + * 0 -> no probing requested + * rest -> perform probing with request_probe being the minimum score to accept. 
+ * NOT PART OF PUBLIC API + */ + int request_probe; + /** + * Indicates that everything up to the next keyframe + * should be discarded. + */ + int skip_to_keyframe; + + /** + * Number of samples to skip at the start of the frame decoded from the next packet. + */ + int skip_samples; + + /** + * If not 0, the number of samples that should be skipped from the start of + * the stream (the samples are removed from packets with pts==0, which also + * assumes negative timestamps do not happen). + * Intended for use with formats such as mp3 with ad-hoc gapless audio + * support. + */ + int64_t start_skip_samples; + + /** + * If not 0, the first audio sample that should be discarded from the stream. + * This is broken by design (needs global sample count), but can't be + * avoided for broken by design formats such as mp3 with ad-hoc gapless + * audio support. + */ + int64_t first_discard_sample; + + /** + * The sample after last sample that is intended to be discarded after + * first_discard_sample. Works on frame boundaries only. Used to prevent + * early EOF if the gapless info is broken (considered concatenated mp3s). + */ + int64_t last_discard_sample; + + /** + * Number of internally decoded frames, used internally in libavformat, do not access + * its lifetime differs from info which is why it is not in that structure. + */ + int nb_decoded_frames; + + /** + * Timestamp offset added to timestamps before muxing + * NOT PART OF PUBLIC API + */ + int64_t mux_ts_offset; + + /** + * Internal data to check for wrapping of the time stamp + */ + int64_t pts_wrap_reference; + + /** + * Options for behavior, when a wrap is detected. + * + * Defined by AV_PTS_WRAP_ values. + * + * If correction is enabled, there are two possibilities: + * If the first time stamp is near the wrap point, the wrap offset + * will be subtracted, which will create negative time stamps. + * Otherwise the offset will be added. 
+ */ + int pts_wrap_behavior; + + /** + * Internal data to prevent doing update_initial_durations() twice + */ + int update_initial_durations_done; + + /** + * Internal data to generate dts from pts + */ + int64_t pts_reorder_error[MAX_REORDER_DELAY+1]; + uint8_t pts_reorder_error_count[MAX_REORDER_DELAY+1]; + + /** + * Internal data to analyze DTS and detect faulty mpeg streams + */ + int64_t last_dts_for_order_check; + uint8_t dts_ordered; + uint8_t dts_misordered; + + /** + * Internal data to inject global side data + */ + int inject_global_side_data; + + /***************************************************************** + * All fields above this line are not part of the public API. + * Fields below are part of the public API and ABI again. + ***************************************************************** + */ + + /** + * String containing paris of key and values describing recommended encoder configuration. + * Paris are separated by ','. + * Keys are separated from values by '='. + */ + char *recommended_encoder_configuration; + + /** + * display aspect ratio (0 if unknown) + * - encoding: unused + * - decoding: Set by libavformat to calculate sample_aspect_ratio internally + */ + AVRational display_aspect_ratio; + + struct FFFrac *priv_pts; + + /** + * An opaque field for libavformat internal usage. + * Must not be accessed in any way by callers. + */ + AVStreamInternal *internal; + + /* + * Codec parameters associated with this stream. Allocated and freed by + * libavformat in avformat_new_stream() and avformat_free_context() + * respectively. 
+ * + * - demuxing: filled by libavformat on stream creation or in + * avformat_find_stream_info() + * - muxing: filled by the caller before avformat_write_header() + */ + AVCodecParameters *codecpar; +} AVStream; + +AVRational av_stream_get_r_frame_rate(const AVStream *s); +void av_stream_set_r_frame_rate(AVStream *s, AVRational r); +struct AVCodecParserContext *av_stream_get_parser(const AVStream *s); +char* av_stream_get_recommended_encoder_configuration(const AVStream *s); +void av_stream_set_recommended_encoder_configuration(AVStream *s, char *configuration); + +/** + * Returns the pts of the last muxed packet + its duration + * + * the retuned value is undefined when used with a demuxer. + */ +int64_t av_stream_get_end_pts(const AVStream *st); + +#define AV_PROGRAM_RUNNING 1 + +/** + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVProgram) must not be used outside libav*. + */ +typedef struct AVProgram { + int id; + int flags; + enum AVDiscard discard; ///< selects which program to discard and which to feed to the caller + unsigned int *stream_index; + unsigned int nb_stream_indexes; + AVDictionary *metadata; + + int program_num; + int pmt_pid; + int pcr_pid; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. 
+ ***************************************************************** + */ + int64_t start_time; + int64_t end_time; + + int64_t pts_wrap_reference; ///< reference dts for wrap detection + int pts_wrap_behavior; ///< behavior on wrap detection +} AVProgram; + +#define AVFMTCTX_NOHEADER 0x0001 /**< signal that no header is present + (streams are added dynamically) */ + +typedef struct AVChapter { + int id; ///< unique ID to identify the chapter + AVRational time_base; ///< time base in which the start/end timestamps are specified + int64_t start, end; ///< chapter start/end time in time_base units + AVDictionary *metadata; +} AVChapter; + + +/** + * Callback used by devices to communicate with application. + */ +typedef int (*av_format_control_message)(struct AVFormatContext *s, int type, + void *data, size_t data_size); + +typedef int (*AVOpenCallback)(struct AVFormatContext *s, AVIOContext **pb, const char *url, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * The duration of a video can be estimated through various ways, and this enum can be used + * to know how the duration was estimated. + */ +enum AVDurationEstimationMethod { + AVFMT_DURATION_FROM_PTS, ///< Duration accurately estimated from PTSes + AVFMT_DURATION_FROM_STREAM, ///< Duration estimated from a stream with a known duration + AVFMT_DURATION_FROM_BITRATE ///< Duration estimated from bitrate (less accurate) +}; + +typedef struct AVFormatInternal AVFormatInternal; + +/** + * Format I/O context. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVFormatContext) must not be used outside libav*, use + * avformat_alloc_context() to create an AVFormatContext. + * + * Fields can be accessed through AVOptions (av_opt*), + * the name string used matches the associated command line parameter name and + * can be found in libavformat/options_table.h. 
+ * The AVOption/command line parameter names differ in some cases from the C + * structure field names for historic reasons or brevity. + */ +typedef struct AVFormatContext { + /** + * A class for logging and @ref avoptions. Set by avformat_alloc_context(). + * Exports (de)muxer private options if they exist. + */ + const AVClass *av_class; + + /** + * The input container format. + * + * Demuxing only, set by avformat_open_input(). + */ + struct AVInputFormat *iformat; + + /** + * The output container format. + * + * Muxing only, must be set by the caller before avformat_write_header(). + */ + struct AVOutputFormat *oformat; + + /** + * Format private data. This is an AVOptions-enabled struct + * if and only if iformat/oformat.priv_class is not NULL. + * + * - muxing: set by avformat_write_header() + * - demuxing: set by avformat_open_input() + */ + void *priv_data; + + /** + * I/O context. + * + * - demuxing: either set by the user before avformat_open_input() (then + * the user must close it manually) or set by avformat_open_input(). + * - muxing: set by the user before avformat_write_header(). The caller must + * take care of closing / freeing the IO context. + * + * Do NOT set this field if AVFMT_NOFILE flag is set in + * iformat/oformat.flags. In such a case, the (de)muxer will handle + * I/O in some other way and this field will be NULL. + */ + AVIOContext *pb; + + /* stream info */ + /** + * Flags signalling stream properties. A combination of AVFMTCTX_*. + * Set by libavformat. + */ + int ctx_flags; + + /** + * Number of elements in AVFormatContext.streams. + * + * Set by avformat_new_stream(), must not be modified by any other code. + */ + unsigned int nb_streams; + /** + * A list of all streams in the file. New streams are created with + * avformat_new_stream(). + * + * - demuxing: streams are created by libavformat in avformat_open_input(). + * If AVFMTCTX_NOHEADER is set in ctx_flags, then new streams may also + * appear in av_read_frame(). 
+ * - muxing: streams are created by the user before avformat_write_header(). + * + * Freed by libavformat in avformat_free_context(). + */ + AVStream **streams; + + /** + * input or output filename + * + * - demuxing: set by avformat_open_input() + * - muxing: may be set by the caller before avformat_write_header() + */ + char filename[1024]; + + /** + * Position of the first frame of the component, in + * AV_TIME_BASE fractional seconds. NEVER set this value directly: + * It is deduced from the AVStream values. + * + * Demuxing only, set by libavformat. + */ + int64_t start_time; + + /** + * Duration of the stream, in AV_TIME_BASE fractional + * seconds. Only set this value if you know none of the individual stream + * durations and also do not set any of them. This is deduced from the + * AVStream values if not set. + * + * Demuxing only, set by libavformat. + */ + int64_t duration; + + /** + * Total stream bitrate in bit/s, 0 if not + * available. Never set it directly if the file_size and the + * duration are known as FFmpeg can compute it automatically. + */ + int64_t bit_rate; + + unsigned int packet_size; + int max_delay; + + /** + * Flags modifying the (de)muxer behaviour. A combination of AVFMT_FLAG_*. + * Set by the user before avformat_open_input() / avformat_write_header(). + */ + int flags; +#define AVFMT_FLAG_GENPTS 0x0001 ///< Generate missing pts even if it requires parsing future frames. +#define AVFMT_FLAG_IGNIDX 0x0002 ///< Ignore index. +#define AVFMT_FLAG_NONBLOCK 0x0004 ///< Do not block when reading packets from input. +#define AVFMT_FLAG_IGNDTS 0x0008 ///< Ignore DTS on frames that contain both DTS & PTS +#define AVFMT_FLAG_NOFILLIN 0x0010 ///< Do not infer any values from other values, just return what is stored in the container +#define AVFMT_FLAG_NOPARSE 0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. 
Also seeking to frames can not work if parsing to find frame boundaries has been disabled +#define AVFMT_FLAG_NOBUFFER 0x0040 ///< Do not buffer frames when possible +#define AVFMT_FLAG_CUSTOM_IO 0x0080 ///< The caller has supplied a custom AVIOContext, don't avio_close() it. +#define AVFMT_FLAG_DISCARD_CORRUPT 0x0100 ///< Discard frames marked corrupted +#define AVFMT_FLAG_FLUSH_PACKETS 0x0200 ///< Flush the AVIOContext every packet. +/** + * When muxing, try to avoid writing any random/volatile data to the output. + * This includes any random IDs, real-time timestamps/dates, muxer version, etc. + * + * This flag is mainly intended for testing. + */ +#define AVFMT_FLAG_BITEXACT 0x0400 +#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Enable RTP MP4A-LATM payload +#define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down) +#define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted) +#if FF_API_LAVF_KEEPSIDE_FLAG +#define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Don't merge side data but keep it separate. Deprecated, will be the default. +#endif +#define AVFMT_FLAG_FAST_SEEK 0x80000 ///< Enable fast, but inaccurate seeks for some formats +#define AVFMT_FLAG_SHORTEST 0x100000 ///< Stop muxing when the shortest stream stops. +#define AVFMT_FLAG_AUTO_BSF 0x200000 ///< Wait for packet data before writing a header, and add bitstream filters as requested by the muxer + + /** + * Maximum size of the data read from input for determining + * the input container format. + * Demuxing only, set by the caller before avformat_open_input(). + */ + int64_t probesize; + + /** + * Maximum duration (in AV_TIME_BASE units) of the data read + * from input in avformat_find_stream_info(). + * Demuxing only, set by the caller before avformat_find_stream_info(). + * Can be set to 0 to let avformat choose using a heuristic. 
+ */ + int64_t max_analyze_duration; + + const uint8_t *key; + int keylen; + + unsigned int nb_programs; + AVProgram **programs; + + /** + * Forced video codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID video_codec_id; + + /** + * Forced audio codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID audio_codec_id; + + /** + * Forced subtitle codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID subtitle_codec_id; + + /** + * Maximum amount of memory in bytes to use for the index of each stream. + * If the index exceeds this size, entries will be discarded as + * needed to maintain a smaller size. This can lead to slower or less + * accurate seeking (depends on demuxer). + * Demuxers for which a full in-memory index is mandatory will ignore + * this. + * - muxing: unused + * - demuxing: set by user + */ + unsigned int max_index_size; + + /** + * Maximum amount of memory in bytes to use for buffering frames + * obtained from realtime capture devices. + */ + unsigned int max_picture_buffer; + + /** + * Number of chapters in AVChapter array. + * When muxing, chapters are normally written in the file header, + * so nb_chapters should normally be initialized before write_header + * is called. Some muxers (e.g. mov and mkv) can also write chapters + * in the trailer. To write chapters in the trailer, nb_chapters + * must be zero when write_header is called and non-zero when + * write_trailer is called. + * - muxing: set by user + * - demuxing: set by libavformat + */ + unsigned int nb_chapters; + AVChapter **chapters; + + /** + * Metadata that applies to the whole file. + * + * - demuxing: set by libavformat in avformat_open_input() + * - muxing: may be set by the caller before avformat_write_header() + * + * Freed by libavformat in avformat_free_context(). + */ + AVDictionary *metadata; + + /** + * Start time of the stream in real world time, in microseconds + * since the Unix epoch (00:00 1st January 1970). 
That is, pts=0 in the + * stream was captured at this real world time. + * - muxing: Set by the caller before avformat_write_header(). If set to + * either 0 or AV_NOPTS_VALUE, then the current wall-time will + * be used. + * - demuxing: Set by libavformat. AV_NOPTS_VALUE if unknown. Note that + * the value may become known after some number of frames + * have been received. + */ + int64_t start_time_realtime; + + /** + * The number of frames used for determining the framerate in + * avformat_find_stream_info(). + * Demuxing only, set by the caller before avformat_find_stream_info(). + */ + int fps_probe_size; + + /** + * Error recognition; higher values will detect more errors but may + * misdetect some more or less valid parts as errors. + * Demuxing only, set by the caller before avformat_open_input(). + */ + int error_recognition; + + /** + * Custom interrupt callbacks for the I/O layer. + * + * demuxing: set by the user before avformat_open_input(). + * muxing: set by the user before avformat_write_header() + * (mainly useful for AVFMT_NOFILE formats). The callback + * should also be passed to avio_open2() if it's used to + * open the file. + */ + AVIOInterruptCB interrupt_callback; + + /** + * Flags to enable debugging. + */ + int debug; +#define FF_FDEBUG_TS 0x0001 + + /** + * Maximum buffering duration for interleaving. + * + * To ensure all the streams are interleaved correctly, + * av_interleaved_write_frame() will wait until it has at least one packet + * for each stream before actually writing any packets to the output file. + * When some streams are "sparse" (i.e. there are large gaps between + * successive packets), this can result in excessive buffering. + * + * This field specifies the maximum difference between the timestamps of the + * first and the last packet in the muxing queue, above which libavformat + * will output a packet regardless of whether it has queued a packet for all + * the streams. 
+ *
+ * Muxing only, set by the caller before avformat_write_header().
+ */
+ int64_t max_interleave_delta;
+
+ /**
+ * Allow non-standard and experimental extension
+ * @see AVCodecContext.strict_std_compliance
+ */
+ int strict_std_compliance;
+
+ /**
+ * Flags for the user to detect events happening on the file. Flags must
+ * be cleared by the user once the event has been handled.
+ * A combination of AVFMT_EVENT_FLAG_*.
+ */
+ int event_flags;
+#define AVFMT_EVENT_FLAG_METADATA_UPDATED 0x0001 ///< The call resulted in updated metadata.
+#define AVSTREAM_EVENT_FLAG_HLS_KEY_ERROR 0x0002 ///< The call resulted in fetching the encrypted hls key failed
+
+ /**
+ * Maximum number of packets to read while waiting for the first timestamp.
+ * Decoding only.
+ */
+ int max_ts_probe;
+
+ /**
+ * Avoid negative timestamps during muxing.
+ * Any value of the AVFMT_AVOID_NEG_TS_* constants.
+ * Note, this only works when using av_interleaved_write_frame. (interleave_packet_per_dts is in use)
+ * - muxing: Set by user
+ * - demuxing: unused
+ */
+ int avoid_negative_ts;
+#define AVFMT_AVOID_NEG_TS_AUTO -1 ///< Enabled when required by target format
+#define AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE 1 ///< Shift timestamps so they are non negative
+#define AVFMT_AVOID_NEG_TS_MAKE_ZERO 2 ///< Shift timestamps so that they start at 0
+
+ /**
+ * Transport stream id.
+ * This will be moved into demuxer private options. Thus no API/ABI compatibility
+ */
+ int ts_id;
+
+ /**
+ * Audio preload in microseconds.
+ * Note, not all formats support this and unpredictable things may happen if it is used when not supported.
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ int audio_preload;
+
+ /**
+ * Max chunk time in microseconds.
+ * Note, not all formats support this and unpredictable things may happen if it is used when not supported.
+ * - encoding: Set by user + * - decoding: unused + */ + int max_chunk_duration; + + /** + * Max chunk size in bytes + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. + * - encoding: Set by user + * - decoding: unused + */ + int max_chunk_size; + + /** + * forces the use of wallclock timestamps as pts/dts of packets + * This has undefined results in the presence of B frames. + * - encoding: unused + * - decoding: Set by user + */ + int use_wallclock_as_timestamps; + + /** + * avio flags, used to force AVIO_FLAG_DIRECT. + * - encoding: unused + * - decoding: Set by user + */ + int avio_flags; + + /** + * The duration field can be estimated through various ways, and this field can be used + * to know how the duration was estimated. + * - encoding: unused + * - decoding: Read by user + */ + enum AVDurationEstimationMethod duration_estimation_method; + + /** + * Skip initial bytes when opening stream + * - encoding: unused + * - decoding: Set by user + */ + int64_t skip_initial_bytes; + + /** + * Correct single timestamp overflows + * - encoding: unused + * - decoding: Set by user + */ + unsigned int correct_ts_overflow; + + /** + * Force seeking to any (also non key) frames. + * - encoding: unused + * - decoding: Set by user + */ + int seek2any; + + /** + * Flush the I/O context after each packet. + * - encoding: Set by user + * - decoding: unused + */ + int flush_packets; + + /** + * format probing score. + * The maximal score is AVPROBE_SCORE_MAX, its set when the demuxer probes + * the format. + * - encoding: unused + * - decoding: set by avformat, read by user + */ + int probe_score; + + /** + * number of bytes to read maximally to identify format. + * - encoding: unused + * - decoding: set by user + */ + int format_probesize; + + /** + * ',' separated list of allowed decoders. 
+ * If NULL then all are allowed
+ * - encoding: unused
+ * - decoding: set by user
+ */
+ char *codec_whitelist;
+
+ /**
+ * ',' separated list of allowed demuxers.
+ * If NULL then all are allowed
+ * - encoding: unused
+ * - decoding: set by user
+ */
+ char *format_whitelist;
+
+ /**
+ * An opaque field for libavformat internal usage.
+ * Must not be accessed in any way by callers.
+ */
+ AVFormatInternal *internal;
+
+ /**
+ * IO repositioned flag.
+ * This is set by avformat when the underlying IO context read pointer
+ * is repositioned, for example when doing byte based seeking.
+ * Demuxers can use the flag to detect such changes.
+ */
+ int io_repositioned;
+
+ /**
+ * Forced video codec.
+ * This allows forcing a specific decoder, even when there are multiple with
+ * the same codec_id.
+ * Demuxing: Set by user
+ */
+ AVCodec *video_codec;
+
+ /**
+ * Forced audio codec.
+ * This allows forcing a specific decoder, even when there are multiple with
+ * the same codec_id.
+ * Demuxing: Set by user
+ */
+ AVCodec *audio_codec;
+
+ /**
+ * Forced subtitle codec.
+ * This allows forcing a specific decoder, even when there are multiple with
+ * the same codec_id.
+ * Demuxing: Set by user
+ */
+ AVCodec *subtitle_codec;
+
+ /**
+ * Forced data codec.
+ * This allows forcing a specific decoder, even when there are multiple with
+ * the same codec_id.
+ * Demuxing: Set by user
+ */
+ AVCodec *data_codec;
+
+ /**
+ * Number of bytes to be written as padding in a metadata header.
+ * Demuxing: Unused.
+ * Muxing: Set by user via av_format_set_metadata_header_padding.
+ */
+ int metadata_header_padding;
+
+ /**
+ * User data.
+ * This is a place for some private data of the user.
+ */
+ void *opaque;
+
+ /**
+ * Callback used by devices to communicate with application.
+ */
+ av_format_control_message control_message_cb;
+
+ /**
+ * Output timestamp offset, in microseconds.
+ * Muxing: set by user + */ + int64_t output_ts_offset; + + /** + * dump format separator. + * can be ", " or "\n " or anything else + * - muxing: Set by user. + * - demuxing: Set by user. + */ + uint8_t *dump_separator; + + /** + * Forced Data codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID data_codec_id; + +#if FF_API_OLD_OPEN_CALLBACKS + /** + * Called to open further IO contexts when needed for demuxing. + * + * This can be set by the user application to perform security checks on + * the URLs before opening them. + * The function should behave like avio_open2(), AVFormatContext is provided + * as contextual information and to reach AVFormatContext.opaque. + * + * If NULL then some simple checks are used together with avio_open2(). + * + * Must not be accessed directly from outside avformat. + * @See av_format_set_open_cb() + * + * Demuxing: Set by user. + * + * @deprecated Use io_open and io_close. + */ + attribute_deprecated + int (*open_cb)(struct AVFormatContext *s, AVIOContext **p, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options); +#endif + + /** + * ',' separated list of allowed protocols. + * - encoding: unused + * - decoding: set by user + */ + char *protocol_whitelist; + + /* + * A callback for opening new IO streams. + * + * Whenever a muxer or a demuxer needs to open an IO stream (typically from + * avformat_open_input() for demuxers, but for certain formats can happen at + * other times as well), it will call this callback to obtain an IO context. + * + * @param s the format context + * @param pb on success, the newly opened IO context should be returned here + * @param url the url to open + * @param flags a combination of AVIO_FLAG_* + * @param options a dictionary of additional options, with the same + * semantics as in avio_open2() + * @return 0 on success, a negative AVERROR code on failure + * + * @note Certain muxers and demuxers do nesting, i.e. 
they open one or more + * additional internal format contexts. Thus the AVFormatContext pointer + * passed to this callback may be different from the one facing the caller. + * It will, however, have the same 'opaque' field. + */ + int (*io_open)(struct AVFormatContext *s, AVIOContext **pb, const char *url, + int flags, AVDictionary **options); + + /** + * A callback for closing the streams opened with AVFormatContext.io_open(). + */ + void (*io_close)(struct AVFormatContext *s, AVIOContext *pb); + + /** + * ',' separated list of disallowed protocols. + * - encoding: unused + * - decoding: set by user + */ + char *protocol_blacklist; + + /** + * The maximum number of streams. + * - encoding: unused + * - decoding: set by user + */ + int max_streams; +} AVFormatContext; + +/** + * Accessors for some AVFormatContext fields. These used to be provided for ABI + * compatibility, and do not need to be used anymore. + */ +int av_format_get_probe_score(const AVFormatContext *s); +AVCodec * av_format_get_video_codec(const AVFormatContext *s); +void av_format_set_video_codec(AVFormatContext *s, AVCodec *c); +AVCodec * av_format_get_audio_codec(const AVFormatContext *s); +void av_format_set_audio_codec(AVFormatContext *s, AVCodec *c); +AVCodec * av_format_get_subtitle_codec(const AVFormatContext *s); +void av_format_set_subtitle_codec(AVFormatContext *s, AVCodec *c); +AVCodec * av_format_get_data_codec(const AVFormatContext *s); +void av_format_set_data_codec(AVFormatContext *s, AVCodec *c); +int av_format_get_metadata_header_padding(const AVFormatContext *s); +void av_format_set_metadata_header_padding(AVFormatContext *s, int c); +void * av_format_get_opaque(const AVFormatContext *s); +void av_format_set_opaque(AVFormatContext *s, void *opaque); +av_format_control_message av_format_get_control_message_cb(const AVFormatContext *s); +void av_format_set_control_message_cb(AVFormatContext *s, av_format_control_message callback); +#if FF_API_OLD_OPEN_CALLBACKS 
+attribute_deprecated AVOpenCallback av_format_get_open_cb(const AVFormatContext *s); +attribute_deprecated void av_format_set_open_cb(AVFormatContext *s, AVOpenCallback callback); +#endif + +/** + * This function will cause global side data to be injected in the next packet + * of each stream as well as after any subsequent seek. + */ +void av_format_inject_global_side_data(AVFormatContext *s); + +/** + * Returns the method used to set ctx->duration. + * + * @return AVFMT_DURATION_FROM_PTS, AVFMT_DURATION_FROM_STREAM, or AVFMT_DURATION_FROM_BITRATE. + */ +enum AVDurationEstimationMethod av_fmt_ctx_get_duration_estimation_method(const AVFormatContext* ctx); + +typedef struct AVPacketList { + AVPacket pkt; + struct AVPacketList *next; +} AVPacketList; + + +/** + * @defgroup lavf_core Core functions + * @ingroup libavf + * + * Functions for querying libavformat capabilities, allocating core structures, + * etc. + * @{ + */ + +/** + * Return the LIBAVFORMAT_VERSION_INT constant. + */ +unsigned avformat_version(void); + +/** + * Return the libavformat build-time configuration. + */ +const char *avformat_configuration(void); + +/** + * Return the libavformat license. + */ +const char *avformat_license(void); + +/** + * Initialize libavformat and register all the muxers, demuxers and + * protocols. If you do not call this function, then you can select + * exactly which formats you want to support. + * + * @see av_register_input_format() + * @see av_register_output_format() + */ +void av_register_all(void); + +void av_register_input_format(AVInputFormat *format); +void av_register_output_format(AVOutputFormat *format); + +/** + * Do global initialization of network components. This is optional, + * but recommended, since it avoids the overhead of implicitly + * doing the setup for each session. + * + * Calling this function will become mandatory if using network + * protocols at some major version bump. 
+ */ +int avformat_network_init(void); + +/** + * Undo the initialization done by avformat_network_init. + */ +int avformat_network_deinit(void); + +/** + * If f is NULL, returns the first registered input format, + * if f is non-NULL, returns the next registered input format after f + * or NULL if f is the last one. + */ +AVInputFormat *av_iformat_next(const AVInputFormat *f); + +/** + * If f is NULL, returns the first registered output format, + * if f is non-NULL, returns the next registered output format after f + * or NULL if f is the last one. + */ +AVOutputFormat *av_oformat_next(const AVOutputFormat *f); + +/** + * Allocate an AVFormatContext. + * avformat_free_context() can be used to free the context and everything + * allocated by the framework within it. + */ +AVFormatContext *avformat_alloc_context(void); + +/** + * Free an AVFormatContext and all its streams. + * @param s context to free + */ +void avformat_free_context(AVFormatContext *s); + +/** + * Get the AVClass for AVFormatContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avformat_get_class(void); + +/** + * Add a new stream to a media file. + * + * When demuxing, it is called by the demuxer in read_header(). If the + * flag AVFMTCTX_NOHEADER is set in s.ctx_flags, then it may also + * be called in read_packet(). + * + * When muxing, should be called by the user before avformat_write_header(). + * + * User is required to call avcodec_close() and avformat_free_context() to + * clean up the allocation by avformat_new_stream(). + * + * @param s media file handle + * @param c If non-NULL, the AVCodecContext corresponding to the new stream + * will be initialized to use this codec. This is needed for e.g. codec-specific + * defaults to be set, so codec should be provided if it is known. + * + * @return newly created stream or NULL on error. 
+ */ +AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c); + +/** + * Wrap an existing array as stream side data. + * + * @param st stream + * @param type side information type + * @param data the side data array. It must be allocated with the av_malloc() + * family of functions. The ownership of the data is transferred to + * st. + * @param size side information size + * @return zero on success, a negative AVERROR code on failure. On failure, + * the stream is unchanged and the data remains owned by the caller. + */ +int av_stream_add_side_data(AVStream *st, enum AVPacketSideDataType type, + uint8_t *data, size_t size); + +/** + * Allocate new information from stream. + * + * @param stream stream + * @param type desired side information type + * @param size side information size + * @return pointer to fresh allocated data or NULL otherwise + */ +uint8_t *av_stream_new_side_data(AVStream *stream, + enum AVPacketSideDataType type, int size); +/** + * Get side information from stream. + * + * @param stream stream + * @param type desired side information type + * @param size pointer for side information size to store (optional) + * @return pointer to data if present or NULL otherwise + */ +#if FF_API_NOCONST_GET_SIDE_DATA +uint8_t *av_stream_get_side_data(AVStream *stream, + enum AVPacketSideDataType type, int *size); +#else +uint8_t *av_stream_get_side_data(const AVStream *stream, + enum AVPacketSideDataType type, int *size); +#endif + +AVProgram *av_new_program(AVFormatContext *s, int id); + +/** + * @} + */ + + +/** + * Allocate an AVFormatContext for an output format. + * avformat_free_context() can be used to free the context and + * everything allocated by the framework within it. 
+ * + * @param *ctx is set to the created format context, or to NULL in + * case of failure + * @param oformat format to use for allocating the context, if NULL + * format_name and filename are used instead + * @param format_name the name of output format to use for allocating the + * context, if NULL filename is used instead + * @param filename the name of the filename to use for allocating the + * context, may be NULL + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + */ +int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat, + const char *format_name, const char *filename); + +/** + * @addtogroup lavf_decoding + * @{ + */ + +/** + * Find AVInputFormat based on the short name of the input format. + */ +AVInputFormat *av_find_input_format(const char *short_name); + +/** + * Guess the file format. + * + * @param pd data to be probed + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. + */ +AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened); + +/** + * Guess the file format. + * + * @param pd data to be probed + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. + * @param score_max A probe score larger that this is required to accept a + * detection, the variable is set to the actual detection + * score afterwards. + * If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended + * to retry with a larger probe buffer. + */ +AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max); + +/** + * Guess the file format. + * + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. + * @param score_ret The score of the best detection. 
+ */ +AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret); + +/** + * Probe a bytestream to determine the input format. Each time a probe returns + * with a score that is too low, the probe buffer size is increased and another + * attempt is made. When the maximum probe size is reached, the input format + * with the highest score is returned. + * + * @param pb the bytestream to probe + * @param fmt the input format is put here + * @param url the url of the stream + * @param logctx the log context + * @param offset the offset within the bytestream to probe from + * @param max_probe_size the maximum probe buffer size (zero for default) + * @return the score in case of success, a negative value corresponding to an + * the maximal score is AVPROBE_SCORE_MAX + * AVERROR code otherwise + */ +int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt, + const char *url, void *logctx, + unsigned int offset, unsigned int max_probe_size); + +/** + * Like av_probe_input_buffer2() but returns 0 on success + */ +int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt, + const char *url, void *logctx, + unsigned int offset, unsigned int max_probe_size); + +/** + * Open an input stream and read the header. The codecs are not opened. + * The stream must be closed with avformat_close_input(). + * + * @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context). + * May be a pointer to NULL, in which case an AVFormatContext is allocated by this + * function and written into ps. + * Note that a user-supplied AVFormatContext will be freed on failure. + * @param url URL of the stream to open. + * @param fmt If non-NULL, this parameter forces a specific input format. + * Otherwise the format is autodetected. + * @param options A dictionary filled with AVFormatContext and demuxer-private options. + * On return this parameter will be destroyed and replaced with a dict containing + * options that were not found. 
May be NULL. + * + * @return 0 on success, a negative AVERROR on failure. + * + * @note If you want to use custom IO, preallocate the format context and set its pb field. + */ +int avformat_open_input(AVFormatContext **ps, const char *url, AVInputFormat *fmt, AVDictionary **options); + +int avforamt_open_get_last_event_flags(void); +void avforamt_open_set_last_event_flags(int flags); + +attribute_deprecated +int av_demuxer_open(AVFormatContext *ic); + +/** + * Read packets of a media file to get stream information. This + * is useful for file formats with no headers such as MPEG. This + * function also computes the real framerate in case of MPEG-2 repeat + * frame mode. + * The logical file position is not changed by this function; + * examined packets may be buffered for later processing. + * + * @param ic media file handle + * @param options If non-NULL, an ic.nb_streams long array of pointers to + * dictionaries, where i-th member contains options for + * codec corresponding to i-th stream. + * On return each dictionary will be filled with options that were not found. + * @return >=0 if OK, AVERROR_xxx on error + * + * @note this function isn't guaranteed to open all the codecs, so + * options being non-empty at return is a perfectly normal behavior. + * + * @todo Let the user decide somehow what information is needed so that + * we do not waste time getting stuff the user does not need. + */ +int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options); + +/** + * Find the programs which belong to a given stream. + * + * @param ic media file handle + * @param last the last found program, the search will start after this + * program, or from the beginning if it is NULL + * @param s stream index + * @return the next program which belongs to s, NULL if no program is found or + * the last program is not among the programs of ic. 
+ */ +AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s); + +void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx); + +/** + * Find the "best" stream in the file. + * The best stream is determined according to various heuristics as the most + * likely to be what the user expects. + * If the decoder parameter is non-NULL, av_find_best_stream will find the + * default decoder for the stream's codec; streams for which no decoder can + * be found are ignored. + * + * @param ic media file handle + * @param type stream type: video, audio, subtitles, etc. + * @param wanted_stream_nb user-requested stream number, + * or -1 for automatic selection + * @param related_stream try to find a stream related (eg. in the same + * program) to this one, or -1 if none + * @param decoder_ret if non-NULL, returns the decoder for the + * selected stream + * @param flags flags; none are currently defined + * @return the non-negative stream number in case of success, + * AVERROR_STREAM_NOT_FOUND if no stream with the requested type + * could be found, + * AVERROR_DECODER_NOT_FOUND if streams were found but no decoder + * @note If av_find_best_stream returns successfully and decoder_ret is not + * NULL, then *decoder_ret is guaranteed to be set to a valid AVCodec. + */ +int av_find_best_stream(AVFormatContext *ic, + enum AVMediaType type, + int wanted_stream_nb, + int related_stream, + AVCodec **decoder_ret, + int flags); + +/** + * Return the next frame of a stream. + * This function returns what is stored in the file, and does not validate + * that what is there are valid frames for the decoder. It will split what is + * stored in the file into frames and return one for each call. It will not + * omit invalid data between valid frames so as to give the decoder the maximum + * information possible for decoding. 
+ * + * If pkt->buf is NULL, then the packet is valid until the next + * av_read_frame() or until avformat_close_input(). Otherwise the packet + * is valid indefinitely. In both cases the packet must be freed with + * av_packet_unref when it is no longer needed. For video, the packet contains + * exactly one frame. For audio, it contains an integer number of frames if each + * frame has a known fixed size (e.g. PCM or ADPCM data). If the audio frames + * have a variable size (e.g. MPEG audio), then it contains one frame. + * + * pkt->pts, pkt->dts and pkt->duration are always set to correct + * values in AVStream.time_base units (and guessed if the format cannot + * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format + * has B-frames, so it is better to rely on pkt->dts if you do not + * decompress the payload. + * + * @return 0 if OK, < 0 on error or end of file + */ +int av_read_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Seek to the keyframe at timestamp. + * 'timestamp' in 'stream_index'. + * + * @param s media file handle + * @param stream_index If stream_index is (-1), a default + * stream is selected, and timestamp is automatically converted + * from AV_TIME_BASE units to the stream specific time_base. + * @param timestamp Timestamp in AVStream.time_base units + * or, if no stream is specified, in AV_TIME_BASE units. + * @param flags flags which select direction and seeking mode + * @return >= 0 on success + */ +int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, + int flags); + +/** + * Seek to timestamp ts. + * Seeking will be done so that the point from which all active streams + * can be presented successfully will be closest to ts and within min/max_ts. + * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. + * + * If flags contain AVSEEK_FLAG_BYTE, then all timestamps are in bytes and + * are the file position (this may not be supported by all demuxers). 
+ * If flags contain AVSEEK_FLAG_FRAME, then all timestamps are in frames + * in the stream with stream_index (this may not be supported by all demuxers). + * Otherwise all timestamps are in units of the stream selected by stream_index + * or if stream_index is -1, in AV_TIME_BASE units. + * If flags contain AVSEEK_FLAG_ANY, then non-keyframes are treated as + * keyframes (this may not be supported by all demuxers). + * If flags contain AVSEEK_FLAG_BACKWARD, it is ignored. + * + * @param s media file handle + * @param stream_index index of the stream which is used as time base reference + * @param min_ts smallest acceptable timestamp + * @param ts target timestamp + * @param max_ts largest acceptable timestamp + * @param flags flags + * @return >=0 on success, error code otherwise + * + * @note This is part of the new seek API which is still under construction. + * Thus do not use this yet. It may change at any time, do not expect + * ABI compatibility yet! + */ +int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); + +/** + * Discard all internally buffered data. This can be useful when dealing with + * discontinuities in the byte stream. Generally works only with formats that + * can resync. This includes headerless formats like MPEG-TS/TS but should also + * work with NUT, Ogg and in a limited way AVI for example. + * + * The set of streams, the detected duration, stream parameters and codecs do + * not change when calling this function. If you want a complete reset, it's + * better to open a new AVFormatContext. + * + * This does not flush the AVIOContext (s->pb). If necessary, call + * avio_flush(s->pb) before calling this function. 
+ *
+ * @param s media file handle
+ * @return >=0 on success, error code otherwise
+ */
+int avformat_flush(AVFormatContext *s);
+
+/**
+ * @param s media file handle
+ * @param stream_index
+ * @param timestamp current playing timestamp
+ * @param bufpos the latest discard AVPacket pos
+ * @return 0 on success
+ */
+int avformat_sync(AVFormatContext *s, int stream_index, int64_t timestamp, int64_t *bufpos);
+
+/**
+ * Start playing a network-based stream (e.g. RTSP stream) at the
+ * current position.
+ */
+int av_read_play(AVFormatContext *s);
+
+/**
+ * Pause a network-based stream (e.g. RTSP stream).
+ *
+ * Use av_read_play() to resume it.
+ */
+int av_read_pause(AVFormatContext *s);
+
+/**
+ * Close an opened input AVFormatContext. Free it and all its contents
+ * and set *s to NULL.
+ */
+void avformat_close_input(AVFormatContext **s);
+/**
+ * @}
+ */
+
+#define AVSEEK_FLAG_BACKWARD 1 ///< seek backward
+#define AVSEEK_FLAG_BYTE 2 ///< seeking based on position in bytes
+#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non-keyframes
+#define AVSEEK_FLAG_FRAME 8 ///< seeking based on frame number
+
+/**
+ * @addtogroup lavf_encoding
+ * @{
+ */
+
+#define AVSTREAM_INIT_IN_WRITE_HEADER 0 ///< stream parameters initialized in avformat_write_header
+#define AVSTREAM_INIT_IN_INIT_OUTPUT 1 ///< stream parameters initialized in avformat_init_output
+
+/**
+ * Allocate the stream private data and write the stream header to
+ * an output media file.
+ *
+ * @param s Media file handle, must be allocated with avformat_alloc_context().
+ * Its oformat field must be set to the desired output format;
+ * Its pb field must be set to an already opened AVIOContext.
+ * @param options An AVDictionary filled with AVFormatContext and muxer-private options.
+ * On return this parameter will be destroyed and replaced with a dict containing
+ * options that were not found. May be NULL.
+ * + * @return AVSTREAM_INIT_IN_WRITE_HEADER on success if the codec had not already been fully initialized in avformat_init, + * AVSTREAM_INIT_IN_INIT_OUTPUT on success if the codec had already been fully initialized in avformat_init, + * negative AVERROR on failure. + * + * @see av_opt_find, av_dict_set, avio_open, av_oformat_next, avformat_init_output. + */ +av_warn_unused_result +int avformat_write_header(AVFormatContext *s, AVDictionary **options); + +/** + * Allocate the stream private data and initialize the codec, but do not write the header. + * May optionally be used before avformat_write_header to initialize stream parameters + * before actually writing the header. + * If using this function, do not pass the same options to avformat_write_header. + * + * @param s Media file handle, must be allocated with avformat_alloc_context(). + * Its oformat field must be set to the desired output format; + * Its pb field must be set to an already opened AVIOContext. + * @param options An AVDictionary filled with AVFormatContext and muxer-private options. + * On return this parameter will be destroyed and replaced with a dict containing + * options that were not found. May be NULL. + * + * @return AVSTREAM_INIT_IN_WRITE_HEADER on success if the codec requires avformat_write_header to fully initialize, + * AVSTREAM_INIT_IN_INIT_OUTPUT on success if the codec has been fully initialized, + * negative AVERROR on failure. + * + * @see av_opt_find, av_dict_set, avio_open, av_oformat_next, avformat_write_header. + */ +av_warn_unused_result +int avformat_init_output(AVFormatContext *s, AVDictionary **options); + +/** + * Write a packet to an output media file. + * + * This function passes the packet directly to the muxer, without any buffering + * or reordering. The caller is responsible for correctly interleaving the + * packets if the format requires it. 
Callers that want libavformat to handle + * the interleaving should call av_interleaved_write_frame() instead of this + * function. + * + * @param s media file handle + * @param pkt The packet containing the data to be written. Note that unlike + * av_interleaved_write_frame(), this function does not take + * ownership of the packet passed to it (though some muxers may make + * an internal reference to the input packet). + *
+ * This parameter can be NULL (at any time, not just at the end), in + * order to immediately flush data buffered within the muxer, for + * muxers that buffer up data internally before writing it to the + * output. + *
+ * Packet's @ref AVPacket.stream_index "stream_index" field must be + * set to the index of the corresponding stream in @ref + * AVFormatContext.streams "s->streams". + *
+ * The timestamps (@ref AVPacket.pts "pts", @ref AVPacket.dts "dts") + * must be set to correct values in the stream's timebase (unless the + * output format is flagged with the AVFMT_NOTIMESTAMPS flag, then + * they can be set to AV_NOPTS_VALUE). + * The dts for subsequent packets passed to this function must be strictly + * increasing when compared in their respective timebases (unless the + * output format is flagged with the AVFMT_TS_NONSTRICT, then they + * merely have to be nondecreasing). @ref AVPacket.duration + * "duration") should also be set if known. + * @return < 0 on error, = 0 if OK, 1 if flushed and there is no more data to flush + * + * @see av_interleaved_write_frame() + */ +int av_write_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Write a packet to an output media file ensuring correct interleaving. + * + * This function will buffer the packets internally as needed to make sure the + * packets in the output file are properly interleaved in the order of + * increasing dts. Callers doing their own interleaving should call + * av_write_frame() instead of this function. + * + * Using this function instead of av_write_frame() can give muxers advance + * knowledge of future packets, improving e.g. the behaviour of the mp4 + * muxer for VFR content in fragmenting mode. + * + * @param s media file handle + * @param pkt The packet containing the data to be written. + *
+ * If the packet is reference-counted, this function will take + * ownership of this reference and unreference it later when it sees + * fit. + * The caller must not access the data through this reference after + * this function returns. If the packet is not reference-counted, + * libavformat will make a copy. + *
+ * This parameter can be NULL (at any time, not just at the end), to + * flush the interleaving queues. + *
+ * Packet's @ref AVPacket.stream_index "stream_index" field must be + * set to the index of the corresponding stream in @ref + * AVFormatContext.streams "s->streams". + *
+ * The timestamps (@ref AVPacket.pts "pts", @ref AVPacket.dts "dts") + * must be set to correct values in the stream's timebase (unless the + * output format is flagged with the AVFMT_NOTIMESTAMPS flag, then + * they can be set to AV_NOPTS_VALUE). + * The dts for subsequent packets in one stream must be strictly + * increasing (unless the output format is flagged with the + * AVFMT_TS_NONSTRICT, then they merely have to be nondecreasing). + * @ref AVPacket.duration "duration") should also be set if known. + * + * @return 0 on success, a negative AVERROR on error. Libavformat will always + * take care of freeing the packet, even if this function fails. + * + * @see av_write_frame(), AVFormatContext.max_interleave_delta + */ +int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Write an uncoded frame to an output media file. + * + * The frame must be correctly interleaved according to the container + * specification; if not, then av_interleaved_write_frame() must be used. + * + * See av_interleaved_write_frame() for details. + */ +int av_write_uncoded_frame(AVFormatContext *s, int stream_index, + AVFrame *frame); + +/** + * Write an uncoded frame to an output media file. + * + * If the muxer supports it, this function makes it possible to write an AVFrame + * structure directly, without encoding it into a packet. + * It is mostly useful for devices and similar special muxers that use raw + * video or PCM data and will not serialize it into a byte stream. + * + * To test whether it is possible to use it with a given muxer and stream, + * use av_write_uncoded_frame_query(). + * + * The caller gives up ownership of the frame and must not access it + * afterwards. + * + * @return >=0 for success, a negative code on error + */ +int av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index, + AVFrame *frame); + +/** + * Test whether a muxer supports uncoded frame. 
+ *
+ * @return >=0 if an uncoded frame can be written to that muxer and stream,
+ * <0 if not
+ */
+int av_write_uncoded_frame_query(AVFormatContext *s, int stream_index);
+
+/**
+ * Write the stream trailer to an output media file and free the
+ * file private data.
+ *
+ * May only be called after a successful call to avformat_write_header.
+ *
+ * @param s media file handle
+ * @return 0 if OK, AVERROR_xxx on error
+ */
+int av_write_trailer(AVFormatContext *s);
+
+/**
+ * Return the output format in the list of registered output formats
+ * which best matches the provided parameters, or return NULL if
+ * there is no match.
+ *
+ * @param short_name if non-NULL checks if short_name matches with the
+ * names of the registered formats
+ * @param filename if non-NULL checks if filename terminates with the
+ * extensions of the registered formats
+ * @param mime_type if non-NULL checks if mime_type matches with the
+ * MIME type of the registered formats
+ */
+AVOutputFormat *av_guess_format(const char *short_name,
+ const char *filename,
+ const char *mime_type);
+
+/**
+ * Guess the codec ID based upon muxer and filename.
+ */
+enum AVCodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
+ const char *filename, const char *mime_type,
+ enum AVMediaType type);
+
+/**
+ * Get timing information for the data currently output.
+ * The exact meaning of "currently output" depends on the format.
+ * It is mostly relevant for devices that have an internal buffer and/or
+ * work in real time.
+ * @param s media file handle
+ * @param stream stream in the media file
+ * @param[out] dts DTS of the last packet output for the stream, in stream
+ * time_base units
+ * @param[out] wall absolute time when that packet was output,
+ * in microseconds
+ * @return 0 if OK, AVERROR(ENOSYS) if the format does not support it
+ * Note: some formats or devices may not allow to measure dts and wall
+ * atomically.
+ */ +int av_get_output_timestamp(struct AVFormatContext *s, int stream, + int64_t *dts, int64_t *wall); + + +/** + * @} + */ + + +/** + * @defgroup lavf_misc Utility functions + * @ingroup libavf + * @{ + * + * Miscellaneous utility functions related to both muxing and demuxing + * (or neither). + */ + +/** + * Send a nice hexadecimal dump of a buffer to the specified file stream. + * + * @param f The file stream pointer where the dump should be sent to. + * @param buf buffer + * @param size buffer size + * + * @see av_hex_dump_log, av_pkt_dump2, av_pkt_dump_log2 + */ +void av_hex_dump(FILE *f, const uint8_t *buf, int size); + +/** + * Send a nice hexadecimal dump of a buffer to the log. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message, lower values signifying + * higher importance. + * @param buf buffer + * @param size buffer size + * + * @see av_hex_dump, av_pkt_dump2, av_pkt_dump_log2 + */ +void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size); + +/** + * Send a nice dump of a packet to the specified file stream. + * + * @param f The file stream pointer where the dump should be sent to. + * @param pkt packet to dump + * @param dump_payload True if the payload must be displayed, too. + * @param st AVStream that the packet belongs to + */ +void av_pkt_dump2(FILE *f, const AVPacket *pkt, int dump_payload, const AVStream *st); + + +/** + * Send a nice dump of a packet to the log. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message, lower values signifying + * higher importance. + * @param pkt packet to dump + * @param dump_payload True if the payload must be displayed, too. 
+ * @param st AVStream that the packet belongs to + */ +void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, + const AVStream *st); + +/** + * Get the AVCodecID for the given codec tag tag. + * If no codec id is found returns AV_CODEC_ID_NONE. + * + * @param tags list of supported codec_id-codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param tag codec tag to match to a codec ID + */ +enum AVCodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int tag); + +/** + * Get the codec tag for the given codec id id. + * If no codec tag is found returns 0. + * + * @param tags list of supported codec_id-codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param id codec ID to match to a codec tag + */ +unsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum AVCodecID id); + +/** + * Get the codec tag for the given codec id. + * + * @param tags list of supported codec_id - codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param id codec id that should be searched for in the list + * @param tag A pointer to the found tag + * @return 0 if id was not found in tags, > 0 if it was found + */ +int av_codec_get_tag2(const struct AVCodecTag * const *tags, enum AVCodecID id, + unsigned int *tag); + +int av_find_default_stream_index(AVFormatContext *s); + +/** + * Get the index for a specific timestamp. 
+ * + * @param st stream that the timestamp belongs to + * @param timestamp timestamp to retrieve the index for + * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond + * to the timestamp which is <= the requested one, if backward + * is 0, then it will be >= + * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise + * @return < 0 if no such timestamp could be found + */ +int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags); + +/** + * Add an index entry into a sorted list. Update the entry if the list + * already contains it. + * + * @param timestamp timestamp in the time base of the given stream + */ +int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp, + int size, int distance, int flags); + + +/** + * Split a URL string into components. + * + * The pointers to buffers for storing individual components may be null, + * in order to ignore that component. Buffers for components not found are + * set to empty strings. If the port is not found, it is set to a negative + * value. + * + * @param proto the buffer for the protocol + * @param proto_size the size of the proto buffer + * @param authorization the buffer for the authorization + * @param authorization_size the size of the authorization buffer + * @param hostname the buffer for the host name + * @param hostname_size the size of the hostname buffer + * @param port_ptr a pointer to store the port number in + * @param path the buffer for the path + * @param path_size the size of the path buffer + * @param url the URL to split + */ +void av_url_split(char *proto, int proto_size, + char *authorization, int authorization_size, + char *hostname, int hostname_size, + int *port_ptr, + char *path, int path_size, + const char *url); + + +/** + * Print detailed information about the input or output format, such as + * duration, bitrate, streams, container, programs, metadata, side data, + * codec and time base. 
+ * + * @param ic the context to analyze + * @param index index of the stream to dump information about + * @param url the URL to print, such as source or destination file + * @param is_output Select whether the specified context is an input(0) or output(1) + */ +void av_dump_format(AVFormatContext *ic, + int index, + const char *url, + int is_output); + + +#define AV_FRAME_FILENAME_FLAGS_MULTIPLE 1 ///< Allow multiple %d + +/** + * Return in 'buf' the path with '%d' replaced by a number. + * + * Also handles the '%0nd' format where 'n' is the total number + * of digits and '%%'. + * + * @param buf destination buffer + * @param buf_size destination buffer size + * @param path numbered sequence string + * @param number frame number + * @param flags AV_FRAME_FILENAME_FLAGS_* + * @return 0 if OK, -1 on format error + */ +int av_get_frame_filename2(char *buf, int buf_size, + const char *path, int number, int flags); + +int av_get_frame_filename(char *buf, int buf_size, + const char *path, int number); + +/** + * Check whether filename actually is a numbered sequence generator. + * + * @param filename possible numbered sequence string + * @return 1 if a valid numbered sequence string, 0 otherwise + */ +int av_filename_number_test(const char *filename); + +/** + * Generate an SDP for an RTP session. + * + * Note, this overwrites the id values of AVStreams in the muxer contexts + * for getting unique dynamic payload types. + * + * @param ac array of AVFormatContexts describing the RTP streams. If the + * array is composed by only one context, such context can contain + * multiple AVStreams (one AVStream per RTP stream). Otherwise, + * all the contexts in the array (an AVCodecContext per RTP stream) + * must contain only one AVStream. 
+ * @param n_files number of AVCodecContexts contained in ac + * @param buf buffer where the SDP will be stored (must be allocated by + * the caller) + * @param size the size of the buffer + * @return 0 if OK, AVERROR_xxx on error + */ +int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size); + +/** + * Return a positive value if the given filename has one of the given + * extensions, 0 otherwise. + * + * @param filename file name to check against the given extensions + * @param extensions a comma-separated list of filename extensions + */ +int av_match_ext(const char *filename, const char *extensions); + +/** + * Test if the given container can store a codec. + * + * @param ofmt container to check for compatibility + * @param codec_id codec to potentially store in container + * @param std_compliance standards compliance level, one of FF_COMPLIANCE_* + * + * @return 1 if codec with ID codec_id can be stored in ofmt, 0 if it cannot. + * A negative number if this information is not available. + */ +int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id, + int std_compliance); + +/** + * @defgroup riff_fourcc RIFF FourCCs + * @{ + * Get the tables mapping RIFF FourCCs to libavcodec AVCodecIDs. The tables are + * meant to be passed to av_codec_get_id()/av_codec_get_tag() as in the + * following code: + * @code + * uint32_t tag = MKTAG('H', '2', '6', '4'); + * const struct AVCodecTag *table[] = { avformat_get_riff_video_tags(), 0 }; + * enum AVCodecID id = av_codec_get_id(table, tag); + * @endcode + */ +/** + * @return the table mapping RIFF FourCCs for video to libavcodec AVCodecID. + */ +const struct AVCodecTag *avformat_get_riff_video_tags(void); +/** + * @return the table mapping RIFF FourCCs for audio to AVCodecID. + */ +const struct AVCodecTag *avformat_get_riff_audio_tags(void); +/** + * @return the table mapping MOV FourCCs for video to libavcodec AVCodecID. 
+ */ +const struct AVCodecTag *avformat_get_mov_video_tags(void); +/** + * @return the table mapping MOV FourCCs for audio to AVCodecID. + */ +const struct AVCodecTag *avformat_get_mov_audio_tags(void); + +/** + * @} + */ + +/** + * Guess the sample aspect ratio of a frame, based on both the stream and the + * frame aspect ratio. + * + * Since the frame aspect ratio is set by the codec but the stream aspect ratio + * is set by the demuxer, these two may not be equal. This function tries to + * return the value that you should use if you would like to display the frame. + * + * Basic logic is to use the stream aspect ratio if it is set to something sane + * otherwise use the frame aspect ratio. This way a container setting, which is + * usually easy to modify can override the coded value in the frames. + * + * @param format the format context which the stream is part of + * @param stream the stream which the frame is part of + * @param frame the frame with the aspect ratio to be determined + * @return the guessed (valid) sample_aspect_ratio, 0/1 if no idea + */ +AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame); + +/** + * Guess the frame rate, based on both the container and codec information. + * + * @param ctx the format context which the stream is part of + * @param stream the stream which the frame is part of + * @param frame the frame for which the frame rate should be determined, may be NULL + * @return the guessed (valid) frame rate, 0/1 if no idea + */ +AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame); + +/** + * Check if the stream st contained in s is matched by the stream specifier + * spec. + * + * See the "stream specifiers" chapter in the documentation for the syntax + * of spec. 
+ * + * @return >0 if st is matched by spec; + * 0 if st is not matched by spec; + * AVERROR code if spec is invalid + * + * @note A stream specifier can match several streams in the format. + */ +int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, + const char *spec); + +int avformat_queue_attached_pictures(AVFormatContext *s); + +/** + * Apply a list of bitstream filters to a packet. + * + * @param codec AVCodecContext, usually from an AVStream + * @param pkt the packet to apply filters to. If, on success, the returned + * packet has size == 0 and side_data_elems == 0, it indicates that + * the packet should be dropped + * @param bsfc a NULL-terminated list of filters to apply + * @return >=0 on success; + * AVERROR code on failure + */ +#if FF_API_OLD_BSF +attribute_deprecated +int av_apply_bitstream_filters(AVCodecContext *codec, AVPacket *pkt, + AVBitStreamFilterContext *bsfc); +#endif + +enum AVTimebaseSource { + AVFMT_TBCF_AUTO = -1, + AVFMT_TBCF_DECODER, + AVFMT_TBCF_DEMUXER, +#if FF_API_R_FRAME_RATE + AVFMT_TBCF_R_FRAMERATE, +#endif +}; + +/** + * Transfer internal timing information from one stream to another. + * + * This function is useful when doing stream copy. + * + * @param ofmt target output format for ost + * @param ost output stream which needs timings copy and adjustments + * @param ist reference input stream to copy timings from + * @param copy_tb define from where the stream codec timebase needs to be imported + */ +int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, + AVStream *ost, const AVStream *ist, + enum AVTimebaseSource copy_tb); + +/** + * Get the internal codec timebase from a stream. 
+ *
+ * @param st input stream to extract the timebase from
+ */
+AVRational av_stream_get_codec_timebase(const AVStream *st);
+
+/**
+ * @}
+ */
+
+#endif /* AVFORMAT_AVFORMAT_H */
diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/avio.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/avio.h
new file mode 100644
index 0000000..6f4ed84
--- /dev/null
+++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/avio.h
@@ -0,0 +1,827 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef AVFORMAT_AVIO_H
+#define AVFORMAT_AVIO_H
+
+/**
+ * @file
+ * @ingroup lavf_io
+ * Buffered I/O operations
+ */
+
+#include <stdint.h>
+
+#include "libavutil/common.h"
+#include "libavutil/dict.h"
+#include "libavutil/log.h"
+
+#include "libavformat/version.h"
+
+/**
+ * Seeking works like for a local file.
+ */
+#define AVIO_SEEKABLE_NORMAL (1 << 0)
+
+/**
+ * Seeking by timestamp with avio_seek_time() is possible.
+ */
+#define AVIO_SEEKABLE_TIME (1 << 1)
+
+/**
+ * Callback for checking whether to abort blocking functions.
+ * AVERROR_EXIT is returned in this case by the interrupted
+ * function. 
During blocking operations, callback is called with + * opaque as parameter. If the callback returns 1, the + * blocking operation will be aborted. + * + * No members can be added to this struct without a major bump, if + * new elements have been added after this struct in AVFormatContext + * or AVIOContext. + */ +typedef struct AVIOInterruptCB { + int (*callback)(void*); + void *opaque; +} AVIOInterruptCB; + +/** + * Directory entry types. + */ +enum AVIODirEntryType { + AVIO_ENTRY_UNKNOWN, + AVIO_ENTRY_BLOCK_DEVICE, + AVIO_ENTRY_CHARACTER_DEVICE, + AVIO_ENTRY_DIRECTORY, + AVIO_ENTRY_NAMED_PIPE, + AVIO_ENTRY_SYMBOLIC_LINK, + AVIO_ENTRY_SOCKET, + AVIO_ENTRY_FILE, + AVIO_ENTRY_SERVER, + AVIO_ENTRY_SHARE, + AVIO_ENTRY_WORKGROUP, +}; + +/** + * Describes single entry of the directory. + * + * Only name and type fields are guaranteed be set. + * Rest of fields are protocol or/and platform dependent and might be unknown. + */ +typedef struct AVIODirEntry { + char *name; /**< Filename */ + int type; /**< Type of the entry */ + int utf8; /**< Set to 1 when name is encoded with UTF-8, 0 otherwise. + Name can be encoded with UTF-8 even though 0 is set. */ + int64_t size; /**< File size in bytes, -1 if unknown. */ + int64_t modification_timestamp; /**< Time of last modification in microseconds since unix + epoch, -1 if unknown. */ + int64_t access_timestamp; /**< Time of last access in microseconds since unix epoch, + -1 if unknown. */ + int64_t status_change_timestamp; /**< Time of last status change in microseconds since unix + epoch, -1 if unknown. */ + int64_t user_id; /**< User ID of owner, -1 if unknown. */ + int64_t group_id; /**< Group ID of owner, -1 if unknown. */ + int64_t filemode; /**< Unix file mode, -1 if unknown. */ +} AVIODirEntry; + +typedef struct AVIODirContext { + struct URLContext *url_context; +} AVIODirContext; + +/** + * Different data types that can be returned via the AVIO + * write_data_type callback. 
+ */ +enum AVIODataMarkerType { + /** + * Header data; this needs to be present for the stream to be decodeable. + */ + AVIO_DATA_MARKER_HEADER, + /** + * A point in the output bytestream where a decoder can start decoding + * (i.e. a keyframe). A demuxer/decoder given the data flagged with + * AVIO_DATA_MARKER_HEADER, followed by any AVIO_DATA_MARKER_SYNC_POINT, + * should give decodeable results. + */ + AVIO_DATA_MARKER_SYNC_POINT, + /** + * A point in the output bytestream where a demuxer can start parsing + * (for non self synchronizing bytestream formats). That is, any + * non-keyframe packet start point. + */ + AVIO_DATA_MARKER_BOUNDARY_POINT, + /** + * This is any, unlabelled data. It can either be a muxer not marking + * any positions at all, it can be an actual boundary/sync point + * that the muxer chooses not to mark, or a later part of a packet/fragment + * that is cut into multiple write callbacks due to limited IO buffer size. + */ + AVIO_DATA_MARKER_UNKNOWN, + /** + * Trailer data, which doesn't contain actual content, but only for + * finalizing the output file. + */ + AVIO_DATA_MARKER_TRAILER +}; + +/** + * Bytestream IO Context. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVIOContext) must not be used outside libav*. + * + * @note None of the function pointers in AVIOContext should be called + * directly, they should only be set by the client application + * when implementing custom I/O. Normally these are set to the + * function pointers specified in avio_alloc_context() + */ +typedef struct AVIOContext { + /** + * A class for private options. + * + * If this AVIOContext is created by avio_open2(), av_class is set and + * passes the options down to protocols. + * + * If this AVIOContext is manually allocated, then av_class may be set by + * the caller. 
+ * + * warning -- this field can be NULL, be sure to not pass this AVIOContext + * to any av_opt_* functions in that case. + */ + const AVClass *av_class; + + /* + * The following shows the relationship between buffer, buf_ptr, buf_end, buf_size, + * and pos, when reading and when writing (since AVIOContext is used for both): + * + ********************************************************************************** + * READING + ********************************************************************************** + * + * | buffer_size | + * |---------------------------------------| + * | | + * + * buffer buf_ptr buf_end + * +---------------+-----------------------+ + * |/ / / / / / / /|/ / / / / / /| | + * read buffer: |/ / consumed / | to be read /| | + * |/ / / / / / / /|/ / / / / / /| | + * +---------------+-----------------------+ + * + * pos + * +-------------------------------------------+-----------------+ + * input file: | | | + * +-------------------------------------------+-----------------+ + * + * + ********************************************************************************** + * WRITING + ********************************************************************************** + * + * | buffer_size | + * |-------------------------------| + * | | + * + * buffer buf_ptr buf_end + * +-------------------+-----------+ + * |/ / / / / / / / / /| | + * write buffer: | / to be flushed / | | + * |/ / / / / / / / / /| | + * +-------------------+-----------+ + * + * pos + * +--------------------------+-----------------------------------+ + * output file: | | | + * +--------------------------+-----------------------------------+ + * + */ + unsigned char *buffer; /**< Start of the buffer. */ + int buffer_size; /**< Maximum buffer size */ + unsigned char *buf_ptr; /**< Current position in the buffer */ + unsigned char *buf_end; /**< End of the data, may be less than + buffer+buffer_size if the read function returned + less data than requested, e.g. 
for streams where + no more data has been received yet. */ + void *opaque; /**< A private pointer, passed to the read/write/seek/... + functions. */ + int (*read_packet)(void *opaque, uint8_t *buf, int buf_size); + int (*write_packet)(void *opaque, uint8_t *buf, int buf_size); + int64_t (*seek)(void *opaque, int64_t offset, int whence); + int64_t pos; /**< position in the file of the current buffer */ + int must_flush; /**< true if the next seek should flush */ + int eof_reached; /**< true if eof reached */ + int write_flag; /**< true if open for writing */ + int max_packet_size; + unsigned long checksum; + unsigned char *checksum_ptr; + unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size); + int error; /**< contains the error code or 0 if no error happened */ + /** + * Pause or resume playback for network streaming protocols - e.g. MMS. + */ + int (*read_pause)(void *opaque, int pause); + /** + * Seek to a given timestamp in stream with the specified stream_index. + * Needed for some network streaming protocols which don't support seeking + * to byte position. + */ + int64_t (*read_seek)(void *opaque, int stream_index, + int64_t timestamp, int flags); + /** + * A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable. + */ + int seekable; + + /** + * max filesize, used to limit allocations + * This field is internal to libavformat and access from outside is not allowed. + */ + int64_t maxsize; + + /** + * avio_read and avio_write should if possible be satisfied directly + * instead of going through a buffer, and avio_seek will always + * call the underlying seek function directly. + */ + int direct; + + /** + * Bytes read statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int64_t bytes_read; + + /** + * seek statistic + * This field is internal to libavformat and access from outside is not allowed. 
+ */ + int seek_count; + + /** + * writeout statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int writeout_count; + + /** + * Original buffer size + * used internally after probing and ensure seekback to reset the buffer size + * This field is internal to libavformat and access from outside is not allowed. + */ + int orig_buffer_size; + + /** + * Threshold to favor readahead over seek. + * This is current internal only, do not use from outside. + */ + int short_seek_threshold; + + /** + * ',' separated list of allowed protocols. + */ + const char *protocol_whitelist; + + /** + * ',' separated list of disallowed protocols. + */ + const char *protocol_blacklist; + + /** + * A callback that is used instead of write_packet. + */ + int (*write_data_type)(void *opaque, uint8_t *buf, int buf_size, + enum AVIODataMarkerType type, int64_t time); + /** + * If set, don't call write_data_type separately for AVIO_DATA_MARKER_BOUNDARY_POINT, + * but ignore them and treat them as AVIO_DATA_MARKER_UNKNOWN (to avoid needlessly + * small chunks of data returned from the callback). + */ + int ignore_boundary_point; + + /** + * Internal, not meant to be used from outside of AVIOContext. + */ + enum AVIODataMarkerType current_type; + int64_t last_time; + + /** + * A callback that is used instead of short_seek_threshold. + * This is current internal only, do not use from outside. + */ + int (*short_seek_get)(void *opaque); +} AVIOContext; + +/** + * Return the name of the protocol that will handle the passed URL. + * + * NULL is returned if no protocol could be found for the given URL. + * + * @return Name of the protocol or NULL. + */ +const char *avio_find_protocol_name(const char *url); + +/** + * Return AVIO_FLAG_* access flags corresponding to the access permissions + * of the resource in url, or a negative value corresponding to an + * AVERROR code in case of failure. The returned access flags are + * masked by the value in flags. 
+ * + * @note This function is intrinsically unsafe, in the sense that the + * checked resource may change its existence or permission status from + * one call to another. Thus you should not trust the returned value, + * unless you are sure that no other processes are accessing the + * checked resource. + */ +int avio_check(const char *url, int flags); + +/** + * Move or rename a resource. + * + * @note url_src and url_dst should share the same protocol and authority. + * + * @param url_src url to resource to be moved + * @param url_dst new url to resource if the operation succeeded + * @return >=0 on success or negative on error. + */ +int avpriv_io_move(const char *url_src, const char *url_dst); + +/** + * Delete a resource. + * + * @param url resource to be deleted. + * @return >=0 on success or negative on error. + */ +int avpriv_io_delete(const char *url); + +/** + * Open directory for reading. + * + * @param s directory read context. Pointer to a NULL pointer must be passed. + * @param url directory to be listed. + * @param options A dictionary filled with protocol-private options. On return + * this parameter will be destroyed and replaced with a dictionary + * containing options that were not found. May be NULL. + * @return >=0 on success or negative on error. + */ +int avio_open_dir(AVIODirContext **s, const char *url, AVDictionary **options); + +/** + * Get next directory entry. + * + * Returned entry must be freed with avio_free_directory_entry(). In particular + * it may outlive AVIODirContext. + * + * @param s directory read context. + * @param[out] next next entry or NULL when no more entries. + * @return >=0 on success or negative on error. End of list is not considered an + * error. + */ +int avio_read_dir(AVIODirContext *s, AVIODirEntry **next); + +/** + * Close directory. + * + * @note Entries created using avio_read_dir() are not deleted and must be + * freeded with avio_free_directory_entry(). + * + * @param s directory read context. 
+ * @return >=0 on success or negative on error. + */ +int avio_close_dir(AVIODirContext **s); + +/** + * Free entry allocated by avio_read_dir(). + * + * @param entry entry to be freed. + */ +void avio_free_directory_entry(AVIODirEntry **entry); + +/** + * Allocate and initialize an AVIOContext for buffered I/O. It must be later + * freed with av_free(). + * + * @param buffer Memory block for input/output operations via AVIOContext. + * The buffer must be allocated with av_malloc() and friends. + * It may be freed and replaced with a new buffer by libavformat. + * AVIOContext.buffer holds the buffer currently in use, + * which must be later freed with av_free(). + * @param buffer_size The buffer size is very important for performance. + * For protocols with fixed blocksize it should be set to this blocksize. + * For others a typical size is a cache page, e.g. 4kb. + * @param write_flag Set to 1 if the buffer should be writable, 0 otherwise. + * @param opaque An opaque pointer to user-specific data. + * @param read_packet A function for refilling the buffer, may be NULL. + * @param write_packet A function for writing the buffer contents, may be NULL. + * The function may not change the input buffers content. + * @param seek A function for seeking to specified byte position, may be NULL. + * + * @return Allocated AVIOContext or NULL on failure. 
+ */ +AVIOContext *avio_alloc_context( + unsigned char *buffer, + int buffer_size, + int write_flag, + void *opaque, + int (*read_packet)(void *opaque, uint8_t *buf, int buf_size), + int (*write_packet)(void *opaque, uint8_t *buf, int buf_size), + int64_t (*seek)(void *opaque, int64_t offset, int whence)); + +void avio_w8(AVIOContext *s, int b); +void avio_write(AVIOContext *s, const unsigned char *buf, int size); +void avio_wl64(AVIOContext *s, uint64_t val); +void avio_wb64(AVIOContext *s, uint64_t val); +void avio_wl32(AVIOContext *s, unsigned int val); +void avio_wb32(AVIOContext *s, unsigned int val); +void avio_wl24(AVIOContext *s, unsigned int val); +void avio_wb24(AVIOContext *s, unsigned int val); +void avio_wl16(AVIOContext *s, unsigned int val); +void avio_wb16(AVIOContext *s, unsigned int val); + +/** + * Write a NULL-terminated string. + * @return number of bytes written. + */ +int avio_put_str(AVIOContext *s, const char *str); + +/** + * Convert an UTF-8 string to UTF-16LE and write it. + * @param s the AVIOContext + * @param str NULL-terminated UTF-8 string + * + * @return number of bytes written. + */ +int avio_put_str16le(AVIOContext *s, const char *str); + +/** + * Convert an UTF-8 string to UTF-16BE and write it. + * @param s the AVIOContext + * @param str NULL-terminated UTF-8 string + * + * @return number of bytes written. + */ +int avio_put_str16be(AVIOContext *s, const char *str); + +/** + * Mark the written bytestream as a specific type. + * + * Zero-length ranges are omitted from the output. + * + * @param time the stream time the current bytestream pos corresponds to + * (in AV_TIME_BASE units), or AV_NOPTS_VALUE if unknown or not + * applicable + * @param type the kind of data written starting at the current pos + */ +void avio_write_marker(AVIOContext *s, int64_t time, enum AVIODataMarkerType type); + +/** + * ORing this as the "whence" parameter to a seek function causes it to + * return the filesize without seeking anywhere. 
Supporting this is optional. + * If it is not supported then the seek function will return <0. + */ +#define AVSEEK_SIZE 0x10000 + +/** + * Passing this flag as the "whence" parameter to a seek function causes it to + * seek by any means (like reopening and linear reading) or other normally unreasonable + * means that can be extremely slow. + * This may be ignored by the seek code. + */ +#define AVSEEK_FORCE 0x20000 + +/** + * fseek() equivalent for AVIOContext. + * @return new position or AVERROR. + */ +int64_t avio_seek(AVIOContext *s, int64_t offset, int whence); + +/** + * Skip given number of bytes forward + * @return new position or AVERROR. + */ +int64_t avio_skip(AVIOContext *s, int64_t offset); + +/** + * ftell() equivalent for AVIOContext. + * @return position or AVERROR. + */ +static av_always_inline int64_t avio_tell(AVIOContext *s) +{ + return avio_seek(s, 0, SEEK_CUR); +} + +/** + * Get the filesize. + * @return filesize or AVERROR + */ +int64_t avio_size(AVIOContext *s); + +/** + * feof() equivalent for AVIOContext. + * @return non zero if and only if end of file + */ +int avio_feof(AVIOContext *s); +#if FF_API_URL_FEOF +/** + * @deprecated use avio_feof() + */ +attribute_deprecated +int url_feof(AVIOContext *s); +#endif + +/** @warning Writes up to 4 KiB per call */ +int avio_printf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3); + +/** + * Force flushing of buffered data. + * + * For write streams, force the buffered data to be immediately written to the output, + * without to wait to fill the internal buffer. + * + * For read streams, discard all currently buffered data, and advance the + * reported file position to that of the underlying stream. This does not + * read new data, and does not perform any seeks. + */ +void avio_flush(AVIOContext *s); + +/** + * Read size bytes from AVIOContext into buf. 
+ * @return number of bytes read or AVERROR + */ +int avio_read(AVIOContext *s, unsigned char *buf, int size); + +/** + * @name Functions for reading from AVIOContext + * @{ + * + * @note return 0 if EOF, so you cannot use it if EOF handling is + * necessary + */ +int avio_r8 (AVIOContext *s); +unsigned int avio_rl16(AVIOContext *s); +unsigned int avio_rl24(AVIOContext *s); +unsigned int avio_rl32(AVIOContext *s); +uint64_t avio_rl64(AVIOContext *s); +unsigned int avio_rb16(AVIOContext *s); +unsigned int avio_rb24(AVIOContext *s); +unsigned int avio_rb32(AVIOContext *s); +uint64_t avio_rb64(AVIOContext *s); +/** + * @} + */ + +/** + * Read a string from pb into buf. The reading will terminate when either + * a NULL character was encountered, maxlen bytes have been read, or nothing + * more can be read from pb. The result is guaranteed to be NULL-terminated, it + * will be truncated if buf is too small. + * Note that the string is not interpreted or validated in any way, it + * might get truncated in the middle of a sequence for multi-byte encodings. + * + * @return number of bytes read (is always <= maxlen). + * If reading ends on EOF or error, the return value will be one more than + * bytes actually read. + */ +int avio_get_str(AVIOContext *pb, int maxlen, char *buf, int buflen); + +/** + * Read a UTF-16 string from pb and convert it to UTF-8. + * The reading will terminate when either a null or invalid character was + * encountered or maxlen bytes have been read. + * @return number of bytes read (is always <= maxlen) + */ +int avio_get_str16le(AVIOContext *pb, int maxlen, char *buf, int buflen); +int avio_get_str16be(AVIOContext *pb, int maxlen, char *buf, int buflen); + + +/** + * @name URL open modes + * The flags argument to avio_open must be one of the following + * constants, optionally ORed with other flags. 
+ * @{ + */ +#define AVIO_FLAG_READ 1 /**< read-only */ +#define AVIO_FLAG_WRITE 2 /**< write-only */ +#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE) /**< read-write pseudo flag */ +/** + * @} + */ + +/** + * Use non-blocking mode. + * If this flag is set, operations on the context will return + * AVERROR(EAGAIN) if they can not be performed immediately. + * If this flag is not set, operations on the context will never return + * AVERROR(EAGAIN). + * Note that this flag does not affect the opening/connecting of the + * context. Connecting a protocol will always block if necessary (e.g. on + * network protocols) but never hang (e.g. on busy devices). + * Warning: non-blocking protocols is work-in-progress; this flag may be + * silently ignored. + */ +#define AVIO_FLAG_NONBLOCK 8 + +/** + * Use direct mode. + * avio_read and avio_write should if possible be satisfied directly + * instead of going through a buffer, and avio_seek will always + * call the underlying seek function directly. + */ +#define AVIO_FLAG_DIRECT 0x8000 + +/** + * Create and initialize a AVIOContext for accessing the + * resource indicated by url. + * @note When the resource indicated by url has been opened in + * read+write mode, the AVIOContext can be used only for writing. + * + * @param s Used to return the pointer to the created AVIOContext. + * In case of failure the pointed to value is set to NULL. + * @param url resource to access + * @param flags flags which control how the resource indicated by url + * is to be opened + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int avio_open(AVIOContext **s, const char *url, int flags); + +/** + * Create and initialize a AVIOContext for accessing the + * resource indicated by url. + * @note When the resource indicated by url has been opened in + * read+write mode, the AVIOContext can be used only for writing. 
+ * + * @param s Used to return the pointer to the created AVIOContext. + * In case of failure the pointed to value is set to NULL. + * @param url resource to access + * @param flags flags which control how the resource indicated by url + * is to be opened + * @param int_cb an interrupt callback to be used at the protocols level + * @param options A dictionary filled with protocol-private options. On return + * this parameter will be destroyed and replaced with a dict containing options + * that were not found. May be NULL. + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int avio_open2(AVIOContext **s, const char *url, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * Close the resource accessed by the AVIOContext s and free it. + * This function can only be used if s was opened by avio_open(). + * + * The internal buffer is automatically flushed before closing the + * resource. + * + * @return 0 on success, an AVERROR < 0 on error. + * @see avio_closep + */ +int avio_close(AVIOContext *s); + +/** + * Close the resource accessed by the AVIOContext *s, free it + * and set the pointer pointing to it to NULL. + * This function can only be used if s was opened by avio_open(). + * + * The internal buffer is automatically flushed before closing the + * resource. + * + * @return 0 on success, an AVERROR < 0 on error. + * @see avio_close + */ +int avio_closep(AVIOContext **s); + + +/** + * Open a write only memory stream. + * + * @param s new IO context + * @return zero if no error. + */ +int avio_open_dyn_buf(AVIOContext **s); + +/** + * Return the written size and a pointer to the buffer. + * The AVIOContext stream is left intact. + * The buffer must NOT be freed. + * No padding is added to the buffer. 
+ * + * @param s IO context + * @param pbuffer pointer to a byte buffer + * @return the length of the byte buffer + */ +int avio_get_dyn_buf(AVIOContext *s, uint8_t **pbuffer); + +/** + * Return the written size and a pointer to the buffer. The buffer + * must be freed with av_free(). + * Padding of AV_INPUT_BUFFER_PADDING_SIZE is added to the buffer. + * + * @param s IO context + * @param pbuffer pointer to a byte buffer + * @return the length of the byte buffer + */ +int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer); + +/** + * Iterate through names of available protocols. + * + * @param opaque A private pointer representing current protocol. + * It must be a pointer to NULL on first iteration and will + * be updated by successive calls to avio_enum_protocols. + * @param output If set to 1, iterate over output protocols, + * otherwise over input protocols. + * + * @return A static string containing the name of current protocol or NULL + */ +const char *avio_enum_protocols(void **opaque, int output); + +/** + * Pause and resume playing - only meaningful if using a network streaming + * protocol (e.g. MMS). + * + * @param h IO context from which to call the read_pause function pointer + * @param pause 1 for pause, 0 for resume + */ +int avio_pause(AVIOContext *h, int pause); + +/** + * Seek to a given timestamp relative to some component stream. + * Only meaningful if using a network streaming protocol (e.g. MMS.). + * + * @param h IO context from which to call the seek function pointers + * @param stream_index The stream index that the timestamp is relative to. + * If stream_index is (-1) the timestamp should be in AV_TIME_BASE + * units from the beginning of the presentation. + * If a stream_index >= 0 is used and the protocol does not support + * seeking based on component streams, the call will fail. + * @param timestamp timestamp in AVStream.time_base units + * or if there is no stream specified then in AV_TIME_BASE units. 
+ * @param flags Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE + * and AVSEEK_FLAG_ANY. The protocol may silently ignore + * AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will + * fail if used and not supported. + * @return >= 0 on success + * @see AVInputFormat::read_seek + */ +int64_t avio_seek_time(AVIOContext *h, int stream_index, + int64_t timestamp, int flags); + +/* Avoid a warning. The header can not be included because it breaks c++. */ +struct AVBPrint; + +/** + * Read contents of h into print buffer, up to max_size bytes, or up to EOF. + * + * @return 0 for success (max_size bytes read or EOF reached), negative error + * code otherwise + */ +int avio_read_to_bprint(AVIOContext *h, struct AVBPrint *pb, size_t max_size); + +/** + * Accept and allocate a client context on a server context. + * @param s the server context + * @param c the client context, must be unallocated + * @return >= 0 on success or a negative value corresponding + * to an AVERROR on failure + */ +int avio_accept(AVIOContext *s, AVIOContext **c); + +/** + * Perform one step of the protocol handshake to accept a new client. + * This function must be called on a client returned by avio_accept() before + * using it as a read/write context. + * It is separate from avio_accept() because it may block. + * A step of the handshake is defined by places where the application may + * decide to change the proceedings. + * For example, on a protocol with a request header and a reply header, each + * one can constitute a step because the application may use the parameters + * from the request to change parameters in the reply; or each individual + * chunk of the request can constitute a step. + * If the handshake is already finished, avio_handshake() does nothing and + * returns 0 immediately. 
+ * + * @param c the client context to perform the handshake on + * @return 0 on a complete and successful handshake + * > 0 if the handshake progressed, but is not complete + * < 0 for an AVERROR code + */ +int avio_handshake(AVIOContext *c); +#endif /* AVFORMAT_AVIO_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/hevc.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/hevc.h new file mode 100644 index 0000000..796eaf4 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/hevc.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2014 Tim Walker + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * internal header for HEVC (de)muxer utilities + */ + +#ifndef AVFORMAT_HEVC_H +#define AVFORMAT_HEVC_H + +#include +#include "avio.h" + +/** + * Writes Annex B formatted HEVC NAL units to the provided AVIOContext. + * + * The NAL units are converted to an MP4-compatible format (start code prefixes + * are replaced by 4-byte size fields, as per ISO/IEC 14496-15). + * + * If filter_ps is non-zero, any HEVC parameter sets found in the input will be + * discarded, and *ps_count will be set to the number of discarded PS NAL units. 
+ * + * @param pb address of the AVIOContext where the data shall be written + * @param buf_in address of the buffer holding the input data + * @param size size (in bytes) of the input buffer + * @param filter_ps whether to write parameter set NAL units to the output (0) + * or to discard them (non-zero) + * @param ps_count address of the variable where the number of discarded + * parameter set NAL units shall be written, may be NULL + * @return the amount (in bytes) of data written in case of success, a negative + * value corresponding to an AVERROR code in case of failure + */ +int ff_hevc_annexb2mp4(AVIOContext *pb, const uint8_t *buf_in, + int size, int filter_ps, int *ps_count); + +/** + * Writes Annex B formatted HEVC NAL units to a data buffer. + * + * The NAL units are converted to an MP4-compatible format (start code prefixes + * are replaced by 4-byte size fields, as per ISO/IEC 14496-15). + * + * If filter_ps is non-zero, any HEVC parameter sets found in the input will be + * discarded, and *ps_count will be set to the number of discarded PS NAL units. + * + * On output, *size holds the size (in bytes) of the output data buffer. 
+ * + * @param buf_in address of the buffer holding the input data + * @param size address of the variable holding the size (in bytes) of the input + * buffer (on input) and of the output buffer (on output) + * @param buf_out address of the variable holding the address of the output + * buffer + * @param filter_ps whether to write parameter set NAL units to the output (0) + * or to discard them (non-zero) + * @param ps_count address of the variable where the number of discarded + * parameter set NAL units shall be written, may be NULL + * @return the amount (in bytes) of data written in case of success, a negative + * value corresponding to an AVERROR code in case of failure + */ +int ff_hevc_annexb2mp4_buf(const uint8_t *buf_in, uint8_t **buf_out, + int *size, int filter_ps, int *ps_count); + +/** + * Writes HEVC extradata (parameter sets, declarative SEI NAL units) to the + * provided AVIOContext. + * + * If the extradata is Annex B format, it gets converted to hvcC format before + * writing. 
+ * + * @param pb address of the AVIOContext where the hvcC shall be written + * @param data address of the buffer holding the data needed to write the hvcC + * @param size size (in bytes) of the data buffer + * @param ps_array_completeness whether all parameter sets are in the hvcC (1) + * or there may be additional parameter sets in the bitstream (0) + * @return >=0 in case of success, a negative value corresponding to an AVERROR + * code in case of failure + */ +int ff_isom_write_hvcc(AVIOContext *pb, const uint8_t *data, + int size, int ps_array_completeness); + +#endif /* AVFORMAT_HEVC_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/internal.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/internal.h new file mode 100644 index 0000000..63a1724 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/internal.h @@ -0,0 +1,674 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_INTERNAL_H +#define AVFORMAT_INTERNAL_H + +#include + +#include "libavutil/bprint.h" +#include "avformat.h" +#include "os_support.h" + +#define MAX_URL_SIZE 4096 + +/** size of probe buffer, for guessing file type from file contents */ +#define PROBE_BUF_MIN 2048 +#define PROBE_BUF_MAX (1 << 20) + +#define MAX_PROBE_PACKETS 2500 + +#ifdef DEBUG +# define hex_dump_debug(class, buf, size) av_hex_dump_log(class, AV_LOG_DEBUG, buf, size) +#else +# define hex_dump_debug(class, buf, size) do { if (0) av_hex_dump_log(class, AV_LOG_DEBUG, buf, size); } while(0) +#endif + +typedef struct AVCodecTag { + enum AVCodecID id; + unsigned int tag; +} AVCodecTag; + +typedef struct CodecMime{ + char str[32]; + enum AVCodecID id; +} CodecMime; + +/*************************************************/ +/* fractional numbers for exact pts handling */ + +/** + * The exact value of the fractional number is: 'val + num / den'. + * num is assumed to be 0 <= num < den. + */ +typedef struct FFFrac { + int64_t val, num, den; +} FFFrac; + + +struct AVFormatInternal { + /** + * Number of streams relevant for interleaving. + * Muxing only. + */ + int nb_interleaved_streams; + + /** + * This buffer is only needed when packets were already buffered but + * not decoded, for example to get the codec parameters in MPEG + * streams. + */ + struct AVPacketList *packet_buffer; + struct AVPacketList *packet_buffer_end; + + /* av_seek_frame() support */ + int64_t data_offset; /**< offset of the first packet */ + + /** + * Raw packets from the demuxer, prior to parsing and decoding. + * This buffer is used for buffering packets until the codec can + * be identified, as parsing cannot be done without knowing the + * codec. 
+ */ + struct AVPacketList *raw_packet_buffer; + struct AVPacketList *raw_packet_buffer_end; + /** + * Packets split by the parser get queued here. + */ + struct AVPacketList *parse_queue; + struct AVPacketList *parse_queue_end; + /** + * Remaining size available for raw_packet_buffer, in bytes. + */ +#define RAW_PACKET_BUFFER_SIZE 2500000 + int raw_packet_buffer_remaining_size; + + /** + * Offset to remap timestamps to be non-negative. + * Expressed in timebase units. + * @see AVStream.mux_ts_offset + */ + int64_t offset; + + /** + * Timebase for the timestamp offset. + */ + AVRational offset_timebase; + +#if FF_API_COMPUTE_PKT_FIELDS2 + int missing_ts_warning; +#endif + + int inject_global_side_data; + + int avoid_negative_ts_use_pts; + + /** + * Whether or not a header has already been written + */ + int header_written; + int write_header_ret; + + /** + * Timestamp of the end of the shortest stream. + */ + int64_t shortest_end; + + /** + * Whether or not avformat_init_output has already been called + */ + int initialized; + + /** + * Whether or not avformat_init_output fully initialized streams + */ + int streams_initialized; + + /** + * ID3v2 tag useful for MP3 demuxing + */ + AVDictionary *id3v2_meta; +}; + +struct AVStreamInternal { + /** + * Set to 1 if the codec allows reordering, so pts can be different + * from dts. + */ + int reorder; + + /** + * bitstream filters to run on stream + * - encoding: Set by muxer using ff_stream_add_bitstream_filter + * - decoding: unused + */ + AVBSFContext **bsfcs; + int nb_bsfcs; + + /** + * Whether or not check_bitstream should still be run on each packet + */ + int bitstream_checked; + + /** + * The codec context used by avformat_find_stream_info, the parser, etc. 
+ */ + AVCodecContext *avctx; + /** + * 1 if avctx has been initialized with the values from the codec parameters + */ + int avctx_inited; + + enum AVCodecID orig_codec_id; + + /** + * Whether the internal avctx needs to be updated from codecpar (after a late change to codecpar) + */ + int need_context_update; +}; + +#ifdef __GNUC__ +#define dynarray_add(tab, nb_ptr, elem)\ +do {\ + __typeof__(tab) _tab = (tab);\ + __typeof__(elem) _elem = (elem);\ + (void)sizeof(**_tab == _elem); /* check that types are compatible */\ + av_dynarray_add(_tab, nb_ptr, _elem);\ +} while(0) +#else +#define dynarray_add(tab, nb_ptr, elem)\ +do {\ + av_dynarray_add((tab), nb_ptr, (elem));\ +} while(0) +#endif + +struct tm *ff_brktimegm(time_t secs, struct tm *tm); + +char *ff_data_to_hex(char *buf, const uint8_t *src, int size, int lowercase); + +/** + * Parse a string of hexadecimal strings. Any space between the hexadecimal + * digits is ignored. + * + * @param data if non-null, the parsed data is written to this pointer + * @param p the string to parse + * @return the number of bytes written (or to be written, if data is null) + */ +int ff_hex_to_data(uint8_t *data, const char *p); + +/** + * Add packet to AVFormatContext->packet_buffer list, determining its + * interleaved position using compare() function argument. + * @return 0, or < 0 on error + */ +int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt, + int (*compare)(AVFormatContext *, AVPacket *, AVPacket *)); + +void ff_read_frame_flush(AVFormatContext *s); + +#define NTP_OFFSET 2208988800ULL +#define NTP_OFFSET_US (NTP_OFFSET * 1000000ULL) + +/** Get the current time since NTP epoch in microseconds. */ +uint64_t ff_ntp_time(void); + +/** + * Append the media-specific SDP fragment for the media stream c + * to the buffer buff. + * + * Note, the buffer needs to be initialized, since it is appended to + * existing content. 
+ * + * @param buff the buffer to append the SDP fragment to + * @param size the size of the buff buffer + * @param st the AVStream of the media to describe + * @param idx the global stream index + * @param dest_addr the destination address of the media stream, may be NULL + * @param dest_type the destination address type, may be NULL + * @param port the destination port of the media stream, 0 if unknown + * @param ttl the time to live of the stream, 0 if not multicast + * @param fmt the AVFormatContext, which might contain options modifying + * the generated SDP + */ +void ff_sdp_write_media(char *buff, int size, AVStream *st, int idx, + const char *dest_addr, const char *dest_type, + int port, int ttl, AVFormatContext *fmt); + +/** + * Write a packet to another muxer than the one the user originally + * intended. Useful when chaining muxers, where one muxer internally + * writes a received packet to another muxer. + * + * @param dst the muxer to write the packet to + * @param dst_stream the stream index within dst to write the packet to + * @param pkt the packet to be written + * @param src the muxer the packet originally was intended for + * @param interleave 0->use av_write_frame, 1->av_interleaved_write_frame + * @return the value av_write_frame returned + */ +int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt, + AVFormatContext *src, int interleave); + +/** + * Get the length in bytes which is needed to store val as v. + */ +int ff_get_v_length(uint64_t val); + +/** + * Put val using a variable number of bytes. + */ +void ff_put_v(AVIOContext *bc, uint64_t val); + +/** + * Read a whole line of text from AVIOContext. Stop reading after reaching + * either a \\n, a \\0 or EOF. The returned string is always \\0-terminated, + * and may be truncated if the buffer is too small. 
+ * + * @param s the read-only AVIOContext + * @param buf buffer to store the read line + * @param maxlen size of the buffer + * @return the length of the string written in the buffer, not including the + * final \\0 + */ +int ff_get_line(AVIOContext *s, char *buf, int maxlen); + +#define SPACE_CHARS " \t\r\n" + +/** + * Callback function type for ff_parse_key_value. + * + * @param key a pointer to the key + * @param key_len the number of bytes that belong to the key, including the '=' + * char + * @param dest return the destination pointer for the value in *dest, may + * be null to ignore the value + * @param dest_len the length of the *dest buffer + */ +typedef void (*ff_parse_key_val_cb)(void *context, const char *key, + int key_len, char **dest, int *dest_len); +/** + * Parse a string with comma-separated key=value pairs. The value strings + * may be quoted and may contain escaped characters within quoted strings. + * + * @param str the string to parse + * @param callback_get_buf function that returns where to store the + * unescaped value string. + * @param context the opaque context pointer to pass to callback_get_buf + */ +void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf, + void *context); + +/** + * Find stream index based on format-specific stream ID + * @return stream index, or < 0 on error + */ +int ff_find_stream_index(AVFormatContext *s, int id); + +/** + * Internal version of av_index_search_timestamp + */ +int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries, + int64_t wanted_timestamp, int flags); + +/** + * Internal version of av_add_index_entry + */ +int ff_add_index_entry(AVIndexEntry **index_entries, + int *nb_index_entries, + unsigned int *index_entries_allocated_size, + int64_t pos, int64_t timestamp, int size, int distance, int flags); + +void ff_configure_buffers_for_index(AVFormatContext *s, int64_t time_tolerance); + +/** + * Add a new chapter. 
+ * + * @param s media file handle + * @param id unique ID for this chapter + * @param start chapter start time in time_base units + * @param end chapter end time in time_base units + * @param title chapter title + * + * @return AVChapter or NULL on error + */ +AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, + int64_t start, int64_t end, const char *title); + +/** + * Ensure the index uses less memory than the maximum specified in + * AVFormatContext.max_index_size by discarding entries if it grows + * too large. + */ +void ff_reduce_index(AVFormatContext *s, int stream_index); + +enum AVCodecID ff_guess_image2_codec(const char *filename); + +/** + * Perform a binary search using av_index_search_timestamp() and + * AVInputFormat.read_timestamp(). + * + * @param target_ts target timestamp in the time base of the given stream + * @param stream_index stream number + */ +int ff_seek_frame_binary(AVFormatContext *s, int stream_index, + int64_t target_ts, int flags); + +/** + * Update cur_dts of all streams based on the given timestamp and AVStream. + * + * Stream ref_st unchanged, others set cur_dts in their native time base. + * Only needed for timestamp wrapping or if (dts not set and pts!=dts). + * @param timestamp new dts expressed in time_base of param ref_st + * @param ref_st reference stream giving time_base of param timestamp + */ +void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp); + +int ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos, + int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )); + +/** + * Perform a binary search using read_timestamp(). 
+ * + * @param target_ts target timestamp in the time base of the given stream + * @param stream_index stream number + */ +int64_t ff_gen_search(AVFormatContext *s, int stream_index, + int64_t target_ts, int64_t pos_min, + int64_t pos_max, int64_t pos_limit, + int64_t ts_min, int64_t ts_max, + int flags, int64_t *ts_ret, + int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )); + +/** + * Set the time base and wrapping info for a given stream. This will be used + * to interpret the stream's timestamps. If the new time base is invalid + * (numerator or denominator are non-positive), it leaves the stream + * unchanged. + * + * @param s stream + * @param pts_wrap_bits number of bits effectively used by the pts + * (used for wrap control) + * @param pts_num time base numerator + * @param pts_den time base denominator + */ +void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits, + unsigned int pts_num, unsigned int pts_den); + +/** + * Add side data to a packet for changing parameters to the given values. + * Parameters set to 0 aren't included in the change. + */ +int ff_add_param_change(AVPacket *pkt, int32_t channels, + uint64_t channel_layout, int32_t sample_rate, + int32_t width, int32_t height); + +/** + * Set the timebase for each stream from the corresponding codec timebase and + * print it. + */ +int ff_framehash_write_header(AVFormatContext *s); + +/** + * Read a transport packet from a media file. + * + * @param s media file handle + * @param pkt is filled + * @return 0 if OK, AVERROR_xxx on error + */ +int ff_read_packet(AVFormatContext *s, AVPacket *pkt); + +/** + * Interleave a packet per dts in an output media file. + * + * Packets with pkt->destruct == av_destruct_packet will be freed inside this + * function, so they cannot be used after it. Note that calling av_packet_unref() + * on them is still safe. 
+ * + * @param s media file handle + * @param out the interleaved packet will be output here + * @param pkt the input packet + * @param flush 1 if no further packets are available as input and all + * remaining packets should be output + * @return 1 if a packet was output, 0 if no packet could be output, + * < 0 if an error occurred + */ +int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, + AVPacket *pkt, int flush); + +void ff_free_stream(AVFormatContext *s, AVStream *st); + +/** + * Return the frame duration in seconds. Return 0 if not available. + */ +void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st, + AVCodecParserContext *pc, AVPacket *pkt); + +unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id); + +enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag); + +/** + * Select a PCM codec based on the given parameters. + * + * @param bps bits-per-sample + * @param flt floating-point + * @param be big-endian + * @param sflags signed flags. each bit corresponds to one byte of bit depth. + * e.g. the 1st bit indicates if 8-bit should be signed or + * unsigned, the 2nd bit indicates if 16-bit should be signed or + * unsigned, etc... This is useful for formats such as WAVE where + * only 8-bit is unsigned and all other bit depths are signed. + * @return a PCM codec id or AV_CODEC_ID_NONE + */ +enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags); + +/** + * Chooses a timebase for muxing the specified stream. + * + * The chosen timebase allows sample accurate timestamps based + * on the framerate or sample rate for audio streams. It also is + * at least as precise as 1/min_precision would be. + */ +AVRational ff_choose_timebase(AVFormatContext *s, AVStream *st, int min_precision); + +/** + * Chooses a timebase for muxing the specified stream. 
+ */ +enum AVChromaLocation ff_choose_chroma_location(AVFormatContext *s, AVStream *st); + +/** + * Generate standard extradata for AVC-Intra based on width/height and field + * order. + */ +int ff_generate_avci_extradata(AVStream *st); + +/** + * Add a bitstream filter to a stream. + * + * @param st output stream to add a filter to + * @param name the name of the filter to add + * @param args filter-specific argument string + * @return >0 on success; + * AVERROR code on failure + */ +int ff_stream_add_bitstream_filter(AVStream *st, const char *name, const char *args); + +/** + * Copy encoding parameters from source to destination stream + * + * @param dst pointer to destination AVStream + * @param src pointer to source AVStream + * @return >=0 on success, AVERROR code on error + */ +int ff_stream_encode_params_copy(AVStream *dst, const AVStream *src); + +/** + * Wrap errno on rename() error. + * + * @param oldpath source path + * @param newpath destination path + * @return 0 or AVERROR on failure + */ +static inline int ff_rename(const char *oldpath, const char *newpath, void *logctx) +{ + int ret = 0; + if (rename(oldpath, newpath) == -1) { + ret = AVERROR(errno); + if (logctx) + av_log(logctx, AV_LOG_ERROR, "failed to rename file %s to %s\n", oldpath, newpath); + } + return ret; +} + +/** + * Allocate extradata with additional AV_INPUT_BUFFER_PADDING_SIZE at end + * which is always set to 0. + * + * @param size size of extradata + * @return 0 if OK, AVERROR_xxx on error + */ +int ff_alloc_extradata(AVCodecParameters *par, int size); + +/** + * Allocate extradata with additional AV_INPUT_BUFFER_PADDING_SIZE at end + * which is always set to 0 and fill it from pb. + * + * @param size size of extradata + * @return >= 0 if OK, AVERROR_xxx on error + */ +int ff_get_extradata(AVFormatContext *s, AVCodecParameters *par, AVIOContext *pb, int size); + +/** + * add frame for rfps calculation. 
+ * + * @param dts timestamp of the i-th frame + * @return 0 if OK, AVERROR_xxx on error + */ +int ff_rfps_add_frame(AVFormatContext *ic, AVStream *st, int64_t dts); + +void ff_rfps_calculate(AVFormatContext *ic); + +/** + * Flags for AVFormatContext.write_uncoded_frame() + */ +enum AVWriteUncodedFrameFlags { + + /** + * Query whether the feature is possible on this stream. + * The frame argument is ignored. + */ + AV_WRITE_UNCODED_FRAME_QUERY = 0x0001, + +}; + +/** + * Copies the whilelists from one context to the other + */ +int ff_copy_whiteblacklists(AVFormatContext *dst, const AVFormatContext *src); + +int ffio_open2_wrapper(struct AVFormatContext *s, AVIOContext **pb, const char *url, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * Returned by demuxers to indicate that data was consumed but discarded + * (ignored streams or junk data). The framework will re-call the demuxer. + */ +#define FFERROR_REDO FFERRTAG('R','E','D','O') + +/** + * Utility function to open IO stream of output format. + * + * @param s AVFormatContext + * @param url URL or file name to open for writing + * @options optional options which will be passed to io_open callback + * @return >=0 on success, negative AVERROR in case of failure + */ +int ff_format_output_open(AVFormatContext *s, const char *url, AVDictionary **options); + +/* + * A wrapper around AVFormatContext.io_close that should be used + * instead of calling the pointer directly. + */ +void ff_format_io_close(AVFormatContext *s, AVIOContext **pb); + +/** + * Parse creation_time in AVFormatContext metadata if exists and warn if the + * parsing fails. 
+ * + * @param s AVFormatContext + * @param timestamp parsed timestamp in microseconds, only set on successful parsing + * @param return_seconds set this to get the number of seconds in timestamp instead of microseconds + * @return 1 if OK, 0 if the metadata was not present, AVERROR(EINVAL) on parse error + */ +int ff_parse_creation_time_metadata(AVFormatContext *s, int64_t *timestamp, int return_seconds); + +/** + * Standardize creation_time metadata in AVFormatContext to an ISO-8601 + * timestamp string. + * + * @param s AVFormatContext + * @return <0 on error + */ +int ff_standardize_creation_time(AVFormatContext *s); + +#define CONTAINS_PAL 2 +/** + * Reshuffles the lines to use the user specified stride. + * + * @param ppkt input and output packet + * @return negative error code or + * 0 if no new packet was allocated + * non-zero if a new packet was allocated and ppkt has to be freed + * CONTAINS_PAL if in addition to a new packet the old contained a palette + */ +int ff_reshuffle_raw_rgb(AVFormatContext *s, AVPacket **ppkt, AVCodecParameters *par, int expected_stride); + +/** + * Retrieves the palette from a packet, either from side data, or + * appended to the video data in the packet itself (raw video only). + * It is commonly used after a call to ff_reshuffle_raw_rgb(). + * + * Use 0 for the ret parameter to check for side data only. + * + * @param pkt pointer to packet before calling ff_reshuffle_raw_rgb() + * @param ret return value from ff_reshuffle_raw_rgb(), or 0 + * @param palette pointer to palette buffer + * @return negative error code or + * 1 if the packet has a palette, else 0 + */ +int ff_get_packet_palette(AVFormatContext *s, AVPacket *pkt, int ret, uint32_t *palette); + +/** + * Finalize buf into extradata and set its size appropriately. + */ +int ff_bprint_to_codecpar_extradata(AVCodecParameters *par, struct AVBPrint *buf); + +/** + * Find the next packet in the interleaving queue for the given stream. 
+ * The pkt parameter is filled in with the queued packet, including + * references to the data (which the caller is not allowed to keep or + * modify). + * + * @return 0 if a packet was found, a negative value if no packet was found + */ +int ff_interleaved_peek(AVFormatContext *s, int stream, + AVPacket *pkt, int add_offset); + +#endif /* AVFORMAT_INTERNAL_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/url.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/url.h new file mode 100644 index 0000000..910f1e0 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/url.h @@ -0,0 +1,342 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * unbuffered private I/O API + */ + +#ifndef AVFORMAT_URL_H +#define AVFORMAT_URL_H + +#include "avio.h" +#include "libavformat/version.h" + +#include "libavutil/dict.h" +#include "libavutil/log.h" + +#define URL_PROTOCOL_FLAG_NESTED_SCHEME 1 /*< The protocol name can be the first part of a nested protocol scheme */ +#define URL_PROTOCOL_FLAG_NETWORK 2 /*< The protocol uses network */ + +extern const AVClass ffurl_context_class; + +typedef struct URLContext { + const AVClass *av_class; /**< information for av_log(). Set by url_open(). */ + const struct URLProtocol *prot; + void *priv_data; + char *filename; /**< specified URL */ + int flags; + int max_packet_size; /**< if non zero, the stream is packetized with this max packet size */ + int is_streamed; /**< true if streamed (no seek possible), default = false */ + int is_connected; + AVIOInterruptCB interrupt_callback; + int64_t rw_timeout; /**< maximum time to wait for (network) read/write operation completion, in mcs */ + const char *protocol_whitelist; + const char *protocol_blacklist; +} URLContext; + +typedef struct URLProtocol { + const char *name; + int (*url_open)( URLContext *h, const char *url, int flags); + /** + * This callback is to be used by protocols which open further nested + * protocols. options are then to be passed to ffurl_open()/ffurl_connect() + * for those nested protocols. + */ + int (*url_open2)(URLContext *h, const char *url, int flags, AVDictionary **options); + int (*url_accept)(URLContext *s, URLContext **c); + int (*url_handshake)(URLContext *c); + + /** + * Read data from the protocol. + * If data is immediately available (even less than size), EOF is + * reached or an error occurs (including EINTR), return immediately. 
+ * Otherwise: + * In non-blocking mode, return AVERROR(EAGAIN) immediately. + * In blocking mode, wait for data/EOF/error with a short timeout (0.1s), + * and return AVERROR(EAGAIN) on timeout. + * Checking interrupt_callback, looping on EINTR and EAGAIN and until + * enough data has been read is left to the calling function; see + * retry_transfer_wrapper in avio.c. + */ + int (*url_read)( URLContext *h, unsigned char *buf, int size); + int (*url_write)(URLContext *h, const unsigned char *buf, int size); + int64_t (*url_seek)( URLContext *h, int64_t pos, int whence); + int (*url_close)(URLContext *h); + int (*url_read_pause)(URLContext *h, int pause); + int64_t (*url_read_seek)(URLContext *h, int stream_index, + int64_t timestamp, int flags); + int (*url_get_file_handle)(URLContext *h); + int (*url_get_multi_file_handle)(URLContext *h, int **handles, + int *numhandles); + int (*url_get_short_seek)(URLContext *h); + int (*url_shutdown)(URLContext *h, int flags); + int priv_data_size; + const AVClass *priv_data_class; + int flags; + int (*url_check)(URLContext *h, int mask); + int (*url_open_dir)(URLContext *h); + int (*url_read_dir)(URLContext *h, AVIODirEntry **next); + int (*url_close_dir)(URLContext *h); + int (*url_delete)(URLContext *h); + int (*url_move)(URLContext *h_src, URLContext *h_dst); + const char *default_whitelist; +} URLProtocol; + +/** + * Create a URLContext for accessing to the resource indicated by + * url, but do not initiate the connection yet. 
+ * + * @param puc pointer to the location where, in case of success, the + * function puts the pointer to the created URLContext + * @param flags flags which control how the resource indicated by url + * is to be opened + * @param int_cb interrupt callback to use for the URLContext, may be + * NULL + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int ffurl_alloc(URLContext **puc, const char *filename, int flags, + const AVIOInterruptCB *int_cb); + +/** + * Connect an URLContext that has been allocated by ffurl_alloc + * + * @param options A dictionary filled with options for nested protocols, + * i.e. it will be passed to url_open2() for protocols implementing it. + * This parameter will be destroyed and replaced with a dict containing options + * that were not found. May be NULL. + */ +int ffurl_connect(URLContext *uc, AVDictionary **options); + +/** + * Create an URLContext for accessing to the resource indicated by + * url, and open it. + * + * @param puc pointer to the location where, in case of success, the + * function puts the pointer to the created URLContext + * @param flags flags which control how the resource indicated by url + * is to be opened + * @param int_cb interrupt callback to use for the URLContext, may be + * NULL + * @param options A dictionary filled with protocol-private options. On return + * this parameter will be destroyed and replaced with a dict containing options + * that were not found. May be NULL. + * @param parent An enclosing URLContext, whose generic options should + * be applied to this URLContext as well. 
+ * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int ffurl_open_whitelist(URLContext **puc, const char *filename, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options, + const char *whitelist, const char* blacklist, + URLContext *parent); + +int ffurl_open(URLContext **puc, const char *filename, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * Accept an URLContext c on an URLContext s + * + * @param s server context + * @param c client context, must be unallocated. + * @return >= 0 on success, ff_neterrno() on failure. + */ +int ffurl_accept(URLContext *s, URLContext **c); + +/** + * Perform one step of the protocol handshake to accept a new client. + * See avio_handshake() for details. + * Implementations should try to return decreasing values. + * If the protocol uses an underlying protocol, the underlying handshake is + * usually the first step, and the return value can be: + * (largest value for this protocol) + (return value from other protocol) + * + * @param c the client context + * @return >= 0 on success or a negative value corresponding + * to an AVERROR code on failure + */ +int ffurl_handshake(URLContext *c); + +/** + * Read up to size bytes from the resource accessed by h, and store + * the read bytes in buf. + * + * @return The number of bytes actually read, or a negative value + * corresponding to an AVERROR code in case of error. A value of zero + * indicates that it is not possible to read more from the accessed + * resource (except if the value of the size argument is also zero). + */ +int ffurl_read(URLContext *h, unsigned char *buf, int size); + +/** + * Read as many bytes as possible (up to size), calling the + * read function multiple times if necessary. 
+ * This makes special short-read handling in applications + * unnecessary, if the return value is < size then it is + * certain there was either an error or the end of file was reached. + */ +int ffurl_read_complete(URLContext *h, unsigned char *buf, int size); + +/** + * Write size bytes from buf to the resource accessed by h. + * + * @return the number of bytes actually written, or a negative value + * corresponding to an AVERROR code in case of failure + */ +int ffurl_write(URLContext *h, const unsigned char *buf, int size); + +/** + * Change the position that will be used by the next read/write + * operation on the resource accessed by h. + * + * @param pos specifies the new position to set + * @param whence specifies how pos should be interpreted, it must be + * one of SEEK_SET (seek from the beginning), SEEK_CUR (seek from the + * current position), SEEK_END (seek from the end), or AVSEEK_SIZE + * (return the filesize of the requested resource, pos is ignored). + * @return a negative value corresponding to an AVERROR code in case + * of failure, or the resulting file position, measured in bytes from + * the beginning of the file. You can use this feature together with + * SEEK_CUR to read the current file position. + */ +int64_t ffurl_seek(URLContext *h, int64_t pos, int whence); + +/** + * Close the resource accessed by the URLContext h, and free the + * memory used by it. Also set the URLContext pointer to NULL. + * + * @return a negative value if an error condition occurred, 0 + * otherwise + */ +int ffurl_closep(URLContext **h); +int ffurl_close(URLContext *h); + +/** + * Return the filesize of the resource accessed by h, AVERROR(ENOSYS) + * if the operation is not supported by h, or another negative value + * corresponding to an AVERROR error code in case of failure. + */ +int64_t ffurl_size(URLContext *h); + +/** + * Return the file descriptor associated with this URL. 
For RTP, this + * will return only the RTP file descriptor, not the RTCP file descriptor. + * + * @return the file descriptor associated with this URL, or <0 on error. + */ +int ffurl_get_file_handle(URLContext *h); + +/** + * Return the file descriptors associated with this URL. + * + * @return 0 on success or <0 on error. + */ +int ffurl_get_multi_file_handle(URLContext *h, int **handles, int *numhandles); + +/** + * Return the current short seek threshold value for this URL. + * + * @return threshold (>0) on success or <=0 on error. + */ +int ffurl_get_short_seek(URLContext *h); + +/** + * Signal the URLContext that we are done reading or writing the stream. + * + * @param h pointer to the resource + * @param flags flags which control how the resource indicated by url + * is to be shutdown + * + * @return a negative value if an error condition occurred, 0 + * otherwise + */ +int ffurl_shutdown(URLContext *h, int flags); + +/** + * Check if the user has requested to interrupt a blocking function + * associated with cb. + */ +int ff_check_interrupt(AVIOInterruptCB *cb); + +/* udp.c */ +int ff_udp_set_remote_url(URLContext *h, const char *uri); +int ff_udp_get_local_port(URLContext *h); + +/** + * Assemble a URL string from components. This is the reverse operation + * of av_url_split. + * + * Note, this requires networking to be initialized, so the caller must + * ensure ff_network_init has been called. + * + * @see av_url_split + * + * @param str the buffer to fill with the url + * @param size the size of the str buffer + * @param proto the protocol identifier, if null, the separator + * after the identifier is left out, too + * @param authorization an optional authorization string, may be null. + * An empty string is treated the same as a null string. 
+ * @param hostname the host name string + * @param port the port number, left out from the string if negative + * @param fmt a generic format string for everything to add after the + * host/port, may be null + * @return the number of characters written to the destination buffer + */ +int ff_url_join(char *str, int size, const char *proto, + const char *authorization, const char *hostname, + int port, const char *fmt, ...) av_printf_format(7, 8); + +/** + * Convert a relative url into an absolute url, given a base url. + * + * @param buf the buffer where output absolute url is written + * @param size the size of buf + * @param base the base url, may be equal to buf. + * @param rel the new url, which is interpreted relative to base + */ +void ff_make_absolute_url(char *buf, int size, const char *base, + const char *rel); + +/** + * Allocate directory entry with default values. + * + * @return entry or NULL on error + */ +AVIODirEntry *ff_alloc_dir_entry(void); + +const AVClass *ff_urlcontext_child_class_next(const AVClass *prev); + +/** + * Construct a list of protocols matching a given whitelist and/or blacklist. + * + * @param whitelist a comma-separated list of allowed protocol names or NULL. If + * this is a non-empty string, only protocols in this list will + * be included. + * @param blacklist a comma-separated list of forbidden protocol names or NULL. + * If this is a non-empty string, all protocols in this list + * will be excluded. + * + * @return a NULL-terminated array of matching protocols. The array must be + * freed by the caller. 
+ */ +const URLProtocol **ffurl_get_protocols(const char *whitelist, + const char *blacklist); + +#endif /* AVFORMAT_URL_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/version.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/version.h new file mode 100644 index 0000000..fc054ee --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavformat/version.h @@ -0,0 +1,105 @@ +/* + * Version macros. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_VERSION_H +#define AVFORMAT_VERSION_H + +/** + * @file + * @ingroup libavf + * Libavformat version macros + */ + +#include "libavutil/version.h" + +// Major bumping may affect Ticket5467, 5421, 5451(compatibility with Chromium) +// Also please add any ticket numbers that you believe might be affected here +#define LIBAVFORMAT_VERSION_MAJOR 57 +#define LIBAVFORMAT_VERSION_MINOR 71 +#define LIBAVFORMAT_VERSION_MICRO 100 + +#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ + LIBAVFORMAT_VERSION_MINOR, \ + LIBAVFORMAT_VERSION_MICRO) +#define LIBAVFORMAT_VERSION AV_VERSION(LIBAVFORMAT_VERSION_MAJOR, \ + LIBAVFORMAT_VERSION_MINOR, \ + LIBAVFORMAT_VERSION_MICRO) +#define LIBAVFORMAT_BUILD LIBAVFORMAT_VERSION_INT + +#define LIBAVFORMAT_IDENT "Lavf" AV_STRINGIFY(LIBAVFORMAT_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + * + * @note, when bumping the major version it is recommended to manually + * disable each FF_API_* in its own commit instead of disabling them all + * at once through the bump. This improves the git bisect-ability of the change. 
+ * + */ +#ifndef FF_API_LAVF_BITEXACT +#define FF_API_LAVF_BITEXACT (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_LAVF_FRAC +#define FF_API_LAVF_FRAC (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_LAVF_CODEC_TB +#define FF_API_LAVF_CODEC_TB (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_URL_FEOF +#define FF_API_URL_FEOF (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_LAVF_FMT_RAWPICTURE +#define FF_API_LAVF_FMT_RAWPICTURE (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_COMPUTE_PKT_FIELDS2 +#define FF_API_COMPUTE_PKT_FIELDS2 (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_OLD_OPEN_CALLBACKS +#define FF_API_OLD_OPEN_CALLBACKS (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_LAVF_AVCTX +#define FF_API_LAVF_AVCTX (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_NOCONST_GET_SIDE_DATA +#define FF_API_NOCONST_GET_SIDE_DATA (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_HTTP_USER_AGENT +#define FF_API_HTTP_USER_AGENT (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_HLS_WRAP +#define FF_API_HLS_WRAP (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_LAVF_MERGE_SD +#define FF_API_LAVF_MERGE_SD (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_LAVF_KEEPSIDE_FLAG +#define FF_API_LAVF_KEEPSIDE_FLAG (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_OLD_ROTATE_API +#define FF_API_OLD_ROTATE_API (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif + + +#ifndef FF_API_R_FRAME_RATE +#define FF_API_R_FRAME_RATE 1 +#endif +#endif /* AVFORMAT_VERSION_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/adler32.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/adler32.h new file mode 100644 index 0000000..a1f035b --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/adler32.h @@ -0,0 +1,60 @@ +/* + * copyright (c) 2006 Mans Rullgard + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_adler32 + * Public header for Adler-32 hash function implementation. + */ + +#ifndef AVUTIL_ADLER32_H +#define AVUTIL_ADLER32_H + +#include +#include "attributes.h" + +/** + * @defgroup lavu_adler32 Adler-32 + * @ingroup lavu_hash + * Adler-32 hash function implementation. + * + * @{ + */ + +/** + * Calculate the Adler32 checksum of a buffer. + * + * Passing the return value to a subsequent av_adler32_update() call + * allows the checksum of multiple buffers to be calculated as though + * they were concatenated. 
+ * + * @param adler initial checksum value + * @param buf pointer to input buffer + * @param len size of input buffer + * @return updated checksum + */ +unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf, + unsigned int len) av_pure; + +/** + * @} + */ + +#endif /* AVUTIL_ADLER32_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/aes.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/aes.h new file mode 100644 index 0000000..09efbda --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/aes.h @@ -0,0 +1,65 @@ +/* + * copyright (c) 2007 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AES_H +#define AVUTIL_AES_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_aes AES + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_aes_size; + +struct AVAES; + +/** + * Allocate an AVAES context. + */ +struct AVAES *av_aes_alloc(void); + +/** + * Initialize an AVAES context. 
+ * @param key_bits 128, 192 or 256 + * @param decrypt 0 for encryption, 1 for decryption + */ +int av_aes_init(struct AVAES *a, const uint8_t *key, int key_bits, int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * @param count number of 16 byte blocks + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_aes_crypt(struct AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_AES_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/aes_ctr.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/aes_ctr.h new file mode 100644 index 0000000..f596fa6 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/aes_ctr.h @@ -0,0 +1,83 @@ +/* + * AES-CTR cipher + * Copyright (c) 2015 Eran Kornblau + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AES_CTR_H +#define AVUTIL_AES_CTR_H + +#include + +#include "attributes.h" +#include "version.h" + +#define AES_CTR_KEY_SIZE (16) +#define AES_CTR_IV_SIZE (8) + +struct AVAESCTR; + +/** + * Allocate an AVAESCTR context. + */ +struct AVAESCTR *av_aes_ctr_alloc(void); + +/** + * Initialize an AVAESCTR context. + * @param key encryption key, must have a length of AES_CTR_KEY_SIZE + */ +int av_aes_ctr_init(struct AVAESCTR *a, const uint8_t *key); + +/** + * Release an AVAESCTR context. + */ +void av_aes_ctr_free(struct AVAESCTR *a); + +/** + * Process a buffer using a previously initialized context. + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param size the size of src and dst + */ +void av_aes_ctr_crypt(struct AVAESCTR *a, uint8_t *dst, const uint8_t *src, int size); + +/** + * Get the current iv + */ +const uint8_t* av_aes_ctr_get_iv(struct AVAESCTR *a); + +/** + * Generate a random iv + */ +void av_aes_ctr_set_random_iv(struct AVAESCTR *a); + +/** + * Forcefully change the iv + */ +void av_aes_ctr_set_iv(struct AVAESCTR *a, const uint8_t* iv); + +/** + * Increment the top 64 bit of the iv (performed after each frame) + */ +void av_aes_ctr_increment_iv(struct AVAESCTR *a); + +/** + * @} + */ + +#endif /* AVUTIL_AES_CTR_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/application.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/application.h new file mode 100644 index 0000000..0cf0b3f --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/application.h @@ -0,0 +1,133 @@ +/* + * copyright (c) 2016 Zhang Rui + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_APPLICATION_H +#define AVUTIL_APPLICATION_H + +#include "libavutil/log.h" + +#define AVAPP_EVENT_WILL_HTTP_OPEN 1 //AVAppHttpEvent +#define AVAPP_EVENT_DID_HTTP_OPEN 2 //AVAppHttpEvent +#define AVAPP_EVENT_WILL_HTTP_SEEK 3 //AVAppHttpEvent +#define AVAPP_EVENT_DID_HTTP_SEEK 4 //AVAppHttpEvent + +#define AVAPP_EVENT_ASYNC_STATISTIC 0x11000 //AVAppAsyncStatistic +#define AVAPP_EVENT_ASYNC_READ_SPEED 0x11001 //AVAppAsyncReadSpeed +#define AVAPP_EVENT_IO_TRAFFIC 0x12204 //AVAppIOTraffic + +#define AVAPP_CTRL_WILL_TCP_OPEN 0x20001 //AVAppTcpIOControl +#define AVAPP_CTRL_DID_TCP_OPEN 0x20002 //AVAppTcpIOControl + +#define AVAPP_CTRL_WILL_HTTP_OPEN 0x20003 //AVAppIOControl +#define AVAPP_CTRL_WILL_LIVE_OPEN 0x20005 //AVAppIOControl + +#define AVAPP_CTRL_WILL_CONCAT_SEGMENT_OPEN 0x20007 //AVAppIOControl + +#define AVAPP_CTRL_WILL_DNS_RESOLVE 0x20021 //AVAppDnsControl +#define AVAPP_CTRL_DID_DNS_RESOLVE 0x20022 //AVAppDnsControl + + +typedef struct AVAppIOControl { + size_t size; + char url[4096]; /* in, out */ + int segment_index; /* in, default = 0 */ + int retry_counter; /* in */ + + int is_handled; /* out, default = false */ + int is_url_changed; /* out, default = false */ +} AVAppIOControl; + +typedef struct 
AVAppTcpIOControl { + int error; + int family; + char ip[96]; + int port; + int fd; +} AVAppTcpIOControl; + +typedef struct AVAppAsyncStatistic { + size_t size; + int64_t buf_backwards; + int64_t buf_forwards; + int64_t buf_capacity; +} AVAppAsyncStatistic; + +typedef struct AVAppAsyncReadSpeed { + size_t size; + int is_full_speed; + int64_t io_bytes; + int64_t elapsed_milli; +} AVAppAsyncReadSpeed; + +typedef struct AVAppHttpEvent +{ + void *obj; + char url[4096]; + int64_t offset; + int error; + int http_code; +} AVAppHttpEvent; + +typedef struct AVAppIOTraffic +{ + void *obj; + int bytes; +} AVAppIOTraffic; + +typedef struct AVAppDnsControl +{ + char url[4096]; + int error; +} AVAppDnsControl; + +typedef struct AVApplicationContext AVApplicationContext; +struct AVApplicationContext { + const AVClass *av_class; /**< information for av_log(). Set by av_application_open(). */ + void *opaque; /**< user data. */ + + int (*func_on_app_event)(AVApplicationContext *h, int event_type ,void *obj, size_t size); +}; + +int av_application_alloc(AVApplicationContext **ph, void *opaque); +int av_application_open(AVApplicationContext **ph, void *opaque); +void av_application_close(AVApplicationContext *h); +void av_application_closep(AVApplicationContext **ph); + +void av_application_on_http_event(AVApplicationContext *h, int event_type, AVAppHttpEvent *event); +void av_application_will_http_open(AVApplicationContext *h, void *obj, const char *url); +void av_application_did_http_open(AVApplicationContext *h, void *obj, const char *url, int error, int http_code); +void av_application_will_http_seek(AVApplicationContext *h, void *obj, const char *url, int64_t offset); +void av_application_did_http_seek(AVApplicationContext *h, void *obj, const char *url, int64_t offset, int error, int http_code); + +void av_application_did_io_tcp_read(AVApplicationContext *h, void *obj, int bytes); + +int av_application_on_io_control(AVApplicationContext *h, int event_type, AVAppIOControl 
*control); + +int av_application_on_tcp_will_open(AVApplicationContext *h); +int av_application_on_tcp_did_open(AVApplicationContext *h, int error, int fd, AVAppTcpIOControl *control); + +int av_application_on_dns_will_resolve(AVApplicationContext *h, const char *url); +int av_application_on_dns_did_resolve(AVApplicationContext *h, const char *url, int error); + +void av_application_on_async_statistic(AVApplicationContext *h, AVAppAsyncStatistic *statistic); +void av_application_on_async_read_speed(AVApplicationContext *h, AVAppAsyncReadSpeed *speed); + + +#endif /* AVUTIL_APPLICATION_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/arm64/avconfig.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/arm64/avconfig.h new file mode 100644 index 0000000..f10aa61 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/arm64/avconfig.h @@ -0,0 +1,6 @@ +/* Generated by ffconf */ +#ifndef AVUTIL_AVCONFIG_H +#define AVUTIL_AVCONFIG_H +#define AV_HAVE_BIGENDIAN 0 +#define AV_HAVE_FAST_UNALIGNED 1 +#endif /* AVUTIL_AVCONFIG_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/arm64/ffversion.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/arm64/ffversion.h new file mode 100644 index 0000000..7ab8f1a --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/arm64/ffversion.h @@ -0,0 +1,5 @@ +/* Automatically generated by version.sh, do not manually edit! 
*/ +#ifndef AVUTIL_FFVERSION_H +#define AVUTIL_FFVERSION_H +#define FFMPEG_VERSION "ff3.3--fx0.8.0--20210325--fix_android11_crash" +#endif /* AVUTIL_FFVERSION_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/armv7/avconfig.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/armv7/avconfig.h new file mode 100644 index 0000000..f10aa61 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/armv7/avconfig.h @@ -0,0 +1,6 @@ +/* Generated by ffconf */ +#ifndef AVUTIL_AVCONFIG_H +#define AVUTIL_AVCONFIG_H +#define AV_HAVE_BIGENDIAN 0 +#define AV_HAVE_FAST_UNALIGNED 1 +#endif /* AVUTIL_AVCONFIG_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/armv7/ffversion.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/armv7/ffversion.h new file mode 100644 index 0000000..7ab8f1a --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/armv7/ffversion.h @@ -0,0 +1,5 @@ +/* Automatically generated by version.sh, do not manually edit! */ +#ifndef AVUTIL_FFVERSION_H +#define AVUTIL_FFVERSION_H +#define FFMPEG_VERSION "ff3.3--fx0.8.0--20210325--fix_android11_crash" +#endif /* AVUTIL_FFVERSION_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/attributes.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/attributes.h new file mode 100644 index 0000000..54d1901 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/attributes.h @@ -0,0 +1,167 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Macro definitions for various function/variable attributes + */ + +#ifndef AVUTIL_ATTRIBUTES_H +#define AVUTIL_ATTRIBUTES_H + +#ifdef __GNUC__ +# define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y)) +# define AV_GCC_VERSION_AT_MOST(x,y) (__GNUC__ < (x) || __GNUC__ == (x) && __GNUC_MINOR__ <= (y)) +#else +# define AV_GCC_VERSION_AT_LEAST(x,y) 0 +# define AV_GCC_VERSION_AT_MOST(x,y) 0 +#endif + +#ifndef av_always_inline +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_always_inline __attribute__((always_inline)) inline +#elif defined(_MSC_VER) +# define av_always_inline __forceinline +#else +# define av_always_inline inline +#endif +#endif + +#ifndef av_extern_inline +#if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__) +# define av_extern_inline extern inline +#else +# define av_extern_inline inline +#endif +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,4) +# define av_warn_unused_result __attribute__((warn_unused_result)) +#else +# define av_warn_unused_result +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_noinline __attribute__((noinline)) +#elif defined(_MSC_VER) +# define av_noinline __declspec(noinline) +#else +# define av_noinline +#endif + +#if 
AV_GCC_VERSION_AT_LEAST(3,1) +# define av_pure __attribute__((pure)) +#else +# define av_pure +#endif + +#if AV_GCC_VERSION_AT_LEAST(2,6) +# define av_const __attribute__((const)) +#else +# define av_const +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,3) +# define av_cold __attribute__((cold)) +#else +# define av_cold +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,1) && !defined(__llvm__) +# define av_flatten __attribute__((flatten)) +#else +# define av_flatten +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define attribute_deprecated __attribute__((deprecated)) +#elif defined(_MSC_VER) +# define attribute_deprecated __declspec(deprecated) +#else +# define attribute_deprecated +#endif + +/** + * Disable warnings about deprecated features + * This is useful for sections of code kept for backward compatibility and + * scheduled for removal. + */ +#ifndef AV_NOWARN_DEPRECATED +#if AV_GCC_VERSION_AT_LEAST(4,6) +# define AV_NOWARN_DEPRECATED(code) \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \ + code \ + _Pragma("GCC diagnostic pop") +#elif defined(_MSC_VER) +# define AV_NOWARN_DEPRECATED(code) \ + __pragma(warning(push)) \ + __pragma(warning(disable : 4996)) \ + code; \ + __pragma(warning(pop)) +#else +# define AV_NOWARN_DEPRECATED(code) code +#endif +#endif + +#if defined(__GNUC__) || defined(__clang__) +# define av_unused __attribute__((unused)) +#else +# define av_unused +#endif + +/** + * Mark a variable as used and prevent the compiler from optimizing it + * away. This is useful for variables accessed only from inline + * assembler without the compiler being aware. 
+ */ +#if AV_GCC_VERSION_AT_LEAST(3,1) || defined(__clang__) +# define av_used __attribute__((used)) +#else +# define av_used +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,3) +# define av_alias __attribute__((may_alias)) +#else +# define av_alias +#endif + +#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__) +# define av_uninit(x) x=x +#else +# define av_uninit(x) x +#endif + +#ifdef __GNUC__ +# define av_builtin_constant_p __builtin_constant_p +# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos))) +#else +# define av_builtin_constant_p(x) 0 +# define av_printf_format(fmtpos, attrpos) +#endif + +#if AV_GCC_VERSION_AT_LEAST(2,5) +# define av_noreturn __attribute__((noreturn)) +#else +# define av_noreturn +#endif + +#endif /* AVUTIL_ATTRIBUTES_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/audio_fifo.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/audio_fifo.h new file mode 100644 index 0000000..d8a9194 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/audio_fifo.h @@ -0,0 +1,187 @@ +/* + * Audio FIFO + * Copyright (c) 2012 Justin Ruggles + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Audio FIFO Buffer + */ + +#ifndef AVUTIL_AUDIO_FIFO_H +#define AVUTIL_AUDIO_FIFO_H + +#include "avutil.h" +#include "fifo.h" +#include "samplefmt.h" + +/** + * @addtogroup lavu_audio + * @{ + * + * @defgroup lavu_audiofifo Audio FIFO Buffer + * @{ + */ + +/** + * Context for an Audio FIFO Buffer. + * + * - Operates at the sample level rather than the byte level. + * - Supports multiple channels with either planar or packed sample format. + * - Automatic reallocation when writing to a full buffer. + */ +typedef struct AVAudioFifo AVAudioFifo; + +/** + * Free an AVAudioFifo. + * + * @param af AVAudioFifo to free + */ +void av_audio_fifo_free(AVAudioFifo *af); + +/** + * Allocate an AVAudioFifo. + * + * @param sample_fmt sample format + * @param channels number of channels + * @param nb_samples initial allocation size, in samples + * @return newly allocated AVAudioFifo, or NULL on error + */ +AVAudioFifo *av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels, + int nb_samples); + +/** + * Reallocate an AVAudioFifo. + * + * @param af AVAudioFifo to reallocate + * @param nb_samples new allocation size, in samples + * @return 0 if OK, or negative AVERROR code on failure + */ +av_warn_unused_result +int av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples); + +/** + * Write data to an AVAudioFifo. + * + * The AVAudioFifo will be reallocated automatically if the available space + * is less than nb_samples. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. 
+ * + * @param af AVAudioFifo to write to + * @param data audio data plane pointers + * @param nb_samples number of samples to write + * @return number of samples actually written, or negative AVERROR + * code on failure. If successful, the number of samples + * actually written will always be nb_samples. + */ +int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Peek data from an AVAudioFifo. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param af AVAudioFifo to read from + * @param data audio data plane pointers + * @param nb_samples number of samples to peek + * @return number of samples actually peek, or negative AVERROR code + * on failure. The number of samples actually peek will not + * be greater than nb_samples, and will only be less than + * nb_samples if av_audio_fifo_size is less than nb_samples. + */ +int av_audio_fifo_peek(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Peek data from an AVAudioFifo. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param af AVAudioFifo to read from + * @param data audio data plane pointers + * @param nb_samples number of samples to peek + * @param offset offset from current read position + * @return number of samples actually peek, or negative AVERROR code + * on failure. The number of samples actually peek will not + * be greater than nb_samples, and will only be less than + * nb_samples if av_audio_fifo_size is less than nb_samples. + */ +int av_audio_fifo_peek_at(AVAudioFifo *af, void **data, int nb_samples, int offset); + +/** + * Read data from an AVAudioFifo. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. 
+ * + * @param af AVAudioFifo to read from + * @param data audio data plane pointers + * @param nb_samples number of samples to read + * @return number of samples actually read, or negative AVERROR code + * on failure. The number of samples actually read will not + * be greater than nb_samples, and will only be less than + * nb_samples if av_audio_fifo_size is less than nb_samples. + */ +int av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Drain data from an AVAudioFifo. + * + * Removes the data without reading it. + * + * @param af AVAudioFifo to drain + * @param nb_samples number of samples to drain + * @return 0 if OK, or negative AVERROR code on failure + */ +int av_audio_fifo_drain(AVAudioFifo *af, int nb_samples); + +/** + * Reset the AVAudioFifo buffer. + * + * This empties all data in the buffer. + * + * @param af AVAudioFifo to reset + */ +void av_audio_fifo_reset(AVAudioFifo *af); + +/** + * Get the current number of samples in the AVAudioFifo available for reading. + * + * @param af the AVAudioFifo to query + * @return number of samples available for reading + */ +int av_audio_fifo_size(AVAudioFifo *af); + +/** + * Get the current number of samples in the AVAudioFifo available for writing. + * + * @param af the AVAudioFifo to query + * @return number of samples available for writing + */ +int av_audio_fifo_space(AVAudioFifo *af); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_AUDIO_FIFO_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/avassert.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/avassert.h new file mode 100644 index 0000000..46f3fea --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/avassert.h @@ -0,0 +1,75 @@ +/* + * copyright (c) 2010 Michael Niedermayer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simple assert() macros that are a bit more flexible than ISO C assert(). + * @author Michael Niedermayer + */ + +#ifndef AVUTIL_AVASSERT_H +#define AVUTIL_AVASSERT_H + +#include +#include "avutil.h" +#include "log.h" + +/** + * assert() equivalent, that is always enabled. + */ +#define av_assert0(cond) do { \ + if (!(cond)) { \ + av_log(NULL, AV_LOG_PANIC, "Assertion %s failed at %s:%d\n", \ + AV_STRINGIFY(cond), __FILE__, __LINE__); \ + abort(); \ + } \ +} while (0) + + +/** + * assert() equivalent, that does not lie in speed critical code. + * These asserts() thus can be enabled without fearing speed loss. + */ +#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0 +#define av_assert1(cond) av_assert0(cond) +#else +#define av_assert1(cond) ((void)0) +#endif + + +/** + * assert() equivalent, that does lie in speed critical code. + */ +#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1 +#define av_assert2(cond) av_assert0(cond) +#define av_assert2_fpu() av_assert0_fpu() +#else +#define av_assert2(cond) ((void)0) +#define av_assert2_fpu() ((void)0) +#endif + +/** + * Assert that floating point opperations can be executed. 
+ * + * This will av_assert0() that the cpu is not in MMX state on X86 + */ +void av_assert0_fpu(void); + +#endif /* AVUTIL_AVASSERT_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/avconfig.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/avconfig.h new file mode 100644 index 0000000..c851904 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/avconfig.h @@ -0,0 +1,46 @@ +/* + * avconfig.h + * + * Copyright (c) 2013 Bilibili + * Copyright (c) 2013 Zhang Rui + * + * This file is part of ijkPlayer. + * + * ijkPlayer is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * ijkPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with ijkPlayer; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#if defined(__aarch64__) +# include "arm64/avconfig.h" +#elif defined(__x86_64__) +# include "x86_64/avconfig.h" +#elif defined(__arm__) + +# if defined(__ARM_ARCH_7S__) +# include "armv7s/avconfig.h" +# elif defined(__ARM_ARCH) +# if __ARM_ARCH == 7 +# include "armv7/avconfig.h" +# else +# error Unsupport ARM architecture +# endif +# else +# error Unsupport ARM architecture +# endif + +#elif defined(__i386__) +# include "i386/avconfig.h" +#else +# error Unsupport architecture +#endif diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/avstring.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/avstring.h new file mode 100644 index 0000000..dd28769 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/avstring.h @@ -0,0 +1,402 @@ +/* + * Copyright (c) 2007 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AVSTRING_H +#define AVUTIL_AVSTRING_H + +#include +#include +#include "attributes.h" + +/** + * @addtogroup lavu_string + * @{ + */ + +/** + * Return non-zero if pfx is a prefix of str. If it is, *ptr is set to + * the address of the first character in str after the prefix. + * + * @param str input string + * @param pfx prefix to test + * @param ptr updated if the prefix is matched inside str + * @return non-zero if the prefix matches, zero otherwise + */ +int av_strstart(const char *str, const char *pfx, const char **ptr); + +/** + * Return non-zero if pfx is a prefix of str independent of case. If + * it is, *ptr is set to the address of the first character in str + * after the prefix. + * + * @param str input string + * @param pfx prefix to test + * @param ptr updated if the prefix is matched inside str + * @return non-zero if the prefix matches, zero otherwise + */ +int av_stristart(const char *str, const char *pfx, const char **ptr); + +/** + * Locate the first case-independent occurrence in the string haystack + * of the string needle. A zero-length string needle is considered to + * match at the start of haystack. + * + * This function is a case-insensitive version of the standard strstr(). + * + * @param haystack string to search in + * @param needle string to search for + * @return pointer to the located match within haystack + * or a null pointer if no match + */ +char *av_stristr(const char *haystack, const char *needle); + +/** + * Locate the first occurrence of the string needle in the string haystack + * where not more than hay_length characters are searched. A zero-length + * string needle is considered to match at the start of haystack. + * + * This function is a length-limited version of the standard strstr(). 
+ * + * @param haystack string to search in + * @param needle string to search for + * @param hay_length length of string to search in + * @return pointer to the located match within haystack + * or a null pointer if no match + */ +char *av_strnstr(const char *haystack, const char *needle, size_t hay_length); + +/** + * Copy the string src to dst, but no more than size - 1 bytes, and + * null-terminate dst. + * + * This function is the same as BSD strlcpy(). + * + * @param dst destination buffer + * @param src source string + * @param size size of destination buffer + * @return the length of src + * + * @warning since the return value is the length of src, src absolutely + * _must_ be a properly 0-terminated string, otherwise this will read beyond + * the end of the buffer and possibly crash. + */ +size_t av_strlcpy(char *dst, const char *src, size_t size); + +/** + * Append the string src to the string dst, but to a total length of + * no more than size - 1 bytes, and null-terminate dst. + * + * This function is similar to BSD strlcat(), but differs when + * size <= strlen(dst). + * + * @param dst destination buffer + * @param src source string + * @param size size of destination buffer + * @return the total length of src and dst + * + * @warning since the return value use the length of src and dst, these + * absolutely _must_ be a properly 0-terminated strings, otherwise this + * will read beyond the end of the buffer and possibly crash. + */ +size_t av_strlcat(char *dst, const char *src, size_t size); + +/** + * Append output to a string, according to a format. Never write out of + * the destination buffer, and always put a terminating 0 within + * the buffer. 
+ * @param dst destination buffer (string to which the output is + * appended) + * @param size total size of the destination buffer + * @param fmt printf-compatible format string, specifying how the + * following parameters are used + * @return the length of the string that would have been generated + * if enough space had been available + */ +size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4); + +/** + * Get the count of continuous non zero chars starting from the beginning. + * + * @param len maximum number of characters to check in the string, that + * is the maximum value which is returned by the function + */ +static inline size_t av_strnlen(const char *s, size_t len) +{ + size_t i; + for (i = 0; i < len && s[i]; i++) + ; + return i; +} + +/** + * Print arguments following specified format into a large enough auto + * allocated buffer. It is similar to GNU asprintf(). + * @param fmt printf-compatible format string, specifying how the + * following parameters are used. + * @return the allocated string + * @note You have to free the string yourself with av_free(). + */ +char *av_asprintf(const char *fmt, ...) av_printf_format(1, 2); + +/** + * Convert a number to an av_malloced string. + */ +char *av_d2str(double d); + +/** + * Unescape the given string until a non escaped terminating char, + * and return the token corresponding to the unescaped string. + * + * The normal \ and ' escaping is supported. Leading and trailing + * whitespaces are removed, unless they are escaped with '\' or are + * enclosed between ''. 
+ * + * @param buf the buffer to parse, buf will be updated to point to the + * terminating char + * @param term a 0-terminated list of terminating chars + * @return the malloced unescaped string, which must be av_freed by + * the user, NULL in case of allocation failure + */ +char *av_get_token(const char **buf, const char *term); + +/** + * Split the string into several tokens which can be accessed by + * successive calls to av_strtok(). + * + * A token is defined as a sequence of characters not belonging to the + * set specified in delim. + * + * On the first call to av_strtok(), s should point to the string to + * parse, and the value of saveptr is ignored. In subsequent calls, s + * should be NULL, and saveptr should be unchanged since the previous + * call. + * + * This function is similar to strtok_r() defined in POSIX.1. + * + * @param s the string to parse, may be NULL + * @param delim 0-terminated list of token delimiters, must be non-NULL + * @param saveptr user-provided pointer which points to stored + * information necessary for av_strtok() to continue scanning the same + * string. saveptr is updated to point to the next character after the + * first delimiter found, or to NULL if the string was terminated + * @return the found token, or NULL when no token is found + */ +char *av_strtok(char *s, const char *delim, char **saveptr); + +/** + * Locale-independent conversion of ASCII isdigit. + */ +static inline av_const int av_isdigit(int c) +{ + return c >= '0' && c <= '9'; +} + +/** + * Locale-independent conversion of ASCII isgraph. + */ +static inline av_const int av_isgraph(int c) +{ + return c > 32 && c < 127; +} + +/** + * Locale-independent conversion of ASCII isspace. + */ +static inline av_const int av_isspace(int c) +{ + return c == ' ' || c == '\f' || c == '\n' || c == '\r' || c == '\t' || + c == '\v'; +} + +/** + * Locale-independent conversion of ASCII characters to uppercase. 
+ */ +static inline av_const int av_toupper(int c) +{ + if (c >= 'a' && c <= 'z') + c ^= 0x20; + return c; +} + +/** + * Locale-independent conversion of ASCII characters to lowercase. + */ +static inline av_const int av_tolower(int c) +{ + if (c >= 'A' && c <= 'Z') + c ^= 0x20; + return c; +} + +/** + * Locale-independent conversion of ASCII isxdigit. + */ +static inline av_const int av_isxdigit(int c) +{ + c = av_tolower(c); + return av_isdigit(c) || (c >= 'a' && c <= 'f'); +} + +/** + * Locale-independent case-insensitive compare. + * @note This means only ASCII-range characters are case-insensitive + */ +int av_strcasecmp(const char *a, const char *b); + +/** + * Locale-independent case-insensitive compare. + * @note This means only ASCII-range characters are case-insensitive + */ +int av_strncasecmp(const char *a, const char *b, size_t n); + + +/** + * Thread safe basename. + * @param path the path, on DOS both \ and / are considered separators. + * @return pointer to the basename substring. + */ +const char *av_basename(const char *path); + +/** + * Thread safe dirname. + * @param path the path, on DOS both \ and / are considered separators. + * @return the path with the separator replaced by the string terminator or ".". + * @note the function may change the input string. + */ +const char *av_dirname(char *path); + +/** + * Match instances of a name in a comma-separated list of names. + * List entries are checked from the start to the end of the names list, + * the first match ends further processing. If an entry prefixed with '-' + * matches, then 0 is returned. The "ALL" list entry is considered to + * match all names. + * + * @param name Name to look for. + * @param names List of names. + * @return 1 on match, 0 otherwise. + */ +int av_match_name(const char *name, const char *names); + +/** + * Append path component to the existing path. + * Path separator '/' is placed between when needed. + * Resulting string have to be freed with av_free(). 
+ * @param path base path + * @param component component to be appended + * @return new path or NULL on error. + */ +char *av_append_path_component(const char *path, const char *component); + +enum AVEscapeMode { + AV_ESCAPE_MODE_AUTO, ///< Use auto-selected escaping mode. + AV_ESCAPE_MODE_BACKSLASH, ///< Use backslash escaping. + AV_ESCAPE_MODE_QUOTE, ///< Use single-quote escaping. +}; + +/** + * Consider spaces special and escape them even in the middle of the + * string. + * + * This is equivalent to adding the whitespace characters to the special + * characters lists, except it is guaranteed to use the exact same list + * of whitespace characters as the rest of libavutil. + */ +#define AV_ESCAPE_FLAG_WHITESPACE (1 << 0) + +/** + * Escape only specified special characters. + * Without this flag, escape also any characters that may be considered + * special by av_get_token(), such as the single quote. + */ +#define AV_ESCAPE_FLAG_STRICT (1 << 1) + +/** + * Escape string in src, and put the escaped string in an allocated + * string in *dst, which must be freed with av_free(). + * + * @param dst pointer where an allocated string is put + * @param src string to escape, must be non-NULL + * @param special_chars string containing the special characters which + * need to be escaped, can be NULL + * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros. + * Any unknown value for mode will be considered equivalent to + * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without + * notice. 
+ * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_ macros + * @return the length of the allocated string, or a negative error code in case of error + * @see av_bprint_escape() + */ +av_warn_unused_result +int av_escape(char **dst, const char *src, const char *special_chars, + enum AVEscapeMode mode, int flags); + +#define AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES 1 ///< accept codepoints over 0x10FFFF +#define AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS 2 ///< accept non-characters - 0xFFFE and 0xFFFF +#define AV_UTF8_FLAG_ACCEPT_SURROGATES 4 ///< accept UTF-16 surrogates codes +#define AV_UTF8_FLAG_EXCLUDE_XML_INVALID_CONTROL_CODES 8 ///< exclude control codes not accepted by XML + +#define AV_UTF8_FLAG_ACCEPT_ALL \ + AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES|AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS|AV_UTF8_FLAG_ACCEPT_SURROGATES + +/** + * Read and decode a single UTF-8 code point (character) from the + * buffer in *buf, and update *buf to point to the next byte to + * decode. + * + * In case of an invalid byte sequence, the pointer will be updated to + * the next byte after the invalid sequence and the function will + * return an error code. + * + * Depending on the specified flags, the function will also fail in + * case the decoded code point does not belong to a valid range. + * + * @note For speed-relevant code a carefully implemented use of + * GET_UTF8() may be preferred. + * + * @param codep pointer used to return the parsed code in case of success. + * The value in *codep is set even in case the range check fails. + * @param bufp pointer to the address the first byte of the sequence + * to decode, updated by the function to point to the + * byte next after the decoded sequence + * @param buf_end pointer to the end of the buffer, points to the next + * byte past the last in the buffer. This is used to + * avoid buffer overreads (in case of an unfinished + * UTF-8 sequence towards the end of the buffer). 
+ * @param flags a collection of AV_UTF8_FLAG_* flags + * @return >= 0 in case a sequence was successfully read, a negative + * value in case of invalid sequence + */ +av_warn_unused_result +int av_utf8_decode(int32_t *codep, const uint8_t **bufp, const uint8_t *buf_end, + unsigned int flags); + +/** + * Check if a name is in a list. + * @returns 0 if not found, or the 1 based index where it has been found in the + * list. + */ +int av_match_list(const char *name, const char *list, char separator); + +/** + * @} + */ + +#endif /* AVUTIL_AVSTRING_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/avutil.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/avutil.h new file mode 100644 index 0000000..4d63315 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/avutil.h @@ -0,0 +1,365 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AVUTIL_H +#define AVUTIL_AVUTIL_H + +/** + * @file + * @ingroup lavu + * Convenience header that includes @ref lavu "libavutil"'s core. 
+ */ + +/** + * @mainpage + * + * @section ffmpeg_intro Introduction + * + * This document describes the usage of the different libraries + * provided by FFmpeg. + * + * @li @ref libavc "libavcodec" encoding/decoding library + * @li @ref lavfi "libavfilter" graph-based frame editing library + * @li @ref libavf "libavformat" I/O and muxing/demuxing library + * @li @ref lavd "libavdevice" special devices muxing/demuxing library + * @li @ref lavu "libavutil" common utility library + * @li @ref lswr "libswresample" audio resampling, format conversion and mixing + * @li @ref lpp "libpostproc" post processing library + * @li @ref libsws "libswscale" color conversion and scaling library + * + * @section ffmpeg_versioning Versioning and compatibility + * + * Each of the FFmpeg libraries contains a version.h header, which defines a + * major, minor and micro version number with the + * LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO} macros. The major version + * number is incremented with backward incompatible changes - e.g. removing + * parts of the public API, reordering public struct members, etc. The minor + * version number is incremented for backward compatible API changes or major + * new features - e.g. adding a new public function or a new decoder. The micro + * version number is incremented for smaller changes that a calling program + * might still want to check for - e.g. changing behavior in a previously + * unspecified situation. + * + * FFmpeg guarantees backward API and ABI compatibility for each library as long + * as its major version number is unchanged. This means that no public symbols + * will be removed or renamed. Types and names of the public struct members and + * values of public macros and enums will remain the same (unless they were + * explicitly declared as not part of the public API). Documented behavior will + * not change. 
+ * + * In other words, any correct program that works with a given FFmpeg snapshot + * should work just as well without any changes with any later snapshot with the + * same major versions. This applies to both rebuilding the program against new + * FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program + * links against. + * + * However, new public symbols may be added and new members may be appended to + * public structs whose size is not part of public ABI (most public structs in + * FFmpeg). New macros and enum values may be added. Behavior in undocumented + * situations may change slightly (and be documented). All those are accompanied + * by an entry in doc/APIchanges and incrementing either the minor or micro + * version number. + */ + +/** + * @defgroup lavu libavutil + * Common code shared across all FFmpeg libraries. + * + * @note + * libavutil is designed to be modular. In most cases, in order to use the + * functions provided by one component of libavutil you must explicitly include + * the specific header containing that feature. If you are only using + * media-related components, you could simply include libavutil/avutil.h, which + * brings in most of the "core" components. 
+ * + * @{ + * + * @defgroup lavu_crypto Crypto and Hashing + * + * @{ + * @} + * + * @defgroup lavu_math Mathematics + * @{ + * + * @} + * + * @defgroup lavu_string String Manipulation + * + * @{ + * + * @} + * + * @defgroup lavu_mem Memory Management + * + * @{ + * + * @} + * + * @defgroup lavu_data Data Structures + * @{ + * + * @} + * + * @defgroup lavu_video Video related + * + * @{ + * + * @} + * + * @defgroup lavu_audio Audio related + * + * @{ + * + * @} + * + * @defgroup lavu_error Error Codes + * + * @{ + * + * @} + * + * @defgroup lavu_log Logging Facility + * + * @{ + * + * @} + * + * @defgroup lavu_misc Other + * + * @{ + * + * @defgroup preproc_misc Preprocessor String Macros + * + * @{ + * + * @} + * + * @defgroup version_utils Library Version Macros + * + * @{ + * + * @} + */ + + +/** + * @addtogroup lavu_ver + * @{ + */ + +/** + * Return the LIBAVUTIL_VERSION_INT constant. + */ +unsigned avutil_version(void); + +/** + * Return an informative version string. This usually is the actual release + * version number or a git commit description. This string has no fixed format + * and can change any time. It should never be parsed by code. + */ +const char *av_version_info(void); + +/** + * Return the libavutil build-time configuration. + */ +const char *avutil_configuration(void); + +/** + * Return the libavutil license. + */ +const char *avutil_license(void); + +/** + * @} + */ + +/** + * @addtogroup lavu_media Media Type + * @brief Media Type + */ + +enum AVMediaType { + AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA + AVMEDIA_TYPE_VIDEO, + AVMEDIA_TYPE_AUDIO, + AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous + AVMEDIA_TYPE_SUBTITLE, + AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse + AVMEDIA_TYPE_NB +}; + +/** + * Return a string describing the media_type enum, NULL if media_type + * is unknown. 
+ */ +const char *av_get_media_type_string(enum AVMediaType media_type); + +/** + * @defgroup lavu_const Constants + * @{ + * + * @defgroup lavu_enc Encoding specific + * + * @note those definition should move to avcodec + * @{ + */ + +#define FF_LAMBDA_SHIFT 7 +#define FF_LAMBDA_SCALE (1< + +/** + * @defgroup lavu_base64 Base64 + * @ingroup lavu_crypto + * @{ + */ + +/** + * Decode a base64-encoded string. + * + * @param out buffer for decoded data + * @param in null-terminated input string + * @param out_size size in bytes of the out buffer, must be at + * least 3/4 of the length of in, that is AV_BASE64_DECODE_SIZE(strlen(in)) + * @return number of bytes written, or a negative value in case of + * invalid input + */ +int av_base64_decode(uint8_t *out, const char *in, int out_size); + +/** + * Calculate the output size in bytes needed to decode a base64 string + * with length x to a data buffer. + */ +#define AV_BASE64_DECODE_SIZE(x) ((x) * 3LL / 4) + +/** + * Encode data to base64 and null-terminate. + * + * @param out buffer for encoded data + * @param out_size size in bytes of the out buffer (including the + * null terminator), must be at least AV_BASE64_SIZE(in_size) + * @param in input buffer containing the data to encode + * @param in_size size in bytes of the in buffer + * @return out or NULL in case of error + */ +char *av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size); + +/** + * Calculate the output size needed to base64-encode x bytes to a + * null-terminated string. 
+ */ +#define AV_BASE64_SIZE(x) (((x)+2) / 3 * 4 + 1) + + /** + * @} + */ + +#endif /* AVUTIL_BASE64_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/blowfish.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/blowfish.h new file mode 100644 index 0000000..9e289a4 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/blowfish.h @@ -0,0 +1,82 @@ +/* + * Blowfish algorithm + * Copyright (c) 2012 Samuel Pitoiset + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_BLOWFISH_H +#define AVUTIL_BLOWFISH_H + +#include + +/** + * @defgroup lavu_blowfish Blowfish + * @ingroup lavu_crypto + * @{ + */ + +#define AV_BF_ROUNDS 16 + +typedef struct AVBlowfish { + uint32_t p[AV_BF_ROUNDS + 2]; + uint32_t s[4][256]; +} AVBlowfish; + +/** + * Allocate an AVBlowfish context. + */ +AVBlowfish *av_blowfish_alloc(void); + +/** + * Initialize an AVBlowfish context. + * + * @param ctx an AVBlowfish context + * @param key a key + * @param key_len length of the key + */ +void av_blowfish_init(struct AVBlowfish *ctx, const uint8_t *key, int key_len); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. 
+ * + * @param ctx an AVBlowfish context + * @param xl left four bytes halves of input to be encrypted + * @param xr right four bytes halves of input to be encrypted + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_blowfish_crypt_ecb(struct AVBlowfish *ctx, uint32_t *xl, uint32_t *xr, + int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVBlowfish context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_blowfish_crypt(struct AVBlowfish *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_BLOWFISH_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/bprint.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/bprint.h new file mode 100644 index 0000000..c09b1ac --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/bprint.h @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2012 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_BPRINT_H +#define AVUTIL_BPRINT_H + +#include + +#include "attributes.h" +#include "avstring.h" + +/** + * Define a structure with extra padding to a fixed size + * This helps ensuring binary compatibility with future versions. + */ + +#define FF_PAD_STRUCTURE(name, size, ...) \ +struct ff_pad_helper_##name { __VA_ARGS__ }; \ +typedef struct name { \ + __VA_ARGS__ \ + char reserved_padding[size - sizeof(struct ff_pad_helper_##name)]; \ +} name; + +/** + * Buffer to print data progressively + * + * The string buffer grows as necessary and is always 0-terminated. + * The content of the string is never accessed, and thus is + * encoding-agnostic and can even hold binary data. + * + * Small buffers are kept in the structure itself, and thus require no + * memory allocation at all (unless the contents of the buffer is needed + * after the structure goes out of scope). This is almost as lightweight as + * declaring a local "char buf[512]". + * + * The length of the string can go beyond the allocated size: the buffer is + * then truncated, but the functions still keep account of the actual total + * length. + * + * In other words, buf->len can be greater than buf->size and records the + * total length of what would have been to the buffer if there had been + * enough memory. + * + * Append operations do not need to be tested for failure: if a memory + * allocation fails, data stop being appended to the buffer, but the length + * is still updated. This situation can be tested with + * av_bprint_is_complete(). + * + * The size_max field determines several possible behaviours: + * + * size_max = -1 (= UINT_MAX) or any large value will let the buffer be + * reallocated as necessary, with an amortized linear cost. 
+ * + * size_max = 0 prevents writing anything to the buffer: only the total + * length is computed. The write operations can then possibly be repeated in + * a buffer with exactly the necessary size + * (using size_init = size_max = len + 1). + * + * size_max = 1 is automatically replaced by the exact size available in the + * structure itself, thus ensuring no dynamic memory allocation. The + * internal buffer is large enough to hold a reasonable paragraph of text, + * such as the current paragraph. + */ + +FF_PAD_STRUCTURE(AVBPrint, 1024, + char *str; /**< string so far */ + unsigned len; /**< length so far */ + unsigned size; /**< allocated memory */ + unsigned size_max; /**< maximum allocated memory */ + char reserved_internal_buffer[1]; +) + +/** + * Convenience macros for special values for av_bprint_init() size_max + * parameter. + */ +#define AV_BPRINT_SIZE_UNLIMITED ((unsigned)-1) +#define AV_BPRINT_SIZE_AUTOMATIC 1 +#define AV_BPRINT_SIZE_COUNT_ONLY 0 + +/** + * Init a print buffer. + * + * @param buf buffer to init + * @param size_init initial size (including the final 0) + * @param size_max maximum size; + * 0 means do not write anything, just count the length; + * 1 is replaced by the maximum value for automatic storage; + * any large value means that the internal buffer will be + * reallocated as needed up to that limit; -1 is converted to + * UINT_MAX, the largest limit possible. + * Check also AV_BPRINT_SIZE_* macros. + */ +void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max); + +/** + * Init a print buffer using a pre-existing buffer. + * + * The buffer will not be reallocated. + * + * @param buf buffer structure to init + * @param buffer byte buffer to use for the string data + * @param size size of buffer + */ +void av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size); + +/** + * Append a formatted string to a print buffer. + */ +void av_bprintf(AVBPrint *buf, const char *fmt, ...) 
av_printf_format(2, 3); + +/** + * Append a formatted string to a print buffer. + */ +void av_vbprintf(AVBPrint *buf, const char *fmt, va_list vl_arg); + +/** + * Append char c n times to a print buffer. + */ +void av_bprint_chars(AVBPrint *buf, char c, unsigned n); + +/** + * Append data to a print buffer. + * + * param buf bprint buffer to use + * param data pointer to data + * param size size of data + */ +void av_bprint_append_data(AVBPrint *buf, const char *data, unsigned size); + +struct tm; +/** + * Append a formatted date and time to a print buffer. + * + * param buf bprint buffer to use + * param fmt date and time format string, see strftime() + * param tm broken-down time structure to translate + * + * @note due to poor design of the standard strftime function, it may + * produce poor results if the format string expands to a very long text and + * the bprint buffer is near the limit stated by the size_max option. + */ +void av_bprint_strftime(AVBPrint *buf, const char *fmt, const struct tm *tm); + +/** + * Allocate bytes in the buffer for external use. + * + * @param[in] buf buffer structure + * @param[in] size required size + * @param[out] mem pointer to the memory area + * @param[out] actual_size size of the memory area after allocation; + * can be larger or smaller than size + */ +void av_bprint_get_buffer(AVBPrint *buf, unsigned size, + unsigned char **mem, unsigned *actual_size); + +/** + * Reset the string to "" but keep internal allocated data. + */ +void av_bprint_clear(AVBPrint *buf); + +/** + * Test if the print buffer is complete (not truncated). + * + * It may have been truncated due to a memory allocation failure + * or the size_max limit (compare size and size_max if necessary). + */ +static inline int av_bprint_is_complete(const AVBPrint *buf) +{ + return buf->len < buf->size; +} + +/** + * Finalize a print buffer. + * + * The print buffer can no longer be used afterwards, + * but the len and size fields are still valid. 
+ * + * @arg[out] ret_str if not NULL, used to return a permanent copy of the + * buffer contents, or NULL if memory allocation fails; + * if NULL, the buffer is discarded and freed + * @return 0 for success or error code (probably AVERROR(ENOMEM)) + */ +int av_bprint_finalize(AVBPrint *buf, char **ret_str); + +/** + * Escape the content in src and append it to dstbuf. + * + * @param dstbuf already inited destination bprint buffer + * @param src string containing the text to escape + * @param special_chars string containing the special characters which + * need to be escaped, can be NULL + * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros. + * Any unknown value for mode will be considered equivalent to + * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without + * notice. + * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_* macros + */ +void av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_chars, + enum AVEscapeMode mode, int flags); + +#endif /* AVUTIL_BPRINT_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/bswap.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/bswap.h new file mode 100644 index 0000000..91cb795 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/bswap.h @@ -0,0 +1,109 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * byte swapping routines + */ + +#ifndef AVUTIL_BSWAP_H +#define AVUTIL_BSWAP_H + +#include +#include "libavutil/avconfig.h" +#include "attributes.h" + +#ifdef HAVE_AV_CONFIG_H + +#include "config.h" + +#if ARCH_AARCH64 +# include "aarch64/bswap.h" +#elif ARCH_ARM +# include "arm/bswap.h" +#elif ARCH_AVR32 +# include "avr32/bswap.h" +#elif ARCH_SH4 +# include "sh4/bswap.h" +#elif ARCH_X86 +# include "x86/bswap.h" +#endif + +#endif /* HAVE_AV_CONFIG_H */ + +#define AV_BSWAP16C(x) (((x) << 8 & 0xff00) | ((x) >> 8 & 0x00ff)) +#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16)) +#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32)) + +#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x) + +#ifndef av_bswap16 +static av_always_inline av_const uint16_t av_bswap16(uint16_t x) +{ + x= (x>>8) | (x<<8); + return x; +} +#endif + +#ifndef av_bswap32 +static av_always_inline av_const uint32_t av_bswap32(uint32_t x) +{ + return AV_BSWAP32C(x); +} +#endif + +#ifndef av_bswap64 +static inline uint64_t av_const av_bswap64(uint64_t x) +{ + return (uint64_t)av_bswap32(x) << 32 | av_bswap32(x >> 32); +} +#endif + +// be2ne ... big-endian to native-endian +// le2ne ... 
little-endian to native-endian + +#if AV_HAVE_BIGENDIAN +#define av_be2ne16(x) (x) +#define av_be2ne32(x) (x) +#define av_be2ne64(x) (x) +#define av_le2ne16(x) av_bswap16(x) +#define av_le2ne32(x) av_bswap32(x) +#define av_le2ne64(x) av_bswap64(x) +#define AV_BE2NEC(s, x) (x) +#define AV_LE2NEC(s, x) AV_BSWAPC(s, x) +#else +#define av_be2ne16(x) av_bswap16(x) +#define av_be2ne32(x) av_bswap32(x) +#define av_be2ne64(x) av_bswap64(x) +#define av_le2ne16(x) (x) +#define av_le2ne32(x) (x) +#define av_le2ne64(x) (x) +#define AV_BE2NEC(s, x) AV_BSWAPC(s, x) +#define AV_LE2NEC(s, x) (x) +#endif + +#define AV_BE2NE16C(x) AV_BE2NEC(16, x) +#define AV_BE2NE32C(x) AV_BE2NEC(32, x) +#define AV_BE2NE64C(x) AV_BE2NEC(64, x) +#define AV_LE2NE16C(x) AV_LE2NEC(16, x) +#define AV_LE2NE32C(x) AV_LE2NEC(32, x) +#define AV_LE2NE64C(x) AV_LE2NEC(64, x) + +#endif /* AVUTIL_BSWAP_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/buffer.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/buffer.h new file mode 100644 index 0000000..73b6bd0 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/buffer.h @@ -0,0 +1,291 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_buffer + * refcounted data buffer API + */ + +#ifndef AVUTIL_BUFFER_H +#define AVUTIL_BUFFER_H + +#include + +/** + * @defgroup lavu_buffer AVBuffer + * @ingroup lavu_data + * + * @{ + * AVBuffer is an API for reference-counted data buffers. + * + * There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer + * represents the data buffer itself; it is opaque and not meant to be accessed + * by the caller directly, but only through AVBufferRef. However, the caller may + * e.g. compare two AVBuffer pointers to check whether two different references + * are describing the same data buffer. AVBufferRef represents a single + * reference to an AVBuffer and it is the object that may be manipulated by the + * caller directly. + * + * There are two functions provided for creating a new AVBuffer with a single + * reference -- av_buffer_alloc() to just allocate a new buffer, and + * av_buffer_create() to wrap an existing array in an AVBuffer. From an existing + * reference, additional references may be created with av_buffer_ref(). + * Use av_buffer_unref() to free a reference (this will automatically free the + * data once all the references are freed). + * + * The convention throughout this API and the rest of FFmpeg is such that the + * buffer is considered writable if there exists only one reference to it (and + * it has not been marked as read-only). The av_buffer_is_writable() function is + * provided to check whether this is true and av_buffer_make_writable() will + * automatically create a new writable buffer when necessary. + * Of course nothing prevents the calling code from violating this convention, + * however that is safe only when all the existing references are under its + * control. 
+ * + * @note Referencing and unreferencing the buffers is thread-safe and thus + * may be done from multiple threads simultaneously without any need for + * additional locking. + * + * @note Two different references to the same buffer can point to different + * parts of the buffer (i.e. their AVBufferRef.data will not be equal). + */ + +/** + * A reference counted buffer type. It is opaque and is meant to be used through + * references (AVBufferRef). + */ +typedef struct AVBuffer AVBuffer; + +/** + * A reference to a data buffer. + * + * The size of this struct is not a part of the public ABI and it is not meant + * to be allocated directly. + */ +typedef struct AVBufferRef { + AVBuffer *buffer; + + /** + * The data buffer. It is considered writable if and only if + * this is the only reference to the buffer, in which case + * av_buffer_is_writable() returns 1. + */ + uint8_t *data; + /** + * Size of data in bytes. + */ + int size; +} AVBufferRef; + +/** + * Allocate an AVBuffer of the given size using av_malloc(). + * + * @return an AVBufferRef of given size or NULL when out of memory + */ +AVBufferRef *av_buffer_alloc(int size); + +/** + * Same as av_buffer_alloc(), except the returned buffer will be initialized + * to zero. + */ +AVBufferRef *av_buffer_allocz(int size); + +/** + * Always treat the buffer as read-only, even when it has only one + * reference. + */ +#define AV_BUFFER_FLAG_READONLY (1 << 0) + +/** + * Create an AVBuffer from an existing array. + * + * If this function is successful, data is owned by the AVBuffer. The caller may + * only access data through the returned AVBufferRef and references derived from + * it. + * If this function fails, data is left untouched. 
+ * @param data data array + * @param size size of data in bytes + * @param free a callback for freeing this buffer's data + * @param opaque parameter to be got for processing or passed to free + * @param flags a combination of AV_BUFFER_FLAG_* + * + * @return an AVBufferRef referring to data on success, NULL on failure. + */ +AVBufferRef *av_buffer_create(uint8_t *data, int size, + void (*free)(void *opaque, uint8_t *data), + void *opaque, int flags); + +/** + * Default free callback, which calls av_free() on the buffer data. + * This function is meant to be passed to av_buffer_create(), not called + * directly. + */ +void av_buffer_default_free(void *opaque, uint8_t *data); + +/** + * Create a new reference to an AVBuffer. + * + * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on + * failure. + */ +AVBufferRef *av_buffer_ref(AVBufferRef *buf); + +/** + * Free a given reference and automatically free the buffer if there are no more + * references to it. + * + * @param buf the reference to be freed. The pointer is set to NULL on return. + */ +void av_buffer_unref(AVBufferRef **buf); + +/** + * @return 1 if the caller may write to the data referred to by buf (which is + * true if and only if buf is the only reference to the underlying AVBuffer). + * Return 0 otherwise. + * A positive answer is valid until av_buffer_ref() is called on buf. + */ +int av_buffer_is_writable(const AVBufferRef *buf); + +/** + * @return the opaque parameter set by av_buffer_create. + */ +void *av_buffer_get_opaque(const AVBufferRef *buf); + +int av_buffer_get_ref_count(const AVBufferRef *buf); + +/** + * Create a writable reference from a given buffer reference, avoiding data copy + * if possible. + * + * @param buf buffer reference to make writable. On success, buf is either left + * untouched, or it is unreferenced and a new writable AVBufferRef is + * written in its place. On failure, buf is left untouched. 
+ * @return 0 on success, a negative AVERROR on failure. + */ +int av_buffer_make_writable(AVBufferRef **buf); + +/** + * Reallocate a given buffer. + * + * @param buf a buffer reference to reallocate. On success, buf will be + * unreferenced and a new reference with the required size will be + * written in its place. On failure buf will be left untouched. *buf + * may be NULL, then a new buffer is allocated. + * @param size required new buffer size. + * @return 0 on success, a negative AVERROR on failure. + * + * @note the buffer is actually reallocated with av_realloc() only if it was + * initially allocated through av_buffer_realloc(NULL) and there is only one + * reference to it (i.e. the one passed to this function). In all other cases + * a new buffer is allocated and the data is copied. + */ +int av_buffer_realloc(AVBufferRef **buf, int size); + +/** + * @} + */ + +/** + * @defgroup lavu_bufferpool AVBufferPool + * @ingroup lavu_data + * + * @{ + * AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers. + * + * Frequently allocating and freeing large buffers may be slow. AVBufferPool is + * meant to solve this in cases when the caller needs a set of buffers of the + * same size (the most obvious use case being buffers for raw video or audio + * frames). + * + * At the beginning, the user must call av_buffer_pool_init() to create the + * buffer pool. Then whenever a buffer is needed, call av_buffer_pool_get() to + * get a reference to a new buffer, similar to av_buffer_alloc(). This new + * reference works in all aspects the same way as the one created by + * av_buffer_alloc(). However, when the last reference to this buffer is + * unreferenced, it is returned to the pool instead of being freed and will be + * reused for subsequent av_buffer_pool_get() calls. + * + * When the caller is done with the pool and no longer needs to allocate any new + * buffers, av_buffer_pool_uninit() must be called to mark the pool as freeable. 
+ * Once all the buffers are released, it will automatically be freed. + * + * Allocating and releasing buffers with this API is thread-safe as long as + * either the default alloc callback is used, or the user-supplied one is + * thread-safe. + */ + +/** + * The buffer pool. This structure is opaque and not meant to be accessed + * directly. It is allocated with av_buffer_pool_init() and freed with + * av_buffer_pool_uninit(). + */ +typedef struct AVBufferPool AVBufferPool; + +/** + * Allocate and initialize a buffer pool. + * + * @param size size of each buffer in this pool + * @param alloc a function that will be used to allocate new buffers when the + * pool is empty. May be NULL, then the default allocator will be used + * (av_buffer_alloc()). + * @return newly created buffer pool on success, NULL on error. + */ +AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size)); + +/** + * Allocate and initialize a buffer pool with a more complex allocator. + * + * @param size size of each buffer in this pool + * @param opaque arbitrary user data used by the allocator + * @param alloc a function that will be used to allocate new buffers when the + * pool is empty. + * @param pool_free a function that will be called immediately before the pool + * is freed. I.e. after av_buffer_pool_uninit() is called + * by the caller and all the frames are returned to the pool + * and freed. It is intended to uninitialize the user opaque + * data. + * @return newly created buffer pool on success, NULL on error. + */ +AVBufferPool *av_buffer_pool_init2(int size, void *opaque, + AVBufferRef* (*alloc)(void *opaque, int size), + void (*pool_free)(void *opaque)); + +/** + * Mark the pool as being available for freeing. It will actually be freed only + * once all the allocated buffers associated with the pool are released. Thus it + * is safe to call this function while some of the allocated buffers are still + * in use. 
+ * + * @param pool pointer to the pool to be freed. It will be set to NULL. + */ +void av_buffer_pool_uninit(AVBufferPool **pool); + +/** + * Allocate a new AVBuffer, reusing an old buffer from the pool when available. + * This function may be called simultaneously from multiple threads. + * + * @return a reference to the new buffer on success, NULL on error. + */ +AVBufferRef *av_buffer_pool_get(AVBufferPool *pool); + +/** + * @} + */ + +#endif /* AVUTIL_BUFFER_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/camellia.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/camellia.h new file mode 100644 index 0000000..e674c9b --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/camellia.h @@ -0,0 +1,70 @@ +/* + * An implementation of the CAMELLIA algorithm as mentioned in RFC3713 + * Copyright (c) 2014 Supraja Meedinti + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CAMELLIA_H +#define AVUTIL_CAMELLIA_H + +#include + + +/** + * @file + * @brief Public header for libavutil CAMELLIA algorithm + * @defgroup lavu_camellia CAMELLIA + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_camellia_size; + +struct AVCAMELLIA; + +/** + * Allocate an AVCAMELLIA context + * To free the struct: av_free(ptr) + */ +struct AVCAMELLIA *av_camellia_alloc(void); + +/** + * Initialize an AVCAMELLIA context. + * + * @param ctx an AVCAMELLIA context + * @param key a key of 16, 24, 32 bytes used for encryption/decryption + * @param key_bits number of keybits: possible are 128, 192, 256 + */ +int av_camellia_init(struct AVCAMELLIA *ctx, const uint8_t *key, int key_bits); + +/** + * Encrypt or decrypt a buffer using a previously initialized context + * + * @param ctx an AVCAMELLIA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 16 byte blocks + * @paran iv initialization vector for CBC mode, NULL for ECB mode + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_camellia_crypt(struct AVCAMELLIA *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t* iv, int decrypt); + +/** + * @} + */ +#endif /* AVUTIL_CAMELLIA_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/cast5.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/cast5.h new file mode 100644 index 0000000..ad5b347 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/cast5.h @@ -0,0 +1,80 @@ +/* + * An implementation of the CAST128 algorithm as mentioned in RFC2144 + * Copyright (c) 2014 Supraja Meedinti + * + * This file is part 
of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CAST5_H +#define AVUTIL_CAST5_H + +#include + + +/** + * @file + * @brief Public header for libavutil CAST5 algorithm + * @defgroup lavu_cast5 CAST5 + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_cast5_size; + +struct AVCAST5; + +/** + * Allocate an AVCAST5 context + * To free the struct: av_free(ptr) + */ +struct AVCAST5 *av_cast5_alloc(void); +/** + * Initialize an AVCAST5 context. 
+ * + * @param ctx an AVCAST5 context + * @param key a key of 5,6,...16 bytes used for encryption/decryption + * @param key_bits number of keybits: possible are 40,48,...,128 + * @return 0 on success, less than 0 on failure + */ +int av_cast5_init(struct AVCAST5 *ctx, const uint8_t *key, int key_bits); + +/** + * Encrypt or decrypt a buffer using a previously initialized context, ECB mode only + * + * @param ctx an AVCAST5 context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_cast5_crypt(struct AVCAST5 *ctx, uint8_t *dst, const uint8_t *src, int count, int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context + * + * @param ctx an AVCAST5 context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, NULL for ECB mode + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_cast5_crypt2(struct AVCAST5 *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); +/** + * @} + */ +#endif /* AVUTIL_CAST5_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/channel_layout.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/channel_layout.h new file mode 100644 index 0000000..50bb8f0 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/channel_layout.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2006 Michael Niedermayer + * Copyright (c) 2008 Peter Ross + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CHANNEL_LAYOUT_H +#define AVUTIL_CHANNEL_LAYOUT_H + +#include + +/** + * @file + * audio channel layout utility functions + */ + +/** + * @addtogroup lavu_audio + * @{ + */ + +/** + * @defgroup channel_masks Audio channel masks + * + * A channel layout is a 64-bits integer with a bit set for every channel. + * The number of bits set must be equal to the number of channels. + * The value 0 means that the channel layout is not known. + * @note this data structure is not powerful enough to handle channels + * combinations that have the same channel multiple times, such as + * dual-mono. 
+ * + * @{ + */ +#define AV_CH_FRONT_LEFT 0x00000001 +#define AV_CH_FRONT_RIGHT 0x00000002 +#define AV_CH_FRONT_CENTER 0x00000004 +#define AV_CH_LOW_FREQUENCY 0x00000008 +#define AV_CH_BACK_LEFT 0x00000010 +#define AV_CH_BACK_RIGHT 0x00000020 +#define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040 +#define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080 +#define AV_CH_BACK_CENTER 0x00000100 +#define AV_CH_SIDE_LEFT 0x00000200 +#define AV_CH_SIDE_RIGHT 0x00000400 +#define AV_CH_TOP_CENTER 0x00000800 +#define AV_CH_TOP_FRONT_LEFT 0x00001000 +#define AV_CH_TOP_FRONT_CENTER 0x00002000 +#define AV_CH_TOP_FRONT_RIGHT 0x00004000 +#define AV_CH_TOP_BACK_LEFT 0x00008000 +#define AV_CH_TOP_BACK_CENTER 0x00010000 +#define AV_CH_TOP_BACK_RIGHT 0x00020000 +#define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix. +#define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT. +#define AV_CH_WIDE_LEFT 0x0000000080000000ULL +#define AV_CH_WIDE_RIGHT 0x0000000100000000ULL +#define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL +#define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL +#define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL + +/** Channel mask value used for AVCodecContext.request_channel_layout + to indicate that the user requests the channel order of the decoder output + to be the native codec channel order. 
*/ +#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL + +/** + * @} + * @defgroup channel_mask_c Audio channel layouts + * @{ + * */ +#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER) +#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT) +#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER) +#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_2_2 (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) +#define AV_CH_LAYOUT_QUAD (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_5POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) +#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_5POINT0_BACK (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_5POINT1_BACK (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT0_FRONT (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1_FRONT (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_7POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_7POINT0_FRONT (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_7POINT1_WIDE 
(AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_HEXADECAGONAL (AV_CH_LAYOUT_OCTAGONAL|AV_CH_WIDE_LEFT|AV_CH_WIDE_RIGHT|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT|AV_CH_TOP_BACK_CENTER|AV_CH_TOP_FRONT_CENTER|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT) +#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT) + +enum AVMatrixEncoding { + AV_MATRIX_ENCODING_NONE, + AV_MATRIX_ENCODING_DOLBY, + AV_MATRIX_ENCODING_DPLII, + AV_MATRIX_ENCODING_DPLIIX, + AV_MATRIX_ENCODING_DPLIIZ, + AV_MATRIX_ENCODING_DOLBYEX, + AV_MATRIX_ENCODING_DOLBYHEADPHONE, + AV_MATRIX_ENCODING_NB +}; + +/** + * Return a channel layout id that matches name, or 0 if no match is found. + * + * name can be one or several of the following notations, + * separated by '+' or '|': + * - the name of an usual channel layout (mono, stereo, 4.0, quad, 5.0, + * 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix); + * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC, + * SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR); + * - a number of channels, in decimal, followed by 'c', yielding + * the default channel layout for that number of channels (@see + * av_get_default_channel_layout); + * - a channel layout mask, in hexadecimal starting with "0x" (see the + * AV_CH_* macros). + * + * Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7" + */ +uint64_t av_get_channel_layout(const char *name); + +/** + * Return a channel layout and the number of channels based on the specified name. + * + * This function is similar to (@see av_get_channel_layout), but can also parse + * unknown channel layout specifications. 
+ * + * @param[in] name channel layout specification string + * @param[out] channel_layout parsed channel layout (0 if unknown) + * @param[out] nb_channels number of channels + * + * @return 0 on success, AVERROR(EINVAL) if the parsing fails. + */ +int av_get_extended_channel_layout(const char *name, uint64_t* channel_layout, int* nb_channels); + +/** + * Return a description of a channel layout. + * If nb_channels is <= 0, it is guessed from the channel_layout. + * + * @param buf put here the string containing the channel layout + * @param buf_size size in bytes of the buffer + */ +void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout); + +struct AVBPrint; +/** + * Append a description of a channel layout to a bprint buffer. + */ +void av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout); + +/** + * Return the number of channels in the channel layout. + */ +int av_get_channel_layout_nb_channels(uint64_t channel_layout); + +/** + * Return default channel layout for a given number of channels. + */ +int64_t av_get_default_channel_layout(int nb_channels); + +/** + * Get the index of a channel in channel_layout. + * + * @param channel a channel layout describing exactly one channel which must be + * present in channel_layout. + * + * @return index of channel in channel_layout on success, a negative AVERROR + * on error. + */ +int av_get_channel_layout_channel_index(uint64_t channel_layout, + uint64_t channel); + +/** + * Get the channel with the given index in channel_layout. + */ +uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index); + +/** + * Get the name of a given channel. + * + * @return channel name on success, NULL on error. + */ +const char *av_get_channel_name(uint64_t channel); + +/** + * Get the description of a given channel. 
+ * + * @param channel a channel layout with a single channel + * @return channel description on success, NULL on error + */ +const char *av_get_channel_description(uint64_t channel); + +/** + * Get the value and name of a standard channel layout. + * + * @param[in] index index in an internal list, starting at 0 + * @param[out] layout channel layout mask + * @param[out] name name of the layout + * @return 0 if the layout exists, + * <0 if index is beyond the limits + */ +int av_get_standard_channel_layout(unsigned index, uint64_t *layout, + const char **name); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_CHANNEL_LAYOUT_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/common.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/common.h new file mode 100644 index 0000000..8142b31 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/common.h @@ -0,0 +1,530 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * common internal and external API header + */ + +#ifndef AVUTIL_COMMON_H +#define AVUTIL_COMMON_H + +#if defined(__cplusplus) && !defined(__STDC_CONSTANT_MACROS) && !defined(UINT64_C) +#error missing -D__STDC_CONSTANT_MACROS / #define __STDC_CONSTANT_MACROS +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "attributes.h" +#include "macros.h" +#include "version.h" +#include "libavutil/avconfig.h" + +#if AV_HAVE_BIGENDIAN +# define AV_NE(be, le) (be) +#else +# define AV_NE(be, le) (le) +#endif + +//rounded division & shift +#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b)) +/* assume b>0 */ +#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b)) +/* Fast a/(1<=0 and b>=0 */ +#define AV_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \ + : ((a) + (1<<(b)) - 1) >> (b)) +/* Backwards compat. */ +#define FF_CEIL_RSHIFT AV_CEIL_RSHIFT + +#define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b)) +#define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b)) + +/** + * Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they + * are not representable as absolute values of their type. This is the same + * as with *abs() + * @see FFNABS() + */ +#define FFABS(a) ((a) >= 0 ? (a) : (-(a))) +#define FFSIGN(a) ((a) > 0 ? 1 : -1) + +/** + * Negative Absolute value. + * this works for all integers of all types. + * As with many macros, this evaluates its argument twice, it thus must not have + * a sideeffect, that is FFNABS(x++) has undefined behavior. + */ +#define FFNABS(a) ((a) <= 0 ? (a) : (-(a))) + +/** + * Comparator. + * For two numerical expressions x and y, gives 1 if x > y, -1 if x < y, and 0 + * if x == y. 
This is useful for instance in a qsort comparator callback. + * Furthermore, compilers are able to optimize this to branchless code, and + * there is no risk of overflow with signed types. + * As with many macros, this evaluates its argument multiple times, it thus + * must not have a side-effect. + */ +#define FFDIFFSIGN(x,y) (((x)>(y)) - ((x)<(y))) + +#define FFMAX(a,b) ((a) > (b) ? (a) : (b)) +#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c) +#define FFMIN(a,b) ((a) > (b) ? (b) : (a)) +#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c) + +#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0) +#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0])) + +/* misc math functions */ + +#ifdef HAVE_AV_CONFIG_H +# include "config.h" +# include "intmath.h" +#endif + +/* Pull in unguarded fallback defines at the end of this file. */ +#include "common.h" + +#ifndef av_log2 +av_const int av_log2(unsigned v); +#endif + +#ifndef av_log2_16bit +av_const int av_log2_16bit(unsigned v); +#endif + +/** + * Clip a signed integer value into the amin-amax range. + * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const int av_clip_c(int a, int amin, int amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a signed 64bit integer value into the amin-amax range. 
+ * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin, int64_t amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a signed integer value into the 0-255 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const uint8_t av_clip_uint8_c(int a) +{ + if (a&(~0xFF)) return (-a)>>31; + else return a; +} + +/** + * Clip a signed integer value into the -128,127 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int8_t av_clip_int8_c(int a) +{ + if ((a+0x80U) & ~0xFF) return (a>>31) ^ 0x7F; + else return a; +} + +/** + * Clip a signed integer value into the 0-65535 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const uint16_t av_clip_uint16_c(int a) +{ + if (a&(~0xFFFF)) return (-a)>>31; + else return a; +} + +/** + * Clip a signed integer value into the -32768,32767 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int16_t av_clip_int16_c(int a) +{ + if ((a+0x8000U) & ~0xFFFF) return (a>>31) ^ 0x7FFF; + else return a; +} + +/** + * Clip a signed 64-bit integer value into the -2147483648,2147483647 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a) +{ + if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (int32_t)((a>>63) ^ 0x7FFFFFFF); + else return (int32_t)a; +} + +/** + * Clip a signed integer into the -(2^p),(2^p-1) range. 
+ * @param a value to clip + * @param p bit position to clip at + * @return clipped value + */ +static av_always_inline av_const int av_clip_intp2_c(int a, int p) +{ + if (((unsigned)a + (1 << p)) & ~((2 << p) - 1)) + return (a >> 31) ^ ((1 << p) - 1); + else + return a; +} + +/** + * Clip a signed integer to an unsigned power of two range. + * @param a value to clip + * @param p bit position to clip at + * @return clipped value + */ +static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p) +{ + if (a & ~((1<> 31 & ((1<= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a double value into the amin-amax range. + * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const double av_clipd_c(double a, double amin, double amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** Compute ceil(log2(x)). 
+ * @param x value used to compute ceil(log2(x)) + * @return computed ceiling of log2(x) + */ +static av_always_inline av_const int av_ceil_log2_c(int x) +{ + return av_log2((x - 1) << 1); +} + +/** + * Count number of bits set to one in x + * @param x value to count bits of + * @return the number of bits set to one in x + */ +static av_always_inline av_const int av_popcount_c(uint32_t x) +{ + x -= (x >> 1) & 0x55555555; + x = (x & 0x33333333) + ((x >> 2) & 0x33333333); + x = (x + (x >> 4)) & 0x0F0F0F0F; + x += x >> 8; + return (x + (x >> 16)) & 0x3F; +} + +/** + * Count number of bits set to one in x + * @param x value to count bits of + * @return the number of bits set to one in x + */ +static av_always_inline av_const int av_popcount64_c(uint64_t x) +{ + return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32)); +} + +static av_always_inline av_const int av_parity_c(uint32_t v) +{ + return av_popcount(v) & 1; +} + +#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24)) +#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24)) + +/** + * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form. + * + * @param val Output value, must be an lvalue of type uint32_t. + * @param GET_BYTE Expression reading one byte from the input. + * Evaluated up to 7 times (4 for the currently + * assigned Unicode range). With a memory buffer + * input, this could be *ptr++. + * @param ERROR Expression to be evaluated on invalid input, + * typically a goto statement. + * + * @warning ERROR should not contain a loop control statement which + * could interact with the internal while loop, and should force an + * exit from the macro code (e.g. through a goto or a return) in order + * to prevent undefined results. 
+ */ +#define GET_UTF8(val, GET_BYTE, ERROR)\ + val= (GET_BYTE);\ + {\ + uint32_t top = (val & 128) >> 1;\ + if ((val & 0xc0) == 0x80 || val >= 0xFE)\ + ERROR\ + while (val & top) {\ + int tmp= (GET_BYTE) - 128;\ + if(tmp>>6)\ + ERROR\ + val= (val<<6) + tmp;\ + top <<= 5;\ + }\ + val &= (top << 1) - 1;\ + } + +/** + * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form. + * + * @param val Output value, must be an lvalue of type uint32_t. + * @param GET_16BIT Expression returning two bytes of UTF-16 data converted + * to native byte order. Evaluated one or two times. + * @param ERROR Expression to be evaluated on invalid input, + * typically a goto statement. + */ +#define GET_UTF16(val, GET_16BIT, ERROR)\ + val = GET_16BIT;\ + {\ + unsigned int hi = val - 0xD800;\ + if (hi < 0x800) {\ + val = GET_16BIT - 0xDC00;\ + if (val > 0x3FFU || hi > 0x3FFU)\ + ERROR\ + val += (hi<<10) + 0x10000;\ + }\ + }\ + +/** + * @def PUT_UTF8(val, tmp, PUT_BYTE) + * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long). + * @param val is an input-only argument and should be of type uint32_t. It holds + * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If + * val is given as a function it is executed only once. + * @param tmp is a temporary variable and should be of type uint8_t. It + * represents an intermediate value during conversion that is to be + * output by PUT_BYTE. + * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination. + * It could be a function or a statement, and uses tmp as the input byte. + * For example, PUT_BYTE could be "*output++ = tmp;" PUT_BYTE will be + * executed up to 4 times for values in the valid UTF-8 range and up to + * 7 times in the general case, depending on the length of the converted + * Unicode character. 
+ */ +#define PUT_UTF8(val, tmp, PUT_BYTE)\ + {\ + int bytes, shift;\ + uint32_t in = val;\ + if (in < 0x80) {\ + tmp = in;\ + PUT_BYTE\ + } else {\ + bytes = (av_log2(in) + 4) / 5;\ + shift = (bytes - 1) * 6;\ + tmp = (256 - (256 >> bytes)) | (in >> shift);\ + PUT_BYTE\ + while (shift >= 6) {\ + shift -= 6;\ + tmp = 0x80 | ((in >> shift) & 0x3f);\ + PUT_BYTE\ + }\ + }\ + } + +/** + * @def PUT_UTF16(val, tmp, PUT_16BIT) + * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes). + * @param val is an input-only argument and should be of type uint32_t. It holds + * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If + * val is given as a function it is executed only once. + * @param tmp is a temporary variable and should be of type uint16_t. It + * represents an intermediate value during conversion that is to be + * output by PUT_16BIT. + * @param PUT_16BIT writes the converted UTF-16 data to any proper destination + * in desired endianness. It could be a function or a statement, and uses tmp + * as the input byte. For example, PUT_BYTE could be "*output++ = tmp;" + * PUT_BYTE will be executed 1 or 2 times depending on input character. + */ +#define PUT_UTF16(val, tmp, PUT_16BIT)\ + {\ + uint32_t in = val;\ + if (in < 0x10000) {\ + tmp = in;\ + PUT_16BIT\ + } else {\ + tmp = 0xD800 | ((in - 0x10000) >> 10);\ + PUT_16BIT\ + tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\ + PUT_16BIT\ + }\ + }\ + + + +#include "mem.h" + +#ifdef HAVE_AV_CONFIG_H +# include "internal.h" +#endif /* HAVE_AV_CONFIG_H */ + +#endif /* AVUTIL_COMMON_H */ + +/* + * The following definitions are outside the multiple inclusion guard + * to ensure they are immediately available in intmath.h. 
+ */ + +#ifndef av_ceil_log2 +# define av_ceil_log2 av_ceil_log2_c +#endif +#ifndef av_clip +# define av_clip av_clip_c +#endif +#ifndef av_clip64 +# define av_clip64 av_clip64_c +#endif +#ifndef av_clip_uint8 +# define av_clip_uint8 av_clip_uint8_c +#endif +#ifndef av_clip_int8 +# define av_clip_int8 av_clip_int8_c +#endif +#ifndef av_clip_uint16 +# define av_clip_uint16 av_clip_uint16_c +#endif +#ifndef av_clip_int16 +# define av_clip_int16 av_clip_int16_c +#endif +#ifndef av_clipl_int32 +# define av_clipl_int32 av_clipl_int32_c +#endif +#ifndef av_clip_intp2 +# define av_clip_intp2 av_clip_intp2_c +#endif +#ifndef av_clip_uintp2 +# define av_clip_uintp2 av_clip_uintp2_c +#endif +#ifndef av_mod_uintp2 +# define av_mod_uintp2 av_mod_uintp2_c +#endif +#ifndef av_sat_add32 +# define av_sat_add32 av_sat_add32_c +#endif +#ifndef av_sat_dadd32 +# define av_sat_dadd32 av_sat_dadd32_c +#endif +#ifndef av_clipf +# define av_clipf av_clipf_c +#endif +#ifndef av_clipd +# define av_clipd av_clipd_c +#endif +#ifndef av_popcount +# define av_popcount av_popcount_c +#endif +#ifndef av_popcount64 +# define av_popcount64 av_popcount64_c +#endif +#ifndef av_parity +# define av_parity av_parity_c +#endif diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/cpu.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/cpu.h new file mode 100644 index 0000000..de05593 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/cpu.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2000, 2001, 2002 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CPU_H +#define AVUTIL_CPU_H + +#include "attributes.h" + +#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */ + + /* lower 16 bits - CPU features */ +#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX +#define AV_CPU_FLAG_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext +#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext +#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW +#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions +#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions +#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster + ///< than regular MMX/SSE (e.g. Core1) +#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt +#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions +#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster + ///< than regular MMX/SSE (e.g. 
Core1) +#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions +#define AV_CPU_FLAG_SSSE3SLOW 0x4000000 ///< SSSE3 supported, but usually not faster +#define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower +#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions +#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions +#define AV_CPU_FLAG_AESNI 0x80000 ///< Advanced Encryption Standard functions +#define AV_CPU_FLAG_AVX 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used +#define AV_CPU_FLAG_AVXSLOW 0x8000000 ///< AVX supported, but slow when using YMM registers (e.g. Bulldozer) +#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions +#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions +#define AV_CPU_FLAG_CMOV 0x1000 ///< supports cmov instruction +#define AV_CPU_FLAG_AVX2 0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used +#define AV_CPU_FLAG_FMA3 0x10000 ///< Haswell FMA3 functions +#define AV_CPU_FLAG_BMI1 0x20000 ///< Bit Manipulation Instruction Set 1 +#define AV_CPU_FLAG_BMI2 0x40000 ///< Bit Manipulation Instruction Set 2 + +#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard +#define AV_CPU_FLAG_VSX 0x0002 ///< ISA 2.06 +#define AV_CPU_FLAG_POWER8 0x0004 ///< ISA 2.07 + +#define AV_CPU_FLAG_ARMV5TE (1 << 0) +#define AV_CPU_FLAG_ARMV6 (1 << 1) +#define AV_CPU_FLAG_ARMV6T2 (1 << 2) +#define AV_CPU_FLAG_VFP (1 << 3) +#define AV_CPU_FLAG_VFPV3 (1 << 4) +#define AV_CPU_FLAG_NEON (1 << 5) +#define AV_CPU_FLAG_ARMV8 (1 << 6) +#define AV_CPU_FLAG_VFP_VM (1 << 7) ///< VFPv2 vector mode, deprecated in ARMv7-A and unavailable in various CPUs implementations +#define AV_CPU_FLAG_SETEND (1 <<16) + +/** + * Return the flags which specify extensions supported by the CPU. + * The returned value is affected by av_force_cpu_flags() if that was used + * before. 
So av_get_cpu_flags() can easily be used in an application to + * detect the enabled cpu flags. + */ +int av_get_cpu_flags(void); + +/** + * Disables cpu detection and forces the specified flags. + * -1 is a special case that disables forcing of specific flags. + */ +void av_force_cpu_flags(int flags); + +/** + * Set a mask on flags returned by av_get_cpu_flags(). + * This function is mainly useful for testing. + * Please use av_force_cpu_flags() and av_get_cpu_flags() instead which are more flexible + */ +attribute_deprecated void av_set_cpu_flags_mask(int mask); + +/** + * Parse CPU flags from a string. + * + * The returned flags contain the specified flags as well as related unspecified flags. + * + * This function exists only for compatibility with libav. + * Please use av_parse_cpu_caps() when possible. + * @return a combination of AV_CPU_* flags, negative on error. + */ +attribute_deprecated +int av_parse_cpu_flags(const char *s); + +/** + * Parse CPU caps from a string and update the given AV_CPU_* flags based on that. + * + * @return negative on error. + */ +int av_parse_cpu_caps(unsigned *flags, const char *s); + +/** + * @return the number of logical CPU cores present. + */ +int av_cpu_count(void); + +#endif /* AVUTIL_CPU_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/crc.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/crc.h new file mode 100644 index 0000000..2a1b0d7 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/crc.h @@ -0,0 +1,103 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_crc32 + * Public header for CRC hash function implementation. + */ + +#ifndef AVUTIL_CRC_H +#define AVUTIL_CRC_H + +#include +#include +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_crc32 CRC + * @ingroup lavu_hash + * CRC (Cyclic Redundancy Check) hash function implementation. + * + * This module supports numerous CRC polynomials, in addition to the most + * widely used CRC-32-IEEE. See @ref AVCRCId for a list of available + * polynomials. + * + * @{ + */ + +typedef uint32_t AVCRC; + +typedef enum { + AV_CRC_8_ATM, + AV_CRC_16_ANSI, + AV_CRC_16_CCITT, + AV_CRC_32_IEEE, + AV_CRC_32_IEEE_LE, /*< reversed bitorder version of AV_CRC_32_IEEE */ + AV_CRC_16_ANSI_LE, /*< reversed bitorder version of AV_CRC_16_ANSI */ +#if FF_API_CRC_BIG_TABLE + AV_CRC_24_IEEE = 12, +#else + AV_CRC_24_IEEE, +#endif /* FF_API_CRC_BIG_TABLE */ + AV_CRC_MAX, /*< Not part of public API! Do not use outside libavutil. */ +}AVCRCId; + +/** + * Initialize a CRC table. + * @param ctx must be an array of size sizeof(AVCRC)*257 or sizeof(AVCRC)*1024 + * @param le If 1, the lowest bit represents the coefficient for the highest + * exponent of the corresponding polynomial (both for poly and + * actual CRC). + * If 0, you must swap the CRC parameter and the result of av_crc + * if you need the standard representation (can be simplified in + * most cases to e.g. 
bswap16): + * av_bswap32(crc << (32-bits)) + * @param bits number of bits for the CRC + * @param poly generator polynomial without the x**bits coefficient, in the + * representation as specified by le + * @param ctx_size size of ctx in bytes + * @return <0 on failure + */ +int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size); + +/** + * Get an initialized standard CRC table. + * @param crc_id ID of a standard CRC + * @return a pointer to the CRC table or NULL on failure + */ +const AVCRC *av_crc_get_table(AVCRCId crc_id); + +/** + * Calculate the CRC of a block. + * @param crc CRC of previous blocks if any or initial value for CRC + * @return CRC updated with the data from the given block + * + * @see av_crc_init() "le" parameter + */ +uint32_t av_crc(const AVCRC *ctx, uint32_t crc, + const uint8_t *buffer, size_t length) av_pure; + +/** + * @} + */ + +#endif /* AVUTIL_CRC_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/des.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/des.h new file mode 100644 index 0000000..4cf11f5 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/des.h @@ -0,0 +1,77 @@ +/* + * DES encryption/decryption + * Copyright (c) 2007 Reimar Doeffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_DES_H +#define AVUTIL_DES_H + +#include + +/** + * @defgroup lavu_des DES + * @ingroup lavu_crypto + * @{ + */ + +typedef struct AVDES { + uint64_t round_keys[3][16]; + int triple_des; +} AVDES; + +/** + * Allocate an AVDES context. + */ +AVDES *av_des_alloc(void); + +/** + * @brief Initializes an AVDES context. + * + * @param key_bits must be 64 or 192 + * @param decrypt 0 for encryption/CBC-MAC, 1 for decryption + * @return zero on success, negative value otherwise + */ +int av_des_init(struct AVDES *d, const uint8_t *key, int key_bits, int decrypt); + +/** + * @brief Encrypts / decrypts using the DES algorithm. + * + * @param count number of 8 byte blocks + * @param dst destination array, can be equal to src, must be 8-byte aligned + * @param src source array, can be equal to dst, must be 8-byte aligned, may be NULL + * @param iv initialization vector for CBC mode, if NULL then ECB will be used, + * must be 8-byte aligned + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_des_crypt(struct AVDES *d, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); + +/** + * @brief Calculates CBC-MAC using the DES algorithm. 
+ * + * @param count number of 8 byte blocks + * @param dst destination array, can be equal to src, must be 8-byte aligned + * @param src source array, can be equal to dst, must be 8-byte aligned, may be NULL + */ +void av_des_mac(struct AVDES *d, uint8_t *dst, const uint8_t *src, int count); + +/** + * @} + */ + +#endif /* AVUTIL_DES_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/dict.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/dict.h new file mode 100644 index 0000000..9d6d8df --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/dict.h @@ -0,0 +1,210 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Public dictionary API. + * @deprecated + * AVDictionary is provided for compatibility with libav. It is both in + * implementation as well as API inefficient. It does not scale and is + * extremely slow with large dictionaries. + * It is recommended that new code uses our tree container from tree.c/h + * where applicable, which uses AVL trees to achieve O(log n) performance. 
+ */ + +#ifndef AVUTIL_DICT_H +#define AVUTIL_DICT_H + +#include + +#include "version.h" + +/** + * @addtogroup lavu_dict AVDictionary + * @ingroup lavu_data + * + * @brief Simple key:value store + * + * @{ + * Dictionaries are used for storing key:value pairs. To create + * an AVDictionary, simply pass an address of a NULL pointer to + * av_dict_set(). NULL can be used as an empty dictionary wherever + * a pointer to an AVDictionary is required. + * Use av_dict_get() to retrieve an entry or iterate over all + * entries and finally av_dict_free() to free the dictionary + * and all its contents. + * + @code + AVDictionary *d = NULL; // "create" an empty dictionary + AVDictionaryEntry *t = NULL; + + av_dict_set(&d, "foo", "bar", 0); // add an entry + + char *k = av_strdup("key"); // if your strings are already allocated, + char *v = av_strdup("value"); // you can avoid copying them like this + av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL); + + while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) { + <....> // iterate over all entries in d + } + av_dict_free(&d); + @endcode + */ + +#define AV_DICT_MATCH_CASE 1 /**< Only get an entry with exact-case key match. Only relevant in av_dict_get(). */ +#define AV_DICT_IGNORE_SUFFIX 2 /**< Return first entry in a dictionary whose first part corresponds to the search key, + ignoring the suffix of the found key string. Only relevant in av_dict_get(). */ +#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been + allocated with av_malloc() or another memory allocation function. */ +#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been + allocated with av_malloc() or another memory allocation function. */ +#define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries. +#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no + delimiter is added, the strings are simply concatenated. 
*/ +#define AV_DICT_MULTIKEY 64 /**< Allow to store several equal keys in the dictionary */ + +typedef struct AVDictionaryEntry { + char *key; + char *value; +} AVDictionaryEntry; + +typedef struct AVDictionary AVDictionary; + +/** + * Get a dictionary entry with matching key. + * + * The returned entry key or value must not be changed, or it will + * cause undefined behavior. + * + * To iterate through all the dictionary entries, you can set the matching key + * to the null string "" and set the AV_DICT_IGNORE_SUFFIX flag. + * + * @param prev Set to the previous matching element to find the next. + * If set to NULL the first matching element is returned. + * @param key matching key + * @param flags a collection of AV_DICT_* flags controlling how the entry is retrieved + * @return found entry or NULL in case no matching entry was found in the dictionary + */ +AVDictionaryEntry *av_dict_get(const AVDictionary *m, const char *key, + const AVDictionaryEntry *prev, int flags); + +/** + * Get number of entries in dictionary. + * + * @param m dictionary + * @return number of entries in dictionary + */ +int av_dict_count(const AVDictionary *m); + +/** + * Set the given entry in *pm, overwriting an existing entry. + * + * Note: If AV_DICT_DONT_STRDUP_KEY or AV_DICT_DONT_STRDUP_VAL is set, + * these arguments will be freed on error. + * + * Warning: Adding a new entry to a dictionary invalidates all existing entries + * previously returned with av_dict_get. + * + * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL + * a dictionary struct is allocated and put in *pm. + * @param key entry key to add to *pm (will either be av_strduped or added as a new key depending on flags) + * @param value entry value to add to *pm (will be av_strduped or added as a new key depending on flags). + * Passing a NULL value will cause an existing entry to be deleted. 
+ * @return >= 0 on success otherwise an error code <0 + */ +int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags); + +/** + * Convenience wrapper for av_dict_set that converts the value to a string + * and stores it. + * + * Note: If AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error. + */ +int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags); + +/** + * Convenience wrapper for av_dict_get that converts the value to a pointer + * and stores it. + * + */ +int av_dict_set_intptr(AVDictionary **pm, const char *key, uintptr_t value, int flags); +uintptr_t av_dict_get_intptr(const AVDictionary *m, const char* key); +uintptr_t av_dict_strtoptr(char * value); +char * av_dict_ptrtostr(uintptr_t value); + +/** + * Parse the key/value pairs list and add the parsed entries to a dictionary. + * + * In case of failure, all the successfully set entries are stored in + * *pm. You may need to manually free the created dictionary. + * + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other + * @param flags flags to use when adding to dictionary. + * AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL + * are ignored since the key/value tokens will always + * be duplicated. + * @return 0 on success, negative AVERROR code on failure + */ +int av_dict_parse_string(AVDictionary **pm, const char *str, + const char *key_val_sep, const char *pairs_sep, + int flags); + +/** + * Copy entries from one AVDictionary struct into another. + * @param dst pointer to a pointer to a AVDictionary struct. 
If *dst is NULL, + * this function will allocate a struct for you and put it in *dst + * @param src pointer to source AVDictionary struct + * @param flags flags to use when setting entries in *dst + * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag + * @return 0 on success, negative AVERROR code on failure. If dst was allocated + * by this function, callers should free the associated memory. + */ +int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags); + +/** + * Free all the memory allocated for an AVDictionary struct + * and all keys and values. + */ +void av_dict_free(AVDictionary **m); + +/** + * Get dictionary entries as a string. + * + * Create a string containing dictionary's entries. + * Such string may be passed back to av_dict_parse_string(). + * @note String is escaped with backslashes ('\'). + * + * @param[in] m dictionary + * @param[out] buffer Pointer to buffer that will be allocated with string containg entries. + * Buffer must be freed by the caller when is no longer needed. + * @param[in] key_val_sep character used to separate key from value + * @param[in] pairs_sep character used to separate two pairs from each other + * @return >= 0 on success, negative on error + * @warning Separators cannot be neither '\\' nor '\0'. They also cannot be the same. + */ +int av_dict_get_string(const AVDictionary *m, char **buffer, + const char key_val_sep, const char pairs_sep); + +/** + * @} + */ + +#endif /* AVUTIL_DICT_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/display.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/display.h new file mode 100644 index 0000000..39c15ee --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/display.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2014 Vittorio Giovara + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_DISPLAY_H +#define AVUTIL_DISPLAY_H + +#include +#include "common.h" + +/** + * The display transformation matrix specifies an affine transformation that + * should be applied to video frames for correct presentation. It is compatible + * with the matrices stored in the ISO/IEC 14496-12 container format. + * + * The data is a 3x3 matrix represented as a 9-element array: + * + * | a b u | + * (a, b, u, c, d, v, x, y, w) -> | c d v | + * | x y w | + * + * All numbers are stored in native endianness, as 16.16 fixed-point values, + * except for u, v and w, which are stored as 2.30 fixed-point values. + * + * The transformation maps a point (p, q) in the source (pre-transformation) + * frame to the point (p', q') in the destination (post-transformation) frame as + * follows: + * | a b u | + * (p, q, 1) . | c d v | = z * (p', q', 1) + * | x y w | + * + * The transformation can also be more explicitly written in components as + * follows: + * p' = (a * p + c * q + x) / z; + * q' = (b * p + d * q + y) / z; + * z = u * p + v * q + w + */ + +/** + * Extract the rotation component of the transformation matrix. 
+ * + * @param matrix the transformation matrix + * @return the angle (in degrees) by which the transformation rotates the frame + * counterclockwise. The angle will be in range [-180.0, 180.0], + * or NaN if the matrix is singular. + * + * @note floating point numbers are inherently inexact, so callers are + * recommended to round the return value to nearest integer before use. + */ +double av_display_rotation_get(const int32_t matrix[9]); + +/** + * Initialize a transformation matrix describing a pure counterclockwise + * rotation by the specified angle (in degrees). + * + * @param matrix an allocated transformation matrix (will be fully overwritten + * by this function) + * @param angle rotation angle in degrees. + */ +void av_display_rotation_set(int32_t matrix[9], double angle); + +/** + * Flip the input matrix horizontally and/or vertically. + * + * @param matrix an allocated transformation matrix + * @param hflip whether the matrix should be flipped horizontally + * @param vflip whether the matrix should be flipped vertically + */ +void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip); + +#endif /* AVUTIL_DISPLAY_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/dns_cache.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/dns_cache.h new file mode 100644 index 0000000..df99c2b --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/dns_cache.h @@ -0,0 +1,41 @@ +/* + * copyright (c) 2017 Raymond Zheng + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_DNS_CACHE_H +#define AVUTIL_DNS_CACHE_H + +#include "libavutil/log.h" + +typedef struct DnsCacheEntry { + volatile int ref_count; + volatile int delete_flag; + int64_t expired_time; + struct addrinfo *res; // construct by private function, not support ai_next and ai_canonname, can only be released using free_private_addrinfo +} DnsCacheEntry; + +DnsCacheEntry *get_dns_cache_reference(char *hostname); +int release_dns_cache_reference(char *hostname, DnsCacheEntry **p_entry); +int remove_dns_cache_entry(char *hostname); +int add_dns_cache_entry(char *hostname, struct addrinfo *cur_ai, int64_t timeout); +DnsCacheEntry *get_dns_cache_reference_no_remove(char *hostname, int *expired); +int update_dns_cache_nonblock(const char *hostname, const char *service, + const struct addrinfo *hints, int dns_cache_timeout); + +#endif /* AVUTIL_DNS_CACHE_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/downmix_info.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/downmix_info.h new file mode 100644 index 0000000..221cf5b --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/downmix_info.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2014 Tim Walker + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_DOWNMIX_INFO_H +#define AVUTIL_DOWNMIX_INFO_H + +#include "frame.h" + +/** + * @file + * audio downmix medatata + */ + +/** + * @addtogroup lavu_audio + * @{ + */ + +/** + * @defgroup downmix_info Audio downmix metadata + * @{ + */ + +/** + * Possible downmix types. + */ +enum AVDownmixType { + AV_DOWNMIX_TYPE_UNKNOWN, /**< Not indicated. */ + AV_DOWNMIX_TYPE_LORO, /**< Lo/Ro 2-channel downmix (Stereo). */ + AV_DOWNMIX_TYPE_LTRT, /**< Lt/Rt 2-channel downmix, Dolby Surround compatible. */ + AV_DOWNMIX_TYPE_DPLII, /**< Lt/Rt 2-channel downmix, Dolby Pro Logic II compatible. */ + AV_DOWNMIX_TYPE_NB /**< Number of downmix types. Not part of ABI. */ +}; + +/** + * This structure describes optional metadata relevant to a downmix procedure. + * + * All fields are set by the decoder to the value indicated in the audio + * bitstream (if present), or to a "sane" default otherwise. + */ +typedef struct AVDownmixInfo { + /** + * Type of downmix preferred by the mastering engineer. + */ + enum AVDownmixType preferred_downmix_type; + + /** + * Absolute scale factor representing the nominal level of the center + * channel during a regular downmix. 
+ */ + double center_mix_level; + + /** + * Absolute scale factor representing the nominal level of the center + * channel during an Lt/Rt compatible downmix. + */ + double center_mix_level_ltrt; + + /** + * Absolute scale factor representing the nominal level of the surround + * channels during a regular downmix. + */ + double surround_mix_level; + + /** + * Absolute scale factor representing the nominal level of the surround + * channels during an Lt/Rt compatible downmix. + */ + double surround_mix_level_ltrt; + + /** + * Absolute scale factor representing the level at which the LFE data is + * mixed into L/R channels during downmixing. + */ + double lfe_mix_level; +} AVDownmixInfo; + +/** + * Get a frame's AV_FRAME_DATA_DOWNMIX_INFO side data for editing. + * + * If the side data is absent, it is created and added to the frame. + * + * @param frame the frame for which the side data is to be obtained or created + * + * @return the AVDownmixInfo structure to be edited by the caller, or NULL if + * the structure cannot be allocated. + */ +AVDownmixInfo *av_downmix_info_update_side_data(AVFrame *frame); + +/** + * @} + */ + +/** + * @} + */ + +#endif /* AVUTIL_DOWNMIX_INFO_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/error.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/error.h new file mode 100644 index 0000000..71df4da --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/error.h @@ -0,0 +1,126 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * error code definitions + */ + +#ifndef AVUTIL_ERROR_H +#define AVUTIL_ERROR_H + +#include +#include + +/** + * @addtogroup lavu_error + * + * @{ + */ + + +/* error handling */ +#if EDOM > 0 +#define AVERROR(e) (-(e)) ///< Returns a negative error code from a POSIX error code, to return from library functions. +#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value. +#else +/* Some platforms have E* and errno already negated. */ +#define AVERROR(e) (e) +#define AVUNERROR(e) (e) +#endif + +#define FFERRTAG(a, b, c, d) (-(int)MKTAG(a, b, c, d)) + +#define AVERROR_BSF_NOT_FOUND FFERRTAG(0xF8,'B','S','F') ///< Bitstream filter not found +#define AVERROR_BUG FFERRTAG( 'B','U','G','!') ///< Internal bug, also see AVERROR_BUG2 +#define AVERROR_BUFFER_TOO_SMALL FFERRTAG( 'B','U','F','S') ///< Buffer too small +#define AVERROR_DECODER_NOT_FOUND FFERRTAG(0xF8,'D','E','C') ///< Decoder not found +#define AVERROR_DEMUXER_NOT_FOUND FFERRTAG(0xF8,'D','E','M') ///< Demuxer not found +#define AVERROR_ENCODER_NOT_FOUND FFERRTAG(0xF8,'E','N','C') ///< Encoder not found +#define AVERROR_EOF FFERRTAG( 'E','O','F',' ') ///< End of file +#define AVERROR_EXIT FFERRTAG( 'E','X','I','T') ///< Immediate exit was requested; the called function should not be restarted +#define AVERROR_EXTERNAL FFERRTAG( 'E','X','T',' ') ///< Generic error in an external library +#define AVERROR_FILTER_NOT_FOUND FFERRTAG(0xF8,'F','I','L') ///< Filter not found +#define 
AVERROR_INVALIDDATA FFERRTAG( 'I','N','D','A') ///< Invalid data found when processing input +#define AVERROR_MUXER_NOT_FOUND FFERRTAG(0xF8,'M','U','X') ///< Muxer not found +#define AVERROR_OPTION_NOT_FOUND FFERRTAG(0xF8,'O','P','T') ///< Option not found +#define AVERROR_PATCHWELCOME FFERRTAG( 'P','A','W','E') ///< Not yet implemented in FFmpeg, patches welcome +#define AVERROR_PROTOCOL_NOT_FOUND FFERRTAG(0xF8,'P','R','O') ///< Protocol not found + +#define AVERROR_STREAM_NOT_FOUND FFERRTAG(0xF8,'S','T','R') ///< Stream not found +/** + * This is semantically identical to AVERROR_BUG + * it has been introduced in Libav after our AVERROR_BUG and with a modified value. + */ +#define AVERROR_BUG2 FFERRTAG( 'B','U','G',' ') +#define AVERROR_UNKNOWN FFERRTAG( 'U','N','K','N') ///< Unknown error, typically from an external library +#define AVERROR_EXPERIMENTAL (-0x2bb2afa8) ///< Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it. +#define AVERROR_INPUT_CHANGED (-0x636e6701) ///< Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED) +#define AVERROR_OUTPUT_CHANGED (-0x636e6702) ///< Output changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_INPUT_CHANGED) +/* HTTP & RTSP errors */ +#define AVERROR_HTTP_BAD_REQUEST FFERRTAG(0xF8,'4','0','0') +#define AVERROR_HTTP_UNAUTHORIZED FFERRTAG(0xF8,'4','0','1') +#define AVERROR_HTTP_FORBIDDEN FFERRTAG(0xF8,'4','0','3') +#define AVERROR_HTTP_NOT_FOUND FFERRTAG(0xF8,'4','0','4') +#define AVERROR_HTTP_OTHER_4XX FFERRTAG(0xF8,'4','X','X') +#define AVERROR_HTTP_SERVER_ERROR FFERRTAG(0xF8,'5','X','X') + +#define AV_ERROR_MAX_STRING_SIZE 64 + +/** + * Put a description of the AVERROR code errnum in errbuf. + * In case of failure the global variable errno is set to indicate the + * error. Even in case of failure av_strerror() will print a generic + * error message indicating the errnum provided to errbuf. 
+ * + * @param errnum error code to describe + * @param errbuf buffer to which description is written + * @param errbuf_size the size in bytes of errbuf + * @return 0 on success, a negative value if a description for errnum + * cannot be found + */ +int av_strerror(int errnum, char *errbuf, size_t errbuf_size); + +/** + * Fill the provided buffer with a string containing an error string + * corresponding to the AVERROR code errnum. + * + * @param errbuf a buffer + * @param errbuf_size size in bytes of errbuf + * @param errnum error code to describe + * @return the buffer in input, filled with the error description + * @see av_strerror() + */ +static inline char *av_make_error_string(char *errbuf, size_t errbuf_size, int errnum) +{ + av_strerror(errnum, errbuf, errbuf_size); + return errbuf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. + */ +#define av_err2str(errnum) \ + av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum) + +/** + * @} + */ + +#endif /* AVUTIL_ERROR_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/eval.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/eval.h new file mode 100644 index 0000000..dacd22b --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/eval.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2002 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simple arithmetic expression evaluator + */ + +#ifndef AVUTIL_EVAL_H +#define AVUTIL_EVAL_H + +#include "avutil.h" + +typedef struct AVExpr AVExpr; + +/** + * Parse and evaluate an expression. + * Note, this is significantly slower than av_expr_eval(). + * + * @param res a pointer to a double where is put the result value of + * the expression, or NAN in case of error + * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)" + * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0} + * @param const_values a zero terminated array of values for the identifiers from const_names + * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers + * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument + * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers + * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments + * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 + * @param log_ctx parent logging context + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int av_expr_parse_and_eval(double *res, const char *s, + const char * const *const_names, const double *const_values, + const char * const *func1_names, double (* const *funcs1)(void *, double), + const char * const *func2_names, double (* const *funcs2)(void *, double, double), + void *opaque, int log_offset, void *log_ctx); + +/** + * Parse an expression. 
+ * + * @param expr a pointer where is put an AVExpr containing the parsed + * value in case of successful parsing, or NULL otherwise. + * The pointed to AVExpr must be freed with av_expr_free() by the user + * when it is not needed anymore. + * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)" + * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0} + * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers + * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument + * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers + * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments + * @param log_ctx parent logging context + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int av_expr_parse(AVExpr **expr, const char *s, + const char * const *const_names, + const char * const *func1_names, double (* const *funcs1)(void *, double), + const char * const *func2_names, double (* const *funcs2)(void *, double, double), + int log_offset, void *log_ctx); + +/** + * Evaluate a previously parsed expression. + * + * @param const_values a zero terminated array of values for the identifiers from av_expr_parse() const_names + * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 + * @return the value of the expression + */ +double av_expr_eval(AVExpr *e, const double *const_values, void *opaque); + +/** + * Free a parsed expression previously created with av_expr_parse(). + */ +void av_expr_free(AVExpr *e); + +/** + * Parse the string in numstr and return its value as a double. 
If + * the string is empty, contains only whitespaces, or does not contain + * an initial substring that has the expected syntax for a + * floating-point number, no conversion is performed. In this case, + * returns a value of zero and the value returned in tail is the value + * of numstr. + * + * @param numstr a string representing a number, may contain one of + * the International System number postfixes, for example 'K', 'M', + * 'G'. If 'i' is appended after the postfix, powers of 2 are used + * instead of powers of 10. The 'B' postfix multiplies the value by + * 8, and can be appended after another postfix or used alone. This + * allows using for example 'KB', 'MiB', 'G' and 'B' as postfix. + * @param tail if non-NULL puts here the pointer to the char next + * after the last parsed character + */ +double av_strtod(const char *numstr, char **tail); + +#endif /* AVUTIL_EVAL_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/ffversion.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/ffversion.h new file mode 100644 index 0000000..01b6f7b --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/ffversion.h @@ -0,0 +1,42 @@ +/* + * ffversion.h + * + * Copyright (c) 2013 Bilibili + * Copyright (c) 2013 Zhang Rui + * + * This file is part of ijkPlayer. + * + * ijkPlayer is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * ijkPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with ijkPlayer; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#if defined(__aarch64__) +# include "arm64/ffversion.h" +#elif defined(__x86_64__) +# include "x86_64/ffversion.h" +#elif defined(__arm__) + +# if defined(__ARM_ARCH_7S__) +# include "armv7s/ffversion.h" +# elif defined(__ARM_ARCH_7__) +# include "armv7/ffversion.h" +# else +# error Unsupport ARM architecture +# endif + +#elif defined(__i386__) +# include "i386/ffversion.h" +#else +# error Unsupport architecture +#endif diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/fifo.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/fifo.h new file mode 100644 index 0000000..dc7bc6f --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/fifo.h @@ -0,0 +1,179 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * a very simple circular buffer FIFO implementation + */ + +#ifndef AVUTIL_FIFO_H +#define AVUTIL_FIFO_H + +#include +#include "avutil.h" +#include "attributes.h" + +typedef struct AVFifoBuffer { + uint8_t *buffer; + uint8_t *rptr, *wptr, *end; + uint32_t rndx, wndx; +} AVFifoBuffer; + +/** + * Initialize an AVFifoBuffer. + * @param size of FIFO + * @return AVFifoBuffer or NULL in case of memory allocation failure + */ +AVFifoBuffer *av_fifo_alloc(unsigned int size); + +/** + * Initialize an AVFifoBuffer. + * @param nmemb number of elements + * @param size size of the single element + * @return AVFifoBuffer or NULL in case of memory allocation failure + */ +AVFifoBuffer *av_fifo_alloc_array(size_t nmemb, size_t size); + +/** + * Free an AVFifoBuffer. + * @param f AVFifoBuffer to free + */ +void av_fifo_free(AVFifoBuffer *f); + +/** + * Free an AVFifoBuffer and reset pointer to NULL. + * @param f AVFifoBuffer to free + */ +void av_fifo_freep(AVFifoBuffer **f); + +/** + * Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied. + * @param f AVFifoBuffer to reset + */ +void av_fifo_reset(AVFifoBuffer *f); + +/** + * Return the amount of data in bytes in the AVFifoBuffer, that is the + * amount of data you can read from it. + * @param f AVFifoBuffer to read from + * @return size + */ +int av_fifo_size(const AVFifoBuffer *f); + +/** + * Return the amount of space in bytes in the AVFifoBuffer, that is the + * amount of data you can write into it. + * @param f AVFifoBuffer to write into + * @return size + */ +int av_fifo_space(const AVFifoBuffer *f); + +/** + * Feed data at specific position from an AVFifoBuffer to a user-supplied callback. 
+ * Similar as av_fifo_gereric_read but without discarding data. + * @param f AVFifoBuffer to read from + * @param offset offset from current read position + * @param buf_size number of bytes to read + * @param func generic read function + * @param dest data destination + */ +int av_fifo_generic_peek_at(AVFifoBuffer *f, void *dest, int offset, int buf_size, void (*func)(void*, void*, int)); + +/** + * Feed data from an AVFifoBuffer to a user-supplied callback. + * Similar as av_fifo_gereric_read but without discarding data. + * @param f AVFifoBuffer to read from + * @param buf_size number of bytes to read + * @param func generic read function + * @param dest data destination + */ +int av_fifo_generic_peek(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int)); + +/** + * Feed data from an AVFifoBuffer to a user-supplied callback. + * @param f AVFifoBuffer to read from + * @param buf_size number of bytes to read + * @param func generic read function + * @param dest data destination + */ +int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int)); + +/** + * Feed data from a user-supplied callback to an AVFifoBuffer. + * @param f AVFifoBuffer to write to + * @param src data source; non-const since it may be used as a + * modifiable context by the function defined in func + * @param size number of bytes to write + * @param func generic write function; the first parameter is src, + * the second is dest_buf, the third is dest_buf_size. + * func must return the number of bytes written to dest_buf, or <= 0 to + * indicate no more data available to write. + * If func is NULL, src is interpreted as a simple byte array for source data. + * @return the number of bytes written to the FIFO + */ +int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int)); + +/** + * Resize an AVFifoBuffer. + * In case of reallocation failure, the old FIFO is kept unchanged. 
+ * + * @param f AVFifoBuffer to resize + * @param size new AVFifoBuffer size in bytes + * @return <0 for failure, >=0 otherwise + */ +int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size); + +/** + * Enlarge an AVFifoBuffer. + * In case of reallocation failure, the old FIFO is kept unchanged. + * The new fifo size may be larger than the requested size. + * + * @param f AVFifoBuffer to resize + * @param additional_space the amount of space in bytes to allocate in addition to av_fifo_size() + * @return <0 for failure, >=0 otherwise + */ +int av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space); + +/** + * Read and discard the specified amount of data from an AVFifoBuffer. + * @param f AVFifoBuffer to read from + * @param size amount of data to read in bytes + */ +void av_fifo_drain(AVFifoBuffer *f, int size); + +/** + * Return a pointer to the data stored in a FIFO buffer at a certain offset. + * The FIFO buffer is not modified. + * + * @param f AVFifoBuffer to peek at, f must be non-NULL + * @param offs an offset in bytes, its absolute value must be less + * than the used buffer size or the returned pointer will + * point outside to the buffer data. + * The used buffer size can be checked with av_fifo_size(). + */ +static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs) +{ + uint8_t *ptr = f->rptr + offs; + if (ptr >= f->end) + ptr = f->buffer + (ptr - f->end); + else if (ptr < f->buffer) + ptr = f->end - (f->buffer - ptr); + return ptr; +} + +#endif /* AVUTIL_FIFO_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/file.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/file.h new file mode 100644 index 0000000..8666c7b --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/file.h @@ -0,0 +1,69 @@ +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_FILE_H +#define AVUTIL_FILE_H + +#include + +#include "avutil.h" + +/** + * @file + * Misc file utilities. + */ + +/** + * Read the file with name filename, and put its content in a newly + * allocated buffer or map it with mmap() when available. + * In case of success set *bufptr to the read or mmapped buffer, and + * *size to the size in bytes of the buffer in *bufptr. + * The returned buffer must be released with av_file_unmap(). + * + * @param log_offset loglevel offset used for logging + * @param log_ctx context used for logging + * @return a non negative number in case of success, a negative value + * corresponding to an AVERROR error code in case of failure + */ +av_warn_unused_result +int av_file_map(const char *filename, uint8_t **bufptr, size_t *size, + int log_offset, void *log_ctx); + +/** + * Unmap or free the buffer bufptr created by av_file_map(). + * + * @param size size in bytes of bufptr, must be the same as returned + * by av_file_map() + */ +void av_file_unmap(uint8_t *bufptr, size_t size); + +/** + * Wrapper to work around the lack of mkstemp() on mingw. + * Also, tries to create file in /tmp first, if possible. + * *prefix can be a character constant; *filename will be allocated internally. 
+ * @return file descriptor of opened file (or negative value corresponding to an + * AVERROR code on error) + * and opened file name in **filename. + * @note On very old libcs it is necessary to set a secure umask before + * calling this, av_tempfile() can't call umask itself as it is used in + * libraries and could interfere with the calling application. + * @deprecated as fd numbers cannot be passed saftely between libs on some platforms + */ +int av_tempfile(const char *prefix, char **filename, int log_offset, void *log_ctx); + +#endif /* AVUTIL_FILE_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/frame.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/frame.h new file mode 100644 index 0000000..7cb78a1 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/frame.h @@ -0,0 +1,746 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_frame + * reference-counted frame API + */ + +#ifndef AVUTIL_FRAME_H +#define AVUTIL_FRAME_H + +#include + +#include "avutil.h" +#include "buffer.h" +#include "dict.h" +#include "rational.h" +#include "samplefmt.h" +#include "pixfmt.h" +#include "version.h" + + +/** + * @defgroup lavu_frame AVFrame + * @ingroup lavu_data + * + * @{ + * AVFrame is an abstraction for reference-counted raw multimedia data. + */ + +enum AVFrameSideDataType { + /** + * The data is the AVPanScan struct defined in libavcodec. + */ + AV_FRAME_DATA_PANSCAN, + /** + * ATSC A53 Part 4 Closed Captions. + * A53 CC bitstream is stored as uint8_t in AVFrameSideData.data. + * The number of bytes of CC data is AVFrameSideData.size. + */ + AV_FRAME_DATA_A53_CC, + /** + * Stereoscopic 3d metadata. + * The data is the AVStereo3D struct defined in libavutil/stereo3d.h. + */ + AV_FRAME_DATA_STEREO3D, + /** + * The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h. + */ + AV_FRAME_DATA_MATRIXENCODING, + /** + * Metadata relevant to a downmix procedure. + * The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h. + */ + AV_FRAME_DATA_DOWNMIX_INFO, + /** + * ReplayGain information in the form of the AVReplayGain struct. + */ + AV_FRAME_DATA_REPLAYGAIN, + /** + * This side data contains a 3x3 transformation matrix describing an affine + * transformation that needs to be applied to the frame for correct + * presentation. + * + * See libavutil/display.h for a detailed description of the data. + */ + AV_FRAME_DATA_DISPLAYMATRIX, + /** + * Active Format Description data consisting of a single byte as specified + * in ETSI TS 101 154 using AVActiveFormatDescription enum. 
+ */ + AV_FRAME_DATA_AFD, + /** + * Motion vectors exported by some codecs (on demand through the export_mvs + * flag set in the libavcodec AVCodecContext flags2 option). + * The data is the AVMotionVector struct defined in + * libavutil/motion_vector.h. + */ + AV_FRAME_DATA_MOTION_VECTORS, + /** + * Recommmends skipping the specified number of samples. This is exported + * only if the "skip_manual" AVOption is set in libavcodec. + * This has the same format as AV_PKT_DATA_SKIP_SAMPLES. + * @code + * u32le number of samples to skip from start of this packet + * u32le number of samples to skip from end of this packet + * u8 reason for start skip + * u8 reason for end skip (0=padding silence, 1=convergence) + * @endcode + */ + AV_FRAME_DATA_SKIP_SAMPLES, + /** + * This side data must be associated with an audio frame and corresponds to + * enum AVAudioServiceType defined in avcodec.h. + */ + AV_FRAME_DATA_AUDIO_SERVICE_TYPE, + /** + * Mastering display metadata associated with a video frame. The payload is + * an AVMasteringDisplayMetadata type and contains information about the + * mastering display color volume. + */ + AV_FRAME_DATA_MASTERING_DISPLAY_METADATA, + /** + * The GOP timecode in 25 bit timecode format. Data format is 64-bit integer. + * This is set on the first frame of a GOP that has a temporal reference of 0. + */ + AV_FRAME_DATA_GOP_TIMECODE, + + /** + * The data represents the AVSphericalMapping structure defined in + * libavutil/spherical.h. + */ + AV_FRAME_DATA_SPHERICAL, +}; + +enum AVActiveFormatDescription { + AV_AFD_SAME = 8, + AV_AFD_4_3 = 9, + AV_AFD_16_9 = 10, + AV_AFD_14_9 = 11, + AV_AFD_4_3_SP_14_9 = 13, + AV_AFD_16_9_SP_14_9 = 14, + AV_AFD_SP_4_3 = 15, +}; + + +/** + * Structure to hold side data for an AVFrame. + * + * sizeof(AVFrameSideData) is not a part of the public ABI, so new fields may be added + * to the end with a minor bump. 
+ */ +typedef struct AVFrameSideData { + enum AVFrameSideDataType type; + uint8_t *data; + int size; + AVDictionary *metadata; + AVBufferRef *buf; +} AVFrameSideData; + +/** + * This structure describes decoded (raw) audio or video data. + * + * AVFrame must be allocated using av_frame_alloc(). Note that this only + * allocates the AVFrame itself, the buffers for the data must be managed + * through other means (see below). + * AVFrame must be freed with av_frame_free(). + * + * AVFrame is typically allocated once and then reused multiple times to hold + * different data (e.g. a single AVFrame to hold frames received from a + * decoder). In such a case, av_frame_unref() will free any references held by + * the frame and reset it to its original clean state before it + * is reused again. + * + * The data described by an AVFrame is usually reference counted through the + * AVBuffer API. The underlying buffer references are stored in AVFrame.buf / + * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at + * least one reference is set, i.e. if AVFrame.buf[0] != NULL. In such a case, + * every single data plane must be contained in one of the buffers in + * AVFrame.buf or AVFrame.extended_buf. + * There may be a single buffer for all the data, or one separate buffer for + * each plane, or anything in between. + * + * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added + * to the end with a minor bump. + * + * Fields can be accessed through AVOptions, the name string used, matches the + * C structure field name for fields accessible through AVOptions. The AVClass + * for AVFrame can be obtained from avcodec_get_frame_class() + */ +typedef struct AVFrame { +#define AV_NUM_DATA_POINTERS 8 + /** + * pointer to the picture/channel planes. + * This might be different from the first allocated byte + * + * Some decoders access areas outside 0,0 - width,height, please + * see avcodec_align_dimensions2(). 
Some filters and swscale can read + * up to 16 bytes beyond the planes, if these filters are to be used, + * then 16 extra bytes must be allocated. + * + * NOTE: Except for hwaccel formats, pointers not needed by the format + * MUST be set to NULL. + */ + uint8_t *data[AV_NUM_DATA_POINTERS]; + + /** + * For video, size in bytes of each picture line. + * For audio, size in bytes of each plane. + * + * For audio, only linesize[0] may be set. For planar audio, each channel + * plane must be the same size. + * + * For video the linesizes should be multiples of the CPUs alignment + * preference, this is 16 or 32 for modern desktop CPUs. + * Some code requires such alignment other code can be slower without + * correct alignment, for yet other it makes no difference. + * + * @note The linesize may be larger than the size of usable data -- there + * may be extra padding present for performance reasons. + */ + int linesize[AV_NUM_DATA_POINTERS]; + + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. + * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. + * + * Note: Both data and extended_data should always be set in a valid frame, + * but for planar audio with more channels that can fit in data, + * extended_data must be used in order to access all channels. + */ + uint8_t **extended_data; + + /** + * width and height of the video frame + */ + int width, height; + + /** + * number of audio samples (per channel) described by this frame + */ + int nb_samples; + + /** + * format of the frame, -1 if unknown or unset + * Values correspond to enum AVPixelFormat for video frames, + * enum AVSampleFormat for audio) + */ + int format; + + /** + * 1 -> keyframe, 0-> not + */ + int key_frame; + + /** + * Picture type of the frame. 
+ */ + enum AVPictureType pict_type; + + /** + * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified. + */ + AVRational sample_aspect_ratio; + + /** + * Presentation timestamp in time_base units (time when frame should be shown to user). + */ + int64_t pts; + +#if FF_API_PKT_PTS + /** + * PTS copied from the AVPacket that was decoded to produce this frame. + * @deprecated use the pts field instead + */ + attribute_deprecated + int64_t pkt_pts; +#endif + + /** + * DTS copied from the AVPacket that triggered returning this frame. (if frame threading isn't used) + * This is also the Presentation time of this AVFrame calculated from + * only AVPacket.dts values without pts values. + */ + int64_t pkt_dts; + + /** + * picture number in bitstream order + */ + int coded_picture_number; + /** + * picture number in display order + */ + int display_picture_number; + + /** + * quality (between 1 (good) and FF_LAMBDA_MAX (bad)) + */ + int quality; + + /** + * for some private data of the user + */ + void *opaque; + +#if FF_API_ERROR_FRAME + /** + * @deprecated unused + */ + attribute_deprecated + uint64_t error[AV_NUM_DATA_POINTERS]; +#endif + + /** + * When decoding, this signals how much the picture must be delayed. + * extra_delay = repeat_pict / (2*fps) + */ + int repeat_pict; + + /** + * The content of the picture is interlaced. + */ + int interlaced_frame; + + /** + * If the content is interlaced, is top field displayed first. + */ + int top_field_first; + + /** + * Tell user application that palette has changed from previous frame. + */ + int palette_has_changed; + + /** + * reordered opaque 64 bits (generally an integer or a double precision float + * PTS but can be anything). 
+ * The user sets AVCodecContext.reordered_opaque to represent the input at + * that time, + * the decoder reorders values as needed and sets AVFrame.reordered_opaque + * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque + * @deprecated in favor of pkt_pts + */ + int64_t reordered_opaque; + + /** + * Sample rate of the audio data. + */ + int sample_rate; + + /** + * Channel layout of the audio data. + */ + uint64_t channel_layout; + + /** + * AVBuffer references backing the data for this frame. If all elements of + * this array are NULL, then this frame is not reference counted. This array + * must be filled contiguously -- if buf[i] is non-NULL then buf[j] must + * also be non-NULL for all j < i. + * + * There may be at most one AVBuffer per data plane, so for video this array + * always contains all the references. For planar audio with more than + * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in + * this array. Then the extra AVBufferRef pointers are stored in the + * extended_buf array. + */ + AVBufferRef *buf[AV_NUM_DATA_POINTERS]; + + /** + * For planar audio which requires more than AV_NUM_DATA_POINTERS + * AVBufferRef pointers, this array will hold all the references which + * cannot fit into AVFrame.buf. + * + * Note that this is different from AVFrame.extended_data, which always + * contains all the pointers. This array only contains the extra pointers, + * which cannot fit into AVFrame.buf. + * + * This array is always allocated using av_malloc() by whoever constructs + * the frame. It is freed in av_frame_unref(). + */ + AVBufferRef **extended_buf; + /** + * Number of elements in extended_buf. + */ + int nb_extended_buf; + + AVFrameSideData **side_data; + int nb_side_data; + +/** + * @defgroup lavu_frame_flags AV_FRAME_FLAGS + * @ingroup lavu_frame + * Flags describing additional frame properties. + * + * @{ + */ + +/** + * The frame data may be corrupted, e.g. due to decoding errors. 
+ */ +#define AV_FRAME_FLAG_CORRUPT (1 << 0) +/** + * A flag to mark the frames which need to be decoded, but shouldn't be output. + */ +#define AV_FRAME_FLAG_DISCARD (1 << 2) +/** + * @} + */ + + /** + * Frame flags, a combination of @ref lavu_frame_flags + */ + int flags; + + /** + * MPEG vs JPEG YUV range. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorRange color_range; + + enum AVColorPrimaries color_primaries; + + enum AVColorTransferCharacteristic color_trc; + + /** + * YUV colorspace type. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorSpace colorspace; + + enum AVChromaLocation chroma_location; + + /** + * frame timestamp estimated using various heuristics, in stream time base + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int64_t best_effort_timestamp; + + /** + * reordered pos from the last AVPacket that has been input into the decoder + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_pos; + + /** + * duration of the corresponding packet, expressed in + * AVStream->time_base units, 0 if unknown. + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_duration; + + /** + * metadata. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + AVDictionary *metadata; + + /** + * decode error flags of the frame, set to a combination of + * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there + * were errors during the decoding. + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int decode_error_flags; +#define FF_DECODE_ERROR_INVALID_BITSTREAM 1 +#define FF_DECODE_ERROR_MISSING_REFERENCE 2 + + /** + * number of audio channels, only used for audio. + * - encoding: unused + * - decoding: Read by user. + */ + int channels; + + /** + * size of the corresponding packet containing the compressed + * frame. + * It is set to a negative value if unknown. 
+ * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int pkt_size; + +#if FF_API_FRAME_QP + /** + * QP table + */ + attribute_deprecated + int8_t *qscale_table; + /** + * QP store stride + */ + attribute_deprecated + int qstride; + + attribute_deprecated + int qscale_type; + + AVBufferRef *qp_table_buf; +#endif + /** + * For hwaccel-format frames, this should be a reference to the + * AVHWFramesContext describing the frame. + */ + AVBufferRef *hw_frames_ctx; + + /** + * AVBufferRef for free use by the API user. FFmpeg will never check the + * contents of the buffer ref. FFmpeg calls av_buffer_unref() on it when + * the frame is unreferenced. av_frame_copy_props() calls create a new + * reference with av_buffer_ref() for the target frame's opaque_ref field. + * + * This is unrelated to the opaque field, although it serves a similar + * purpose. + */ + AVBufferRef *opaque_ref; +} AVFrame; + +/** + * Accessors for some AVFrame fields. These used to be provided for ABI + * compatibility, and do not need to be used anymore. 
+ */ +int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame); +void av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val); +int64_t av_frame_get_pkt_duration (const AVFrame *frame); +void av_frame_set_pkt_duration (AVFrame *frame, int64_t val); +int64_t av_frame_get_pkt_pos (const AVFrame *frame); +void av_frame_set_pkt_pos (AVFrame *frame, int64_t val); +int64_t av_frame_get_channel_layout (const AVFrame *frame); +void av_frame_set_channel_layout (AVFrame *frame, int64_t val); +int av_frame_get_channels (const AVFrame *frame); +void av_frame_set_channels (AVFrame *frame, int val); +int av_frame_get_sample_rate (const AVFrame *frame); +void av_frame_set_sample_rate (AVFrame *frame, int val); +AVDictionary *av_frame_get_metadata (const AVFrame *frame); +void av_frame_set_metadata (AVFrame *frame, AVDictionary *val); +int av_frame_get_decode_error_flags (const AVFrame *frame); +void av_frame_set_decode_error_flags (AVFrame *frame, int val); +int av_frame_get_pkt_size(const AVFrame *frame); +void av_frame_set_pkt_size(AVFrame *frame, int val); +AVDictionary **avpriv_frame_get_metadatap(AVFrame *frame); +#if FF_API_FRAME_QP +int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type); +int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type); +#endif +enum AVColorSpace av_frame_get_colorspace(const AVFrame *frame); +void av_frame_set_colorspace(AVFrame *frame, enum AVColorSpace val); +enum AVColorRange av_frame_get_color_range(const AVFrame *frame); +void av_frame_set_color_range(AVFrame *frame, enum AVColorRange val); + +/** + * Get the name of a colorspace. + * @return a static string identifying the colorspace; can be NULL. + */ +const char *av_get_colorspace_name(enum AVColorSpace val); + +/** + * Allocate an AVFrame and set its fields to default values. The resulting + * struct must be freed using av_frame_free(). + * + * @return An AVFrame filled with default values or NULL on failure. 
+ * + * @note this only allocates the AVFrame itself, not the data buffers. Those + * must be allocated through other means, e.g. with av_frame_get_buffer() or + * manually. + */ +AVFrame *av_frame_alloc(void); + +/** + * Free the frame and any dynamically allocated objects in it, + * e.g. extended_data. If the frame is reference counted, it will be + * unreferenced first. + * + * @param frame frame to be freed. The pointer will be set to NULL. + */ +void av_frame_free(AVFrame **frame); + +/** + * Set up a new reference to the data described by the source frame. + * + * Copy frame properties from src to dst and create a new reference for each + * AVBufferRef from src. + * + * If src is not reference counted, new buffers are allocated and the data is + * copied. + * + * @warning: dst MUST have been either unreferenced with av_frame_unref(dst), + * or newly allocated with av_frame_alloc() before calling this + * function, or undefined behavior will occur. + * + * @return 0 on success, a negative AVERROR on error + */ +int av_frame_ref(AVFrame *dst, const AVFrame *src); + +/** + * Create a new frame that references the same data as src. + * + * This is a shortcut for av_frame_alloc()+av_frame_ref(). + * + * @return newly created AVFrame on success, NULL on error. + */ +AVFrame *av_frame_clone(const AVFrame *src); + +/** + * Unreference all the buffers referenced by frame and reset the frame fields. + */ +void av_frame_unref(AVFrame *frame); + +/** + * Move everything contained in src to dst and reset src. + * + * @warning: dst is not unreferenced, but directly overwritten without reading + * or deallocating its contents. Call av_frame_unref(dst) manually + * before calling this function to ensure that no memory is leaked. + */ +void av_frame_move_ref(AVFrame *dst, AVFrame *src); + +/** + * Allocate new buffer(s) for audio or video data. 
+ * + * The following fields must be set on frame before calling this function: + * - format (pixel format for video, sample format for audio) + * - width and height for video + * - nb_samples and channel_layout for audio + * + * This function will fill AVFrame.data and AVFrame.buf arrays and, if + * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf. + * For planar formats, one buffer will be allocated for each plane. + * + * @warning: if frame already has been allocated, calling this function will + * leak memory. In addition, undefined behavior can occur in certain + * cases. + * + * @param frame frame in which to store the new buffers. + * @param align required buffer size alignment + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_frame_get_buffer(AVFrame *frame, int align); + +/** + * Check if the frame data is writable. + * + * @return A positive value if the frame data is writable (which is true if and + * only if each of the underlying buffers has only one reference, namely the one + * stored in this frame). Return 0 otherwise. + * + * If 1 is returned the answer is valid until av_buffer_ref() is called on any + * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly). + * + * @see av_frame_make_writable(), av_buffer_is_writable() + */ +int av_frame_is_writable(AVFrame *frame); + +/** + * Ensure that the frame data is writable, avoiding data copy if possible. + * + * Do nothing if the frame is writable, allocate new buffers and copy the data + * if it is not. + * + * @return 0 on success, a negative AVERROR on error. + * + * @see av_frame_is_writable(), av_buffer_is_writable(), + * av_buffer_make_writable() + */ +int av_frame_make_writable(AVFrame *frame); + +/** + * Copy the frame data from src to dst. + * + * This function does not allocate anything, dst must be already initialized and + * allocated with the same parameters as src. + * + * This function only copies the frame data (i.e. 
the contents of the data / + * extended data arrays), not any other properties. + * + * @return >= 0 on success, a negative AVERROR on error. + */ +int av_frame_copy(AVFrame *dst, const AVFrame *src); + +/** + * Copy only "metadata" fields from src to dst. + * + * Metadata for the purpose of this function are those fields that do not affect + * the data layout in the buffers. E.g. pts, sample rate (for audio) or sample + * aspect ratio (for video), but not width/height or channel layout. + * Side data is also copied. + */ +int av_frame_copy_props(AVFrame *dst, const AVFrame *src); + +/** + * Get the buffer reference a given data plane is stored in. + * + * @param plane index of the data plane of interest in frame->extended_data. + * + * @return the buffer reference that contains the plane or NULL if the input + * frame is not valid. + */ +AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane); + +/** + * Add a new side data to a frame. + * + * @param frame a frame to which the side data should be added + * @param type type of the added side data + * @param size size of the side data + * + * @return newly added side data on success, NULL on error + */ +AVFrameSideData *av_frame_new_side_data(AVFrame *frame, + enum AVFrameSideDataType type, + int size); + +/** + * @return a pointer to the side data of a given type on success, NULL if there + * is no side data with such type in this frame. + */ +AVFrameSideData *av_frame_get_side_data(const AVFrame *frame, + enum AVFrameSideDataType type); + +/** + * If side data of the supplied type exists in the frame, free it and remove it + * from the frame. 
+ */ +void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type); + +/** + * @return a string identifying the side data type + */ +const char *av_frame_side_data_name(enum AVFrameSideDataType type); + +/** + * @} + */ + +#endif /* AVUTIL_FRAME_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hash.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hash.h new file mode 100644 index 0000000..a20b893 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hash.h @@ -0,0 +1,263 @@ +/* + * Copyright (C) 2013 Reimar Döffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_hash_generic + * Generic hashing API + */ + +#ifndef AVUTIL_HASH_H +#define AVUTIL_HASH_H + +#include + +/** + * @defgroup lavu_hash Hash Functions + * @ingroup lavu_crypto + * Hash functions useful in multimedia. + * + * Hash functions are widely used in multimedia, from error checking and + * concealment to internal regression testing. libavutil has efficient + * implementations of a variety of hash functions that may be useful for + * FFmpeg and other multimedia applications. 
+ * + * @{ + * + * @defgroup lavu_hash_generic Generic Hashing API + * An abstraction layer for all hash functions supported by libavutil. + * + * If your application needs to support a wide range of different hash + * functions, then the Generic Hashing API is for you. It provides a generic, + * reusable API for @ref lavu_hash "all hash functions" implemented in libavutil. + * If you just need to use one particular hash function, use the @ref lavu_hash + * "individual hash" directly. + * + * @section Sample Code + * + * A basic template for using the Generic Hashing API follows: + * + * @code + * struct AVHashContext *ctx = NULL; + * const char *hash_name = NULL; + * uint8_t *output_buf = NULL; + * + * // Select from a string returned by av_hash_names() + * hash_name = ...; + * + * // Allocate a hash context + * ret = av_hash_alloc(&ctx, hash_name); + * if (ret < 0) + * return ret; + * + * // Initialize the hash context + * av_hash_init(ctx); + * + * // Update the hash context with data + * while (data_left) { + * av_hash_update(ctx, data, size); + * } + * + * // Now we have no more data, so it is time to finalize the hash and get the + * // output. But we need to first allocate an output buffer. Note that you can + * // use any memory allocation function, including malloc(), not just + * // av_malloc(). + * output_buf = av_malloc(av_hash_get_size(ctx)); + * if (!output_buf) + * return AVERROR(ENOMEM); + * + * // Finalize the hash context. + * // You can use any of the av_hash_final*() functions provided, for other + * // output formats. If you do so, be sure to adjust the memory allocation + * // above. See the function documentation below for the exact amount of extra + * // memory needed. + * av_hash_final(ctx, output_buffer); + * + * // Free the context + * av_hash_freep(&ctx); + * @endcode + * + * @section Hash Function-Specific Information + * If the CRC32 hash is selected, the #AV_CRC_32_IEEE polynomial will be + * used. 
+ * + * If the Murmur3 hash is selected, the default seed will be used. See @ref + * lavu_murmur3_seedinfo "Murmur3" for more information. + * + * @{ + */ + +/** + * @example ffhash.c + * This example is a simple command line application that takes one or more + * arguments. It demonstrates a typical use of the hashing API with allocation, + * initialization, updating, and finalizing. + */ + +struct AVHashContext; + +/** + * Allocate a hash context for the algorithm specified by name. + * + * @return >= 0 for success, a negative error code for failure + * + * @note The context is not initialized after a call to this function; you must + * call av_hash_init() to do so. + */ +int av_hash_alloc(struct AVHashContext **ctx, const char *name); + +/** + * Get the names of available hash algorithms. + * + * This function can be used to enumerate the algorithms. + * + * @param[in] i Index of the hash algorithm, starting from 0 + * @return Pointer to a static string or `NULL` if `i` is out of range + */ +const char *av_hash_names(int i); + +/** + * Get the name of the algorithm corresponding to the given hash context. + */ +const char *av_hash_get_name(const struct AVHashContext *ctx); + +/** + * Maximum value that av_hash_get_size() will currently return. + * + * You can use this if you absolutely want or need to use static allocation for + * the output buffer and are fine with not supporting hashes newly added to + * libavutil without recompilation. + * + * @warning + * Adding new hashes with larger sizes, and increasing the macro while doing + * so, will not be considered an ABI change. To prevent your code from + * overflowing a buffer, either dynamically allocate the output buffer with + * av_hash_get_size(), or limit your use of the Hashing API to hashes that are + * already in FFmpeg during the time of compilation. + */ +#define AV_HASH_MAX_SIZE 64 + +/** + * Get the size of the resulting hash value in bytes. 
+ * + * The maximum value this function will currently return is available as macro + * #AV_HASH_MAX_SIZE. + * + * @param[in] ctx Hash context + * @return Size of the hash value in bytes + */ +int av_hash_get_size(const struct AVHashContext *ctx); + +/** + * Initialize or reset a hash context. + * + * @param[in,out] ctx Hash context + */ +void av_hash_init(struct AVHashContext *ctx); + +/** + * Update a hash context with additional data. + * + * @param[in,out] ctx Hash context + * @param[in] src Data to be added to the hash context + * @param[in] len Size of the additional data + */ +void av_hash_update(struct AVHashContext *ctx, const uint8_t *src, int len); + +/** + * Finalize a hash context and compute the actual hash value. + * + * The minimum size of `dst` buffer is given by av_hash_get_size() or + * #AV_HASH_MAX_SIZE. The use of the latter macro is discouraged. + * + * It is not safe to update or finalize a hash context again, if it has already + * been finalized. + * + * @param[in,out] ctx Hash context + * @param[out] dst Where the final hash value will be stored + * + * @see av_hash_final_bin() provides an alternative API + */ +void av_hash_final(struct AVHashContext *ctx, uint8_t *dst); + +/** + * Finalize a hash context and store the actual hash value in a buffer. + * + * It is not safe to update or finalize a hash context again, if it has already + * been finalized. + * + * If `size` is smaller than the hash size (given by av_hash_get_size()), the + * hash is truncated; if size is larger, the buffer is padded with 0. + * + * @param[in,out] ctx Hash context + * @param[out] dst Where the final hash value will be stored + * @param[in] size Number of bytes to write to `dst` + */ +void av_hash_final_bin(struct AVHashContext *ctx, uint8_t *dst, int size); + +/** + * Finalize a hash context and store the hexadecimal representation of the + * actual hash value as a string. 
+ * + * It is not safe to update or finalize a hash context again, if it has already + * been finalized. + * + * The string is always 0-terminated. + * + * If `size` is smaller than `2 * hash_size + 1`, where `hash_size` is the + * value returned by av_hash_get_size(), the string will be truncated. + * + * @param[in,out] ctx Hash context + * @param[out] dst Where the string will be stored + * @param[in] size Maximum number of bytes to write to `dst` + */ +void av_hash_final_hex(struct AVHashContext *ctx, uint8_t *dst, int size); + +/** + * Finalize a hash context and store the Base64 representation of the + * actual hash value as a string. + * + * It is not safe to update or finalize a hash context again, if it has already + * been finalized. + * + * The string is always 0-terminated. + * + * If `size` is smaller than AV_BASE64_SIZE(hash_size), where `hash_size` is + * the value returned by av_hash_get_size(), the string will be truncated. + * + * @param[in,out] ctx Hash context + * @param[out] dst Where the final hash value will be stored + * @param[in] size Maximum number of bytes to write to `dst` + */ +void av_hash_final_b64(struct AVHashContext *ctx, uint8_t *dst, int size); + +/** + * Free hash context and set hash context pointer to `NULL`. + * + * @param[in,out] ctx Pointer to hash context + */ +void av_hash_freep(struct AVHashContext **ctx); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_HASH_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hmac.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hmac.h new file mode 100644 index 0000000..576a0a4 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hmac.h @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2012 Martin Storsjo + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HMAC_H +#define AVUTIL_HMAC_H + +#include + +#include "version.h" +/** + * @defgroup lavu_hmac HMAC + * @ingroup lavu_crypto + * @{ + */ + +enum AVHMACType { + AV_HMAC_MD5, + AV_HMAC_SHA1, + AV_HMAC_SHA224, + AV_HMAC_SHA256, + AV_HMAC_SHA384 = 12, + AV_HMAC_SHA512, +}; + +typedef struct AVHMAC AVHMAC; + +/** + * Allocate an AVHMAC context. + * @param type The hash function used for the HMAC. + */ +AVHMAC *av_hmac_alloc(enum AVHMACType type); + +/** + * Free an AVHMAC context. + * @param ctx The context to free, may be NULL + */ +void av_hmac_free(AVHMAC *ctx); + +/** + * Initialize an AVHMAC context with an authentication key. + * @param ctx The HMAC context + * @param key The authentication key + * @param keylen The length of the key, in bytes + */ +void av_hmac_init(AVHMAC *ctx, const uint8_t *key, unsigned int keylen); + +/** + * Hash data with the HMAC. + * @param ctx The HMAC context + * @param data The data to hash + * @param len The length of the data, in bytes + */ +void av_hmac_update(AVHMAC *ctx, const uint8_t *data, unsigned int len); + +/** + * Finish hashing and output the HMAC digest. 
+ * @param ctx The HMAC context + * @param out The output buffer to write the digest into + * @param outlen The length of the out buffer, in bytes + * @return The number of bytes written to out, or a negative error code. + */ +int av_hmac_final(AVHMAC *ctx, uint8_t *out, unsigned int outlen); + +/** + * Hash an array of data with a key. + * @param ctx The HMAC context + * @param data The data to hash + * @param len The length of the data, in bytes + * @param key The authentication key + * @param keylen The length of the key, in bytes + * @param out The output buffer to write the digest into + * @param outlen The length of the out buffer, in bytes + * @return The number of bytes written to out, or a negative error code. + */ +int av_hmac_calc(AVHMAC *ctx, const uint8_t *data, unsigned int len, + const uint8_t *key, unsigned int keylen, + uint8_t *out, unsigned int outlen); + +/** + * @} + */ + +#endif /* AVUTIL_HMAC_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext.h new file mode 100644 index 0000000..e35fb25 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext.h @@ -0,0 +1,523 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_H +#define AVUTIL_HWCONTEXT_H + +#include "buffer.h" +#include "frame.h" +#include "log.h" +#include "pixfmt.h" + +enum AVHWDeviceType { + AV_HWDEVICE_TYPE_VDPAU, + AV_HWDEVICE_TYPE_CUDA, + AV_HWDEVICE_TYPE_VAAPI, + AV_HWDEVICE_TYPE_DXVA2, + AV_HWDEVICE_TYPE_QSV, +}; + +typedef struct AVHWDeviceInternal AVHWDeviceInternal; + +/** + * This struct aggregates all the (hardware/vendor-specific) "high-level" state, + * i.e. state that is not tied to a concrete processing configuration. + * E.g., in an API that supports hardware-accelerated encoding and decoding, + * this struct will (if possible) wrap the state that is common to both encoding + * and decoding and from which specific instances of encoders or decoders can be + * derived. + * + * This struct is reference-counted with the AVBuffer mechanism. The + * av_hwdevice_ctx_alloc() constructor yields a reference, whose data field + * points to the actual AVHWDeviceContext. Further objects derived from + * AVHWDeviceContext (such as AVHWFramesContext, describing a frame pool with + * specific properties) will hold an internal reference to it. After all the + * references are released, the AVHWDeviceContext itself will be freed, + * optionally invoking a user-specified callback for uninitializing the hardware + * state. + */ +typedef struct AVHWDeviceContext { + /** + * A class for logging. Set by av_hwdevice_ctx_alloc(). + */ + const AVClass *av_class; + + /** + * Private data used internally by libavutil. Must not be accessed in any + * way by the caller. + */ + AVHWDeviceInternal *internal; + + /** + * This field identifies the underlying API used for hardware access. + * + * This field is set when this struct is allocated and never changed + * afterwards. 
+ */ + enum AVHWDeviceType type; + + /** + * The format-specific data, allocated and freed by libavutil along with + * this context. + * + * Should be cast by the user to the format-specific context defined in the + * corresponding header (hwcontext_*.h) and filled as described in the + * documentation before calling av_hwdevice_ctx_init(). + * + * After calling av_hwdevice_ctx_init() this struct should not be modified + * by the caller. + */ + void *hwctx; + + /** + * This field may be set by the caller before calling av_hwdevice_ctx_init(). + * + * If non-NULL, this callback will be called when the last reference to + * this context is unreferenced, immediately before it is freed. + * + * @note when other objects (e.g an AVHWFramesContext) are derived from this + * struct, this callback will be invoked after all such child objects + * are fully uninitialized and their respective destructors invoked. + */ + void (*free)(struct AVHWDeviceContext *ctx); + + /** + * Arbitrary user data, to be used e.g. by the free() callback. + */ + void *user_opaque; +} AVHWDeviceContext; + +typedef struct AVHWFramesInternal AVHWFramesInternal; + +/** + * This struct describes a set or pool of "hardware" frames (i.e. those with + * data not located in normal system memory). All the frames in the pool are + * assumed to be allocated in the same way and interchangeable. + * + * This struct is reference-counted with the AVBuffer mechanism and tied to a + * given AVHWDeviceContext instance. The av_hwframe_ctx_alloc() constructor + * yields a reference, whose data field points to the actual AVHWFramesContext + * struct. + */ +typedef struct AVHWFramesContext { + /** + * A class for logging. + */ + const AVClass *av_class; + + /** + * Private data used internally by libavutil. Must not be accessed in any + * way by the caller. + */ + AVHWFramesInternal *internal; + + /** + * A reference to the parent AVHWDeviceContext. 
This reference is owned and + * managed by the enclosing AVHWFramesContext, but the caller may derive + * additional references from it. + */ + AVBufferRef *device_ref; + + /** + * The parent AVHWDeviceContext. This is simply a pointer to + * device_ref->data provided for convenience. + * + * Set by libavutil in av_hwframe_ctx_init(). + */ + AVHWDeviceContext *device_ctx; + + /** + * The format-specific data, allocated and freed automatically along with + * this context. + * + * Should be cast by the user to the format-specific context defined in the + * corresponding header (hwframe_*.h) and filled as described in the + * documentation before calling av_hwframe_ctx_init(). + * + * After any frames using this context are created, the contents of this + * struct should not be modified by the caller. + */ + void *hwctx; + + /** + * This field may be set by the caller before calling av_hwframe_ctx_init(). + * + * If non-NULL, this callback will be called when the last reference to + * this context is unreferenced, immediately before it is freed. + */ + void (*free)(struct AVHWFramesContext *ctx); + + /** + * Arbitrary user data, to be used e.g. by the free() callback. + */ + void *user_opaque; + + /** + * A pool from which the frames are allocated by av_hwframe_get_buffer(). + * This field may be set by the caller before calling av_hwframe_ctx_init(). + * The buffers returned by calling av_buffer_pool_get() on this pool must + * have the properties described in the documentation in the corresponding hw + * type's header (hwcontext_*.h). The pool will be freed strictly before + * this struct's free() callback is invoked. + * + * This field may be NULL, then libavutil will attempt to allocate a pool + * internally. Note that certain device types enforce pools allocated at + * fixed size (frame count), which cannot be extended dynamically. In such a + * case, initial_pool_size must be set appropriately. 
+ */ + AVBufferPool *pool; + + /** + * Initial size of the frame pool. If a device type does not support + * dynamically resizing the pool, then this is also the maximum pool size. + * + * May be set by the caller before calling av_hwframe_ctx_init(). Must be + * set if pool is NULL and the device type does not support dynamic pools. + */ + int initial_pool_size; + + /** + * The pixel format identifying the underlying HW surface type. + * + * Must be a hwaccel format, i.e. the corresponding descriptor must have the + * AV_PIX_FMT_FLAG_HWACCEL flag set. + * + * Must be set by the user before calling av_hwframe_ctx_init(). + */ + enum AVPixelFormat format; + + /** + * The pixel format identifying the actual data layout of the hardware + * frames. + * + * Must be set by the caller before calling av_hwframe_ctx_init(). + * + * @note when the underlying API does not provide the exact data layout, but + * only the colorspace/bit depth, this field should be set to the fully + * planar version of that format (e.g. for 8-bit 420 YUV it should be + * AV_PIX_FMT_YUV420P, not AV_PIX_FMT_NV12 or anything else). + */ + enum AVPixelFormat sw_format; + + /** + * The allocated dimensions of the frames in this pool. + * + * Must be set by the user before calling av_hwframe_ctx_init(). + */ + int width, height; +} AVHWFramesContext; + +/** + * Allocate an AVHWDeviceContext for a given hardware type. + * + * @param type the type of the hardware device to allocate. + * @return a reference to the newly created AVHWDeviceContext on success or NULL + * on failure. + */ +AVBufferRef *av_hwdevice_ctx_alloc(enum AVHWDeviceType type); + +/** + * Finalize the device context before use. This function must be called after + * the context is filled with all the required information and before it is + * used in any way. 
+ * + * @param ref a reference to the AVHWDeviceContext + * @return 0 on success, a negative AVERROR code on failure + */ +int av_hwdevice_ctx_init(AVBufferRef *ref); + +/** + * Open a device of the specified type and create an AVHWDeviceContext for it. + * + * This is a convenience function intended to cover the simple cases. Callers + * who need to fine-tune device creation/management should open the device + * manually and then wrap it in an AVHWDeviceContext using + * av_hwdevice_ctx_alloc()/av_hwdevice_ctx_init(). + * + * The returned context is already initialized and ready for use, the caller + * should not call av_hwdevice_ctx_init() on it. The user_opaque/free fields of + * the created AVHWDeviceContext are set by this function and should not be + * touched by the caller. + * + * @param device_ctx On success, a reference to the newly-created device context + * will be written here. The reference is owned by the caller + * and must be released with av_buffer_unref() when no longer + * needed. On failure, NULL will be written to this pointer. + * @param type The type of the device to create. + * @param device A type-specific string identifying the device to open. + * @param opts A dictionary of additional (type-specific) options to use in + * opening the device. The dictionary remains owned by the caller. + * @param flags currently unused + * + * @return 0 on success, a negative AVERROR code on failure. + */ +int av_hwdevice_ctx_create(AVBufferRef **device_ctx, enum AVHWDeviceType type, + const char *device, AVDictionary *opts, int flags); + +/** + * Allocate an AVHWFramesContext tied to a given device context. + * + * @param device_ctx a reference to a AVHWDeviceContext. This function will make + * a new reference for internal use, the one passed to the + * function remains owned by the caller. + * @return a reference to the newly created AVHWFramesContext on success or NULL + * on failure. 
+ */ +AVBufferRef *av_hwframe_ctx_alloc(AVBufferRef *device_ctx); + +/** + * Finalize the context before use. This function must be called after the + * context is filled with all the required information and before it is attached + * to any frames. + * + * @param ref a reference to the AVHWFramesContext + * @return 0 on success, a negative AVERROR code on failure + */ +int av_hwframe_ctx_init(AVBufferRef *ref); + +/** + * Allocate a new frame attached to the given AVHWFramesContext. + * + * @param hwframe_ctx a reference to an AVHWFramesContext + * @param frame an empty (freshly allocated or unreffed) frame to be filled with + * newly allocated buffers. + * @param flags currently unused, should be set to zero + * @return 0 on success, a negative AVERROR code on failure + */ +int av_hwframe_get_buffer(AVBufferRef *hwframe_ctx, AVFrame *frame, int flags); + +/** + * Copy data to or from a hw surface. At least one of dst/src must have an + * AVHWFramesContext attached. + * + * If src has an AVHWFramesContext attached, then the format of dst (if set) + * must use one of the formats returned by av_hwframe_transfer_get_formats(src, + * AV_HWFRAME_TRANSFER_DIRECTION_FROM). + * If dst has an AVHWFramesContext attached, then the format of src must use one + * of the formats returned by av_hwframe_transfer_get_formats(dst, + * AV_HWFRAME_TRANSFER_DIRECTION_TO) + * + * dst may be "clean" (i.e. with data/buf pointers unset), in which case the + * data buffers will be allocated by this function using av_frame_get_buffer(). + * If dst->format is set, then this format will be used, otherwise (when + * dst->format is AV_PIX_FMT_NONE) the first acceptable format will be chosen. + * + * The two frames must have matching allocated dimensions (i.e. equal to + * AVHWFramesContext.width/height), since not all device types support + * transferring a sub-rectangle of the whole surface. The display dimensions + * (i.e. 
AVFrame.width/height) may be smaller than the allocated dimensions, but + * also have to be equal for both frames. When the display dimensions are + * smaller than the allocated dimensions, the content of the padding in the + * destination frame is unspecified. + * + * @param dst the destination frame. dst is not touched on failure. + * @param src the source frame. + * @param flags currently unused, should be set to zero + * @return 0 on success, a negative AVERROR error code on failure. + */ +int av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags); + +enum AVHWFrameTransferDirection { + /** + * Transfer the data from the queried hw frame. + */ + AV_HWFRAME_TRANSFER_DIRECTION_FROM, + + /** + * Transfer the data to the queried hw frame. + */ + AV_HWFRAME_TRANSFER_DIRECTION_TO, +}; + +/** + * Get a list of possible source or target formats usable in + * av_hwframe_transfer_data(). + * + * @param hwframe_ctx the frame context to obtain the information for + * @param dir the direction of the transfer + * @param formats the pointer to the output format list will be written here. + * The list is terminated with AV_PIX_FMT_NONE and must be freed + * by the caller when no longer needed using av_free(). + * If this function returns successfully, the format list will + * have at least one item (not counting the terminator). + * On failure, the contents of this pointer are unspecified. + * @param flags currently unused, should be set to zero + * @return 0 on success, a negative AVERROR code on failure. + */ +int av_hwframe_transfer_get_formats(AVBufferRef *hwframe_ctx, + enum AVHWFrameTransferDirection dir, + enum AVPixelFormat **formats, int flags); + + +/** + * This struct describes the constraints on hardware frames attached to + * a given device with a hardware-specific configuration. This is returned + * by av_hwdevice_get_hwframe_constraints() and must be freed by + * av_hwframe_constraints_free() after use. 
+ */ +typedef struct AVHWFramesConstraints { + /** + * A list of possible values for format in the hw_frames_ctx, + * terminated by AV_PIX_FMT_NONE. This member will always be filled. + */ + enum AVPixelFormat *valid_hw_formats; + + /** + * A list of possible values for sw_format in the hw_frames_ctx, + * terminated by AV_PIX_FMT_NONE. Can be NULL if this information is + * not known. + */ + enum AVPixelFormat *valid_sw_formats; + + /** + * The minimum size of frames in this hw_frames_ctx. + * (Zero if not known.) + */ + int min_width; + int min_height; + + /** + * The maximum size of frames in this hw_frames_ctx. + * (INT_MAX if not known / no limit.) + */ + int max_width; + int max_height; +} AVHWFramesConstraints; + +/** + * Allocate a HW-specific configuration structure for a given HW device. + * After use, the user must free all members as required by the specific + * hardware structure being used, then free the structure itself with + * av_free(). + * + * @param device_ctx a reference to the associated AVHWDeviceContext. + * @return The newly created HW-specific configuration structure on + * success or NULL on failure. + */ +void *av_hwdevice_hwconfig_alloc(AVBufferRef *device_ctx); + +/** + * Get the constraints on HW frames given a device and the HW-specific + * configuration to be used with that device. If no HW-specific + * configuration is provided, returns the maximum possible capabilities + * of the device. + * + * @param device_ctx a reference to the associated AVHWDeviceContext. + * @param hwconfig a filled HW-specific configuration structure, or NULL + * to return the maximum possible capabilities of the device. + * @return AVHWFramesConstraints structure describing the constraints + * on the device, or NULL if not available. + */ +AVHWFramesConstraints *av_hwdevice_get_hwframe_constraints(AVBufferRef *ref, + const void *hwconfig); + +/** + * Free an AVHWFrameConstraints structure. 
+ * + * @param constraints The (filled or unfilled) AVHWFrameConstraints structure. + */ +void av_hwframe_constraints_free(AVHWFramesConstraints **constraints); + + +/** + * Flags to apply to frame mappings. + */ +enum { + /** + * The mapping must be readable. + */ + AV_HWFRAME_MAP_READ = 1 << 0, + /** + * The mapping must be writeable. + */ + AV_HWFRAME_MAP_WRITE = 1 << 1, + /** + * The mapped frame will be overwritten completely in subsequent + * operations, so the current frame data need not be loaded. Any values + * which are not overwritten are unspecified. + */ + AV_HWFRAME_MAP_OVERWRITE = 1 << 2, + /** + * The mapping must be direct. That is, there must not be any copying in + * the map or unmap steps. Note that performance of direct mappings may + * be much lower than normal memory. + */ + AV_HWFRAME_MAP_DIRECT = 1 << 3, +}; + +/** + * Map a hardware frame. + * + * This has a number of different possible effects, depending on the format + * and origin of the src and dst frames. On input, src should be a usable + * frame with valid buffers and dst should be blank (typically as just created + * by av_frame_alloc()). src should have an associated hwframe context, and + * dst may optionally have a format and associated hwframe context. + * + * If src was created by mapping a frame from the hwframe context of dst, + * then this function undoes the mapping - dst is replaced by a reference to + * the frame that src was originally mapped from. + * + * If both src and dst have an associated hwframe context, then this function + * attempts to map the src frame from its hardware context to that of dst and + * then fill dst with appropriate data to be usable there. This will only be + * possible if the hwframe contexts and associated devices are compatible - + * given compatible devices, av_hwframe_ctx_create_derived() can be used to + * create a hwframe context for dst in which mapping should be possible. 
+ * + * If src has a hwframe context but dst does not, then the src frame is + * mapped to normal memory and should thereafter be usable as a normal frame. + * If the format is set on dst, then the mapping will attempt to create dst + * with that format and fail if it is not possible. If format is unset (is + * AV_PIX_FMT_NONE) then dst will be mapped with whatever the most appropriate + * format to use is (probably the sw_format of the src hwframe context). + * + * A return value of AVERROR(ENOSYS) indicates that the mapping is not + * possible with the given arguments and hwframe setup, while other return + * values indicate that it failed somehow. + * + * @param dst Destination frame, to contain the mapping. + * @param src Source frame, to be mapped. + * @param flags Some combination of AV_HWFRAME_MAP_* flags. + * @return Zero on success, negative AVERROR code on failure. + */ +int av_hwframe_map(AVFrame *dst, const AVFrame *src, int flags); + + +/** + * Create and initialise an AVHWFramesContext as a mapping of another existing + * AVHWFramesContext on a different device. + * + * av_hwframe_ctx_init() should not be called after this. + * + * @param derived_frame_ctx On success, a reference to the newly created + * AVHWFramesContext. + * @param derived_device_ctx A reference to the device to create the new + * AVHWFramesContext on. + * @param source_frame_ctx A reference to an existing AVHWFramesContext + * which will be mapped to the derived context. + * @param flags Currently unused; should be set to zero. + * @return Zero on success, negative AVERROR code on failure. 
+ */ +int av_hwframe_ctx_create_derived(AVBufferRef **derived_frame_ctx, + enum AVPixelFormat format, + AVBufferRef *derived_device_ctx, + AVBufferRef *source_frame_ctx, + int flags); + +#endif /* AVUTIL_HWCONTEXT_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext_cuda.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext_cuda.h new file mode 100644 index 0000000..12dae84 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext_cuda.h @@ -0,0 +1,51 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + + +#ifndef AVUTIL_HWCONTEXT_CUDA_H +#define AVUTIL_HWCONTEXT_CUDA_H + +#ifndef CUDA_VERSION +#include <cuda.h> +#endif + +#include "pixfmt.h" + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_CUDA. + * + * This API supports dynamic frame pools. AVHWFramesContext.pool must return + * AVBufferRefs whose data pointer is a CUdeviceptr. 
+ */ + +typedef struct AVCUDADeviceContextInternal AVCUDADeviceContextInternal; + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVCUDADeviceContext { + CUcontext cuda_ctx; + AVCUDADeviceContextInternal *internal; +} AVCUDADeviceContext; + +/** + * AVHWFramesContext.hwctx is currently not used + */ + +#endif /* AVUTIL_HWCONTEXT_CUDA_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext_dxva2.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext_dxva2.h new file mode 100644 index 0000000..6c36cb4 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext_dxva2.h @@ -0,0 +1,72 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + + +#ifndef AVUTIL_HWCONTEXT_DXVA2_H +#define AVUTIL_HWCONTEXT_DXVA2_H + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_DXVA2. + * + * Only fixed-size pools are supported. + * + * For user-allocated pools, AVHWFramesContext.pool must return AVBufferRefs + * with the data pointer set to a pointer to IDirect3DSurface9. 
+ */ + +#include <d3d9.h> +#include <dxva2api.h> + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVDXVA2DeviceContext { + IDirect3DDeviceManager9 *devmgr; +} AVDXVA2DeviceContext; + +/** + * This struct is allocated as AVHWFramesContext.hwctx + */ +typedef struct AVDXVA2FramesContext { + /** + * The surface type (e.g. DXVA2_VideoProcessorRenderTarget or + * DXVA2_VideoDecoderRenderTarget). Must be set by the caller. + */ + DWORD surface_type; + + /** + * The surface pool. When an external pool is not provided by the caller, + * this will be managed (allocated and filled on init, freed on uninit) by + * libavutil. + */ + IDirect3DSurface9 **surfaces; + int nb_surfaces; + + /** + * Certain drivers require the decoder to be destroyed before the surfaces. + * To allow internally managed pools to work properly in such cases, this + * field is provided. + * + * If it is non-NULL, libavutil will call IDirectXVideoDecoder_Release() on + * it just before the internal surface pool is freed. + */ + IDirectXVideoDecoder *decoder_to_release; +} AVDXVA2FramesContext; + +#endif /* AVUTIL_HWCONTEXT_DXVA2_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext_qsv.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext_qsv.h new file mode 100644 index 0000000..b98d611 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext_qsv.h @@ -0,0 +1,53 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_QSV_H +#define AVUTIL_HWCONTEXT_QSV_H + +#include <mfx/mfxvideo.h> + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_QSV. + * + * This API does not support dynamic frame pools. AVHWFramesContext.pool must + * contain AVBufferRefs whose data pointer points to an mfxFrameSurface1 struct. + */ + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVQSVDeviceContext { + mfxSession session; +} AVQSVDeviceContext; + +/** + * This struct is allocated as AVHWFramesContext.hwctx + */ +typedef struct AVQSVFramesContext { + mfxFrameSurface1 *surfaces; + int nb_surfaces; + + /** + * A combination of MFX_MEMTYPE_* describing the frame pool. + */ + int frame_type; +} AVQSVFramesContext; + +#endif /* AVUTIL_HWCONTEXT_QSV_H */ + diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext_vaapi.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext_vaapi.h new file mode 100644 index 0000000..da1d4fe --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext_vaapi.h @@ -0,0 +1,110 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_VAAPI_H +#define AVUTIL_HWCONTEXT_VAAPI_H + +#include <va/va.h> + +/** + * @file + * API-specific header for AV_HWDEVICE_TYPE_VAAPI. + * + * Dynamic frame pools are supported, but note that any pool used as a render + * target is required to be of fixed size in order to be be usable as an + * argument to vaCreateContext(). + * + * For user-allocated pools, AVHWFramesContext.pool must return AVBufferRefs + * with the data pointer set to a VASurfaceID. + */ + +enum { + /** + * The quirks field has been set by the user and should not be detected + * automatically by av_hwdevice_ctx_init(). + */ + AV_VAAPI_DRIVER_QUIRK_USER_SET = (1 << 0), + /** + * The driver does not destroy parameter buffers when they are used by + * vaRenderPicture(). Additional code will be required to destroy them + * separately afterwards. + */ + AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS = (1 << 1), + + /** + * The driver does not support the VASurfaceAttribMemoryType attribute, + * so the surface allocation code will not try to use it. + */ + AV_VAAPI_DRIVER_QUIRK_ATTRIB_MEMTYPE = (1 << 2), +}; + +/** + * VAAPI connection details. + * + * Allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVVAAPIDeviceContext { + /** + * The VADisplay handle, to be filled by the user. + */ + VADisplay display; + /** + * Driver quirks to apply - this is filled by av_hwdevice_ctx_init(), + * with reference to a table of known drivers, unless the + * AV_VAAPI_DRIVER_QUIRK_USER_SET bit is already present. The user + * may need to refer to this field when performing any later + * operations using VAAPI with the same VADisplay. 
+ */ + unsigned int driver_quirks; +} AVVAAPIDeviceContext; + +/** + * VAAPI-specific data associated with a frame pool. + * + * Allocated as AVHWFramesContext.hwctx. + */ +typedef struct AVVAAPIFramesContext { + /** + * Set by the user to apply surface attributes to all surfaces in + * the frame pool. If null, default settings are used. + */ + VASurfaceAttrib *attributes; + int nb_attributes; + /** + * The surfaces IDs of all surfaces in the pool after creation. + * Only valid if AVHWFramesContext.initial_pool_size was positive. + * These are intended to be used as the render_targets arguments to + * vaCreateContext(). + */ + VASurfaceID *surface_ids; + int nb_surfaces; +} AVVAAPIFramesContext; + +/** + * VAAPI hardware pipeline configuration details. + * + * Allocated with av_hwdevice_hwconfig_alloc(). + */ +typedef struct AVVAAPIHWConfig { + /** + * ID of a VAAPI pipeline configuration. + */ + VAConfigID config_id; +} AVVAAPIHWConfig; + +#endif /* AVUTIL_HWCONTEXT_VAAPI_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext_vdpau.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext_vdpau.h new file mode 100644 index 0000000..1b7ea1e --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/hwcontext_vdpau.h @@ -0,0 +1,44 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_VDPAU_H +#define AVUTIL_HWCONTEXT_VDPAU_H + +#include <vdpau/vdpau.h> + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_VDPAU. + * + * This API supports dynamic frame pools. AVHWFramesContext.pool must return + * AVBufferRefs whose data pointer is a VdpVideoSurface. + */ + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVVDPAUDeviceContext { + VdpDevice device; + VdpGetProcAddress *get_proc_address; +} AVVDPAUDeviceContext; + +/** + * AVHWFramesContext.hwctx is currently not used + */ + +#endif /* AVUTIL_HWCONTEXT_VDPAU_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/i386/avconfig.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/i386/avconfig.h new file mode 100644 index 0000000..36f72aa --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/i386/avconfig.h @@ -0,0 +1,6 @@ +/* Generated by ffconf */ +#ifndef AVUTIL_AVCONFIG_H +#define AVUTIL_AVCONFIG_H +#define AV_HAVE_BIGENDIAN 0 +#define AV_HAVE_FAST_UNALIGNED 0 +#endif /* AVUTIL_AVCONFIG_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/i386/ffversion.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/i386/ffversion.h new file mode 100644 index 0000000..7ab8f1a --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/i386/ffversion.h @@ -0,0 +1,5 @@ +/* Automatically generated by version.sh, do not manually edit! 
*/ +#ifndef AVUTIL_FFVERSION_H +#define AVUTIL_FFVERSION_H +#define FFMPEG_VERSION "ff3.3--fx0.8.0--20210325--fix_android11_crash" +#endif /* AVUTIL_FFVERSION_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/imgutils.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/imgutils.h new file mode 100644 index 0000000..a4a5efc --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/imgutils.h @@ -0,0 +1,246 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_IMGUTILS_H +#define AVUTIL_IMGUTILS_H + +/** + * @file + * misc image utilities + * + * @addtogroup lavu_picture + * @{ + */ + +#include "avutil.h" +#include "pixdesc.h" +#include "rational.h" + +/** + * Compute the max pixel step for each plane of an image with a + * format described by pixdesc. + * + * The pixel step is the distance in bytes between the first byte of + * the group of bytes which describe a pixel component and the first + * byte of the successive group in the same plane for the same + * component. + * + * @param max_pixsteps an array which is filled with the max pixel step + * for each plane. 
Since a plane may contain different pixel + * components, the computed max_pixsteps[plane] is relative to the + * component in the plane with the max pixel step. + * @param max_pixstep_comps an array which is filled with the component + * for each plane which has the max pixel step. May be NULL. + */ +void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4], + const AVPixFmtDescriptor *pixdesc); + +/** + * Compute the size of an image line with format pix_fmt and width + * width for the plane plane. + * + * @return the computed size in bytes + */ +int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane); + +/** + * Fill plane linesizes for an image with pixel format pix_fmt and + * width width. + * + * @param linesizes array to be filled with the linesize for each plane + * @return >= 0 in case of success, a negative error code otherwise + */ +int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width); + +/** + * Fill plane data pointers for an image with pixel format pix_fmt and + * height height. + * + * @param data pointers array to be filled with the pointer for each image plane + * @param ptr the pointer to a buffer which will contain the image + * @param linesizes the array containing the linesize for each + * plane, should be filled by av_image_fill_linesizes() + * @return the size in bytes required for the image buffer, a negative + * error code in case of failure + */ +int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, + uint8_t *ptr, const int linesizes[4]); + +/** + * Allocate an image with size w and h and pixel format pix_fmt, and + * fill pointers and linesizes accordingly. + * The allocated image buffer has to be freed by using + * av_freep(&pointers[0]). 
+ * + * @param align the value to use for buffer size alignment + * @return the size in bytes required for the image buffer, a negative + * error code in case of failure + */ +int av_image_alloc(uint8_t *pointers[4], int linesizes[4], + int w, int h, enum AVPixelFormat pix_fmt, int align); + +/** + * Copy image plane from src to dst. + * That is, copy "height" number of lines of "bytewidth" bytes each. + * The first byte of each successive line is separated by *_linesize + * bytes. + * + * bytewidth must be contained by both absolute values of dst_linesize + * and src_linesize, otherwise the function behavior is undefined. + * + * @param dst_linesize linesize for the image plane in dst + * @param src_linesize linesize for the image plane in src + */ +void av_image_copy_plane(uint8_t *dst, int dst_linesize, + const uint8_t *src, int src_linesize, + int bytewidth, int height); + +/** + * Copy image in src_data to dst_data. + * + * @param dst_linesizes linesizes for the image in dst_data + * @param src_linesizes linesizes for the image in src_data + */ +void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], + const uint8_t *src_data[4], const int src_linesizes[4], + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Copy image data located in uncacheable (e.g. GPU mapped) memory. Where + * available, this function will use special functionality for reading from such + * memory, which may result in greatly improved performance compared to plain + * av_image_copy(). + * + * The data pointers and the linesizes must be aligned to the maximum required + * by the CPU architecture. + * + * @note The linesize parameters have the type ptrdiff_t here, while they are + * int for av_image_copy(). + * @note On x86, the linesizes currently need to be aligned to the cacheline + * size (i.e. 64) to get improved performance. 
+ */ +void av_image_copy_uc_from(uint8_t *dst_data[4], const ptrdiff_t dst_linesizes[4], + const uint8_t *src_data[4], const ptrdiff_t src_linesizes[4], + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Setup the data pointers and linesizes based on the specified image + * parameters and the provided array. + * + * The fields of the given image are filled in by using the src + * address which points to the image data buffer. Depending on the + * specified pixel format, one or multiple image data pointers and + * line sizes will be set. If a planar format is specified, several + * pointers will be set pointing to the different picture planes and + * the line sizes of the different planes will be stored in the + * lines_sizes array. Call with src == NULL to get the required + * size for the src buffer. + * + * To allocate the buffer and fill in the dst_data and dst_linesize in + * one call, use av_image_alloc(). + * + * @param dst_data data pointers to be filled in + * @param dst_linesizes linesizes for the image in dst_data to be filled in + * @param src buffer which will contain or contains the actual image data, can be NULL + * @param pix_fmt the pixel format of the image + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @param align the value used in src for linesize alignment + * @return the size in bytes required for src, a negative error code + * in case of failure + */ +int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], + const uint8_t *src, + enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Return the size in bytes of the amount of data required to store an + * image with the given parameters. + * + * @param[in] align the assumed linesize alignment + */ +int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Copy image data from an image into a buffer. 
+ * + * av_image_get_buffer_size() can be used to compute the required size + * for the buffer to fill. + * + * @param dst a buffer into which picture data will be copied + * @param dst_size the size in bytes of dst + * @param src_data pointers containing the source image data + * @param src_linesizes linesizes for the image in src_data + * @param pix_fmt the pixel format of the source image + * @param width the width of the source image in pixels + * @param height the height of the source image in pixels + * @param align the assumed linesize alignment for dst + * @return the number of bytes written to dst, or a negative value + * (error code) on error + */ +int av_image_copy_to_buffer(uint8_t *dst, int dst_size, + const uint8_t * const src_data[4], const int src_linesize[4], + enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Check if the given dimension of an image is valid, meaning that all + * bytes of the image can be addressed with a signed int. + * + * @param w the width of the picture + * @param h the height of the picture + * @param log_offset the offset to sum to the log level for logging with log_ctx + * @param log_ctx the parent logging context, it may be NULL + * @return >= 0 if valid, a negative error code otherwise + */ +int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx); + +/** + * Check if the given dimension of an image is valid, meaning that all + * bytes of a plane of an image with the specified pix_fmt can be addressed + * with a signed int. + * + * @param w the width of the picture + * @param h the height of the picture + * @param max_pixels the maximum number of pixels the user wants to accept + * @param pix_fmt the pixel format, can be AV_PIX_FMT_NONE if unknown. 
+ * @param log_offset the offset to sum to the log level for logging with log_ctx + * @param log_ctx the parent logging context, it may be NULL + * @return >= 0 if valid, a negative error code otherwise + */ +int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx); + +/** + * Check if the given sample aspect ratio of an image is valid. + * + * It is considered invalid if the denominator is 0 or if applying the ratio + * to the image size would make the smaller dimension less than 1. If the + * sar numerator is 0, it is considered unknown and will return as valid. + * + * @param w width of the image + * @param h height of the image + * @param sar sample aspect ratio of the image + * @return 0 if valid, a negative AVERROR code otherwise + */ +int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar); + +/** + * @} + */ + + +#endif /* AVUTIL_IMGUTILS_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/intfloat.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/intfloat.h new file mode 100644 index 0000000..fe3d7ec --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/intfloat.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2011 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTFLOAT_H +#define AVUTIL_INTFLOAT_H + +#include +#include "attributes.h" + +union av_intfloat32 { + uint32_t i; + float f; +}; + +union av_intfloat64 { + uint64_t i; + double f; +}; + +/** + * Reinterpret a 32-bit integer as a float. + */ +static av_always_inline float av_int2float(uint32_t i) +{ + union av_intfloat32 v; + v.i = i; + return v.f; +} + +/** + * Reinterpret a float as a 32-bit integer. + */ +static av_always_inline uint32_t av_float2int(float f) +{ + union av_intfloat32 v; + v.f = f; + return v.i; +} + +/** + * Reinterpret a 64-bit integer as a double. + */ +static av_always_inline double av_int2double(uint64_t i) +{ + union av_intfloat64 v; + v.i = i; + return v.f; +} + +/** + * Reinterpret a double as a 64-bit integer. + */ +static av_always_inline uint64_t av_double2int(double f) +{ + union av_intfloat64 v; + v.f = f; + return v.i; +} + +#endif /* AVUTIL_INTFLOAT_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/intreadwrite.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/intreadwrite.h new file mode 100644 index 0000000..d54d4b9 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/intreadwrite.h @@ -0,0 +1,634 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTREADWRITE_H +#define AVUTIL_INTREADWRITE_H + +#include +#include "libavutil/avconfig.h" +#include "attributes.h" +#include "bswap.h" + +typedef union { + uint64_t u64; + uint32_t u32[2]; + uint16_t u16[4]; + uint8_t u8 [8]; + double f64; + float f32[2]; +} av_alias av_alias64; + +typedef union { + uint32_t u32; + uint16_t u16[2]; + uint8_t u8 [4]; + float f32; +} av_alias av_alias32; + +typedef union { + uint16_t u16; + uint8_t u8 [2]; +} av_alias av_alias16; + +/* + * Arch-specific headers can provide any combination of + * AV_[RW][BLN](16|24|32|48|64) and AV_(COPY|SWAP|ZERO)(64|128) macros. + * Preprocessor symbols must be defined, even if these are implemented + * as inline functions. + * + * R/W means read/write, B/L/N means big/little/native endianness. + * The following macros require aligned access, compared to their + * unaligned variants: AV_(COPY|SWAP|ZERO)(64|128), AV_[RW]N[8-64]A. + * Incorrect usage may range from abysmal performance to crash + * depending on the platform. + * + * The unaligned variants are AV_[RW][BLN][8-64] and AV_COPY*U. 
+ */ + +#ifdef HAVE_AV_CONFIG_H + +#include "config.h" + +#if ARCH_ARM +# include "arm/intreadwrite.h" +#elif ARCH_AVR32 +# include "avr32/intreadwrite.h" +#elif ARCH_MIPS +# include "mips/intreadwrite.h" +#elif ARCH_PPC +# include "ppc/intreadwrite.h" +#elif ARCH_TOMI +# include "tomi/intreadwrite.h" +#elif ARCH_X86 +# include "x86/intreadwrite.h" +#endif + +#endif /* HAVE_AV_CONFIG_H */ + +/* + * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers. + */ + +#if AV_HAVE_BIGENDIAN + +# if defined(AV_RN16) && !defined(AV_RB16) +# define AV_RB16(p) AV_RN16(p) +# elif !defined(AV_RN16) && defined(AV_RB16) +# define AV_RN16(p) AV_RB16(p) +# endif + +# if defined(AV_WN16) && !defined(AV_WB16) +# define AV_WB16(p, v) AV_WN16(p, v) +# elif !defined(AV_WN16) && defined(AV_WB16) +# define AV_WN16(p, v) AV_WB16(p, v) +# endif + +# if defined(AV_RN24) && !defined(AV_RB24) +# define AV_RB24(p) AV_RN24(p) +# elif !defined(AV_RN24) && defined(AV_RB24) +# define AV_RN24(p) AV_RB24(p) +# endif + +# if defined(AV_WN24) && !defined(AV_WB24) +# define AV_WB24(p, v) AV_WN24(p, v) +# elif !defined(AV_WN24) && defined(AV_WB24) +# define AV_WN24(p, v) AV_WB24(p, v) +# endif + +# if defined(AV_RN32) && !defined(AV_RB32) +# define AV_RB32(p) AV_RN32(p) +# elif !defined(AV_RN32) && defined(AV_RB32) +# define AV_RN32(p) AV_RB32(p) +# endif + +# if defined(AV_WN32) && !defined(AV_WB32) +# define AV_WB32(p, v) AV_WN32(p, v) +# elif !defined(AV_WN32) && defined(AV_WB32) +# define AV_WN32(p, v) AV_WB32(p, v) +# endif + +# if defined(AV_RN48) && !defined(AV_RB48) +# define AV_RB48(p) AV_RN48(p) +# elif !defined(AV_RN48) && defined(AV_RB48) +# define AV_RN48(p) AV_RB48(p) +# endif + +# if defined(AV_WN48) && !defined(AV_WB48) +# define AV_WB48(p, v) AV_WN48(p, v) +# elif !defined(AV_WN48) && defined(AV_WB48) +# define AV_WN48(p, v) AV_WB48(p, v) +# endif + +# if defined(AV_RN64) && !defined(AV_RB64) +# define AV_RB64(p) AV_RN64(p) +# elif !defined(AV_RN64) && defined(AV_RB64) 
+# define AV_RN64(p) AV_RB64(p) +# endif + +# if defined(AV_WN64) && !defined(AV_WB64) +# define AV_WB64(p, v) AV_WN64(p, v) +# elif !defined(AV_WN64) && defined(AV_WB64) +# define AV_WN64(p, v) AV_WB64(p, v) +# endif + +#else /* AV_HAVE_BIGENDIAN */ + +# if defined(AV_RN16) && !defined(AV_RL16) +# define AV_RL16(p) AV_RN16(p) +# elif !defined(AV_RN16) && defined(AV_RL16) +# define AV_RN16(p) AV_RL16(p) +# endif + +# if defined(AV_WN16) && !defined(AV_WL16) +# define AV_WL16(p, v) AV_WN16(p, v) +# elif !defined(AV_WN16) && defined(AV_WL16) +# define AV_WN16(p, v) AV_WL16(p, v) +# endif + +# if defined(AV_RN24) && !defined(AV_RL24) +# define AV_RL24(p) AV_RN24(p) +# elif !defined(AV_RN24) && defined(AV_RL24) +# define AV_RN24(p) AV_RL24(p) +# endif + +# if defined(AV_WN24) && !defined(AV_WL24) +# define AV_WL24(p, v) AV_WN24(p, v) +# elif !defined(AV_WN24) && defined(AV_WL24) +# define AV_WN24(p, v) AV_WL24(p, v) +# endif + +# if defined(AV_RN32) && !defined(AV_RL32) +# define AV_RL32(p) AV_RN32(p) +# elif !defined(AV_RN32) && defined(AV_RL32) +# define AV_RN32(p) AV_RL32(p) +# endif + +# if defined(AV_WN32) && !defined(AV_WL32) +# define AV_WL32(p, v) AV_WN32(p, v) +# elif !defined(AV_WN32) && defined(AV_WL32) +# define AV_WN32(p, v) AV_WL32(p, v) +# endif + +# if defined(AV_RN48) && !defined(AV_RL48) +# define AV_RL48(p) AV_RN48(p) +# elif !defined(AV_RN48) && defined(AV_RL48) +# define AV_RN48(p) AV_RL48(p) +# endif + +# if defined(AV_WN48) && !defined(AV_WL48) +# define AV_WL48(p, v) AV_WN48(p, v) +# elif !defined(AV_WN48) && defined(AV_WL48) +# define AV_WN48(p, v) AV_WL48(p, v) +# endif + +# if defined(AV_RN64) && !defined(AV_RL64) +# define AV_RL64(p) AV_RN64(p) +# elif !defined(AV_RN64) && defined(AV_RL64) +# define AV_RN64(p) AV_RL64(p) +# endif + +# if defined(AV_WN64) && !defined(AV_WL64) +# define AV_WL64(p, v) AV_WN64(p, v) +# elif !defined(AV_WN64) && defined(AV_WL64) +# define AV_WN64(p, v) AV_WL64(p, v) +# endif + +#endif /* !AV_HAVE_BIGENDIAN */ + 
+/* + * Define AV_[RW]N helper macros to simplify definitions not provided + * by per-arch headers. + */ + +#if defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__) + +union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias; +union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias; +union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias; + +# define AV_RN(s, p) (((const union unaligned_##s *) (p))->l) +# define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v)) + +#elif defined(__DECC) + +# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p))) +# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v)) + +#elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_X64)) && AV_HAVE_FAST_UNALIGNED + +# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p))) +# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v)) + +#elif AV_HAVE_FAST_UNALIGNED + +# define AV_RN(s, p) (((const av_alias##s*)(p))->u##s) +# define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v)) + +#else + +#ifndef AV_RB16 +# define AV_RB16(x) \ + ((((const uint8_t*)(x))[0] << 8) | \ + ((const uint8_t*)(x))[1]) +#endif +#ifndef AV_WB16 +# define AV_WB16(p, val) do { \ + uint16_t d = (val); \ + ((uint8_t*)(p))[1] = (d); \ + ((uint8_t*)(p))[0] = (d)>>8; \ + } while(0) +#endif + +#ifndef AV_RL16 +# define AV_RL16(x) \ + ((((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL16 +# define AV_WL16(p, val) do { \ + uint16_t d = (val); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + } while(0) +#endif + +#ifndef AV_RB32 +# define AV_RB32(x) \ + (((uint32_t)((const uint8_t*)(x))[0] << 24) | \ + (((const uint8_t*)(x))[1] << 16) | \ + (((const uint8_t*)(x))[2] << 8) | \ + ((const uint8_t*)(x))[3]) +#endif +#ifndef AV_WB32 +# define AV_WB32(p, val) do { \ + uint32_t d = (val); \ + ((uint8_t*)(p))[3] = (d); \ + ((uint8_t*)(p))[2] = (d)>>8; \ + ((uint8_t*)(p))[1] = (d)>>16; \ + ((uint8_t*)(p))[0] = 
(d)>>24; \ + } while(0) +#endif + +#ifndef AV_RL32 +# define AV_RL32(x) \ + (((uint32_t)((const uint8_t*)(x))[3] << 24) | \ + (((const uint8_t*)(x))[2] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL32 +# define AV_WL32(p, val) do { \ + uint32_t d = (val); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + } while(0) +#endif + +#ifndef AV_RB64 +# define AV_RB64(x) \ + (((uint64_t)((const uint8_t*)(x))[0] << 56) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 48) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[5] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[6] << 8) | \ + (uint64_t)((const uint8_t*)(x))[7]) +#endif +#ifndef AV_WB64 +# define AV_WB64(p, val) do { \ + uint64_t d = (val); \ + ((uint8_t*)(p))[7] = (d); \ + ((uint8_t*)(p))[6] = (d)>>8; \ + ((uint8_t*)(p))[5] = (d)>>16; \ + ((uint8_t*)(p))[4] = (d)>>24; \ + ((uint8_t*)(p))[3] = (d)>>32; \ + ((uint8_t*)(p))[2] = (d)>>40; \ + ((uint8_t*)(p))[1] = (d)>>48; \ + ((uint8_t*)(p))[0] = (d)>>56; \ + } while(0) +#endif + +#ifndef AV_RL64 +# define AV_RL64(x) \ + (((uint64_t)((const uint8_t*)(x))[7] << 56) | \ + ((uint64_t)((const uint8_t*)(x))[6] << 48) | \ + ((uint64_t)((const uint8_t*)(x))[5] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ + (uint64_t)((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL64 +# define AV_WL64(p, val) do { \ + uint64_t d = (val); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + ((uint8_t*)(p))[4] = (d)>>32; \ + ((uint8_t*)(p))[5] = (d)>>40; \ + ((uint8_t*)(p))[6] = (d)>>48; \ + 
((uint8_t*)(p))[7] = (d)>>56; \ + } while(0) +#endif + +#if AV_HAVE_BIGENDIAN +# define AV_RN(s, p) AV_RB##s(p) +# define AV_WN(s, p, v) AV_WB##s(p, v) +#else +# define AV_RN(s, p) AV_RL##s(p) +# define AV_WN(s, p, v) AV_WL##s(p, v) +#endif + +#endif /* HAVE_FAST_UNALIGNED */ + +#ifndef AV_RN16 +# define AV_RN16(p) AV_RN(16, p) +#endif + +#ifndef AV_RN32 +# define AV_RN32(p) AV_RN(32, p) +#endif + +#ifndef AV_RN64 +# define AV_RN64(p) AV_RN(64, p) +#endif + +#ifndef AV_WN16 +# define AV_WN16(p, v) AV_WN(16, p, v) +#endif + +#ifndef AV_WN32 +# define AV_WN32(p, v) AV_WN(32, p, v) +#endif + +#ifndef AV_WN64 +# define AV_WN64(p, v) AV_WN(64, p, v) +#endif + +#if AV_HAVE_BIGENDIAN +# define AV_RB(s, p) AV_RN##s(p) +# define AV_WB(s, p, v) AV_WN##s(p, v) +# define AV_RL(s, p) av_bswap##s(AV_RN##s(p)) +# define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v)) +#else +# define AV_RB(s, p) av_bswap##s(AV_RN##s(p)) +# define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v)) +# define AV_RL(s, p) AV_RN##s(p) +# define AV_WL(s, p, v) AV_WN##s(p, v) +#endif + +#define AV_RB8(x) (((const uint8_t*)(x))[0]) +#define AV_WB8(p, d) do { ((uint8_t*)(p))[0] = (d); } while(0) + +#define AV_RL8(x) AV_RB8(x) +#define AV_WL8(p, d) AV_WB8(p, d) + +#ifndef AV_RB16 +# define AV_RB16(p) AV_RB(16, p) +#endif +#ifndef AV_WB16 +# define AV_WB16(p, v) AV_WB(16, p, v) +#endif + +#ifndef AV_RL16 +# define AV_RL16(p) AV_RL(16, p) +#endif +#ifndef AV_WL16 +# define AV_WL16(p, v) AV_WL(16, p, v) +#endif + +#ifndef AV_RB32 +# define AV_RB32(p) AV_RB(32, p) +#endif +#ifndef AV_WB32 +# define AV_WB32(p, v) AV_WB(32, p, v) +#endif + +#ifndef AV_RL32 +# define AV_RL32(p) AV_RL(32, p) +#endif +#ifndef AV_WL32 +# define AV_WL32(p, v) AV_WL(32, p, v) +#endif + +#ifndef AV_RB64 +# define AV_RB64(p) AV_RB(64, p) +#endif +#ifndef AV_WB64 +# define AV_WB64(p, v) AV_WB(64, p, v) +#endif + +#ifndef AV_RL64 +# define AV_RL64(p) AV_RL(64, p) +#endif +#ifndef AV_WL64 +# define AV_WL64(p, v) AV_WL(64, p, v) +#endif + +#ifndef 
AV_RB24 +# define AV_RB24(x) \ + ((((const uint8_t*)(x))[0] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[2]) +#endif +#ifndef AV_WB24 +# define AV_WB24(p, d) do { \ + ((uint8_t*)(p))[2] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[0] = (d)>>16; \ + } while(0) +#endif + +#ifndef AV_RL24 +# define AV_RL24(x) \ + ((((const uint8_t*)(x))[2] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL24 +# define AV_WL24(p, d) do { \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + } while(0) +#endif + +#ifndef AV_RB48 +# define AV_RB48(x) \ + (((uint64_t)((const uint8_t*)(x))[0] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 8) | \ + (uint64_t)((const uint8_t*)(x))[5]) +#endif +#ifndef AV_WB48 +# define AV_WB48(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[5] = (d); \ + ((uint8_t*)(p))[4] = (d)>>8; \ + ((uint8_t*)(p))[3] = (d)>>16; \ + ((uint8_t*)(p))[2] = (d)>>24; \ + ((uint8_t*)(p))[1] = (d)>>32; \ + ((uint8_t*)(p))[0] = (d)>>40; \ + } while(0) +#endif + +#ifndef AV_RL48 +# define AV_RL48(x) \ + (((uint64_t)((const uint8_t*)(x))[5] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ + (uint64_t)((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL48 +# define AV_WL48(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + ((uint8_t*)(p))[4] = (d)>>32; \ + ((uint8_t*)(p))[5] = (d)>>40; \ + } while(0) +#endif + +/* + * The AV_[RW]NA macros access naturally aligned data + * in a type-safe way. 
+ */ + +#define AV_RNA(s, p) (((const av_alias##s*)(p))->u##s) +#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v)) + +#ifndef AV_RN16A +# define AV_RN16A(p) AV_RNA(16, p) +#endif + +#ifndef AV_RN32A +# define AV_RN32A(p) AV_RNA(32, p) +#endif + +#ifndef AV_RN64A +# define AV_RN64A(p) AV_RNA(64, p) +#endif + +#ifndef AV_WN16A +# define AV_WN16A(p, v) AV_WNA(16, p, v) +#endif + +#ifndef AV_WN32A +# define AV_WN32A(p, v) AV_WNA(32, p, v) +#endif + +#ifndef AV_WN64A +# define AV_WN64A(p, v) AV_WNA(64, p, v) +#endif + +/* + * The AV_COPYxxU macros are suitable for copying data to/from unaligned + * memory locations. + */ + +#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s)); + +#ifndef AV_COPY16U +# define AV_COPY16U(d, s) AV_COPYU(16, d, s) +#endif + +#ifndef AV_COPY32U +# define AV_COPY32U(d, s) AV_COPYU(32, d, s) +#endif + +#ifndef AV_COPY64U +# define AV_COPY64U(d, s) AV_COPYU(64, d, s) +#endif + +#ifndef AV_COPY128U +# define AV_COPY128U(d, s) \ + do { \ + AV_COPY64U(d, s); \ + AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8); \ + } while(0) +#endif + +/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be + * naturally aligned. They may be implemented using MMX, + * so emms_c() must be called before using any float code + * afterwards. 
+ */ + +#define AV_COPY(n, d, s) \ + (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n) + +#ifndef AV_COPY16 +# define AV_COPY16(d, s) AV_COPY(16, d, s) +#endif + +#ifndef AV_COPY32 +# define AV_COPY32(d, s) AV_COPY(32, d, s) +#endif + +#ifndef AV_COPY64 +# define AV_COPY64(d, s) AV_COPY(64, d, s) +#endif + +#ifndef AV_COPY128 +# define AV_COPY128(d, s) \ + do { \ + AV_COPY64(d, s); \ + AV_COPY64((char*)(d)+8, (char*)(s)+8); \ + } while(0) +#endif + +#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b)) + +#ifndef AV_SWAP64 +# define AV_SWAP64(a, b) AV_SWAP(64, a, b) +#endif + +#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0) + +#ifndef AV_ZERO16 +# define AV_ZERO16(d) AV_ZERO(16, d) +#endif + +#ifndef AV_ZERO32 +# define AV_ZERO32(d) AV_ZERO(32, d) +#endif + +#ifndef AV_ZERO64 +# define AV_ZERO64(d) AV_ZERO(64, d) +#endif + +#ifndef AV_ZERO128 +# define AV_ZERO128(d) \ + do { \ + AV_ZERO64(d); \ + AV_ZERO64((char*)(d)+8); \ + } while(0) +#endif + +#endif /* AVUTIL_INTREADWRITE_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/lfg.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/lfg.h new file mode 100644 index 0000000..03f779a --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/lfg.h @@ -0,0 +1,71 @@ +/* + * Lagged Fibonacci PRNG + * Copyright (c) 2008 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LFG_H +#define AVUTIL_LFG_H + +#include + +typedef struct AVLFG { + unsigned int state[64]; + int index; +} AVLFG; + +void av_lfg_init(AVLFG *c, unsigned int seed); + +/** + * Seed the state of the ALFG using binary data. + * + * Return value: 0 on success, negative value (AVERROR) on failure. + */ +int av_lfg_init_from_data(AVLFG *c, const uint8_t *data, unsigned int length); + +/** + * Get the next random unsigned 32-bit number using an ALFG. + * + * Please also consider a simple LCG like state= state*1664525+1013904223, + * it may be good enough and faster for your specific use case. + */ +static inline unsigned int av_lfg_get(AVLFG *c){ + c->state[c->index & 63] = c->state[(c->index-24) & 63] + c->state[(c->index-55) & 63]; + return c->state[c->index++ & 63]; +} + +/** + * Get the next random unsigned 32-bit number using a MLFG. + * + * Please also consider av_lfg_get() above, it is faster. + */ +static inline unsigned int av_mlfg_get(AVLFG *c){ + unsigned int a= c->state[(c->index-55) & 63]; + unsigned int b= c->state[(c->index-24) & 63]; + return c->state[c->index++ & 63] = 2*a*b+a+b; +} + +/** + * Get the next two numbers generated by a Box-Muller Gaussian + * generator using the random numbers issued by lfg. 
+ * + * @param out array where the two generated numbers are placed + */ +void av_bmg_get(AVLFG *lfg, double out[2]); + +#endif /* AVUTIL_LFG_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/log.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/log.h new file mode 100644 index 0000000..f0a5738 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/log.h @@ -0,0 +1,376 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LOG_H +#define AVUTIL_LOG_H + +#include +#include "avutil.h" +#include "attributes.h" +#include "version.h" + +typedef enum { + AV_CLASS_CATEGORY_NA = 0, + AV_CLASS_CATEGORY_INPUT, + AV_CLASS_CATEGORY_OUTPUT, + AV_CLASS_CATEGORY_MUXER, + AV_CLASS_CATEGORY_DEMUXER, + AV_CLASS_CATEGORY_ENCODER, + AV_CLASS_CATEGORY_DECODER, + AV_CLASS_CATEGORY_FILTER, + AV_CLASS_CATEGORY_BITSTREAM_FILTER, + AV_CLASS_CATEGORY_SWSCALER, + AV_CLASS_CATEGORY_SWRESAMPLER, + AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT = 40, + AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT, + AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT, + AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT, + AV_CLASS_CATEGORY_DEVICE_OUTPUT, + AV_CLASS_CATEGORY_DEVICE_INPUT, + AV_CLASS_CATEGORY_NB ///< not part of ABI/API +}AVClassCategory; + +#define AV_IS_INPUT_DEVICE(category) \ + (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT) || \ + ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT) || \ + ((category) == AV_CLASS_CATEGORY_DEVICE_INPUT)) + +#define AV_IS_OUTPUT_DEVICE(category) \ + (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT) || \ + ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT) || \ + ((category) == AV_CLASS_CATEGORY_DEVICE_OUTPUT)) + +struct AVOptionRanges; + +/** + * Describe the class of an AVClass context structure. That is an + * arbitrary struct of which the first field is a pointer to an + * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.). + */ +typedef struct AVClass { + /** + * The name of the class; usually it is the same name as the + * context structure type to which the AVClass is associated. + */ + const char* class_name; + + /** + * A pointer to a function which returns the name of a context + * instance ctx associated with the class. 
+ */ + const char* (*item_name)(void* ctx); + + /** + * a pointer to the first option specified in the class if any or NULL + * + * @see av_set_default_options() + */ + const struct AVOption *option; + + /** + * LIBAVUTIL_VERSION with which this structure was created. + * This is used to allow fields to be added without requiring major + * version bumps everywhere. + */ + + int version; + + /** + * Offset in the structure where log_level_offset is stored. + * 0 means there is no such variable + */ + int log_level_offset_offset; + + /** + * Offset in the structure where a pointer to the parent context for + * logging is stored. For example a decoder could pass its AVCodecContext + * to eval as such a parent context, which an av_log() implementation + * could then leverage to display the parent context. + * The offset can be NULL. + */ + int parent_log_context_offset; + + /** + * Return next AVOptions-enabled child or NULL + */ + void* (*child_next)(void *obj, void *prev); + + /** + * Return an AVClass corresponding to the next potential + * AVOptions-enabled child. + * + * The difference between child_next and this is that + * child_next iterates over _already existing_ objects, while + * child_class_next iterates over _all possible_ children. + */ + const struct AVClass* (*child_class_next)(const struct AVClass *prev); + + /** + * Category used for visualization (like color) + * This is only set if the category is equal for all objects using this class. + * available since version (51 << 16 | 56 << 8 | 100) + */ + AVClassCategory category; + + /** + * Callback to return the category. + * available since version (51 << 16 | 59 << 8 | 100) + */ + AVClassCategory (*get_category)(void* ctx); + + /** + * Callback to return the supported/allowed ranges. 
+ * available since version (52.12) + */ + int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags); +} AVClass; + +/** + * @addtogroup lavu_log + * + * @{ + * + * @defgroup lavu_log_constants Logging Constants + * + * @{ + */ + +/** + * Print no output. + */ +#define AV_LOG_QUIET -8 + +/** + * Something went really wrong and we will crash now. + */ +#define AV_LOG_PANIC 0 + +/** + * Something went wrong and recovery is not possible. + * For example, no header was found for a format which depends + * on headers or an illegal combination of parameters is used. + */ +#define AV_LOG_FATAL 8 + +/** + * Something went wrong and cannot losslessly be recovered. + * However, not all future data is affected. + */ +#define AV_LOG_ERROR 16 + +/** + * Something somehow does not look correct. This may or may not + * lead to problems. An example would be the use of '-vstrict -2'. + */ +#define AV_LOG_WARNING 24 + +/** + * Standard information. + */ +#define AV_LOG_INFO 32 + +/** + * Detailed information. + */ +#define AV_LOG_VERBOSE 40 + +/** + * Stuff which is only useful for libav* developers. + */ +#define AV_LOG_DEBUG 48 + +/** + * Extremely verbose debugging, useful for libav* development. + */ +#define AV_LOG_TRACE 56 + +#define AV_LOG_MAX_OFFSET (AV_LOG_TRACE - AV_LOG_QUIET) + +/** + * @} + */ + +/** + * Sets additional colors for extended debugging sessions. + * @code + av_log(ctx, AV_LOG_DEBUG|AV_LOG_C(134), "Message in purple\n"); + @endcode + * Requires 256color terminal support. Uses outside debugging is not + * recommended. + */ +#define AV_LOG_C(x) ((x) << 8) + +/** + * Send the specified message to the log if the level is less than or equal + * to the current av_log_level. By default, all logging messages are sent to + * stderr. This behavior can be altered by setting a different logging callback + * function. 
+ * @see av_log_set_callback + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct or NULL if general log. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + */ +void av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4); + + +/** + * Send the specified message to the log if the level is less than or equal + * to the current av_log_level. By default, all logging messages are sent to + * stderr. This behavior can be altered by setting a different logging callback + * function. + * @see av_log_set_callback + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + * @param vl The arguments referenced by the format string. + */ +void av_vlog(void *avcl, int level, const char *fmt, va_list vl); + +/** + * Get the current log level + * + * @see lavu_log_constants + * + * @return Current log level + */ +int av_log_get_level(void); + +/** + * Set the log level + * + * @see lavu_log_constants + * + * @param level Logging level + */ +void av_log_set_level(int level); + +/** + * Set the logging callback + * + * @note The callback must be thread safe, even if the application does not use + * threads itself as some codecs are multithreaded. + * + * @see av_log_default_callback + * + * @param callback A logging function with a compatible signature. 
+ */ +void av_log_set_callback(void (*callback)(void*, int, const char*, va_list)); + +/** + * Default logging callback + * + * It prints the message to stderr, optionally colorizing it. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + * @param vl The arguments referenced by the format string. + */ +void av_log_default_callback(void *avcl, int level, const char *fmt, + va_list vl); + +/** + * Return the context name + * + * @param ctx The AVClass context + * + * @return The AVClass class_name + */ +const char* av_default_item_name(void* ctx); +AVClassCategory av_default_get_category(void *ptr); + +/** + * Format a line of log the same way as the default callback. + * @param line buffer to receive the formatted line + * @param line_size size of the buffer + * @param print_prefix used to store whether the prefix must be printed; + * must point to a persistent integer initially set to 1 + */ +void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl, + char *line, int line_size, int *print_prefix); + +/** + * Format a line of log the same way as the default callback. + * @param line buffer to receive the formatted line; + * may be NULL if line_size is 0 + * @param line_size size of the buffer; at most line_size-1 characters will + * be written to the buffer, plus one null terminator + * @param print_prefix used to store whether the prefix must be printed; + * must point to a persistent integer initially set to 1 + * @return Returns a negative value if an error occurred, otherwise returns + * the number of characters that would have been written for a + * sufficiently large buffer, not including the terminating null + * character. 
If the return value is not less than line_size, it means + * that the log message was truncated to fit the buffer. + */ +int av_log_format_line2(void *ptr, int level, const char *fmt, va_list vl, + char *line, int line_size, int *print_prefix); + +#if FF_API_DLOG +/** + * av_dlog macros + * @deprecated unused + * Useful to print debug messages that shouldn't get compiled in normally. + */ + +#ifdef DEBUG +# define av_dlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__) +#else +# define av_dlog(pctx, ...) do { if (0) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0) +#endif +#endif /* FF_API_DLOG */ + +/** + * Skip repeated messages, this requires the user app to use av_log() instead of + * (f)printf as the 2 would otherwise interfere and lead to + * "Last message repeated x times" messages below (f)printf messages with some + * bad luck. + * Also to receive the last, "last repeated" line if any, the user app must + * call av_log(NULL, AV_LOG_QUIET, "%s", ""); at the end + */ +#define AV_LOG_SKIP_REPEATED 1 + +/** + * Include the log severity in messages originating from codecs. + * + * Results in messages such as: + * [rawvideo @ 0xDEADBEEF] [error] encode did not produce valid pts + */ +#define AV_LOG_PRINT_LEVEL 2 + +void av_log_set_flags(int arg); +int av_log_get_flags(void); + +/** + * @} + */ + +#endif /* AVUTIL_LOG_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/macros.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/macros.h new file mode 100644 index 0000000..2007ee5 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/macros.h @@ -0,0 +1,50 @@ +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu + * Utility Preprocessor macros + */ + +#ifndef AVUTIL_MACROS_H +#define AVUTIL_MACROS_H + +/** + * @addtogroup preproc_misc Preprocessor String Macros + * + * String manipulation macros + * + * @{ + */ + +#define AV_STRINGIFY(s) AV_TOSTRING(s) +#define AV_TOSTRING(s) #s + +#define AV_GLUE(a, b) a ## b +#define AV_JOIN(a, b) AV_GLUE(a, b) + +/** + * @} + */ + +#define AV_PRAGMA(s) _Pragma(#s) + +#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1)) + +#endif /* AVUTIL_MACROS_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/mastering_display_metadata.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/mastering_display_metadata.h new file mode 100644 index 0000000..936533f --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/mastering_display_metadata.h @@ -0,0 +1,89 @@ +/** + * Copyright (c) 2016 Neil Birkbeck + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MASTERING_DISPLAY_METADATA_H +#define AVUTIL_MASTERING_DISPLAY_METADATA_H + +#include "frame.h" +#include "rational.h" + + +/** + * Mastering display metadata capable of representing the color volume of + * the display used to master the content (SMPTE 2086:2014). + * + * To be used as payload of a AVFrameSideData or AVPacketSideData with the + * appropriate type. + * + * @note The struct should be allocated with av_mastering_display_metadata_alloc() + * and its size is not a part of the public ABI. + */ +typedef struct AVMasteringDisplayMetadata { + /** + * CIE 1931 xy chromaticity coords of color primaries (r, g, b order). + */ + AVRational display_primaries[3][2]; + + /** + * CIE 1931 xy chromaticity coords of white point. + */ + AVRational white_point[2]; + + /** + * Min luminance of mastering display (cd/m^2). + */ + AVRational min_luminance; + + /** + * Max luminance of mastering display (cd/m^2). + */ + AVRational max_luminance; + + /** + * Flag indicating whether the display primaries (and white point) are set. + */ + int has_primaries; + + /** + * Flag indicating whether the luminance (min_ and max_) have been set. 
+ */ + int has_luminance; + +} AVMasteringDisplayMetadata; + +/** + * Allocate an AVMasteringDisplayMetadata structure and set its fields to + * default values. The resulting struct can be freed using av_freep(). + * + * @return An AVMasteringDisplayMetadata filled with default values or NULL + * on failure. + */ +AVMasteringDisplayMetadata *av_mastering_display_metadata_alloc(void); + +/** + * Allocate a complete AVMasteringDisplayMetadata and add it to the frame. + * + * @param frame The frame which side data is added to. + * + * @return The AVMasteringDisplayMetadata structure to be filled by caller. + */ +AVMasteringDisplayMetadata *av_mastering_display_metadata_create_side_data(AVFrame *frame); + +#endif /* AVUTIL_MASTERING_DISPLAY_METADATA_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/mathematics.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/mathematics.h new file mode 100644 index 0000000..5490180 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/mathematics.h @@ -0,0 +1,242 @@ +/* + * copyright (c) 2005-2012 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @addtogroup lavu_math + * Mathematical utilities for working with timestamp and time base. + */ + +#ifndef AVUTIL_MATHEMATICS_H +#define AVUTIL_MATHEMATICS_H + +#include +#include +#include "attributes.h" +#include "rational.h" +#include "intfloat.h" + +#ifndef M_E +#define M_E 2.7182818284590452354 /* e */ +#endif +#ifndef M_LN2 +#define M_LN2 0.69314718055994530942 /* log_e 2 */ +#endif +#ifndef M_LN10 +#define M_LN10 2.30258509299404568402 /* log_e 10 */ +#endif +#ifndef M_LOG2_10 +#define M_LOG2_10 3.32192809488736234787 /* log_2 10 */ +#endif +#ifndef M_PHI +#define M_PHI 1.61803398874989484820 /* phi / golden ratio */ +#endif +#ifndef M_PI +#define M_PI 3.14159265358979323846 /* pi */ +#endif +#ifndef M_PI_2 +#define M_PI_2 1.57079632679489661923 /* pi/2 */ +#endif +#ifndef M_SQRT1_2 +#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */ +#endif +#ifndef M_SQRT2 +#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */ +#endif +#ifndef NAN +#define NAN av_int2float(0x7fc00000) +#endif +#ifndef INFINITY +#define INFINITY av_int2float(0x7f800000) +#endif + +/** + * @addtogroup lavu_math + * + * @{ + */ + +/** + * Rounding methods. + */ +enum AVRounding { + AV_ROUND_ZERO = 0, ///< Round toward zero. + AV_ROUND_INF = 1, ///< Round away from zero. + AV_ROUND_DOWN = 2, ///< Round toward -infinity. + AV_ROUND_UP = 3, ///< Round toward +infinity. + AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero. + /** + * Flag telling rescaling functions to pass `INT64_MIN`/`MAX` through + * unchanged, avoiding special cases for #AV_NOPTS_VALUE. 
+ * + * Unlike other values of the enumeration AVRounding, this value is a + * bitmask that must be used in conjunction with another value of the + * enumeration through a bitwise OR, in order to set behavior for normal + * cases. + * + * @code{.c} + * av_rescale_rnd(3, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX); + * // Rescaling 3: + * // Calculating 3 * 1 / 2 + * // 3 / 2 is rounded up to 2 + * // => 2 + * + * av_rescale_rnd(AV_NOPTS_VALUE, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX); + * // Rescaling AV_NOPTS_VALUE: + * // AV_NOPTS_VALUE == INT64_MIN + * // AV_NOPTS_VALUE is passed through + * // => AV_NOPTS_VALUE + * @endcode + */ + AV_ROUND_PASS_MINMAX = 8192, +}; + +/** + * Compute the greatest common divisor of two integer operands. + * + * @param a,b Operands + * @return GCD of a and b up to sign; if a >= 0 and b >= 0, return value is >= 0; + * if a == 0 and b == 0, returns 0. + */ +int64_t av_const av_gcd(int64_t a, int64_t b); + +/** + * Rescale a 64-bit integer with rounding to nearest. + * + * The operation is mathematically equivalent to `a * b / c`, but writing that + * directly can overflow. + * + * This function is equivalent to av_rescale_rnd() with #AV_ROUND_NEAR_INF. + * + * @see av_rescale_rnd(), av_rescale_q(), av_rescale_q_rnd() + */ +int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const; + +/** + * Rescale a 64-bit integer with specified rounding. + * + * The operation is mathematically equivalent to `a * b / c`, but writing that + * directly can overflow, and does not support different rounding methods. + * + * @see av_rescale(), av_rescale_q(), av_rescale_q_rnd() + */ +int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd) av_const; + +/** + * Rescale a 64-bit integer by 2 rational numbers. + * + * The operation is mathematically equivalent to `a * bq / cq`. + * + * This function is equivalent to av_rescale_q_rnd() with #AV_ROUND_NEAR_INF. 
+ * + * @see av_rescale(), av_rescale_rnd(), av_rescale_q_rnd() + */ +int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const; + +/** + * Rescale a 64-bit integer by 2 rational numbers with specified rounding. + * + * The operation is mathematically equivalent to `a * bq / cq`. + * + * @see av_rescale(), av_rescale_rnd(), av_rescale_q() + */ +int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, + enum AVRounding rnd) av_const; + +/** + * Compare two timestamps each in its own time base. + * + * @return One of the following values: + * - -1 if `ts_a` is before `ts_b` + * - 1 if `ts_a` is after `ts_b` + * - 0 if they represent the same position + * + * @warning + * The result of the function is undefined if one of the timestamps is outside + * the `int64_t` range when represented in the other's timebase. + */ +int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b); + +/** + * Compare the remainders of two integer operands divided by a common divisor. + * + * In other words, compare the least significant `log2(mod)` bits of integers + * `a` and `b`. + * + * @code{.c} + * av_compare_mod(0x11, 0x02, 0x10) < 0 // since 0x11 % 0x10 (0x1) < 0x02 % 0x10 (0x2) + * av_compare_mod(0x11, 0x02, 0x20) > 0 // since 0x11 % 0x20 (0x11) > 0x02 % 0x20 (0x02) + * @endcode + * + * @param a,b Operands + * @param mod Divisor; must be a power of 2 + * @return + * - a negative value if `a % mod < b % mod` + * - a positive value if `a % mod > b % mod` + * - zero if `a % mod == b % mod` + */ +int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod); + +/** + * Rescale a timestamp while preserving known durations. + * + * This function is designed to be called per audio packet to scale the input + * timestamp to a different time base. Compared to a simple av_rescale_q() + * call, this function is robust against possible inconsistent frame durations. 
+ * + * The `last` parameter is a state variable that must be preserved for all + * subsequent calls for the same stream. For the first call, `*last` should be + * initialized to #AV_NOPTS_VALUE. + * + * @param[in] in_tb Input time base + * @param[in] in_ts Input timestamp + * @param[in] fs_tb Duration time base; typically this is finer-grained + * (greater) than `in_tb` and `out_tb` + * @param[in] duration Duration till the next call to this function (i.e. + * duration of the current packet/frame) + * @param[in,out] last Pointer to a timestamp expressed in terms of + * `fs_tb`, acting as a state variable + * @param[in] out_tb Output timebase + * @return Timestamp expressed in terms of `out_tb` + * + * @note In the context of this function, "duration" is in term of samples, not + * seconds. + */ +int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb); + +/** + * Add a value to a timestamp. + * + * This function guarantees that when the same value is repeatly added that + * no accumulation of rounding errors occurs. + * + * @param[in] ts Input timestamp + * @param[in] ts_tb Input timestamp time base + * @param[in] inc Value to be added + * @param[in] inc_tb Time base of `inc` + */ +int64_t av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb, int64_t inc); + + +/** + * @} + */ + +#endif /* AVUTIL_MATHEMATICS_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/md5.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/md5.h new file mode 100644 index 0000000..9571c1f --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/md5.h @@ -0,0 +1,89 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_md5 + * Public header for MD5 hash function implementation. + */ + +#ifndef AVUTIL_MD5_H +#define AVUTIL_MD5_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_md5 MD5 + * @ingroup lavu_hash + * MD5 hash function implementation. + * + * @{ + */ + +extern const int av_md5_size; + +struct AVMD5; + +/** + * Allocate an AVMD5 context. + */ +struct AVMD5 *av_md5_alloc(void); + +/** + * Initialize MD5 hashing. + * + * @param ctx pointer to the function context (of size av_md5_size) + */ +void av_md5_init(struct AVMD5 *ctx); + +/** + * Update hash value. + * + * @param ctx hash function context + * @param src input data to update hash with + * @param len input data length + */ +void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, int len); + +/** + * Finish hashing and output digest value. + * + * @param ctx hash function context + * @param dst buffer where output digest value is stored + */ +void av_md5_final(struct AVMD5 *ctx, uint8_t *dst); + +/** + * Hash an array of data. 
+ * + * @param dst The output buffer to write the digest into + * @param src The data to hash + * @param len The length of the data, in bytes + */ +void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len); + +/** + * @} + */ + +#endif /* AVUTIL_MD5_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/mem.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/mem.h new file mode 100644 index 0000000..527cd03 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/mem.h @@ -0,0 +1,699 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_mem + * Memory handling functions + */ + +#ifndef AVUTIL_MEM_H +#define AVUTIL_MEM_H + +#include +#include + +#include "attributes.h" +#include "error.h" +#include "avutil.h" + +/** + * @addtogroup lavu_mem + * Utilities for manipulating memory. + * + * FFmpeg has several applications of memory that are not required of a typical + * program. For example, the computing-heavy components like video decoding and + * encoding can be sped up significantly through the use of aligned memory. 
+ * + * However, for each of FFmpeg's applications of memory, there might not be a + * recognized or standardized API for that specific use. Memory alignment, for + * instance, varies wildly depending on operating systems, architectures, and + * compilers. Hence, this component of @ref libavutil is created to make + * dealing with memory consistently possible on all platforms. + * + * @{ + * + * @defgroup lavu_mem_macros Alignment Macros + * Helper macros for declaring aligned variables. + * @{ + */ + +/** + * @def DECLARE_ALIGNED(n,t,v) + * Declare a variable that is aligned in memory. + * + * @code{.c} + * DECLARE_ALIGNED(16, uint16_t, aligned_int) = 42; + * DECLARE_ALIGNED(32, uint8_t, aligned_array)[128]; + * + * // The default-alignment equivalent would be + * uint16_t aligned_int = 42; + * uint8_t aligned_array[128]; + * @endcode + * + * @param n Minimum alignment in bytes + * @param t Type of the variable (or array element) + * @param v Name of the variable + */ + +/** + * @def DECLARE_ASM_CONST(n,t,v) + * Declare a static constant aligned variable appropriate for use in inline + * assembly code. 
+ * + * @code{.c} + * DECLARE_ASM_CONST(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008); + * @endcode + * + * @param n Minimum alignment in bytes + * @param t Type of the variable (or array element) + * @param v Name of the variable + */ + +#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C) + #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v + #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v +#elif defined(__TI_COMPILER_VERSION__) + #define DECLARE_ALIGNED(n,t,v) \ + AV_PRAGMA(DATA_ALIGN(v,n)) \ + t __attribute__((aligned(n))) v + #define DECLARE_ASM_CONST(n,t,v) \ + AV_PRAGMA(DATA_ALIGN(v,n)) \ + static const t __attribute__((aligned(n))) v +#elif defined(__DJGPP__) + #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (FFMIN(n, 16)))) v + #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v +#elif defined(__GNUC__) || defined(__clang__) + #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v + #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v +#elif defined(_MSC_VER) + #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v + #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v +#else + #define DECLARE_ALIGNED(n,t,v) t v + #define DECLARE_ASM_CONST(n,t,v) static const t v +#endif + +/** + * @} + */ + +/** + * @defgroup lavu_mem_attrs Function Attributes + * Function attributes applicable to memory handling functions. + * + * These function attributes can help compilers emit more useful warnings, or + * generate better code. + * @{ + */ + +/** + * @def av_malloc_attrib + * Function attribute denoting a malloc-like function. + * + * @see Function attribute `malloc` in GCC's documentation + */ + +#if AV_GCC_VERSION_AT_LEAST(3,1) + #define av_malloc_attrib __attribute__((__malloc__)) +#else + #define av_malloc_attrib +#endif + +/** + * @def av_alloc_size(...) 
+ * Function attribute used on a function that allocates memory, whose size is + * given by the specified parameter(s). + * + * @code{.c} + * void *av_malloc(size_t size) av_alloc_size(1); + * void *av_calloc(size_t nmemb, size_t size) av_alloc_size(1, 2); + * @endcode + * + * @param ... One or two parameter indexes, separated by a comma + * + * @see Function attribute `alloc_size` in GCC's documentation + */ + +#if AV_GCC_VERSION_AT_LEAST(4,3) + #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__))) +#else + #define av_alloc_size(...) +#endif + +/** + * @} + */ + +/** + * @defgroup lavu_mem_funcs Heap Management + * Functions responsible for allocating, freeing, and copying memory. + * + * All memory allocation functions have a built-in upper limit of `INT_MAX` + * bytes. This may be changed with av_max_alloc(), although exercise extreme + * caution when doing so. + * + * @{ + */ + +/** + * Allocate a memory block with alignment suitable for all memory accesses + * (including vectors if available on the CPU). + * + * @param size Size in bytes for the memory block to be allocated + * @return Pointer to the allocated block, or `NULL` if the block cannot + * be allocated + * @see av_mallocz() + */ +void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1); + +/** + * Allocate a memory block with alignment suitable for all memory accesses + * (including vectors if available on the CPU) and zero all the bytes of the + * block. + * + * @param size Size in bytes for the memory block to be allocated + * @return Pointer to the allocated block, or `NULL` if it cannot be allocated + * @see av_malloc() + */ +void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1); + +/** + * Allocate a memory block for an array with av_malloc(). + * + * The allocated memory will have size `size * nmemb` bytes. 
+ * + * @param nmemb Number of element + * @param size Size of a single element + * @return Pointer to the allocated block, or `NULL` if the block cannot + * be allocated + * @see av_malloc() + */ +av_alloc_size(1, 2) static inline void *av_malloc_array(size_t nmemb, size_t size) +{ + if (!size || nmemb >= INT_MAX / size) + return NULL; + return av_malloc(nmemb * size); +} + +/** + * Allocate a memory block for an array with av_mallocz(). + * + * The allocated memory will have size `size * nmemb` bytes. + * + * @param nmemb Number of elements + * @param size Size of the single element + * @return Pointer to the allocated block, or `NULL` if the block cannot + * be allocated + * + * @see av_mallocz() + * @see av_malloc_array() + */ +av_alloc_size(1, 2) static inline void *av_mallocz_array(size_t nmemb, size_t size) +{ + if (!size || nmemb >= INT_MAX / size) + return NULL; + return av_mallocz(nmemb * size); +} + +/** + * Non-inlined equivalent of av_mallocz_array(). + * + * Created for symmetry with the calloc() C function. + */ +void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib; + +/** + * Allocate, reallocate, or free a block of memory. + * + * If `ptr` is `NULL` and `size` > 0, allocate a new block. If `size` is + * zero, free the memory block pointed to by `ptr`. Otherwise, expand or + * shrink that block of memory according to `size`. + * + * @param ptr Pointer to a memory block already allocated with + * av_realloc() or `NULL` + * @param size Size in bytes of the memory block to be allocated or + * reallocated + * + * @return Pointer to a newly-reallocated block or `NULL` if the block + * cannot be reallocated or the function is used to free the memory block + * + * @warning Unlike av_malloc(), the returned pointer is not guaranteed to be + * correctly aligned. 
+ * @see av_fast_realloc() + * @see av_reallocp() + */ +void *av_realloc(void *ptr, size_t size) av_alloc_size(2); + +/** + * Allocate, reallocate, or free a block of memory through a pointer to a + * pointer. + * + * If `*ptr` is `NULL` and `size` > 0, allocate a new block. If `size` is + * zero, free the memory block pointed to by `*ptr`. Otherwise, expand or + * shrink that block of memory according to `size`. + * + * @param[in,out] ptr Pointer to a pointer to a memory block already allocated + * with av_realloc(), or a pointer to `NULL`. The pointer + * is updated on success, or freed on failure. + * @param[in] size Size in bytes for the memory block to be allocated or + * reallocated + * + * @return Zero on success, an AVERROR error code on failure + * + * @warning Unlike av_malloc(), the allocated memory is not guaranteed to be + * correctly aligned. + */ +av_warn_unused_result +int av_reallocp(void *ptr, size_t size); + +/** + * Allocate, reallocate, or free a block of memory. + * + * This function does the same thing as av_realloc(), except: + * - It takes two size arguments and allocates `nelem * elsize` bytes, + * after checking the result of the multiplication for integer overflow. + * - It frees the input block in case of failure, thus avoiding the memory + * leak with the classic + * @code{.c} + * buf = realloc(buf); + * if (!buf) + * return -1; + * @endcode + * pattern. + */ +void *av_realloc_f(void *ptr, size_t nelem, size_t elsize); + +/** + * Allocate, reallocate, or free an array. + * + * If `ptr` is `NULL` and `nmemb` > 0, allocate a new block. If + * `nmemb` is zero, free the memory block pointed to by `ptr`. 
+ * + * @param ptr Pointer to a memory block already allocated with + * av_realloc() or `NULL` + * @param nmemb Number of elements in the array + * @param size Size of the single element of the array + * + * @return Pointer to a newly-reallocated block or NULL if the block + * cannot be reallocated or the function is used to free the memory block + * + * @warning Unlike av_malloc(), the allocated memory is not guaranteed to be + * correctly aligned. + * @see av_reallocp_array() + */ +av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size); + +/** + * Allocate, reallocate, or free an array through a pointer to a pointer. + * + * If `*ptr` is `NULL` and `nmemb` > 0, allocate a new block. If `nmemb` is + * zero, free the memory block pointed to by `*ptr`. + * + * @param[in,out] ptr Pointer to a pointer to a memory block already + * allocated with av_realloc(), or a pointer to `NULL`. + * The pointer is updated on success, or freed on failure. + * @param[in] nmemb Number of elements + * @param[in] size Size of the single element + * + * @return Zero on success, an AVERROR error code on failure + * + * @warning Unlike av_malloc(), the allocated memory is not guaranteed to be + * correctly aligned. + */ +av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size); + +/** + * Reallocate the given buffer if it is not large enough, otherwise do nothing. + * + * If the given buffer is `NULL`, then a new uninitialized buffer is allocated. + * + * If the given buffer is not large enough, and reallocation fails, `NULL` is + * returned and `*size` is set to 0, but the original buffer is not changed or + * freed. 
+ * + * A typical use pattern follows: + * + * @code{.c} + * uint8_t *buf = ...; + * uint8_t *new_buf = av_fast_realloc(buf, ¤t_size, size_needed); + * if (!new_buf) { + * // Allocation failed; clean up original buffer + * av_freep(&buf); + * return AVERROR(ENOMEM); + * } + * @endcode + * + * @param[in,out] ptr Already allocated buffer, or `NULL` + * @param[in,out] size Pointer to current size of buffer `ptr`. `*size` is + * changed to `min_size` in case of success or 0 in + * case of failure + * @param[in] min_size New size of buffer `ptr` + * @return `ptr` if the buffer is large enough, a pointer to newly reallocated + * buffer if the buffer was not large enough, or `NULL` in case of + * error + * @see av_realloc() + * @see av_fast_malloc() + */ +void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Allocate a buffer, reusing the given one if large enough. + * + * Contrary to av_fast_realloc(), the current buffer contents might not be + * preserved and on error the old buffer is freed, thus no special handling to + * avoid memleaks is necessary. + * + * `*ptr` is allowed to be `NULL`, in which case allocation always happens if + * `size_needed` is greater than 0. + * + * @code{.c} + * uint8_t *buf = ...; + * av_fast_malloc(&buf, ¤t_size, size_needed); + * if (!buf) { + * // Allocation failed; buf already freed + * return AVERROR(ENOMEM); + * } + * @endcode + * + * @param[in,out] ptr Pointer to pointer to an already allocated buffer. + * `*ptr` will be overwritten with pointer to new + * buffer on success or `NULL` on failure + * @param[in,out] size Pointer to current size of buffer `*ptr`. `*size` is + * changed to `min_size` in case of success or 0 in + * case of failure + * @param[in] min_size New size of buffer `*ptr` + * @see av_realloc() + * @see av_fast_mallocz() + */ +void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Allocate and clear a buffer, reusing the given one if large enough. 
+ * + * Like av_fast_malloc(), but all newly allocated space is initially cleared. + * Reused buffer is not cleared. + * + * `*ptr` is allowed to be `NULL`, in which case allocation always happens if + * `size_needed` is greater than 0. + * + * @param[in,out] ptr Pointer to pointer to an already allocated buffer. + * `*ptr` will be overwritten with pointer to new + * buffer on success or `NULL` on failure + * @param[in,out] size Pointer to current size of buffer `*ptr`. `*size` is + * changed to `min_size` in case of success or 0 in + * case of failure + * @param[in] min_size New size of buffer `*ptr` + * @see av_fast_malloc() + */ +void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size); + +/** + * Free a memory block which has been allocated with a function of av_malloc() + * or av_realloc() family. + * + * @param ptr Pointer to the memory block which should be freed. + * + * @note `ptr = NULL` is explicitly allowed. + * @note It is recommended that you use av_freep() instead, to prevent leaving + * behind dangling pointers. + * @see av_freep() + */ +void av_free(void *ptr); + +/** + * Free a memory block which has been allocated with a function of av_malloc() + * or av_realloc() family, and set the pointer pointing to it to `NULL`. + * + * @code{.c} + * uint8_t *buf = av_malloc(16); + * av_free(buf); + * // buf now contains a dangling pointer to freed memory, and accidental + * // dereference of buf will result in a use-after-free, which may be a + * // security risk. + * + * uint8_t *buf = av_malloc(16); + * av_freep(&buf); + * // buf is now NULL, and accidental dereference will only result in a + * // NULL-pointer dereference. + * @endcode + * + * @param ptr Pointer to the pointer to the memory block which should be freed + * @note `*ptr = NULL` is safe and leads to no action. + * @see av_free() + */ +void av_freep(void *ptr); + +/** + * Duplicate a string. 
+ * + * @param s String to be duplicated + * @return Pointer to a newly-allocated string containing a + * copy of `s` or `NULL` if the string cannot be allocated + * @see av_strndup() + */ +char *av_strdup(const char *s) av_malloc_attrib; + +/** + * Duplicate a substring of a string. + * + * @param s String to be duplicated + * @param len Maximum length of the resulting string (not counting the + * terminating byte) + * @return Pointer to a newly-allocated string containing a + * substring of `s` or `NULL` if the string cannot be allocated + */ +char *av_strndup(const char *s, size_t len) av_malloc_attrib; + +/** + * Duplicate a buffer with av_malloc(). + * + * @param p Buffer to be duplicated + * @param size Size in bytes of the buffer copied + * @return Pointer to a newly allocated buffer containing a + * copy of `p` or `NULL` if the buffer cannot be allocated + */ +void *av_memdup(const void *p, size_t size); + +/** + * Overlapping memcpy() implementation. + * + * @param dst Destination buffer + * @param back Number of bytes back to start copying (i.e. the initial size of + * the overlapping window); must be > 0 + * @param cnt Number of bytes to copy; must be >= 0 + * + * @note `cnt > back` is valid, this will copy the bytes we just copied, + * thus creating a repeating pattern with a period length of `back`. + */ +void av_memcpy_backptr(uint8_t *dst, int back, int cnt); + +/** + * @} + */ + +/** + * @defgroup lavu_mem_dynarray Dynamic Array + * + * Utilities to make an array grow when needed. + * + * Sometimes, the programmer would want to have an array that can grow when + * needed. The libavutil dynamic array utilities fill that need. + * + * libavutil supports two systems of appending elements onto a dynamically + * allocated array, the first one storing the pointer to the value in the + * array, and the second storing the value directly. 
In both systems, the + * caller is responsible for maintaining a variable containing the length of + * the array, as well as freeing of the array after use. + * + * The first system stores pointers to values in a block of dynamically + * allocated memory. Since only pointers are stored, the function does not need + * to know the size of the type. Both av_dynarray_add() and + * av_dynarray_add_nofree() implement this system. + * + * @code + * type **array = NULL; //< an array of pointers to values + * int nb = 0; //< a variable to keep track of the length of the array + * + * type to_be_added = ...; + * type to_be_added2 = ...; + * + * av_dynarray_add(&array, &nb, &to_be_added); + * if (nb == 0) + * return AVERROR(ENOMEM); + * + * av_dynarray_add(&array, &nb, &to_be_added2); + * if (nb == 0) + * return AVERROR(ENOMEM); + * + * // Now: + * // nb == 2 + * // &to_be_added == array[0] + * // &to_be_added2 == array[1] + * + * av_freep(&array); + * @endcode + * + * The second system stores the value directly in a block of memory. As a + * result, the function has to know the size of the type. av_dynarray2_add() + * implements this mechanism. + * + * @code + * type *array = NULL; //< an array of values + * int nb = 0; //< a variable to keep track of the length of the array + * + * type to_be_added = ...; + * type to_be_added2 = ...; + * + * type *addr = av_dynarray2_add((void **)&array, &nb, sizeof(*array), NULL); + * if (!addr) + * return AVERROR(ENOMEM); + * memcpy(addr, &to_be_added, sizeof(to_be_added)); + * + * // Shortcut of the above. + * type *addr = av_dynarray2_add((void **)&array, &nb, sizeof(*array), + * (const void *)&to_be_added2); + * if (!addr) + * return AVERROR(ENOMEM); + * + * // Now: + * // nb == 2 + * // to_be_added == array[0] + * // to_be_added2 == array[1] + * + * av_freep(&array); + * @endcode + * + * @{ + */ + +/** + * Add the pointer to an element to a dynamic array. 
+ * + * The array to grow is supposed to be an array of pointers to + * structures, and the element to add must be a pointer to an already + * allocated structure. + * + * The array is reallocated when its size reaches powers of 2. + * Therefore, the amortized cost of adding an element is constant. + * + * In case of success, the pointer to the array is updated in order to + * point to the new grown array, and the number pointed to by `nb_ptr` + * is incremented. + * In case of failure, the array is freed, `*tab_ptr` is set to `NULL` and + * `*nb_ptr` is set to 0. + * + * @param[in,out] tab_ptr Pointer to the array to grow + * @param[in,out] nb_ptr Pointer to the number of elements in the array + * @param[in] elem Element to add + * @see av_dynarray_add_nofree(), av_dynarray2_add() + */ +void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem); + +/** + * Add an element to a dynamic array. + * + * Function has the same functionality as av_dynarray_add(), + * but it doesn't free memory on fails. It returns error code + * instead and leave current buffer untouched. + * + * @return >=0 on success, negative otherwise + * @see av_dynarray_add(), av_dynarray2_add() + */ +av_warn_unused_result +int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem); + +/** + * Add an element of size `elem_size` to a dynamic array. + * + * The array is reallocated when its number of elements reaches powers of 2. + * Therefore, the amortized cost of adding an element is constant. + * + * In case of success, the pointer to the array is updated in order to + * point to the new grown array, and the number pointed to by `nb_ptr` + * is incremented. + * In case of failure, the array is freed, `*tab_ptr` is set to `NULL` and + * `*nb_ptr` is set to 0. 
+ * + * @param[in,out] tab_ptr Pointer to the array to grow + * @param[in,out] nb_ptr Pointer to the number of elements in the array + * @param[in] elem_size Size in bytes of an element in the array + * @param[in] elem_data Pointer to the data of the element to add. If + * `NULL`, the space of the newly added element is + * allocated but left uninitialized. + * + * @return Pointer to the data of the element to copy in the newly allocated + * space + * @see av_dynarray_add(), av_dynarray_add_nofree() + */ +void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size, + const uint8_t *elem_data); + +/** + * @} + */ + +/** + * @defgroup lavu_mem_misc Miscellaneous Functions + * + * Other functions related to memory allocation. + * + * @{ + */ + +/** + * Multiply two `size_t` values checking for overflow. + * + * @param[in] a,b Operands of multiplication + * @param[out] r Pointer to the result of the operation + * @return 0 on success, AVERROR(EINVAL) on overflow + */ +static inline int av_size_mult(size_t a, size_t b, size_t *r) +{ + size_t t = a * b; + /* Hack inspired from glibc: don't try the division if nelem and elsize + * are both less than sqrt(SIZE_MAX). */ + if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b) + return AVERROR(EINVAL); + *r = t; + return 0; +} + +/** + * Set the maximum size that may be allocated in one block. + * + * The value specified with this function is effective for all libavutil's @ref + * lavu_mem_funcs "heap management functions." + * + * By default, the max value is defined as `INT_MAX`. + * + * @param max Value to be set as the new maximum size + * + * @warning Exercise extreme caution when using this function. Don't touch + * this if you do not understand the full consequence of doing so. 
+ */ +void av_max_alloc(size_t max); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_MEM_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/motion_vector.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/motion_vector.h new file mode 100644 index 0000000..ec29556 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/motion_vector.h @@ -0,0 +1,57 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MOTION_VECTOR_H +#define AVUTIL_MOTION_VECTOR_H + +#include + +typedef struct AVMotionVector { + /** + * Where the current macroblock comes from; negative value when it comes + * from the past, positive value when it comes from the future. + * XXX: set exact relative ref frame reference instead of a +/- 1 "direction". + */ + int32_t source; + /** + * Width and height of the block. + */ + uint8_t w, h; + /** + * Absolute source position. Can be outside the frame area. + */ + int16_t src_x, src_y; + /** + * Absolute destination position. Can be outside the frame area. + */ + int16_t dst_x, dst_y; + /** + * Extra flag information. + * Currently unused. 
+ */ + uint64_t flags; + /** + * Motion vector + * src_x = dst_x + motion_x / motion_scale + * src_y = dst_y + motion_y / motion_scale + */ + int32_t motion_x, motion_y; + uint16_t motion_scale; +} AVMotionVector; + +#endif /* AVUTIL_MOTION_VECTOR_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/murmur3.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/murmur3.h new file mode 100644 index 0000000..6a1694c --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/murmur3.h @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2013 Reimar Döffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_murmur3 + * Public header for MurmurHash3 hash function implementation. + */ + +#ifndef AVUTIL_MURMUR3_H +#define AVUTIL_MURMUR3_H + +#include + +/** + * @defgroup lavu_murmur3 Murmur3 + * @ingroup lavu_hash + * MurmurHash3 hash function implementation. 
+ * + * MurmurHash3 is a non-cryptographic hash function, of which three + * incompatible versions were created by its inventor Austin Appleby: + * + * - 32-bit output + * - 128-bit output for 32-bit platforms + * - 128-bit output for 64-bit platforms + * + * FFmpeg only implements the last variant: 128-bit output designed for 64-bit + * platforms. Even though the hash function was designed for 64-bit platforms, + * the function in reality works on 32-bit systems too, only with reduced + * performance. + * + * @anchor lavu_murmur3_seedinfo + * By design, MurmurHash3 requires a seed to operate. In response to this, + * libavutil provides two functions for hash initiation, one that requires a + * seed (av_murmur3_init_seeded()) and one that uses a fixed arbitrary integer + * as the seed, and therefore does not (av_murmur3_init()). + * + * To make hashes comparable, you should provide the same seed for all calls to + * this hash function -- if you are supplying one yourself, that is. + * + * @{ + */ + +/** + * Allocate an AVMurMur3 hash context. + * + * @return Uninitialized hash context or `NULL` in case of error + */ +struct AVMurMur3 *av_murmur3_alloc(void); + +/** + * Initialize or reinitialize an AVMurMur3 hash context with a seed. + * + * @param[out] c Hash context + * @param[in] seed Random seed + * + * @see av_murmur3_init() + * @see @ref lavu_murmur3_seedinfo "Detailed description" on a discussion of + * seeds for MurmurHash3. + */ +void av_murmur3_init_seeded(struct AVMurMur3 *c, uint64_t seed); + +/** + * Initialize or reinitialize an AVMurMur3 hash context. + * + * Equivalent to av_murmur3_init_seeded() with a built-in seed. + * + * @param[out] c Hash context + * + * @see av_murmur3_init_seeded() + * @see @ref lavu_murmur3_seedinfo "Detailed description" on a discussion of + * seeds for MurmurHash3. + */ +void av_murmur3_init(struct AVMurMur3 *c); + +/** + * Update hash context with new data. 
+ * + * @param[out] c Hash context + * @param[in] src Input data to update hash with + * @param[in] len Number of bytes to read from `src` + */ +void av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, int len); + +/** + * Finish hashing and output digest value. + * + * @param[in,out] c Hash context + * @param[out] dst Buffer where output digest value is stored + */ +void av_murmur3_final(struct AVMurMur3 *c, uint8_t dst[16]); + +/** + * @} + */ + +#endif /* AVUTIL_MURMUR3_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/opt.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/opt.h new file mode 100644 index 0000000..0d89379 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/opt.h @@ -0,0 +1,866 @@ +/* + * AVOptions + * copyright (c) 2005 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_OPT_H +#define AVUTIL_OPT_H + +/** + * @file + * AVOptions + */ + +#include "rational.h" +#include "avutil.h" +#include "dict.h" +#include "log.h" +#include "pixfmt.h" +#include "samplefmt.h" +#include "version.h" + +/** + * @defgroup avoptions AVOptions + * @ingroup lavu_data + * @{ + * AVOptions provide a generic system to declare options on arbitrary structs + * ("objects"). An option can have a help text, a type and a range of possible + * values. Options may then be enumerated, read and written to. + * + * @section avoptions_implement Implementing AVOptions + * This section describes how to add AVOptions capabilities to a struct. + * + * All AVOptions-related information is stored in an AVClass. Therefore + * the first member of the struct should be a pointer to an AVClass describing it. + * The option field of the AVClass must be set to a NULL-terminated static array + * of AVOptions. Each AVOption must have a non-empty name, a type, a default + * value and for number-type AVOptions also a range of allowed values. It must + * also declare an offset in bytes from the start of the struct, where the field + * associated with this AVOption is located. Other fields in the AVOption struct + * should also be set when applicable, but are not required. 
+ * + * The following example illustrates an AVOptions-enabled struct: + * @code + * typedef struct test_struct { + * const AVClass *class; + * int int_opt; + * char *str_opt; + * uint8_t *bin_opt; + * int bin_len; + * } test_struct; + * + * static const AVOption test_options[] = { + * { "test_int", "This is a test option of int type.", offsetof(test_struct, int_opt), + * AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX }, + * { "test_str", "This is a test option of string type.", offsetof(test_struct, str_opt), + * AV_OPT_TYPE_STRING }, + * { "test_bin", "This is a test option of binary type.", offsetof(test_struct, bin_opt), + * AV_OPT_TYPE_BINARY }, + * { NULL }, + * }; + * + * static const AVClass test_class = { + * .class_name = "test class", + * .item_name = av_default_item_name, + * .option = test_options, + * .version = LIBAVUTIL_VERSION_INT, + * }; + * @endcode + * + * Next, when allocating your struct, you must ensure that the AVClass pointer + * is set to the correct value. Then, av_opt_set_defaults() can be called to + * initialize defaults. After that the struct is ready to be used with the + * AVOptions API. + * + * When cleaning up, you may use the av_opt_free() function to automatically + * free all the allocated string and binary options. + * + * Continuing with the above example: + * + * @code + * test_struct *alloc_test_struct(void) + * { + * test_struct *ret = av_mallocz(sizeof(*ret)); + * ret->class = &test_class; + * av_opt_set_defaults(ret); + * return ret; + * } + * void free_test_struct(test_struct **foo) + * { + * av_opt_free(*foo); + * av_freep(foo); + * } + * @endcode + * + * @subsection avoptions_implement_nesting Nesting + * It may happen that an AVOptions-enabled struct contains another + * AVOptions-enabled struct as a member (e.g. AVCodecContext in + * libavcodec exports generic options, while its priv_data field exports + * codec-specific options). 
In such a case, it is possible to set up the + * parent struct to export a child's options. To do that, simply + * implement AVClass.child_next() and AVClass.child_class_next() in the + * parent struct's AVClass. + * Assuming that the test_struct from above now also contains a + * child_struct field: + * + * @code + * typedef struct child_struct { + * AVClass *class; + * int flags_opt; + * } child_struct; + * static const AVOption child_opts[] = { + * { "test_flags", "This is a test option of flags type.", + * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX }, + * { NULL }, + * }; + * static const AVClass child_class = { + * .class_name = "child class", + * .item_name = av_default_item_name, + * .option = child_opts, + * .version = LIBAVUTIL_VERSION_INT, + * }; + * + * void *child_next(void *obj, void *prev) + * { + * test_struct *t = obj; + * if (!prev && t->child_struct) + * return t->child_struct; + * return NULL + * } + * const AVClass child_class_next(const AVClass *prev) + * { + * return prev ? NULL : &child_class; + * } + * @endcode + * Putting child_next() and child_class_next() as defined above into + * test_class will now make child_struct's options accessible through + * test_struct (again, proper setup as described above needs to be done on + * child_struct right after it is created). + * + * From the above example it might not be clear why both child_next() + * and child_class_next() are needed. The distinction is that child_next() + * iterates over actually existing objects, while child_class_next() + * iterates over all possible child classes. E.g. if an AVCodecContext + * was initialized to use a codec which has private options, then its + * child_next() will return AVCodecContext.priv_data and finish + * iterating. OTOH child_class_next() on AVCodecContext.av_class will + * iterate over all available codecs with private options. 
+ * + * @subsection avoptions_implement_named_constants Named constants + * It is possible to create named constants for options. Simply set the unit + * field of the option the constants should apply to a string and + * create the constants themselves as options of type AV_OPT_TYPE_CONST + * with their unit field set to the same string. + * Their default_val field should contain the value of the named + * constant. + * For example, to add some named constants for the test_flags option + * above, put the following into the child_opts array: + * @code + * { "test_flags", "This is a test option of flags type.", + * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, "test_unit" }, + * { "flag1", "This is a flag with value 16", 0, AV_OPT_TYPE_CONST, { .i64 = 16 }, 0, 0, "test_unit" }, + * @endcode + * + * @section avoptions_use Using AVOptions + * This section deals with accessing options in an AVOptions-enabled struct. + * Such structs in FFmpeg are e.g. AVCodecContext in libavcodec or + * AVFormatContext in libavformat. + * + * @subsection avoptions_use_examine Examining AVOptions + * The basic functions for examining options are av_opt_next(), which iterates + * over all options defined for one object, and av_opt_find(), which searches + * for an option with the given name. + * + * The situation is more complicated with nesting. An AVOptions-enabled struct + * may have AVOptions-enabled children. Passing the AV_OPT_SEARCH_CHILDREN flag + * to av_opt_find() will make the function search children recursively. + * + * For enumerating there are basically two cases. The first is when you want to + * get all options that may potentially exist on the struct and its children + * (e.g. when constructing documentation). In that case you should call + * av_opt_child_class_next() recursively on the parent struct's AVClass. 
The + * second case is when you have an already initialized struct with all its + * children and you want to get all options that can be actually written or read + * from it. In that case you should call av_opt_child_next() recursively (and + * av_opt_next() on each result). + * + * @subsection avoptions_use_get_set Reading and writing AVOptions + * When setting options, you often have a string read directly from the + * user. In such a case, simply passing it to av_opt_set() is enough. For + * non-string type options, av_opt_set() will parse the string according to the + * option type. + * + * Similarly av_opt_get() will read any option type and convert it to a string + * which will be returned. Do not forget that the string is allocated, so you + * have to free it with av_free(). + * + * In some cases it may be more convenient to put all options into an + * AVDictionary and call av_opt_set_dict() on it. A specific case of this + * are the format/codec open functions in lavf/lavc which take a dictionary + * filled with option as a parameter. This makes it possible to set some options + * that cannot be set otherwise, since e.g. the input file format is not known + * before the file is actually opened. 
+ */ + +enum AVOptionType{ + AV_OPT_TYPE_FLAGS, + AV_OPT_TYPE_INT, + AV_OPT_TYPE_INT64, + AV_OPT_TYPE_DOUBLE, + AV_OPT_TYPE_FLOAT, + AV_OPT_TYPE_STRING, + AV_OPT_TYPE_RATIONAL, + AV_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length + AV_OPT_TYPE_DICT, + AV_OPT_TYPE_UINT64, + AV_OPT_TYPE_CONST = 128, + AV_OPT_TYPE_IMAGE_SIZE = MKBETAG('S','I','Z','E'), ///< offset must point to two consecutive integers + AV_OPT_TYPE_PIXEL_FMT = MKBETAG('P','F','M','T'), + AV_OPT_TYPE_SAMPLE_FMT = MKBETAG('S','F','M','T'), + AV_OPT_TYPE_VIDEO_RATE = MKBETAG('V','R','A','T'), ///< offset must point to AVRational + AV_OPT_TYPE_DURATION = MKBETAG('D','U','R',' '), + AV_OPT_TYPE_COLOR = MKBETAG('C','O','L','R'), + AV_OPT_TYPE_CHANNEL_LAYOUT = MKBETAG('C','H','L','A'), + AV_OPT_TYPE_BOOL = MKBETAG('B','O','O','L'), +}; + +/** + * AVOption + */ +typedef struct AVOption { + const char *name; + + /** + * short English help text + * @todo What about other languages? + */ + const char *help; + + /** + * The offset relative to the context structure where the option + * value is stored. It should be 0 for named constants. + */ + int offset; + enum AVOptionType type; + + /** + * the default value for scalar options + */ + union { + int64_t i64; + double dbl; + const char *str; + /* TODO those are unused now */ + AVRational q; + } default_val; + double min; ///< minimum valid value for the option + double max; ///< maximum valid value for the option + + int flags; +#define AV_OPT_FLAG_ENCODING_PARAM 1 ///< a generic parameter which can be set by the user for muxing or encoding +#define AV_OPT_FLAG_DECODING_PARAM 2 ///< a generic parameter which can be set by the user for demuxing or decoding +#if FF_API_OPT_TYPE_METADATA +#define AV_OPT_FLAG_METADATA 4 ///< some data extracted or inserted into the file like title, comment, ... 
+#endif +#define AV_OPT_FLAG_AUDIO_PARAM 8 +#define AV_OPT_FLAG_VIDEO_PARAM 16 +#define AV_OPT_FLAG_SUBTITLE_PARAM 32 +/** + * The option is intended for exporting values to the caller. + */ +#define AV_OPT_FLAG_EXPORT 64 +/** + * The option may not be set through the AVOptions API, only read. + * This flag only makes sense when AV_OPT_FLAG_EXPORT is also set. + */ +#define AV_OPT_FLAG_READONLY 128 +#define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering +//FIXME think about enc-audio, ... style flags + + /** + * The logical unit to which the option belongs. Non-constant + * options and corresponding named constants share the same + * unit. May be NULL. + */ + const char *unit; +} AVOption; + +/** + * A single allowed range of values, or a single allowed value. + */ +typedef struct AVOptionRange { + const char *str; + /** + * Value range. + * For string ranges this represents the min/max length. + * For dimensions this represents the min/max pixel count or width/height in multi-component case. + */ + double value_min, value_max; + /** + * Value's component range. + * For string this represents the unicode range for chars, 0-127 limits to ASCII. + */ + double component_min, component_max; + /** + * Range flag. + * If set to 1 the struct encodes a range, if set to 0 a single value. + */ + int is_range; +} AVOptionRange; + +/** + * List of AVOptionRange structs. + */ +typedef struct AVOptionRanges { + /** + * Array of option ranges. + * + * Most of option types use just one component. + * Following describes multi-component option types: + * + * AV_OPT_TYPE_IMAGE_SIZE: + * component index 0: range of pixel count (width * height). + * component index 1: range of width. + * component index 2: range of height. + * + * @note To obtain multi-component version of this structure, user must + * provide AV_OPT_MULTI_COMPONENT_RANGE to av_opt_query_ranges or + * av_opt_query_ranges_default function. 
+ * + * Multi-component range can be read as in following example: + * + * @code + * int range_index, component_index; + * AVOptionRanges *ranges; + * AVOptionRange *range[3]; //may require more than 3 in the future. + * av_opt_query_ranges(&ranges, obj, key, AV_OPT_MULTI_COMPONENT_RANGE); + * for (range_index = 0; range_index < ranges->nb_ranges; range_index++) { + * for (component_index = 0; component_index < ranges->nb_components; component_index++) + * range[component_index] = ranges->range[ranges->nb_ranges * component_index + range_index]; + * //do something with range here. + * } + * av_opt_freep_ranges(&ranges); + * @endcode + */ + AVOptionRange **range; + /** + * Number of ranges per component. + */ + int nb_ranges; + /** + * Number of componentes. + */ + int nb_components; +} AVOptionRanges; + +/** + * Show the obj options. + * + * @param req_flags requested flags for the options to show. Show only the + * options for which it is opt->flags & req_flags. + * @param rej_flags rejected flags for the options to show. Show only the + * options for which it is !(opt->flags & req_flags). + * @param av_log_obj log context to use for showing the options + */ +int av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags); + +/** + * Set the values of all AVOption fields to their default values. + * + * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass) + */ +void av_opt_set_defaults(void *s); + +/** + * Set the values of all AVOption fields to their default values. Only these + * AVOption fields for which (opt->flags & mask) == flags will have their + * default applied to s. + * + * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass) + * @param mask combination of AV_OPT_FLAG_* + * @param flags combination of AV_OPT_FLAG_* + */ +void av_opt_set_defaults2(void *s, int mask, int flags); + +/** + * Parse the key/value pairs list in opts. 
For each key/value pair + * found, stores the value in the field in ctx that is named like the + * key. ctx must be an AVClass context, storing is done using + * AVOptions. + * + * @param opts options string to parse, may be NULL + * @param key_val_sep a 0-terminated list of characters used to + * separate key from value + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other + * @return the number of successfully set key/value pairs, or a negative + * value corresponding to an AVERROR code in case of error: + * AVERROR(EINVAL) if opts cannot be parsed, + * the error code issued by av_opt_set() if a key/value pair + * cannot be set + */ +int av_set_options_string(void *ctx, const char *opts, + const char *key_val_sep, const char *pairs_sep); + +/** + * Parse the key-value pairs list in opts. For each key=value pair found, + * set the value of the corresponding option in ctx. + * + * @param ctx the AVClass object to set options on + * @param opts the options string, key-value pairs separated by a + * delimiter + * @param shorthand a NULL-terminated array of options names for shorthand + * notation: if the first field in opts has no key part, + * the key is taken from the first element of shorthand; + * then again for the second, etc., until either opts is + * finished, shorthand is finished or a named option is + * found; after that, all options must be named + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value, for example '=' + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other, for example ':' or ',' + * @return the number of successfully set key=value pairs, or a negative + * value corresponding to an AVERROR code in case of error: + * AVERROR(EINVAL) if opts cannot be parsed, + * the error code issued by av_set_string3() if a key/value pair + * cannot be set + * + * Options names must use only the following characters: a-z A-Z 
0-9 - . / _ + * Separators must use characters distinct from option names and from each + * other. + */ +int av_opt_set_from_string(void *ctx, const char *opts, + const char *const *shorthand, + const char *key_val_sep, const char *pairs_sep); +/** + * Free all allocated objects in obj. + */ +void av_opt_free(void *obj); + +/** + * Check whether a particular flag is set in a flags field. + * + * @param field_name the name of the flag field option + * @param flag_name the name of the flag to check + * @return non-zero if the flag is set, zero if the flag isn't set, + * isn't of the right type, or the flags field doesn't exist. + */ +int av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name); + +/** + * Set all the options from a given dictionary on an object. + * + * @param obj a struct whose first element is a pointer to AVClass + * @param options options to process. This dictionary will be freed and replaced + * by a new one containing all options not found in obj. + * Of course this new dictionary needs to be freed by caller + * with av_dict_free(). + * + * @return 0 on success, a negative AVERROR if some option was found in obj, + * but could not be set. + * + * @see av_dict_copy() + */ +int av_opt_set_dict(void *obj, struct AVDictionary **options); + + +/** + * Set all the options from a given dictionary on an object. + * + * @param obj a struct whose first element is a pointer to AVClass + * @param options options to process. This dictionary will be freed and replaced + * by a new one containing all options not found in obj. + * Of course this new dictionary needs to be freed by caller + * with av_dict_free(). + * @param search_flags A combination of AV_OPT_SEARCH_*. + * + * @return 0 on success, a negative AVERROR if some option was found in obj, + * but could not be set. 
+ * + * @see av_dict_copy() + */ +int av_opt_set_dict2(void *obj, struct AVDictionary **options, int search_flags); + +/** + * Extract a key-value pair from the beginning of a string. + * + * @param ropts pointer to the options string, will be updated to + * point to the rest of the string (one of the pairs_sep + * or the final NUL) + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value, for example '=' + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other, for example ':' or ',' + * @param flags flags; see the AV_OPT_FLAG_* values below + * @param rkey parsed key; must be freed using av_free() + * @param rval parsed value; must be freed using av_free() + * + * @return >=0 for success, or a negative value corresponding to an + * AVERROR code in case of error; in particular: + * AVERROR(EINVAL) if no key is present + * + */ +int av_opt_get_key_value(const char **ropts, + const char *key_val_sep, const char *pairs_sep, + unsigned flags, + char **rkey, char **rval); + +enum { + + /** + * Accept to parse a value without a key; the key will then be returned + * as NULL. + */ + AV_OPT_FLAG_IMPLICIT_KEY = 1, +}; + +/** + * @defgroup opt_eval_funcs Evaluating option strings + * @{ + * This group of functions can be used to evaluate option strings + * and get numbers out of them. They do the same thing as av_opt_set(), + * except the result is written into the caller-supplied pointer. + * + * @param obj a struct whose first element is a pointer to AVClass. + * @param o an option for which the string is to be evaluated. + * @param val string to be evaluated. + * @param *_out value of the string will be written here. + * + * @return 0 on success, a negative number on failure. 
+ */ +int av_opt_eval_flags (void *obj, const AVOption *o, const char *val, int *flags_out); +int av_opt_eval_int (void *obj, const AVOption *o, const char *val, int *int_out); +int av_opt_eval_int64 (void *obj, const AVOption *o, const char *val, int64_t *int64_out); +int av_opt_eval_float (void *obj, const AVOption *o, const char *val, float *float_out); +int av_opt_eval_double(void *obj, const AVOption *o, const char *val, double *double_out); +int av_opt_eval_q (void *obj, const AVOption *o, const char *val, AVRational *q_out); +/** + * @} + */ + +#define AV_OPT_SEARCH_CHILDREN (1 << 0) /**< Search in possible children of the + given object first. */ +/** + * The obj passed to av_opt_find() is fake -- only a double pointer to AVClass + * instead of a required pointer to a struct containing AVClass. This is + * useful for searching for options without needing to allocate the corresponding + * object. + */ +#define AV_OPT_SEARCH_FAKE_OBJ (1 << 1) + +/** + * In av_opt_get, return NULL if the option has a pointer type and is set to NULL, + * rather than returning an empty string. + */ +#define AV_OPT_ALLOW_NULL (1 << 2) + +/** + * Allows av_opt_query_ranges and av_opt_query_ranges_default to return more than + * one component for certain option types. + * @see AVOptionRanges for details. + */ +#define AV_OPT_MULTI_COMPONENT_RANGE (1 << 12) + +/** + * Look for an option in an object. Consider only options which + * have all the specified flags set. + * + * @param[in] obj A pointer to a struct whose first element is a + * pointer to an AVClass. + * Alternatively a double pointer to an AVClass, if + * AV_OPT_SEARCH_FAKE_OBJ search flag is set. + * @param[in] name The name of the option to look for. + * @param[in] unit When searching for named constants, name of the unit + * it belongs to. + * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG). + * @param search_flags A combination of AV_OPT_SEARCH_*. 
+ * + * @return A pointer to the option found, or NULL if no option + * was found. + * + * @note Options found with AV_OPT_SEARCH_CHILDREN flag may not be settable + * directly with av_opt_set(). Use special calls which take an options + * AVDictionary (e.g. avformat_open_input()) to set options found with this + * flag. + */ +const AVOption *av_opt_find(void *obj, const char *name, const char *unit, + int opt_flags, int search_flags); + +/** + * Look for an option in an object. Consider only options which + * have all the specified flags set. + * + * @param[in] obj A pointer to a struct whose first element is a + * pointer to an AVClass. + * Alternatively a double pointer to an AVClass, if + * AV_OPT_SEARCH_FAKE_OBJ search flag is set. + * @param[in] name The name of the option to look for. + * @param[in] unit When searching for named constants, name of the unit + * it belongs to. + * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG). + * @param search_flags A combination of AV_OPT_SEARCH_*. + * @param[out] target_obj if non-NULL, an object to which the option belongs will be + * written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present + * in search_flags. This parameter is ignored if search_flags contain + * AV_OPT_SEARCH_FAKE_OBJ. + * + * @return A pointer to the option found, or NULL if no option + * was found. + */ +const AVOption *av_opt_find2(void *obj, const char *name, const char *unit, + int opt_flags, int search_flags, void **target_obj); + +/** + * Iterate over all AVOptions belonging to obj. + * + * @param obj an AVOptions-enabled struct or a double pointer to an + * AVClass describing it. + * @param prev result of the previous call to av_opt_next() on this object + * or NULL + * @return next AVOption or NULL + */ +const AVOption *av_opt_next(const void *obj, const AVOption *prev); + +/** + * Iterate over AVOptions-enabled children of obj. 
+ * + * @param prev result of a previous call to this function or NULL + * @return next AVOptions-enabled child or NULL + */ +void *av_opt_child_next(void *obj, void *prev); + +/** + * Iterate over potential AVOptions-enabled children of parent. + * + * @param prev result of a previous call to this function or NULL + * @return AVClass corresponding to next potential child or NULL + */ +const AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *prev); + +/** + * @defgroup opt_set_funcs Option setting functions + * @{ + * Those functions set the field of obj with the given name to value. + * + * @param[in] obj A struct whose first element is a pointer to an AVClass. + * @param[in] name the name of the field to set + * @param[in] val The value to set. In case of av_opt_set() if the field is not + * of a string type, then the given string is parsed. + * SI postfixes and some named scalars are supported. + * If the field is of a numeric type, it has to be a numeric or named + * scalar. Behavior with more than one scalar and +- infix operators + * is undefined. + * If the field is of a flags type, it has to be a sequence of numeric + * scalars or named flags separated by '+' or '-'. Prefixing a flag + * with '+' causes it to be set without affecting the other flags; + * similarly, '-' unsets a flag. + * @param search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN + * is passed here, then the option may be set on a child of obj. 
+ * + * @return 0 if the value has been set, or an AVERROR code in case of + * error: + * AVERROR_OPTION_NOT_FOUND if no matching option exists + * AVERROR(ERANGE) if the value is out of range + * AVERROR(EINVAL) if the value is not valid + */ +int av_opt_set (void *obj, const char *name, const char *val, int search_flags); +int av_opt_set_int (void *obj, const char *name, int64_t val, int search_flags); +int av_opt_set_double (void *obj, const char *name, double val, int search_flags); +int av_opt_set_q (void *obj, const char *name, AVRational val, int search_flags); +int av_opt_set_bin (void *obj, const char *name, const uint8_t *val, int size, int search_flags); +int av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_flags); +int av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags); +int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags); +int av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags); +int av_opt_set_channel_layout(void *obj, const char *name, int64_t ch_layout, int search_flags); +/** + * @note Any old dictionary present is discarded and replaced with a copy of the new one. The + * caller still owns val is and responsible for freeing it. + */ +int av_opt_set_dict_val(void *obj, const char *name, const AVDictionary *val, int search_flags); + +/** + * Set a binary option to an integer list. + * + * @param obj AVClass object to set options on + * @param name name of the binary option + * @param val pointer to an integer list (must have the correct type with + * regard to the contents of the list) + * @param term list terminator (usually 0 or -1) + * @param flags search flags + */ +#define av_opt_set_int_list(obj, name, val, term, flags) \ + (av_int_list_length(val, term) > INT_MAX / sizeof(*(val)) ? 
\ + AVERROR(EINVAL) : \ + av_opt_set_bin(obj, name, (const uint8_t *)(val), \ + av_int_list_length(val, term) * sizeof(*(val)), flags)) + +/** + * @} + */ + +/** + * @defgroup opt_get_funcs Option getting functions + * @{ + * Those functions get a value of the option with the given name from an object. + * + * @param[in] obj a struct whose first element is a pointer to an AVClass. + * @param[in] name name of the option to get. + * @param[in] search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN + * is passed here, then the option may be found in a child of obj. + * @param[out] out_val value of the option will be written here + * @return >=0 on success, a negative error code otherwise + */ +/** + * @note the returned string will be av_malloc()ed and must be av_free()ed by the caller + * + * @note if AV_OPT_ALLOW_NULL is set in search_flags in av_opt_get, and the option has + * AV_OPT_TYPE_STRING or AV_OPT_TYPE_BINARY and is set to NULL, *out_val will be set + * to NULL instead of an allocated empty string. 
+ */ +int av_opt_get (void *obj, const char *name, int search_flags, uint8_t **out_val); +int av_opt_get_int (void *obj, const char *name, int search_flags, int64_t *out_val); +int av_opt_get_double (void *obj, const char *name, int search_flags, double *out_val); +int av_opt_get_q (void *obj, const char *name, int search_flags, AVRational *out_val); +int av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_out, int *h_out); +int av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt); +int av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt); +int av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val); +int av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *ch_layout); +/** + * @param[out] out_val The returned dictionary is a copy of the actual value and must + * be freed with av_dict_free() by the caller + */ +int av_opt_get_dict_val(void *obj, const char *name, int search_flags, AVDictionary **out_val); +/** + * @} + */ +/** + * Gets a pointer to the requested field in a struct. + * This function allows accessing a struct even when its fields are moved or + * renamed since the application making the access has been compiled, + * + * @returns a pointer to the field, it can be cast to the correct type and read + * or written to. + */ +void *av_opt_ptr(const AVClass *avclass, void *obj, const char *name); + +/** + * Free an AVOptionRanges struct and set it to NULL. + */ +void av_opt_freep_ranges(AVOptionRanges **ranges); + +/** + * Get a list of allowed ranges for the given option. + * + * The returned list may depend on other fields in obj like for example profile. 
+ * + * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored + * AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance + * AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, @see AVOptionRanges + * + * The result must be freed with av_opt_freep_ranges. + * + * @return number of compontents returned on success, a negative errro code otherwise + */ +int av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags); + +/** + * Copy options from src object into dest object. + * + * Options that require memory allocation (e.g. string or binary) are malloc'ed in dest object. + * Original memory allocated for such options is freed unless both src and dest options points to the same memory. + * + * @param dest Object to copy from + * @param src Object to copy into + * @return 0 on success, negative on error + */ +int av_opt_copy(void *dest, const void *src); + +/** + * Get a default list of allowed ranges for the given option. + * + * This list is constructed without using the AVClass.query_ranges() callback + * and can be used as fallback from within the callback. + * + * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored + * AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance + * AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, @see AVOptionRanges + * + * The result must be freed with av_opt_free_ranges. + * + * @return number of compontents returned on success, a negative errro code otherwise + */ +int av_opt_query_ranges_default(AVOptionRanges **, void *obj, const char *key, int flags); + +/** + * Check if given option is set to its default value. + * + * Options o must belong to the obj. This function must not be called to check child's options state. 
+ * @see av_opt_is_set_to_default_by_name(). + * + * @param obj AVClass object to check option on + * @param o option to be checked + * @return >0 when option is set to its default, + * 0 when option is not set its default, + * <0 on error + */ +int av_opt_is_set_to_default(void *obj, const AVOption *o); + +/** + * Check if given option is set to its default value. + * + * @param obj AVClass object to check option on + * @param name option name + * @param search_flags combination of AV_OPT_SEARCH_* + * @return >0 when option is set to its default, + * 0 when option is not set its default, + * <0 on error + */ +int av_opt_is_set_to_default_by_name(void *obj, const char *name, int search_flags); + + +#define AV_OPT_SERIALIZE_SKIP_DEFAULTS 0x00000001 ///< Serialize options that are not set to default values only. +#define AV_OPT_SERIALIZE_OPT_FLAGS_EXACT 0x00000002 ///< Serialize options that exactly match opt_flags only. + +/** + * Serialize object's options. + * + * Create a string containing object's serialized options. + * Such string may be passed back to av_opt_set_from_string() in order to restore option values. + * A key/value or pairs separator occurring in the serialized value or + * name string are escaped through the av_escape() function. + * + * @param[in] obj AVClass object to serialize + * @param[in] opt_flags serialize options with all the specified flags set (AV_OPT_FLAG) + * @param[in] flags combination of AV_OPT_SERIALIZE_* flags + * @param[out] buffer Pointer to buffer that will be allocated with string containg serialized options. + * Buffer must be freed by the caller when is no longer needed. + * @param[in] key_val_sep character used to separate key from value + * @param[in] pairs_sep character used to separate two pairs from each other + * @return >= 0 on success, negative on error + * @warning Separators cannot be neither '\\' nor '\0'. They also cannot be the same. 
+ */ +int av_opt_serialize(void *obj, int opt_flags, int flags, char **buffer, + const char key_val_sep, const char pairs_sep); +/** + * @} + */ + +#endif /* AVUTIL_OPT_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/parseutils.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/parseutils.h new file mode 100644 index 0000000..e66d24b --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/parseutils.h @@ -0,0 +1,193 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PARSEUTILS_H +#define AVUTIL_PARSEUTILS_H + +#include + +#include "rational.h" + +/** + * @file + * misc parsing utilities + */ + +/** + * Parse str and store the parsed ratio in q. + * + * Note that a ratio with infinite (1/0) or negative value is + * considered valid, so you should check on the returned value if you + * want to exclude those values. + * + * The undefined value can be expressed using the "0:0" string. 
+ * + * @param[in,out] q pointer to the AVRational which will contain the ratio + * @param[in] str the string to parse: it has to be a string in the format + * num:den, a float number or an expression + * @param[in] max the maximum allowed numerator and denominator + * @param[in] log_offset log level offset which is applied to the log + * level of log_ctx + * @param[in] log_ctx parent logging context + * @return >= 0 on success, a negative error code otherwise + */ +int av_parse_ratio(AVRational *q, const char *str, int max, + int log_offset, void *log_ctx); + +#define av_parse_ratio_quiet(rate, str, max) \ + av_parse_ratio(rate, str, max, AV_LOG_MAX_OFFSET, NULL) + +/** + * Parse str and put in width_ptr and height_ptr the detected values. + * + * @param[in,out] width_ptr pointer to the variable which will contain the detected + * width value + * @param[in,out] height_ptr pointer to the variable which will contain the detected + * height value + * @param[in] str the string to parse: it has to be a string in the format + * width x height or a valid video size abbreviation. + * @return >= 0 on success, a negative error code otherwise + */ +int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str); + +/** + * Parse str and store the detected values in *rate. + * + * @param[in,out] rate pointer to the AVRational which will contain the detected + * frame rate + * @param[in] str the string to parse: it has to be a string in the format + * rate_num / rate_den, a float number or a valid video rate abbreviation + * @return >= 0 on success, a negative error code otherwise + */ +int av_parse_video_rate(AVRational *rate, const char *str); + +/** + * Put the RGBA values that correspond to color_string in rgba_color. + * + * @param color_string a string specifying a color. It can be the name of + * a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence, + * possibly followed by "@" and a string representing the alpha + * component. 
+ * The alpha component may be a string composed by "0x" followed by an + * hexadecimal number or a decimal number between 0.0 and 1.0, which + * represents the opacity value (0x00/0.0 means completely transparent, + * 0xff/1.0 completely opaque). + * If the alpha component is not specified then 0xff is assumed. + * The string "random" will result in a random color. + * @param slen length of the initial part of color_string containing the + * color. It can be set to -1 if color_string is a null terminated string + * containing nothing else than the color. + * @return >= 0 in case of success, a negative value in case of + * failure (for example if color_string cannot be parsed). + */ +int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, + void *log_ctx); + +/** + * Get the name of a color from the internal table of hard-coded named + * colors. + * + * This function is meant to enumerate the color names recognized by + * av_parse_color(). + * + * @param color_idx index of the requested color, starting from 0 + * @param rgbp if not NULL, will point to a 3-elements array with the color value in RGB + * @return the color name string or NULL if color_idx is not in the array + */ +const char *av_get_known_color_name(int color_idx, const uint8_t **rgb); + +/** + * Parse timestr and return in *time a corresponding number of + * microseconds. + * + * @param timeval puts here the number of microseconds corresponding + * to the string in timestr. If the string represents a duration, it + * is the number of microseconds contained in the time interval. If + * the string is a date, is the number of microseconds since 1st of + * January, 1970 up to the time of the parsed date. If timestr cannot + * be successfully parsed, set *time to INT64_MIN. + + * @param timestr a string representing a date or a duration. 
+ * - If a date the syntax is: + * @code + * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH:MM:SS[.m...]]]}|{HHMMSS[.m...]]]}}[Z] + * now + * @endcode + * If the value is "now" it takes the current time. + * Time is local time unless Z is appended, in which case it is + * interpreted as UTC. + * If the year-month-day part is not specified it takes the current + * year-month-day. + * - If a duration the syntax is: + * @code + * [-][HH:]MM:SS[.m...] + * [-]S+[.m...] + * @endcode + * @param duration flag which tells how to interpret timestr, if not + * zero timestr is interpreted as a duration, otherwise as a date + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int av_parse_time(int64_t *timeval, const char *timestr, int duration); + +/** + * Attempt to find a specific tag in a URL. + * + * syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done. + * Return 1 if found. + */ +int av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info); + +/** + * Simplified version of strptime + * + * Parse the input string p according to the format string fmt and + * store its results in the structure dt. + * This implementation supports only a subset of the formats supported + * by the standard strptime(). + * + * The supported input field descriptors are listed below. 
+ * - %H: the hour as a decimal number, using a 24-hour clock, in the + * range '00' through '23' + * - %J: hours as a decimal number, in the range '0' through INT_MAX + * - %M: the minute as a decimal number, using a 24-hour clock, in the + * range '00' through '59' + * - %S: the second as a decimal number, using a 24-hour clock, in the + * range '00' through '59' + * - %Y: the year as a decimal number, using the Gregorian calendar + * - %m: the month as a decimal number, in the range '1' through '12' + * - %d: the day of the month as a decimal number, in the range '1' + * through '31' + * - %T: alias for '%H:%M:%S' + * - %%: a literal '%' + * + * @return a pointer to the first character not processed in this function + * call. In case the input string contains more characters than + * required by the format string the return value points right after + * the last consumed input character. In case the whole input string + * is consumed the return value points to the null byte at the end of + * the string. On failure NULL is returned. + */ +char *av_small_strptime(const char *p, const char *fmt, struct tm *dt); + +/** + * Convert the decomposed UTC time in tm to a time_t value. + */ +time_t av_timegm(struct tm *tm); + +#endif /* AVUTIL_PARSEUTILS_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/pixdesc.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/pixdesc.h new file mode 100644 index 0000000..c3a6f27 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/pixdesc.h @@ -0,0 +1,399 @@ +/* + * pixel format descriptor + * Copyright (c) 2009 Michael Niedermayer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PIXDESC_H +#define AVUTIL_PIXDESC_H + +#include + +#include "attributes.h" +#include "pixfmt.h" +#include "version.h" + +typedef struct AVComponentDescriptor { + /** + * Which of the 4 planes contains the component. + */ + int plane; + + /** + * Number of elements between 2 horizontally consecutive pixels. + * Elements are bits for bitstream formats, bytes otherwise. + */ + int step; + + /** + * Number of elements before the component of the first pixel. + * Elements are bits for bitstream formats, bytes otherwise. + */ + int offset; + + /** + * Number of least significant bits that must be shifted away + * to get the value. + */ + int shift; + + /** + * Number of bits in the component. + */ + int depth; + +#if FF_API_PLUS1_MINUS1 + /** deprecated, use step instead */ + attribute_deprecated int step_minus1; + + /** deprecated, use depth instead */ + attribute_deprecated int depth_minus1; + + /** deprecated, use offset instead */ + attribute_deprecated int offset_plus1; +#endif +} AVComponentDescriptor; + +/** + * Descriptor that unambiguously describes how the bits of a pixel are + * stored in the up to 4 data planes of an image. It also stores the + * subsampling factors and number of components. 
+ * + * @note This is separate of the colorspace (RGB, YCbCr, YPbPr, JPEG-style YUV + * and all the YUV variants) AVPixFmtDescriptor just stores how values + * are stored not what these values represent. + */ +typedef struct AVPixFmtDescriptor { + const char *name; + uint8_t nb_components; ///< The number of components each pixel has, (1-4) + + /** + * Amount to shift the luma width right to find the chroma width. + * For YV12 this is 1 for example. + * chroma_width = AV_CEIL_RSHIFT(luma_width, log2_chroma_w) + * The note above is needed to ensure rounding up. + * This value only refers to the chroma components. + */ + uint8_t log2_chroma_w; + + /** + * Amount to shift the luma height right to find the chroma height. + * For YV12 this is 1 for example. + * chroma_height= AV_CEIL_RSHIFT(luma_height, log2_chroma_h) + * The note above is needed to ensure rounding up. + * This value only refers to the chroma components. + */ + uint8_t log2_chroma_h; + + /** + * Combination of AV_PIX_FMT_FLAG_... flags. + */ + uint64_t flags; + + /** + * Parameters that describe how pixels are packed. + * If the format has 1 or 2 components, then luma is 0. + * If the format has 3 or 4 components: + * if the RGB flag is set then 0 is red, 1 is green and 2 is blue; + * otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V. + * + * If present, the Alpha channel is always the last component. + */ + AVComponentDescriptor comp[4]; + + /** + * Alternative comma-separated names. + */ + const char *alias; +} AVPixFmtDescriptor; + +/** + * Pixel format is big-endian. + */ +#define AV_PIX_FMT_FLAG_BE (1 << 0) +/** + * Pixel format has a palette in data[1], values are indexes in this palette. + */ +#define AV_PIX_FMT_FLAG_PAL (1 << 1) +/** + * All values of a component are bit-wise packed end to end. + */ +#define AV_PIX_FMT_FLAG_BITSTREAM (1 << 2) +/** + * Pixel format is an HW accelerated format. 
+ */ +#define AV_PIX_FMT_FLAG_HWACCEL (1 << 3) +/** + * At least one pixel component is not in the first data plane. + */ +#define AV_PIX_FMT_FLAG_PLANAR (1 << 4) +/** + * The pixel format contains RGB-like data (as opposed to YUV/grayscale). + */ +#define AV_PIX_FMT_FLAG_RGB (1 << 5) + +/** + * The pixel format is "pseudo-paletted". This means that it contains a + * fixed palette in the 2nd plane but the palette is fixed/constant for each + * PIX_FMT. This allows interpreting the data as if it was PAL8, which can + * in some cases be simpler. Or the data can be interpreted purely based on + * the pixel format without using the palette. + * An example of a pseudo-paletted format is AV_PIX_FMT_GRAY8 + */ +#define AV_PIX_FMT_FLAG_PSEUDOPAL (1 << 6) + +/** + * The pixel format has an alpha channel. This is set on all formats that + * support alpha in some way. The exception is AV_PIX_FMT_PAL8, which can + * carry alpha as part of the palette. Details are explained in the + * AVPixelFormat enum, and are also encoded in the corresponding + * AVPixFmtDescriptor. + * + * The alpha is always straight, never pre-multiplied. + * + * If a codec or a filter does not support alpha, it should set all alpha to + * opaque, or use the equivalent pixel formats without alpha component, e.g. + * AV_PIX_FMT_RGB0 (or AV_PIX_FMT_RGB24 etc.) instead of AV_PIX_FMT_RGBA. + */ +#define AV_PIX_FMT_FLAG_ALPHA (1 << 7) + +/** + * The pixel format is following a Bayer pattern + */ +#define AV_PIX_FMT_FLAG_BAYER (1 << 8) + +/** + * Return the number of bits per pixel used by the pixel format + * described by pixdesc. Note that this is not the same as the number + * of bits per sample. + * + * The returned number of bits refers to the number of bits actually + * used for storing the pixel information, that is padding bits are + * not counted. 
+ */ +int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc); + +/** + * Return the number of bits per pixel for the pixel format + * described by pixdesc, including any padding or unused bits. + */ +int av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc); + +/** + * @return a pixel format descriptor for provided pixel format or NULL if + * this pixel format is unknown. + */ +const AVPixFmtDescriptor *av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt); + +/** + * Iterate over all pixel format descriptors known to libavutil. + * + * @param prev previous descriptor. NULL to get the first descriptor. + * + * @return next descriptor or NULL after the last descriptor + */ +const AVPixFmtDescriptor *av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev); + +/** + * @return an AVPixelFormat id described by desc, or AV_PIX_FMT_NONE if desc + * is not a valid pointer to a pixel format descriptor. + */ +enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc); + +/** + * Utility function to access log2_chroma_w log2_chroma_h from + * the pixel format AVPixFmtDescriptor. + * + * See av_get_chroma_sub_sample() for a function that asserts a + * valid pixel format instead of returning an error code. + * Its recommended that you use avcodec_get_chroma_sub_sample unless + * you do check the return code! + * + * @param[in] pix_fmt the pixel format + * @param[out] h_shift store log2_chroma_w (horizontal/width shift) + * @param[out] v_shift store log2_chroma_h (vertical/height shift) + * + * @return 0 on success, AVERROR(ENOSYS) on invalid or unknown pixel format + */ +int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, + int *h_shift, int *v_shift); + +/** + * @return number of planes in pix_fmt, a negative AVERROR if pix_fmt is not a + * valid pixel format. + */ +int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt); + +/** + * @return the name for provided color range or NULL if unknown. 
+ */ +const char *av_color_range_name(enum AVColorRange range); + +/** + * @return the name for provided color primaries or NULL if unknown. + */ +const char *av_color_primaries_name(enum AVColorPrimaries primaries); + +/** + * @return the name for provided color transfer or NULL if unknown. + */ +const char *av_color_transfer_name(enum AVColorTransferCharacteristic transfer); + +/** + * @return the name for provided color space or NULL if unknown. + */ +const char *av_color_space_name(enum AVColorSpace space); + +/** + * @return the name for provided chroma location or NULL if unknown. + */ +const char *av_chroma_location_name(enum AVChromaLocation location); + +/** + * Return the pixel format corresponding to name. + * + * If there is no pixel format with name name, then looks for a + * pixel format with the name corresponding to the native endian + * format of name. + * For example in a little-endian system, first looks for "gray16", + * then for "gray16le". + * + * Finally if no pixel format has been found, returns AV_PIX_FMT_NONE. + */ +enum AVPixelFormat av_get_pix_fmt(const char *name); + +/** + * Return the short name for a pixel format, NULL in case pix_fmt is + * unknown. + * + * @see av_get_pix_fmt(), av_get_pix_fmt_string() + */ +const char *av_get_pix_fmt_name(enum AVPixelFormat pix_fmt); + +/** + * Print in buf the string corresponding to the pixel format with + * number pix_fmt, or a header if pix_fmt is negative. + * + * @param buf the buffer where to write the string + * @param buf_size the size of buf + * @param pix_fmt the number of the pixel format to print the + * corresponding info string, or a negative value to print the + * corresponding header. + */ +char *av_get_pix_fmt_string(char *buf, int buf_size, + enum AVPixelFormat pix_fmt); + +/** + * Read a line from an image, and write the values of the + * pixel format component c to dst. 
+ * + * @param data the array containing the pointers to the planes of the image + * @param linesize the array containing the linesizes of the image + * @param desc the pixel format descriptor for the image + * @param x the horizontal coordinate of the first pixel to read + * @param y the vertical coordinate of the first pixel to read + * @param w the width of the line to read, that is the number of + * values to write to dst + * @param read_pal_component if not zero and the format is a paletted + * format writes the values corresponding to the palette + * component c in data[1] to dst, rather than the palette indexes in + * data[0]. The behavior is undefined if the format is not paletted. + */ +void av_read_image_line(uint16_t *dst, const uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w, int read_pal_component); + +/** + * Write the values from src to the pixel format component c of an + * image line. + * + * @param src array containing the values to write + * @param data the array containing the pointers to the planes of the + * image to write into. It is supposed to be zeroed. + * @param linesize the array containing the linesizes of the image + * @param desc the pixel format descriptor for the image + * @param x the horizontal coordinate of the first pixel to write + * @param y the vertical coordinate of the first pixel to write + * @param w the width of the line to write, that is the number of + * values to write to the image line + */ +void av_write_image_line(const uint16_t *src, uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w); + +/** + * Utility function to swap the endianness of a pixel format. 
+ * + * @param[in] pix_fmt the pixel format + * + * @return pixel format with swapped endianness if it exists, + * otherwise AV_PIX_FMT_NONE + */ +enum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt); + +#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */ +#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */ +#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */ +#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */ +#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */ +#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */ + +/** + * Compute what kind of losses will occur when converting from one specific + * pixel format to another. + * When converting from one pixel format to another, information loss may occur. + * For example, when converting from RGB24 to GRAY, the color information will + * be lost. Similarly, other losses occur when converting from some formats to + * other formats. These losses can involve loss of chroma, but also loss of + * resolution, loss of color depth, loss due to the color space conversion, loss + * of the alpha bits or loss due to color quantization. + * av_get_fix_fmt_loss() informs you about the various types of losses + * which will occur when converting from one pixel format to another. + * + * @param[in] dst_pix_fmt destination pixel format + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @return Combination of flags informing you what kind of losses will occur + * (maximum loss for an invalid dst_pix_fmt). + */ +int av_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, + enum AVPixelFormat src_pix_fmt, + int has_alpha); + +/** + * Compute what kind of losses will occur when converting from one specific + * pixel format to another. + * When converting from one pixel format to another, information loss may occur. 
+ * For example, when converting from RGB24 to GRAY, the color information will + * be lost. Similarly, other losses occur when converting from some formats to + * other formats. These losses can involve loss of chroma, but also loss of + * resolution, loss of color depth, loss due to the color space conversion, loss + * of the alpha bits or loss due to color quantization. + * av_get_fix_fmt_loss() informs you about the various types of losses + * which will occur when converting from one pixel format to another. + * + * @param[in] dst_pix_fmt destination pixel format + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @return Combination of flags informing you what kind of losses will occur + * (maximum loss for an invalid dst_pix_fmt). + */ +enum AVPixelFormat av_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); + +#endif /* AVUTIL_PIXDESC_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/pixelutils.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/pixelutils.h new file mode 100644 index 0000000..a8dbc15 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/pixelutils.h @@ -0,0 +1,52 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXELUTILS_H
+#define AVUTIL_PIXELUTILS_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include "common.h"
+
+/**
+ * Sum of abs(src1[x] - src2[x])
+ */
+typedef int (*av_pixelutils_sad_fn)(const uint8_t *src1, ptrdiff_t stride1,
+                                    const uint8_t *src2, ptrdiff_t stride2);
+
+/**
+ * Get a potentially optimized pointer to a Sum-of-absolute-differences
+ * function (see the av_pixelutils_sad_fn prototype).
+ *
+ * @param w_bits 1<
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXFMT_H
+#define AVUTIL_PIXFMT_H
+
+/**
+ * @file
+ * pixel format definitions
+ */
+
+#include "libavutil/avconfig.h"
+#include "version.h"
+
+#define AVPALETTE_SIZE 1024
+#define AVPALETTE_COUNT 256
+
+/**
+ * Pixel format.
+ *
+ * @note
+ * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA
+ * color is put together as:
+ *  (A << 24) | (R << 16) | (G << 8) | B
+ * This is stored as BGRA on little-endian CPU architectures and ARGB on
+ * big-endian CPUs.
+ * + * @par + * When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized + * image data is stored in AVFrame.data[0]. The palette is transported in + * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is + * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is + * also endian-specific). Note also that the individual RGB32 palette + * components stored in AVFrame.data[1] should be in the range 0..255. + * This is important as many custom PAL8 video codecs that were designed + * to run on the IBM VGA graphics adapter use 6-bit palette components. + * + * @par + * For all the 8 bits per pixel formats, an RGB32 palette is in data[1] like + * for pal8. This palette is filled in automatically by the function + * allocating the picture. + */ +enum AVPixelFormat { + AV_PIX_FMT_NONE = -1, + AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) + AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr + AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB... + AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR... 
+ AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) + AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) + AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) + AV_PIX_FMT_GRAY8, ///< Y , 8bpp + AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb + AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb + AV_PIX_FMT_PAL8, ///< 8 bits with AV_PIX_FMT_RGB32 palette + AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range + AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range + AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range +#if FF_API_XVMC + AV_PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing + AV_PIX_FMT_XVMC_MPEG2_IDCT, + AV_PIX_FMT_XVMC = AV_PIX_FMT_XVMC_MPEG2_IDCT, +#endif /* FF_API_XVMC */ + AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 + AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 + AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) + AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb) + AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) + AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + 
AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) + AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) + AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped + + AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB... + AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA... + AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR... + AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA... + + AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian + AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian + AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) + AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range + AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) +#if FF_API_VDPAU + AV_PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif + AV_PIX_FMT_RGB48BE, ///< 
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian + AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian + + AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian + AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian + AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined + AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined + + AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian + AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian + AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined + AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined + +#if FF_API_VAAPI + /** @name Deprecated pixel formats */ + /**@{*/ + AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers + AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers + AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a VASurfaceID + /**@}*/ + AV_PIX_FMT_VAAPI = AV_PIX_FMT_VAAPI_VLD, +#else + /** + * Hardware acceleration through VA-API, data[3] contains a + * VASurfaceID. 
+ */ + AV_PIX_FMT_VAAPI, +#endif + + AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian +#if FF_API_VDPAU + AV_PIX_FMT_VDPAU_MPEG4, ///< MPEG-4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif + AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer + + AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined + AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined + AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined + AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined + AV_PIX_FMT_YA8, ///< 8 bits gray, 8 bits alpha + + AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8 + AV_PIX_FMT_GRAY8A= AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8 + + AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian + AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian + + /** + * The following 12 formats have the disadvantage of needing 1 format for each bit 
depth. + * Notice that each 9/10 bits sample is stored in 16 bits with extra padding. + * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better. + */ + AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_VDA_VLD, ///< hardware decoding through VDA + AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp + AV_PIX_FMT_GBR24P = AV_PIX_FMT_GBRP, // alias for #AV_PIX_FMT_GBRP + AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian + AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian + AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian + AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian + AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian + AV_PIX_FMT_GBRP16LE, 
///< planar GBR 4:4:4 48bpp, little-endian + AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) + AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) + AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian + AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian + AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian + AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian + AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian + AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian + AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) + AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) + AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + 
AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + + AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface + + AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + + AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + + AV_PIX_FMT_YVYU422, ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb + + AV_PIX_FMT_VDA, ///< HW acceleration through VDA, data[3] contains a CVPixelBufferRef + + AV_PIX_FMT_YA16BE, ///< 16 bits gray, 16 bits alpha (big-endian) + AV_PIX_FMT_YA16LE, ///< 16 bits gray, 16 bits alpha (little-endian) + + AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp + AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian 
+ AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian + /** + * HW acceleration through QSV, data[3] contains a pointer to the + * mfxFrameSurface1 structure. + */ + AV_PIX_FMT_QSV, + /** + * HW acceleration though MMAL, data[3] contains a pointer to the + * MMAL_BUFFER_HEADER_T structure. + */ + AV_PIX_FMT_MMAL, + + AV_PIX_FMT_D3D11VA_VLD, ///< HW decoding through Direct3D11, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer + + /** + * HW acceleration through CUDA. data[i] contain CUdeviceptr pointers + * exactly as for system memory frames. + */ + AV_PIX_FMT_CUDA, + + AV_PIX_FMT_0RGB=0x123+4,///< packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined + AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined + AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined + AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined + + AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + 
AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian + AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian + AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian + AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian + AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range + + AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */ +#if !FF_API_XVMC + AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing +#endif /* 
!FF_API_XVMC */ + AV_PIX_FMT_YUV440P10LE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian + AV_PIX_FMT_YUV440P10BE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian + AV_PIX_FMT_YUV440P12LE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian + AV_PIX_FMT_YUV440P12BE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian + AV_PIX_FMT_AYUV64LE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian + AV_PIX_FMT_AYUV64BE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian + + AV_PIX_FMT_VIDEOTOOLBOX, ///< hardware decoding through Videotoolbox + + AV_PIX_FMT_P010LE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, little-endian + AV_PIX_FMT_P010BE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, big-endian + + AV_PIX_FMT_GBRAP12BE, ///< planar GBR 4:4:4:4 48bpp, big-endian + AV_PIX_FMT_GBRAP12LE, ///< planar GBR 4:4:4:4 48bpp, little-endian + + AV_PIX_FMT_GBRAP10BE, ///< planar GBR 4:4:4:4 40bpp, big-endian + AV_PIX_FMT_GBRAP10LE, ///< planar GBR 4:4:4:4 40bpp, little-endian + + AV_PIX_FMT_MEDIACODEC, ///< hardware decoding through MediaCodec + + AV_PIX_FMT_GRAY12BE, ///< Y , 12bpp, big-endian + AV_PIX_FMT_GRAY12LE, ///< Y , 12bpp, little-endian + AV_PIX_FMT_GRAY10BE, ///< Y , 10bpp, big-endian + AV_PIX_FMT_GRAY10LE, ///< Y , 10bpp, little-endian + + AV_PIX_FMT_P016LE, ///< like NV12, with 16bpp per component, little-endian + AV_PIX_FMT_P016BE, ///< like NV12, with 16bpp per component, big-endian + + AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions +}; + +#if AV_HAVE_BIGENDIAN +# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be +#else +# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le +#endif + +#define 
AV_PIX_FMT_RGB32 AV_PIX_FMT_NE(ARGB, BGRA) +#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR) +#define AV_PIX_FMT_BGR32 AV_PIX_FMT_NE(ABGR, RGBA) +#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB) +#define AV_PIX_FMT_0RGB32 AV_PIX_FMT_NE(0RGB, BGR0) +#define AV_PIX_FMT_0BGR32 AV_PIX_FMT_NE(0BGR, RGB0) + +#define AV_PIX_FMT_GRAY10 AV_PIX_FMT_NE(GRAY10BE, GRAY10LE) +#define AV_PIX_FMT_GRAY12 AV_PIX_FMT_NE(GRAY12BE, GRAY12LE) +#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE) +#define AV_PIX_FMT_YA16 AV_PIX_FMT_NE(YA16BE, YA16LE) +#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE) +#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE) +#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE) +#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE) +#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE) +#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE) +#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE) +#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE) +#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE) +#define AV_PIX_FMT_BGRA64 AV_PIX_FMT_NE(BGRA64BE, BGRA64LE) + +#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE) +#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE) +#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE) +#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE) +#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE) +#define AV_PIX_FMT_YUV440P10 AV_PIX_FMT_NE(YUV440P10BE, YUV440P10LE) +#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE) +#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE) +#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE) +#define AV_PIX_FMT_YUV440P12 AV_PIX_FMT_NE(YUV440P12BE, YUV440P12LE) +#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE) +#define AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE) +#define 
AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE) +#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE) +#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE) +#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE) +#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE) + +#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE) +#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE) +#define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE) +#define AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE) +#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE) +#define AV_PIX_FMT_GBRAP10 AV_PIX_FMT_NE(GBRAP10BE, GBRAP10LE) +#define AV_PIX_FMT_GBRAP12 AV_PIX_FMT_NE(GBRAP12BE, GBRAP12LE) +#define AV_PIX_FMT_GBRAP16 AV_PIX_FMT_NE(GBRAP16BE, GBRAP16LE) + +#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE, BAYER_BGGR16LE) +#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE, BAYER_RGGB16LE) +#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE, BAYER_GBRG16LE) +#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE, BAYER_GRBG16LE) + + +#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE) +#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE) +#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE) +#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE) +#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE) +#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE) +#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE) +#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE) +#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE) + +#define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE) +#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE) +#define AV_PIX_FMT_AYUV64 AV_PIX_FMT_NE(AYUV64BE, AYUV64LE) +#define 
AV_PIX_FMT_P010 AV_PIX_FMT_NE(P010BE, P010LE) +#define AV_PIX_FMT_P016 AV_PIX_FMT_NE(P016BE, P016LE) + +/** + * Chromaticity coordinates of the source primaries. + */ +enum AVColorPrimaries { + AVCOL_PRI_RESERVED0 = 0, + AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B + AVCOL_PRI_UNSPECIFIED = 2, + AVCOL_PRI_RESERVED = 3, + AVCOL_PRI_BT470M = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20) + + AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM + AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above + AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C + AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020 + AVCOL_PRI_SMPTE428 = 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ) + AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428, + AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3 + AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3 + AVCOL_PRI_JEDEC_P22 = 22, ///< JEDEC P22 phosphors + AVCOL_PRI_NB ///< Not part of ABI +}; + +/** + * Color Transfer Characteristic. 
+ */ +enum AVColorTransferCharacteristic { + AVCOL_TRC_RESERVED0 = 0, + AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361 + AVCOL_TRC_UNSPECIFIED = 2, + AVCOL_TRC_RESERVED = 3, + AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM + AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG + AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC + AVCOL_TRC_SMPTE240M = 7, + AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics" + AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)" + AVCOL_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)" + AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4 + AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut + AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC) + AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10-bit system + AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12-bit system + AVCOL_TRC_SMPTE2084 = 16, ///< SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems + AVCOL_TRC_SMPTEST2084 = AVCOL_TRC_SMPTE2084, + AVCOL_TRC_SMPTE428 = 17, ///< SMPTE ST 428-1 + AVCOL_TRC_SMPTEST428_1 = AVCOL_TRC_SMPTE428, + AVCOL_TRC_ARIB_STD_B67 = 18, ///< ARIB STD-B67, known as "Hybrid log-gamma" + AVCOL_TRC_NB ///< Not part of ABI +}; + +/** + * YUV colorspace type. 
+ */ +enum AVColorSpace { + AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB) + AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B + AVCOL_SPC_UNSPECIFIED = 2, + AVCOL_SPC_RESERVED = 3, + AVCOL_SPC_FCC = 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20) + AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 + AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + AVCOL_SPC_SMPTE240M = 7, ///< functionally identical to above + AVCOL_SPC_YCGCO = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16 + AVCOL_SPC_YCOCG = AVCOL_SPC_YCGCO, + AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system + AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system + AVCOL_SPC_SMPTE2085 = 11, ///< SMPTE 2085, Y'D'zD'x + AVCOL_SPC_NB ///< Not part of ABI +}; +#define AVCOL_SPC_YCGCO AVCOL_SPC_YCOCG + + +/** + * MPEG vs JPEG YUV range. + */ +enum AVColorRange { + AVCOL_RANGE_UNSPECIFIED = 0, + AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges + AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges + AVCOL_RANGE_NB ///< Not part of ABI +}; + +/** + * Location of chroma samples. + * + * Illustration showing the location of the first (top left) chroma sample of the + * image, the left shows only luma, the right + * shows the location of the chroma sample, the 2 could be imagined to overlay + * each other but are drawn separately due to limitations of ASCII + * + * 1st 2nd 1st 2nd horizontal luma sample positions + * v v v v + * ______ ______ + *1st luma line > |X X ... |3 4 X ... X are luma samples, + * | |1 2 1-6 are possible chroma positions + *2nd luma line > |X X ... |5 6 X ... 
0 is undefined/unknown position + */ +enum AVChromaLocation { + AVCHROMA_LOC_UNSPECIFIED = 0, + AVCHROMA_LOC_LEFT = 1, ///< MPEG-2/4 4:2:0, H.264 default for 4:2:0 + AVCHROMA_LOC_CENTER = 2, ///< MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0 + AVCHROMA_LOC_TOPLEFT = 3, ///< ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2 + AVCHROMA_LOC_TOP = 4, + AVCHROMA_LOC_BOTTOMLEFT = 5, + AVCHROMA_LOC_BOTTOM = 6, + AVCHROMA_LOC_NB ///< Not part of ABI +}; + +#endif /* AVUTIL_PIXFMT_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/random_seed.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/random_seed.h new file mode 100644 index 0000000..0462a04 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/random_seed.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2009 Baptiste Coudurier + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_RANDOM_SEED_H +#define AVUTIL_RANDOM_SEED_H + +#include +/** + * @addtogroup lavu_crypto + * @{ + */ + +/** + * Get a seed to use in conjunction with random functions. + * This function tries to provide a good seed at a best effort bases. 
+ * Its possible to call this function multiple times if more bits are needed. + * It can be quite slow, which is why it should only be used as seed for a faster + * PRNG. The quality of the seed depends on the platform. + */ +uint32_t av_get_random_seed(void); + +/** + * @} + */ + +#endif /* AVUTIL_RANDOM_SEED_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/rational.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/rational.h new file mode 100644 index 0000000..5c6b67b --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/rational.h @@ -0,0 +1,214 @@ +/* + * rational numbers + * Copyright (c) 2003 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_math_rational + * Utilties for rational number calculation. + * @author Michael Niedermayer + */ + +#ifndef AVUTIL_RATIONAL_H +#define AVUTIL_RATIONAL_H + +#include +#include +#include "attributes.h" + +/** + * @defgroup lavu_math_rational AVRational + * @ingroup lavu_math + * Rational number calculation. 
+ * + * While rational numbers can be expressed as floating-point numbers, the + * conversion process is a lossy one, so are floating-point operations. On the + * other hand, the nature of FFmpeg demands highly accurate calculation of + * timestamps. This set of rational number utilities serves as a generic + * interface for manipulating rational numbers as pairs of numerators and + * denominators. + * + * Many of the functions that operate on AVRational's have the suffix `_q`, in + * reference to the mathematical symbol "ℚ" (Q) which denotes the set of all + * rational numbers. + * + * @{ + */ + +/** + * Rational number (pair of numerator and denominator). + */ +typedef struct AVRational{ + int num; ///< Numerator + int den; ///< Denominator +} AVRational; + +/** + * Create an AVRational. + * + * Useful for compilers that do not support compound literals. + * + * @note The return value is not reduced. + * @see av_reduce() + */ +static inline AVRational av_make_q(int num, int den) +{ + AVRational r = { num, den }; + return r; +} + +/** + * Compare two rationals. + * + * @param a First rational + * @param b Second rational + * + * @return One of the following values: + * - 0 if `a == b` + * - 1 if `a > b` + * - -1 if `a < b` + * - `INT_MIN` if one of the values is of the form `0 / 0` + */ +static inline int av_cmp_q(AVRational a, AVRational b){ + const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den; + + if(tmp) return (int)((tmp ^ a.den ^ b.den)>>63)|1; + else if(b.den && a.den) return 0; + else if(a.num && b.num) return (a.num>>31) - (b.num>>31); + else return INT_MIN; +} + +/** + * Convert an AVRational to a `double`. + * @param a AVRational to convert + * @return `a` in floating-point form + * @see av_d2q() + */ +static inline double av_q2d(AVRational a){ + return a.num / (double) a.den; +} + +/** + * Reduce a fraction. + * + * This is useful for framerate calculations. 
+ * + * @param[out] dst_num Destination numerator + * @param[out] dst_den Destination denominator + * @param[in] num Source numerator + * @param[in] den Source denominator + * @param[in] max Maximum allowed values for `dst_num` & `dst_den` + * @return 1 if the operation is exact, 0 otherwise + */ +int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max); + +/** + * Multiply two rationals. + * @param b First rational + * @param c Second rational + * @return b*c + */ +AVRational av_mul_q(AVRational b, AVRational c) av_const; + +/** + * Divide one rational by another. + * @param b First rational + * @param c Second rational + * @return b/c + */ +AVRational av_div_q(AVRational b, AVRational c) av_const; + +/** + * Add two rationals. + * @param b First rational + * @param c Second rational + * @return b+c + */ +AVRational av_add_q(AVRational b, AVRational c) av_const; + +/** + * Subtract one rational from another. + * @param b First rational + * @param c Second rational + * @return b-c + */ +AVRational av_sub_q(AVRational b, AVRational c) av_const; + +/** + * Invert a rational. + * @param q value + * @return 1 / q + */ +static av_always_inline AVRational av_inv_q(AVRational q) +{ + AVRational r = { q.den, q.num }; + return r; +} + +/** + * Convert a double precision floating point number to a rational. + * + * In case of infinity, the returned value is expressed as `{1, 0}` or + * `{-1, 0}` depending on the sign. + * + * @param d `double` to convert + * @param max Maximum allowed numerator and denominator + * @return `d` in AVRational form + * @see av_q2d() + */ +AVRational av_d2q(double d, int max) av_const; + +/** + * Find which of the two rationals is closer to another rational. 
+ * + * @param q Rational to be compared against + * @param q1,q2 Rationals to be tested + * @return One of the following values: + * - 1 if `q1` is nearer to `q` than `q2` + * - -1 if `q2` is nearer to `q` than `q1` + * - 0 if they have the same distance + */ +int av_nearer_q(AVRational q, AVRational q1, AVRational q2); + +/** + * Find the value in a list of rationals nearest a given reference rational. + * + * @param q Reference rational + * @param q_list Array of rationals terminated by `{0, 0}` + * @return Index of the nearest value found in the array + */ +int av_find_nearest_q_idx(AVRational q, const AVRational* q_list); + +/** + * Convert an AVRational to a IEEE 32-bit `float` expressed in fixed-point + * format. + * + * @param q Rational to be converted + * @return Equivalent floating-point value, expressed as an unsigned 32-bit + * integer. + * @note The returned value is platform-indepedant. + */ +uint32_t av_q2intfloat(AVRational q); + +/** + * @} + */ + +#endif /* AVUTIL_RATIONAL_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/rc4.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/rc4.h new file mode 100644 index 0000000..029cd2a --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/rc4.h @@ -0,0 +1,66 @@ +/* + * RC4 encryption/decryption/pseudo-random number generator + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_RC4_H +#define AVUTIL_RC4_H + +#include + +/** + * @defgroup lavu_rc4 RC4 + * @ingroup lavu_crypto + * @{ + */ + +typedef struct AVRC4 { + uint8_t state[256]; + int x, y; +} AVRC4; + +/** + * Allocate an AVRC4 context. + */ +AVRC4 *av_rc4_alloc(void); + +/** + * @brief Initializes an AVRC4 context. + * + * @param key_bits must be a multiple of 8 + * @param decrypt 0 for encryption, 1 for decryption, currently has no effect + * @return zero on success, negative value otherwise + */ +int av_rc4_init(struct AVRC4 *d, const uint8_t *key, int key_bits, int decrypt); + +/** + * @brief Encrypts / decrypts using the RC4 algorithm. + * + * @param count number of bytes + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst, may be NULL + * @param iv not (yet) used for RC4, should be NULL + * @param decrypt 0 for encryption, 1 for decryption, not (yet) used + */ +void av_rc4_crypt(struct AVRC4 *d, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_RC4_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/replaygain.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/replaygain.h new file mode 100644 index 0000000..b49bf1a --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/replaygain.h @@ -0,0 +1,50 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_REPLAYGAIN_H +#define AVUTIL_REPLAYGAIN_H + +#include + +/** + * ReplayGain information (see + * http://wiki.hydrogenaudio.org/index.php?title=ReplayGain_1.0_specification). + * The size of this struct is a part of the public ABI. + */ +typedef struct AVReplayGain { + /** + * Track replay gain in microbels (divide by 100000 to get the value in dB). + * Should be set to INT32_MIN when unknown. + */ + int32_t track_gain; + /** + * Peak track amplitude, with 100000 representing full scale (but values + * may overflow). 0 when unknown. + */ + uint32_t track_peak; + /** + * Same as track_gain, but for the whole album. + */ + int32_t album_gain; + /** + * Same as track_peak, but for the whole album, + */ + uint32_t album_peak; +} AVReplayGain; + +#endif /* AVUTIL_REPLAYGAIN_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/ripemd.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/ripemd.h new file mode 100644 index 0000000..6d6bb32 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/ripemd.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2007 Michael Niedermayer + * Copyright (C) 2013 James Almer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_ripemd + * Public header for RIPEMD hash function implementation. + */ + +#ifndef AVUTIL_RIPEMD_H +#define AVUTIL_RIPEMD_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_ripemd RIPEMD + * @ingroup lavu_hash + * RIPEMD hash function implementation. + * + * @{ + */ + +extern const int av_ripemd_size; + +struct AVRIPEMD; + +/** + * Allocate an AVRIPEMD context. + */ +struct AVRIPEMD *av_ripemd_alloc(void); + +/** + * Initialize RIPEMD hashing. + * + * @param context pointer to the function context (of size av_ripemd_size) + * @param bits number of bits in digest (128, 160, 256 or 320 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int av_ripemd_init(struct AVRIPEMD* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +void av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, unsigned int len); + +/** + * Finish hashing and output digest value. 
+ * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void av_ripemd_final(struct AVRIPEMD* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_RIPEMD_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/samplefmt.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/samplefmt.h new file mode 100644 index 0000000..8cd43ae --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/samplefmt.h @@ -0,0 +1,272 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_SAMPLEFMT_H +#define AVUTIL_SAMPLEFMT_H + +#include + +#include "avutil.h" +#include "attributes.h" + +/** + * @addtogroup lavu_audio + * @{ + * + * @defgroup lavu_sampfmts Audio sample formats + * + * Audio sample format enumeration and related convenience functions. + * @{ + */ + +/** + * Audio sample formats + * + * - The data described by the sample format is always in native-endian order. + * Sample values can be expressed by native C types, hence the lack of a signed + * 24-bit sample format even though it is a common raw audio data format. 
+ * + * - The floating-point formats are based on full volume being in the range + * [-1.0, 1.0]. Any values outside this range are beyond full volume level. + * + * - The data layout as used in av_samples_fill_arrays() and elsewhere in FFmpeg + * (such as AVFrame in libavcodec) is as follows: + * + * @par + * For planar sample formats, each audio channel is in a separate data plane, + * and linesize is the buffer size, in bytes, for a single plane. All data + * planes must be the same size. For packed sample formats, only the first data + * plane is used, and samples for each channel are interleaved. In this case, + * linesize is the buffer size, in bytes, for the 1 plane. + * + */ +enum AVSampleFormat { + AV_SAMPLE_FMT_NONE = -1, + AV_SAMPLE_FMT_U8, ///< unsigned 8 bits + AV_SAMPLE_FMT_S16, ///< signed 16 bits + AV_SAMPLE_FMT_S32, ///< signed 32 bits + AV_SAMPLE_FMT_FLT, ///< float + AV_SAMPLE_FMT_DBL, ///< double + + AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar + AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar + AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar + AV_SAMPLE_FMT_FLTP, ///< float, planar + AV_SAMPLE_FMT_DBLP, ///< double, planar + AV_SAMPLE_FMT_S64, ///< signed 64 bits + AV_SAMPLE_FMT_S64P, ///< signed 64 bits, planar + + AV_SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if linking dynamically +}; + +/** + * Return the name of sample_fmt, or NULL if sample_fmt is not + * recognized. + */ +const char *av_get_sample_fmt_name(enum AVSampleFormat sample_fmt); + +/** + * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE + * on error. + */ +enum AVSampleFormat av_get_sample_fmt(const char *name); + +/** + * Return the planar<->packed alternative form of the given sample format, or + * AV_SAMPLE_FMT_NONE on error. If the passed sample_fmt is already in the + * requested planar/packed format, the format returned is the same as the + * input. 
+ */ +enum AVSampleFormat av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt, int planar); + +/** + * Get the packed alternative form of the given sample format. + * + * If the passed sample_fmt is already in packed format, the format returned is + * the same as the input. + * + * @return the packed alternative form of the given sample format or + AV_SAMPLE_FMT_NONE on error. + */ +enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt); + +/** + * Get the planar alternative form of the given sample format. + * + * If the passed sample_fmt is already in planar format, the format returned is + * the same as the input. + * + * @return the planar alternative form of the given sample format or + AV_SAMPLE_FMT_NONE on error. + */ +enum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt); + +/** + * Generate a string corresponding to the sample format with + * sample_fmt, or a header if sample_fmt is negative. + * + * @param buf the buffer where to write the string + * @param buf_size the size of buf + * @param sample_fmt the number of the sample format to print the + * corresponding info string, or a negative value to print the + * corresponding header. + * @return the pointer to the filled buffer or NULL if sample_fmt is + * unknown or in case of other errors + */ +char *av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt); + +/** + * Return number of bytes per sample. + * + * @param sample_fmt the sample format + * @return number of bytes per sample or zero if unknown for the given + * sample format + */ +int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt); + +/** + * Check if the sample format is planar. + * + * @param sample_fmt the sample format to inspect + * @return 1 if the sample format is planar, 0 if it is interleaved + */ +int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt); + +/** + * Get the required buffer size for the given audio parameters. 
+ * + * @param[out] linesize calculated linesize, may be NULL + * @param nb_channels the number of channels + * @param nb_samples the number of samples in a single channel + * @param sample_fmt the sample format + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return required buffer size, or negative error code on failure + */ +int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, + enum AVSampleFormat sample_fmt, int align); + +/** + * @} + * + * @defgroup lavu_sampmanip Samples manipulation + * + * Functions that manipulate audio samples + * @{ + */ + +/** + * Fill plane data pointers and linesize for samples with sample + * format sample_fmt. + * + * The audio_data array is filled with the pointers to the samples data planes: + * for planar, set the start point of each channel's data within the buffer, + * for packed, set the start point of the entire buffer only. + * + * The value pointed to by linesize is set to the aligned size of each + * channel's data buffer for planar layout, or to the aligned size of the + * buffer for all channels for packed layout. + * + * The buffer in buf must be big enough to contain all the samples + * (use av_samples_get_buffer_size() to compute its minimum size), + * otherwise the audio_data pointers will point to invalid data. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. 
+ * + * @param[out] audio_data array to be filled with the pointer for each channel + * @param[out] linesize calculated linesize, may be NULL + * @param buf the pointer to a buffer containing the samples + * @param nb_channels the number of channels + * @param nb_samples the number of samples in a single channel + * @param sample_fmt the sample format + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return >=0 on success or a negative error code on failure + * @todo return minimum size in bytes required for the buffer in case + * of success at the next bump + */ +int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, + const uint8_t *buf, + int nb_channels, int nb_samples, + enum AVSampleFormat sample_fmt, int align); + +/** + * Allocate a samples buffer for nb_samples samples, and fill data pointers and + * linesize accordingly. + * The allocated samples buffer can be freed by using av_freep(&audio_data[0]) + * Allocated data will be initialized to silence. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param[out] audio_data array to be filled with the pointer for each channel + * @param[out] linesize aligned size for audio buffer(s), may be NULL + * @param nb_channels number of audio channels + * @param nb_samples number of samples per channel + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return >=0 on success or a negative error code on failure + * @todo return the size of the allocated buffer in case of success at the next bump + * @see av_samples_fill_arrays() + * @see av_samples_alloc_array_and_samples() + */ +int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels, + int nb_samples, enum AVSampleFormat sample_fmt, int align); + +/** + * Allocate a data pointers array, samples buffer for nb_samples + * samples, and fill data pointers and linesize accordingly. 
+ * + * This is the same as av_samples_alloc(), but also allocates the data + * pointers array. + * + * @see av_samples_alloc() + */ +int av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels, + int nb_samples, enum AVSampleFormat sample_fmt, int align); + +/** + * Copy samples from src to dst. + * + * @param dst destination array of pointers to data planes + * @param src source array of pointers to data planes + * @param dst_offset offset in samples at which the data will be written to dst + * @param src_offset offset in samples at which the data will be read from src + * @param nb_samples number of samples to be copied + * @param nb_channels number of audio channels + * @param sample_fmt audio sample format + */ +int av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset, + int src_offset, int nb_samples, int nb_channels, + enum AVSampleFormat sample_fmt); + +/** + * Fill an audio buffer with silence. + * + * @param audio_data array of pointers to data planes + * @param offset offset in samples at which to start filling + * @param nb_samples number of samples to fill + * @param nb_channels number of audio channels + * @param sample_fmt audio sample format + */ +int av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples, + int nb_channels, enum AVSampleFormat sample_fmt); + +/** + * @} + * @} + */ +#endif /* AVUTIL_SAMPLEFMT_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/sha.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/sha.h new file mode 100644 index 0000000..c7558a8 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/sha.h @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2007 Michael Niedermayer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_sha + * Public header for SHA-1 & SHA-256 hash function implementations. + */ + +#ifndef AVUTIL_SHA_H +#define AVUTIL_SHA_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_sha SHA + * @ingroup lavu_hash + * SHA-1 and SHA-256 (Secure Hash Algorithm) hash function implementations. + * + * This module supports the following SHA hash functions: + * + * - SHA-1: 160 bits + * - SHA-224: 224 bits, as a variant of SHA-2 + * - SHA-256: 256 bits, as a variant of SHA-2 + * + * @see For SHA-384, SHA-512, and variants thereof, see @ref lavu_sha512. + * + * @{ + */ + +extern const int av_sha_size; + +struct AVSHA; + +/** + * Allocate an AVSHA context. + */ +struct AVSHA *av_sha_alloc(void); + +/** + * Initialize SHA-1 or SHA-2 hashing. + * + * @param context pointer to the function context (of size av_sha_size) + * @param bits number of bits in digest (SHA-1 - 160 bits, SHA-2 224 or 256 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int av_sha_init(struct AVSHA* context, int bits); + +/** + * Update hash value. 
+ * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +void av_sha_update(struct AVSHA* context, const uint8_t* data, unsigned int len); + +/** + * Finish hashing and output digest value. + * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void av_sha_final(struct AVSHA* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_SHA_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/sha512.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/sha512.h new file mode 100644 index 0000000..5bac184 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/sha512.h @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2007 Michael Niedermayer + * Copyright (C) 2013 James Almer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_sha512 + * Public header for SHA-512 implementation. 
+ */ + +#ifndef AVUTIL_SHA512_H +#define AVUTIL_SHA512_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_sha512 SHA-512 + * @ingroup lavu_hash + * SHA-512 (Secure Hash Algorithm) hash function implementations. + * + * This module supports the following SHA-2 hash functions: + * + * - SHA-512/224: 224 bits + * - SHA-512/256: 256 bits + * - SHA-384: 384 bits + * - SHA-512: 512 bits + * + * @see For SHA-1, SHA-256, and variants thereof, see @ref lavu_sha. + * + * @{ + */ + +extern const int av_sha512_size; + +struct AVSHA512; + +/** + * Allocate an AVSHA512 context. + */ +struct AVSHA512 *av_sha512_alloc(void); + +/** + * Initialize SHA-2 512 hashing. + * + * @param context pointer to the function context (of size av_sha512_size) + * @param bits number of bits in digest (224, 256, 384 or 512 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int av_sha512_init(struct AVSHA512* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +void av_sha512_update(struct AVSHA512* context, const uint8_t* data, unsigned int len); + +/** + * Finish hashing and output digest value. + * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void av_sha512_final(struct AVSHA512* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_SHA512_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/spherical.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/spherical.h new file mode 100644 index 0000000..cef759c --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/spherical.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2016 Vittorio Giovara + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Spherical video + */ + +#ifndef AVUTIL_SPHERICAL_H +#define AVUTIL_SPHERICAL_H + +#include +#include + +/** + * @addtogroup lavu_video + * @{ + * + * @defgroup lavu_video_spherical Spherical video mapping + * @{ + */ + +/** + * @addtogroup lavu_video_spherical + * A spherical video file contains surfaces that need to be mapped onto a + * sphere. Depending on how the frame was converted, a different distortion + * transformation or surface recomposition function needs to be applied before + * the video should be mapped and displayed. + */ + +/** + * Projection of the video surface(s) on a sphere. + */ +enum AVSphericalProjection { + /** + * Video represents a sphere mapped on a flat surface using + * equirectangular projection. + */ + AV_SPHERICAL_EQUIRECTANGULAR, + + /** + * Video frame is split into 6 faces of a cube, and arranged on a + * 3x2 layout. Faces are oriented upwards for the front, left, right, + * and back faces. The up face is oriented so the top of the face is + * forwards and the down face is oriented so the top of the face is + * to the back. + */ + AV_SPHERICAL_CUBEMAP, + + /** + * Video represents a portion of a sphere mapped on a flat surface + * using equirectangular projection. 
The @ref bounding fields indicate + * the position of the current video in a larger surface. + */ + AV_SPHERICAL_EQUIRECTANGULAR_TILE, +}; + +/** + * This structure describes how to handle spherical videos, outlining + * information about projection, initial layout, and any other view modifier. + * + * @note The struct must be allocated with av_spherical_alloc() and + * its size is not a part of the public ABI. + */ +typedef struct AVSphericalMapping { + /** + * Projection type. + */ + enum AVSphericalProjection projection; + + /** + * @name Initial orientation + * @{ + * There fields describe additional rotations applied to the sphere after + * the video frame is mapped onto it. The sphere is rotated around the + * viewer, who remains stationary. The order of transformation is always + * yaw, followed by pitch, and finally by roll. + * + * The coordinate system matches the one defined in OpenGL, where the + * forward vector (z) is coming out of screen, and it is equivalent to + * a rotation matrix of R = r_y(yaw) * r_x(pitch) * r_z(roll). + * + * A positive yaw rotates the portion of the sphere in front of the viewer + * toward their right. A positive pitch rotates the portion of the sphere + * in front of the viewer upwards. A positive roll tilts the portion of + * the sphere in front of the viewer to the viewer's right. + * + * These values are exported as 16.16 fixed point. + * + * See this equirectangular projection as example: + * + * @code{.unparsed} + * Yaw + * -180 0 180 + * 90 +-------------+-------------+ 180 + * | | | up + * P | | | y| forward + * i | ^ | | /z + * t 0 +-------------X-------------+ 0 Roll | / + * c | | | | / + * h | | | 0|/_____right + * | | | x + * -90 +-------------+-------------+ -180 + * + * X - the default camera center + * ^ - the default up vector + * @endcode + */ + int32_t yaw; ///< Rotation around the up vector [-180, 180]. + int32_t pitch; ///< Rotation around the right vector [-90, 90]. 
+ int32_t roll; ///< Rotation around the forward vector [-180, 180]. + /** + * @} + */ + + /** + * @name Bounding rectangle + * @anchor bounding + * @{ + * These fields indicate the location of the current tile, and where + * it should be mapped relative to the original surface. They are + * exported as 0.32 fixed point, and can be converted to classic + * pixel values with av_spherical_bounds(). + * + * @code{.unparsed} + * +----------------+----------+ + * | |bound_top | + * | +--------+ | + * | bound_left |tile | | + * +<---------->| |<--->+bound_right + * | +--------+ | + * | | | + * | bound_bottom| | + * +----------------+----------+ + * @endcode + * + * If needed, the original video surface dimensions can be derived + * by adding the current stream or frame size to the related bounds, + * like in the following example: + * + * @code{c} + * original_width = tile->width + bound_left + bound_right; + * original_height = tile->height + bound_top + bound_bottom; + * @endcode + * + * @note These values are valid only for the tiled equirectangular + * projection type (@ref AV_SPHERICAL_EQUIRECTANGULAR_TILE), + * and should be ignored in all other cases. + */ + uint32_t bound_left; ///< Distance from the left edge + uint32_t bound_top; ///< Distance from the top edge + uint32_t bound_right; ///< Distance from the right edge + uint32_t bound_bottom; ///< Distance from the bottom edge + /** + * @} + */ + + /** + * Number of pixels to pad from the edge of each cube face. + * + * @note This value is valid for only for the cubemap projection type + * (@ref AV_SPHERICAL_CUBEMAP), and should be ignored in all other + * cases. + */ + uint32_t padding; +} AVSphericalMapping; + +/** + * Allocate a AVSphericalVideo structure and initialize its fields to default + * values. 
+ * + * @return the newly allocated struct or NULL on failure + */ +AVSphericalMapping *av_spherical_alloc(size_t *size); + +/** + * Convert the @ref bounding fields from an AVSphericalVideo + * from 0.32 fixed point to pixels. + * + * @param map The AVSphericalVideo map to read bound values from. + * @param width Width of the current frame or stream. + * @param height Height of the current frame or stream. + * @param left Pixels from the left edge. + * @param top Pixels from the top edge. + * @param right Pixels from the right edge. + * @param bottom Pixels from the bottom edge. + */ +void av_spherical_tile_bounds(const AVSphericalMapping *map, + size_t width, size_t height, + size_t *left, size_t *top, + size_t *right, size_t *bottom); + +/** + * Provide a human-readable name of a given AVSphericalProjection. + * + * @param projection The input AVSphericalProjection. + * + * @return The name of the AVSphericalProjection, or "unknown". + */ +const char *av_spherical_projection_name(enum AVSphericalProjection projection); + +/** + * Get the AVSphericalProjection form a human-readable name. + * + * @param name The input string. + * + * @return The AVSphericalProjection value, or -1 if not found. + */ +int av_spherical_from_name(const char *name); +/** + * @} + * @} + */ + +#endif /* AVUTIL_SPHERICAL_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/stereo3d.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/stereo3d.h new file mode 100644 index 0000000..19c5416 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/stereo3d.h @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2013 Vittorio Giovara + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_STEREO3D_H +#define AVUTIL_STEREO3D_H + +#include + +#include "frame.h" + +/** + * List of possible 3D Types + */ +enum AVStereo3DType { + /** + * Video is not stereoscopic (and metadata has to be there). + */ + AV_STEREO3D_2D, + + /** + * Views are next to each other. + * + * LLLLRRRR + * LLLLRRRR + * LLLLRRRR + * ... + */ + AV_STEREO3D_SIDEBYSIDE, + + /** + * Views are on top of each other. + * + * LLLLLLLL + * LLLLLLLL + * RRRRRRRR + * RRRRRRRR + */ + AV_STEREO3D_TOPBOTTOM, + + /** + * Views are alternated temporally. + * + * frame0 frame1 frame2 ... + * LLLLLLLL RRRRRRRR LLLLLLLL + * LLLLLLLL RRRRRRRR LLLLLLLL + * LLLLLLLL RRRRRRRR LLLLLLLL + * ... ... ... + */ + AV_STEREO3D_FRAMESEQUENCE, + + /** + * Views are packed in a checkerboard-like structure per pixel. + * + * LRLRLRLR + * RLRLRLRL + * LRLRLRLR + * ... + */ + AV_STEREO3D_CHECKERBOARD, + + /** + * Views are next to each other, but when upscaling + * apply a checkerboard pattern. + * + * LLLLRRRR L L L L R R R R + * LLLLRRRR => L L L L R R R R + * LLLLRRRR L L L L R R R R + * LLLLRRRR L L L L R R R R + */ + AV_STEREO3D_SIDEBYSIDE_QUINCUNX, + + /** + * Views are packed per line, as if interlaced. + * + * LLLLLLLL + * RRRRRRRR + * LLLLLLLL + * ... 
+ */ + AV_STEREO3D_LINES, + + /** + * Views are packed per column. + * + * LRLRLRLR + * LRLRLRLR + * LRLRLRLR + * ... + */ + AV_STEREO3D_COLUMNS, +}; + + +/** + * Inverted views, Right/Bottom represents the left view. + */ +#define AV_STEREO3D_FLAG_INVERT (1 << 0) + +/** + * Stereo 3D type: this structure describes how two videos are packed + * within a single video surface, with additional information as needed. + * + * @note The struct must be allocated with av_stereo3d_alloc() and + * its size is not a part of the public ABI. + */ +typedef struct AVStereo3D { + /** + * How views are packed within the video. + */ + enum AVStereo3DType type; + + /** + * Additional information about the frame packing. + */ + int flags; +} AVStereo3D; + +/** + * Allocate an AVStereo3D structure and set its fields to default values. + * The resulting struct can be freed using av_freep(). + * + * @return An AVStereo3D filled with default values or NULL on failure. + */ +AVStereo3D *av_stereo3d_alloc(void); + +/** + * Allocate a complete AVFrameSideData and add it to the frame. + * + * @param frame The frame which side data is added to. + * + * @return The AVStereo3D structure to be filled by caller. + */ +AVStereo3D *av_stereo3d_create_side_data(AVFrame *frame); + +/** + * Provide a human-readable name of a given stereo3d type. + * + * @param type The input stereo3d type value. + * + * @return The name of the stereo3d value, or "unknown". + */ +const char *av_stereo3d_type_name(unsigned int type); + +/** + * Get the AVStereo3DType form a human-readable name. + * + * @param type The input string. + * + * @return The AVStereo3DType value, or -1 if not found. 
+ */ +int av_stereo3d_from_name(const char *name); + +#endif /* AVUTIL_STEREO3D_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/tea.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/tea.h new file mode 100644 index 0000000..dd929bd --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/tea.h @@ -0,0 +1,71 @@ +/* + * A 32-bit implementation of the TEA algorithm + * Copyright (c) 2015 Vesselin Bontchev + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TEA_H +#define AVUTIL_TEA_H + +#include + +/** + * @file + * @brief Public header for libavutil TEA algorithm + * @defgroup lavu_tea TEA + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_tea_size; + +struct AVTEA; + +/** + * Allocate an AVTEA context + * To free the struct: av_free(ptr) + */ +struct AVTEA *av_tea_alloc(void); + +/** + * Initialize an AVTEA context. 
+ * + * @param ctx an AVTEA context + * @param key a key of 16 bytes used for encryption/decryption + * @param rounds the number of rounds in TEA (64 is the "standard") + */ +void av_tea_init(struct AVTEA *ctx, const uint8_t key[16], int rounds); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVTEA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_tea_crypt(struct AVTEA *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_TEA_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/thread.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/thread.h new file mode 100644 index 0000000..6e57447 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/thread.h @@ -0,0 +1,168 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +// This header should only be used to simplify code where +// threading is optional, not as a generic threading abstraction. + +#ifndef AVUTIL_THREAD_H +#define AVUTIL_THREAD_H + +#include "config.h" + +#if HAVE_PTHREADS || HAVE_W32THREADS || HAVE_OS2THREADS + +#if HAVE_PTHREADS +#include + +#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1 + +#include "log.h" + +#define ASSERT_PTHREAD_NORET(func, ...) do { \ + int ret = func(__VA_ARGS__); \ + if (ret) { \ + av_log(NULL, AV_LOG_FATAL, AV_STRINGIFY(func) \ + " failed with error: %s\n", av_err2str(AVERROR(ret))); \ + abort(); \ + } \ +} while (0) + +#define ASSERT_PTHREAD(func, ...) do { \ + ASSERT_PTHREAD_NORET(func, __VA_ARGS__); \ + return 0; \ +} while (0) + +static inline int strict_pthread_join(pthread_t thread, void **value_ptr) +{ + ASSERT_PTHREAD(pthread_join, thread, value_ptr); +} + +static inline int strict_pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr) +{ + if (attr) { + ASSERT_PTHREAD_NORET(pthread_mutex_init, mutex, attr); + } else { + pthread_mutexattr_t local_attr; + ASSERT_PTHREAD_NORET(pthread_mutexattr_init, &local_attr); + ASSERT_PTHREAD_NORET(pthread_mutexattr_settype, &local_attr, PTHREAD_MUTEX_ERRORCHECK); + ASSERT_PTHREAD_NORET(pthread_mutex_init, mutex, &local_attr); + ASSERT_PTHREAD_NORET(pthread_mutexattr_destroy, &local_attr); + } + return 0; +} + +static inline int strict_pthread_mutex_destroy(pthread_mutex_t *mutex) +{ + ASSERT_PTHREAD(pthread_mutex_destroy, mutex); +} + +static inline int strict_pthread_mutex_lock(pthread_mutex_t *mutex) +{ + ASSERT_PTHREAD(pthread_mutex_lock, mutex); +} + +static inline int strict_pthread_mutex_unlock(pthread_mutex_t *mutex) +{ + ASSERT_PTHREAD(pthread_mutex_unlock, mutex); +} + +static inline int 
strict_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr) +{ + ASSERT_PTHREAD(pthread_cond_init, cond, attr); +} + +static inline int strict_pthread_cond_destroy(pthread_cond_t *cond) +{ + ASSERT_PTHREAD(pthread_cond_destroy, cond); +} + +static inline int strict_pthread_cond_signal(pthread_cond_t *cond) +{ + ASSERT_PTHREAD(pthread_cond_signal, cond); +} + +static inline int strict_pthread_cond_broadcast(pthread_cond_t *cond) +{ + ASSERT_PTHREAD(pthread_cond_broadcast, cond); +} + +static inline int strict_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) +{ + ASSERT_PTHREAD(pthread_cond_wait, cond, mutex); +} + +static inline int strict_pthread_once(pthread_once_t *once_control, void (*init_routine)(void)) +{ + ASSERT_PTHREAD(pthread_once, once_control, init_routine); +} + +#define pthread_join strict_pthread_join +#define pthread_mutex_init strict_pthread_mutex_init +#define pthread_mutex_destroy strict_pthread_mutex_destroy +#define pthread_mutex_lock strict_pthread_mutex_lock +#define pthread_mutex_unlock strict_pthread_mutex_unlock +#define pthread_cond_init strict_pthread_cond_init +#define pthread_cond_destroy strict_pthread_cond_destroy +#define pthread_cond_signal strict_pthread_cond_signal +#define pthread_cond_broadcast strict_pthread_cond_broadcast +#define pthread_cond_wait strict_pthread_cond_wait +#define pthread_once strict_pthread_once +#endif + +#elif HAVE_OS2THREADS +#include "compat/os2threads.h" +#else +#include "compat/w32pthreads.h" +#endif + +#define AVMutex pthread_mutex_t + +#define ff_mutex_init pthread_mutex_init +#define ff_mutex_lock pthread_mutex_lock +#define ff_mutex_unlock pthread_mutex_unlock +#define ff_mutex_destroy pthread_mutex_destroy + +#define AVOnce pthread_once_t +#define AV_ONCE_INIT PTHREAD_ONCE_INIT + +#define ff_thread_once(control, routine) pthread_once(control, routine) + +#else + +#define AVMutex char + +#define ff_mutex_init(mutex, attr) (0) +#define ff_mutex_lock(mutex) (0) 
+#define ff_mutex_unlock(mutex) (0) +#define ff_mutex_destroy(mutex) (0) + +#define AVOnce char +#define AV_ONCE_INIT 0 + +static inline int ff_thread_once(char *control, void (*routine)(void)) +{ + if (!*control) { + routine(); + *control = 1; + } + return 0; +} + +#endif + +#endif /* AVUTIL_THREAD_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/threadmessage.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/threadmessage.h new file mode 100644 index 0000000..8480a0a --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/threadmessage.h @@ -0,0 +1,107 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_THREADMESSAGE_H +#define AVUTIL_THREADMESSAGE_H + +typedef struct AVThreadMessageQueue AVThreadMessageQueue; + +typedef enum AVThreadMessageFlags { + + /** + * Perform non-blocking operation. + * If this flag is set, send and recv operations are non-blocking and + * return AVERROR(EAGAIN) immediately if they can not proceed. + */ + AV_THREAD_MESSAGE_NONBLOCK = 1, + +} AVThreadMessageFlags; + +/** + * Allocate a new message queue. 
+ * + * @param mq pointer to the message queue + * @param nelem maximum number of elements in the queue + * @param elsize size of each element in the queue + * @return >=0 for success; <0 for error, in particular AVERROR(ENOSYS) if + * lavu was built without thread support + */ +int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, + unsigned nelem, + unsigned elsize); + +/** + * Free a message queue. + * + * The message queue must no longer be in use by another thread. + */ +void av_thread_message_queue_free(AVThreadMessageQueue **mq); + +/** + * Send a message on the queue. + */ +int av_thread_message_queue_send(AVThreadMessageQueue *mq, + void *msg, + unsigned flags); + +/** + * Receive a message from the queue. + */ +int av_thread_message_queue_recv(AVThreadMessageQueue *mq, + void *msg, + unsigned flags); + +/** + * Set the sending error code. + * + * If the error code is set to non-zero, av_thread_message_queue_send() will + * return it immediately. Conventional values, such as AVERROR_EOF or + * AVERROR(EAGAIN), can be used to cause the sending thread to stop or + * suspend its operation. + */ +void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, + int err); + +/** + * Set the receiving error code. + * + * If the error code is set to non-zero, av_thread_message_queue_recv() will + * return it immediately when there are no longer available messages. + * Conventional values, such as AVERROR_EOF or AVERROR(EAGAIN), can be used + * to cause the receiving thread to stop or suspend its operation. + */ +void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, + int err); + +/** + * Set the optional free message callback function which will be called if an + * operation is removing messages from the queue. 
+ */ +void av_thread_message_queue_set_free_func(AVThreadMessageQueue *mq, + void (*free_func)(void *msg)); + +/** + * Flush the message queue + * + * This function is mostly equivalent to reading and free-ing every message + * except that it will be done in a single operation (no lock/unlock between + * reads). + */ +void av_thread_message_flush(AVThreadMessageQueue *mq); + +#endif /* AVUTIL_THREADMESSAGE_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/time.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/time.h new file mode 100644 index 0000000..dc169b0 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/time.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2000-2003 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TIME_H +#define AVUTIL_TIME_H + +#include + +/** + * Get the current time in microseconds. + */ +int64_t av_gettime(void); + +/** + * Get the current time in microseconds since some unspecified starting point. + * On platforms that support it, the time comes from a monotonic clock + * This property makes this time source ideal for measuring relative time. 
+ * The returned values may not be monotonic on platforms where a monotonic + * clock is not available. + */ +int64_t av_gettime_relative(void); + +/** + * Indicates with a boolean result if the av_gettime_relative() time source + * is monotonic. + */ +int av_gettime_relative_is_monotonic(void); + +/** + * Sleep for a period of time. Although the duration is expressed in + * microseconds, the actual delay may be rounded to the precision of the + * system timer. + * + * @param usec Number of microseconds to sleep. + * @return zero on success or (negative) error code. + */ +int av_usleep(unsigned usec); + +#endif /* AVUTIL_TIME_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/timecode.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/timecode.h new file mode 100644 index 0000000..56e3975 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/timecode.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2006 Smartjog S.A.S, Baptiste Coudurier + * Copyright (c) 2011-2012 Smartjog S.A.S, Clément Bœsch + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Timecode helpers header + */ + +#ifndef AVUTIL_TIMECODE_H +#define AVUTIL_TIMECODE_H + +#include +#include "rational.h" + +#define AV_TIMECODE_STR_SIZE 16 + +enum AVTimecodeFlag { + AV_TIMECODE_FLAG_DROPFRAME = 1<<0, ///< timecode is drop frame + AV_TIMECODE_FLAG_24HOURSMAX = 1<<1, ///< timecode wraps after 24 hours + AV_TIMECODE_FLAG_ALLOWNEGATIVE = 1<<2, ///< negative time values are allowed +}; + +typedef struct { + int start; ///< timecode frame start (first base frame number) + uint32_t flags; ///< flags such as drop frame, +24 hours support, ... + AVRational rate; ///< frame rate in rational form + unsigned fps; ///< frame per second; must be consistent with the rate field +} AVTimecode; + +/** + * Adjust frame number for NTSC drop frame time code. + * + * @param framenum frame number to adjust + * @param fps frame per second, 30 or 60 + * @return adjusted frame number + * @warning adjustment is only valid in NTSC 29.97 and 59.94 + */ +int av_timecode_adjust_ntsc_framenum2(int framenum, int fps); + +/** + * Convert frame number to SMPTE 12M binary representation. + * + * @param tc timecode data correctly initialized + * @param framenum frame number + * @return the SMPTE binary representation + * + * @note Frame number adjustment is automatically done in case of drop timecode, + * you do NOT have to call av_timecode_adjust_ntsc_framenum2(). + * @note The frame number is relative to tc->start. + * @note Color frame (CF), binary group flags (BGF) and biphase mark polarity + * correction (PC) bits are set to zero. + */ +uint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum); + +/** + * Load timecode string in buf. 
+ * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tc timecode data correctly initialized + * @param framenum frame number + * @return the buf parameter + * + * @note Timecode representation can be a negative timecode and have more than + * 24 hours, but will only be honored if the flags are correctly set. + * @note The frame number is relative to tc->start. + */ +char *av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum); + +/** + * Get the timecode string from the SMPTE timecode format. + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tcsmpte the 32-bit SMPTE timecode + * @param prevent_df prevent the use of a drop flag when it is known the DF bit + * is arbitrary + * @return the buf parameter + */ +char *av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df); + +/** + * Get the timecode string from the 25-bit timecode format (MPEG GOP format). + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tc25bit the 25-bits timecode + * @return the buf parameter + */ +char *av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit); + +/** + * Init a timecode struct with the passed parameters. + * + * @param log_ctx a pointer to an arbitrary struct of which the first field + * is a pointer to an AVClass struct (used for av_log) + * @param tc pointer to an allocated AVTimecode + * @param rate frame rate in rational form + * @param flags miscellaneous flags such as drop frame, +24 hours, ... + * (see AVTimecodeFlag) + * @param frame_start the first frame number + * @return 0 on success, AVERROR otherwise + */ +int av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start, void *log_ctx); + +/** + * Parse timecode representation (hh:mm:ss[:;.]ff). + * + * @param log_ctx a pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct (used for av_log). 
+ * @param tc pointer to an allocated AVTimecode + * @param rate frame rate in rational form + * @param str timecode string which will determine the frame start + * @return 0 on success, AVERROR otherwise + */ +int av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx); + +/** + * Check if the timecode feature is available for the given frame rate + * + * @return 0 if supported, <0 otherwise + */ +int av_timecode_check_frame_rate(AVRational rate); + +#endif /* AVUTIL_TIMECODE_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/timestamp.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/timestamp.h new file mode 100644 index 0000000..e082f01 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/timestamp.h @@ -0,0 +1,78 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * timestamp utils, mostly useful for debugging/logging purposes + */ + +#ifndef AVUTIL_TIMESTAMP_H +#define AVUTIL_TIMESTAMP_H + +#include "common.h" + +#if defined(__cplusplus) && !defined(__STDC_FORMAT_MACROS) && !defined(PRId64) +#error missing -D__STDC_FORMAT_MACROS / #define __STDC_FORMAT_MACROS +#endif + +#define AV_TS_MAX_STRING_SIZE 32 + +/** + * Fill the provided buffer with a string containing a timestamp + * representation. + * + * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE + * @param ts the timestamp to represent + * @return the buffer in input + */ +static inline char *av_ts_make_string(char *buf, int64_t ts) +{ + if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); + else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%" PRId64, ts); + return buf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. + */ +#define av_ts2str(ts) av_ts_make_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts) + +/** + * Fill the provided buffer with a string containing a timestamp time + * representation. + * + * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE + * @param ts the timestamp to represent + * @param tb the timebase of the timestamp + * @return the buffer in input + */ +static inline char *av_ts_make_time_string(char *buf, int64_t ts, AVRational *tb) +{ + if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); + else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%.6g", av_q2d(*tb) * ts); + return buf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. 
+ */ +#define av_ts2timestr(ts, tb) av_ts_make_time_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts, tb) + +#endif /* AVUTIL_TIMESTAMP_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/tree.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/tree.h new file mode 100644 index 0000000..d5e0aeb --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/tree.h @@ -0,0 +1,138 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * A tree container. + * @author Michael Niedermayer + */ + +#ifndef AVUTIL_TREE_H +#define AVUTIL_TREE_H + +#include "attributes.h" +#include "version.h" + +/** + * @addtogroup lavu_tree AVTree + * @ingroup lavu_data + * + * Low-complexity tree container + * + * Insertion, removal, finding equal, largest which is smaller than and + * smallest which is larger than, all have O(log n) worst-case complexity. + * @{ + */ + + +struct AVTreeNode; +extern const int av_tree_node_size; + +/** + * Allocate an AVTreeNode. + */ +struct AVTreeNode *av_tree_node_alloc(void); + +/** + * Find an element. 
+ * @param root a pointer to the root node of the tree + * @param next If next is not NULL, then next[0] will contain the previous + * element and next[1] the next element. If either does not exist, + * then the corresponding entry in next is unchanged. + * @param cmp compare function used to compare elements in the tree, + * API identical to that of Standard C's qsort + * It is guaranteed that the first and only the first argument to cmp() + * will be the key parameter to av_tree_find(), thus it could if the + * user wants, be a different type (like an opaque context). + * @return An element with cmp(key, elem) == 0 or NULL if no such element + * exists in the tree. + */ +void *av_tree_find(const struct AVTreeNode *root, void *key, + int (*cmp)(const void *key, const void *b), void *next[2]); + +/** + * Insert or remove an element. + * + * If *next is NULL, then the supplied element will be removed if it exists. + * If *next is non-NULL, then the supplied element will be inserted, unless + * it already exists in the tree. + * + * @param rootp A pointer to a pointer to the root node of the tree; note that + * the root node can change during insertions, this is required + * to keep the tree balanced. + * @param key pointer to the element key to insert in the tree + * @param next Used to allocate and free AVTreeNodes. For insertion the user + * must set it to an allocated and zeroed object of at least + * av_tree_node_size bytes size. av_tree_insert() will set it to + * NULL if it has been consumed. + * For deleting elements *next is set to NULL by the user and + * av_tree_insert() will set it to the AVTreeNode which was + * used for the removed element. + * This allows the use of flat arrays, which have + * lower overhead compared to many malloced elements. 
+ * You might want to define a function like: + * @code + * void *tree_insert(struct AVTreeNode **rootp, void *key, + * int (*cmp)(void *key, const void *b), + * AVTreeNode **next) + * { + * if (!*next) + * *next = av_mallocz(av_tree_node_size); + * return av_tree_insert(rootp, key, cmp, next); + * } + * void *tree_remove(struct AVTreeNode **rootp, void *key, + * int (*cmp)(void *key, const void *b, AVTreeNode **next)) + * { + * av_freep(next); + * return av_tree_insert(rootp, key, cmp, next); + * } + * @endcode + * @param cmp compare function used to compare elements in the tree, API identical + * to that of Standard C's qsort + * @return If no insertion happened, the found element; if an insertion or + * removal happened, then either key or NULL will be returned. + * Which one it is depends on the tree state and the implementation. You + * should make no assumptions that it's one or the other in the code. + */ +void *av_tree_insert(struct AVTreeNode **rootp, void *key, + int (*cmp)(const void *key, const void *b), + struct AVTreeNode **next); + +void av_tree_destroy(struct AVTreeNode *t); + +/** + * Apply enu(opaque, &elem) to all the elements in the tree in a given range. + * + * @param cmp a comparison function that returns < 0 for an element below the + * range, > 0 for an element above the range and == 0 for an + * element inside the range + * + * @note The cmp function should use the same ordering used to construct the + * tree. 
+ */ +void av_tree_enumerate(struct AVTreeNode *t, void *opaque, + int (*cmp)(void *opaque, void *elem), + int (*enu)(void *opaque, void *elem)); + +/** + * @} + */ + +#endif /* AVUTIL_TREE_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/twofish.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/twofish.h new file mode 100644 index 0000000..813cfec --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/twofish.h @@ -0,0 +1,70 @@ +/* + * An implementation of the TwoFish algorithm + * Copyright (c) 2015 Supraja Meedinti + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TWOFISH_H +#define AVUTIL_TWOFISH_H + +#include + + +/** + * @file + * @brief Public header for libavutil TWOFISH algorithm + * @defgroup lavu_twofish TWOFISH + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_twofish_size; + +struct AVTWOFISH; + +/** + * Allocate an AVTWOFISH context + * To free the struct: av_free(ptr) + */ +struct AVTWOFISH *av_twofish_alloc(void); + +/** + * Initialize an AVTWOFISH context. 
+ * + * @param ctx an AVTWOFISH context + * @param key a key of size ranging from 1 to 32 bytes used for encryption/decryption + * @param key_bits number of keybits: 128, 192, 256 If less than the required, padded with zeroes to nearest valid value; return value is 0 if key_bits is 128/192/256, -1 if less than 0, 1 otherwise + */ +int av_twofish_init(struct AVTWOFISH *ctx, const uint8_t *key, int key_bits); + +/** + * Encrypt or decrypt a buffer using a previously initialized context + * + * @param ctx an AVTWOFISH context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 16 byte blocks + * @paran iv initialization vector for CBC mode, NULL for ECB mode + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_twofish_crypt(struct AVTWOFISH *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t* iv, int decrypt); + +/** + * @} + */ +#endif /* AVUTIL_TWOFISH_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/version.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/version.h new file mode 100644 index 0000000..abea216 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/version.h @@ -0,0 +1,145 @@ +/* + * copyright (c) 2003 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu + * Libavutil version macros + */ + +#ifndef AVUTIL_VERSION_H +#define AVUTIL_VERSION_H + +#include "macros.h" + +/** + * @addtogroup version_utils + * + * Useful to check and match library version in order to maintain + * backward compatibility. + * + * The FFmpeg libraries follow a versioning sheme very similar to + * Semantic Versioning (http://semver.org/) + * The difference is that the component called PATCH is called MICRO in FFmpeg + * and its value is reset to 100 instead of 0 to keep it above or equal to 100. + * Also we do not increase MICRO for every bugfix or change in git master. + * + * Prior to FFmpeg 3.2 point releases did not change any lib version number to + * avoid aliassing different git master checkouts. + * Starting with FFmpeg 3.2, the released library versions will occupy + * a separate MAJOR.MINOR that is not used on the master development branch. + * That is if we branch a release of master 55.10.123 we will bump to 55.11.100 + * for the release and master will continue at 55.12.100 after it. Each new + * point release will then bump the MICRO improving the usefulness of the lib + * versions. 
+ * + * @{ + */ + +#define AV_VERSION_INT(a, b, c) ((a)<<16 | (b)<<8 | (c)) +#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c +#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c) + +/** + * Extract version components from the full ::AV_VERSION_INT int as returned + * by functions like ::avformat_version() and ::avcodec_version() + */ +#define AV_VERSION_MAJOR(a) ((a) >> 16) +#define AV_VERSION_MINOR(a) (((a) & 0x00FF00) >> 8) +#define AV_VERSION_MICRO(a) ((a) & 0xFF) + +/** + * @} + */ + +/** + * @defgroup lavu_ver Version and Build diagnostics + * + * Macros and function useful to check at compiletime and at runtime + * which version of libavutil is in use. + * + * @{ + */ + +#define LIBAVUTIL_VERSION_MAJOR 55 +#define LIBAVUTIL_VERSION_MINOR 58 +#define LIBAVUTIL_VERSION_MICRO 100 + +#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ + LIBAVUTIL_VERSION_MINOR, \ + LIBAVUTIL_VERSION_MICRO) +#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \ + LIBAVUTIL_VERSION_MINOR, \ + LIBAVUTIL_VERSION_MICRO) +#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT + +#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION) + +/** + * @defgroup lavu_depr_guards Deprecation Guards + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + * + * @note, when bumping the major version it is recommended to manually + * disable each FF_API_* in its own commit instead of disabling them all + * at once through the bump. This improves the git bisect-ability of the change. 
+ * + * @{ + */ + +#ifndef FF_API_VDPAU +#define FF_API_VDPAU (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_XVMC +#define FF_API_XVMC (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_OPT_TYPE_METADATA +#define FF_API_OPT_TYPE_METADATA (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_DLOG +#define FF_API_DLOG (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_VAAPI +#define FF_API_VAAPI (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_FRAME_QP +#define FF_API_FRAME_QP (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_PLUS1_MINUS1 +#define FF_API_PLUS1_MINUS1 (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_ERROR_FRAME +#define FF_API_ERROR_FRAME (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_CRC_BIG_TABLE +#define FF_API_CRC_BIG_TABLE (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_PKT_PTS +#define FF_API_PKT_PTS (LIBAVUTIL_VERSION_MAJOR < 56) +#endif + + +/** + * @} + * @} + */ + +#endif /* AVUTIL_VERSION_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/x86_64/avconfig.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/x86_64/avconfig.h new file mode 100644 index 0000000..36f72aa --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/x86_64/avconfig.h @@ -0,0 +1,6 @@ +/* Generated by ffconf */ +#ifndef AVUTIL_AVCONFIG_H +#define AVUTIL_AVCONFIG_H +#define AV_HAVE_BIGENDIAN 0 +#define AV_HAVE_FAST_UNALIGNED 0 +#endif /* AVUTIL_AVCONFIG_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/x86_64/ffversion.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/x86_64/ffversion.h new file mode 100644 index 0000000..7ab8f1a --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/x86_64/ffversion.h @@ -0,0 +1,5 @@ +/* Automatically generated by version.sh, do not manually edit! 
*/ +#ifndef AVUTIL_FFVERSION_H +#define AVUTIL_FFVERSION_H +#define FFMPEG_VERSION "ff3.3--fx0.8.0--20210325--fix_android11_crash" +#endif /* AVUTIL_FFVERSION_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/xtea.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/xtea.h new file mode 100644 index 0000000..735427c --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libavutil/xtea.h @@ -0,0 +1,94 @@ +/* + * A 32-bit implementation of the XTEA algorithm + * Copyright (c) 2012 Samuel Pitoiset + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_XTEA_H +#define AVUTIL_XTEA_H + +#include + +/** + * @file + * @brief Public header for libavutil XTEA algorithm + * @defgroup lavu_xtea XTEA + * @ingroup lavu_crypto + * @{ + */ + +typedef struct AVXTEA { + uint32_t key[16]; +} AVXTEA; + +/** + * Allocate an AVXTEA context. + */ +AVXTEA *av_xtea_alloc(void); + +/** + * Initialize an AVXTEA context. 
+ * + * @param ctx an AVXTEA context + * @param key a key of 16 bytes used for encryption/decryption, + * interpreted as big endian 32 bit numbers + */ +void av_xtea_init(struct AVXTEA *ctx, const uint8_t key[16]); + +/** + * Initialize an AVXTEA context. + * + * @param ctx an AVXTEA context + * @param key a key of 16 bytes used for encryption/decryption, + * interpreted as little endian 32 bit numbers + */ +void av_xtea_le_init(struct AVXTEA *ctx, const uint8_t key[16]); + +/** + * Encrypt or decrypt a buffer using a previously initialized context, + * in big endian format. + * + * @param ctx an AVXTEA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_xtea_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context, + * in little endian format. 
+ * + * @param ctx an AVXTEA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_xtea_le_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_XTEA_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libffmpeg/arm64/config.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libffmpeg/arm64/config.h new file mode 100644 index 0000000..6aaff52 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libffmpeg/arm64/config.h @@ -0,0 +1,2276 @@ +/* Automatically generated by configure - do not modify! */ +#ifndef FFMPEG_CONFIG_H +#define FFMPEG_CONFIG_H +#define FFMPEG_CONFIGURATION "--disable-gpl --disable-nonfree --enable-runtime-cpudetect --disable-gray --disable-swscale-alpha --disable-programs --disable-ffmpeg --disable-ffplay --disable-ffprobe --disable-ffserver --disable-doc --disable-htmlpages --disable-manpages --disable-podpages --disable-txtpages --disable-avdevice --enable-avcodec --enable-avformat --enable-avutil --enable-swresample --enable-swscale --disable-postproc --enable-avfilter --disable-avresample --enable-network --disable-d3d11va --disable-dxva2 --disable-vaapi --disable-vda --disable-vdpau --disable-videotoolbox --disable-encoders --enable-encoder=png --disable-decoders --enable-decoder=aac --enable-decoder=aac_latm --enable-decoder=flv --enable-decoder=h264 --enable-decoder='mp3*' --enable-decoder=vp6f --enable-decoder=flac --enable-decoder=mpeg4 --enable-decoder=wavpack --enable-decoder=wav --enable-decoder=pcm_s16le --disable-hwaccels --disable-muxers --enable-muxer=mp4 --disable-demuxers --enable-demuxer=aac 
--enable-demuxer=concat --enable-demuxer=data --enable-demuxer=flv --enable-demuxer=hls --enable-demuxer=live_flv --enable-demuxer=mov --enable-demuxer=mp3 --enable-demuxer=mpegps --enable-demuxer=mpegts --enable-demuxer=mpegvideo --enable-demuxer=flac --enable-demuxer=hevc --enable-demuxer=wav --disable-parsers --enable-parser=aac --enable-parser=aac_latm --enable-parser=h264 --enable-parser=flac --enable-parser=hevc --enable-bsfs --disable-bsf=chomp --disable-bsf=dca_core --disable-bsf=dump_extradata --disable-bsf=hevc_mp4toannexb --disable-bsf=imx_dump_header --disable-bsf=mjpeg2jpeg --disable-bsf=mjpega_dump_header --disable-bsf=mov2textsub --disable-bsf=mp3_header_decompress --disable-bsf=mpeg4_unpack_bframes --disable-bsf=noise --disable-bsf=remove_extradata --disable-bsf=text2movsub --disable-bsf=vp9_superframe --enable-protocols --enable-protocol=async --disable-protocol=bluray --disable-protocol=concat --disable-protocol=ffrtmpcrypt --enable-protocol=ffrtmphttp --disable-protocol=gopher --disable-protocol=icecast --disable-protocol='librtmp*' --disable-protocol=libssh --disable-protocol=md5 --disable-protocol=mmsh --disable-protocol=mmst --disable-protocol='rtmp*' --enable-protocol=rtmp --enable-protocol=rtmpt --disable-protocol=rtp --disable-protocol=sctp --disable-protocol=srtp --disable-protocol=subfile --disable-protocol=unix --disable-devices --disable-filters --disable-iconv --disable-audiotoolbox --disable-videotoolbox --enable-cross-compile --disable-stripping --arch=arm64 --target-os=darwin --enable-static --disable-shared --enable-pic --enable-neon --enable-optimizations --enable-debug --enable-small --prefix=/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/build/ffmpeg-arm64/output --enable-openssl --cc='xcrun -sdk iphoneos clang' --extra-cflags=' -arch arm64 -miphoneos-version-min=7.0 -I/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/boringssl/include -DQCLOUDSSL' --extra-cxxflags=' 
-arch arm64 -miphoneos-version-min=7.0 -I/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/boringssl/include -DQCLOUDSSL' --extra-ldflags=' -arch arm64 -miphoneos-version-min=7.0 -arch arm64 -miphoneos-version-min=7.0 -I/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/boringssl/include -DQCLOUDSSL -L/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/boringssl/lib -lboringssl'" +#define FFMPEG_LICENSE "LGPL version 2.1 or later" +#define CONFIG_THIS_YEAR 2017 +#define FFMPEG_DATADIR "/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/build/ffmpeg-arm64/output/share/ffmpeg" +#define AVCONV_DATADIR "/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/build/ffmpeg-arm64/output/share/ffmpeg" +#define CC_IDENT "Apple LLVM version 10.0.1 (clang-1001.0.46.4)" +#define av_restrict restrict +#define EXTERN_PREFIX "_" +#define EXTERN_ASM _ +#define BUILDSUF "" +#define SLIBSUF ".dylib" +#define HAVE_MMX2 HAVE_MMXEXT +#define SWS_MAX_FILTER_SIZE 256 +#define ARCH_AARCH64 1 +#define ARCH_ALPHA 0 +#define ARCH_ARM 0 +#define ARCH_AVR32 0 +#define ARCH_AVR32_AP 0 +#define ARCH_AVR32_UC 0 +#define ARCH_BFIN 0 +#define ARCH_IA64 0 +#define ARCH_M68K 0 +#define ARCH_MIPS 0 +#define ARCH_MIPS64 0 +#define ARCH_PARISC 0 +#define ARCH_PPC 0 +#define ARCH_PPC64 0 +#define ARCH_S390 0 +#define ARCH_SH4 0 +#define ARCH_SPARC 0 +#define ARCH_SPARC64 0 +#define ARCH_TILEGX 0 +#define ARCH_TILEPRO 0 +#define ARCH_TOMI 0 +#define ARCH_X86 0 +#define ARCH_X86_32 0 +#define ARCH_X86_64 0 +#define HAVE_ARMV5TE 0 +#define HAVE_ARMV6 0 +#define HAVE_ARMV6T2 0 +#define HAVE_ARMV8 1 +#define HAVE_NEON 1 +#define HAVE_VFP 1 +#define HAVE_VFPV3 0 +#define HAVE_SETEND 0 +#define HAVE_ALTIVEC 0 +#define HAVE_DCBZL 0 +#define HAVE_LDBRX 0 +#define HAVE_POWER8 0 +#define HAVE_PPC4XX 0 +#define HAVE_VSX 0 +#define HAVE_AESNI 0 +#define HAVE_AMD3DNOW 0 +#define 
HAVE_AMD3DNOWEXT 0 +#define HAVE_AVX 0 +#define HAVE_AVX2 0 +#define HAVE_FMA3 0 +#define HAVE_FMA4 0 +#define HAVE_MMX 0 +#define HAVE_MMXEXT 0 +#define HAVE_SSE 0 +#define HAVE_SSE2 0 +#define HAVE_SSE3 0 +#define HAVE_SSE4 0 +#define HAVE_SSE42 0 +#define HAVE_SSSE3 0 +#define HAVE_XOP 0 +#define HAVE_CPUNOP 0 +#define HAVE_I686 0 +#define HAVE_MIPSFPU 0 +#define HAVE_MIPS32R2 0 +#define HAVE_MIPS32R5 0 +#define HAVE_MIPS64R2 0 +#define HAVE_MIPS32R6 0 +#define HAVE_MIPS64R6 0 +#define HAVE_MIPSDSP 0 +#define HAVE_MIPSDSPR2 0 +#define HAVE_MSA 0 +#define HAVE_LOONGSON2 0 +#define HAVE_LOONGSON3 0 +#define HAVE_MMI 0 +#define HAVE_ARMV5TE_EXTERNAL 0 +#define HAVE_ARMV6_EXTERNAL 0 +#define HAVE_ARMV6T2_EXTERNAL 0 +#define HAVE_ARMV8_EXTERNAL 1 +#define HAVE_NEON_EXTERNAL 1 +#define HAVE_VFP_EXTERNAL 1 +#define HAVE_VFPV3_EXTERNAL 0 +#define HAVE_SETEND_EXTERNAL 0 +#define HAVE_ALTIVEC_EXTERNAL 0 +#define HAVE_DCBZL_EXTERNAL 0 +#define HAVE_LDBRX_EXTERNAL 0 +#define HAVE_POWER8_EXTERNAL 0 +#define HAVE_PPC4XX_EXTERNAL 0 +#define HAVE_VSX_EXTERNAL 0 +#define HAVE_AESNI_EXTERNAL 0 +#define HAVE_AMD3DNOW_EXTERNAL 0 +#define HAVE_AMD3DNOWEXT_EXTERNAL 0 +#define HAVE_AVX_EXTERNAL 0 +#define HAVE_AVX2_EXTERNAL 0 +#define HAVE_FMA3_EXTERNAL 0 +#define HAVE_FMA4_EXTERNAL 0 +#define HAVE_MMX_EXTERNAL 0 +#define HAVE_MMXEXT_EXTERNAL 0 +#define HAVE_SSE_EXTERNAL 0 +#define HAVE_SSE2_EXTERNAL 0 +#define HAVE_SSE3_EXTERNAL 0 +#define HAVE_SSE4_EXTERNAL 0 +#define HAVE_SSE42_EXTERNAL 0 +#define HAVE_SSSE3_EXTERNAL 0 +#define HAVE_XOP_EXTERNAL 0 +#define HAVE_CPUNOP_EXTERNAL 0 +#define HAVE_I686_EXTERNAL 0 +#define HAVE_MIPSFPU_EXTERNAL 0 +#define HAVE_MIPS32R2_EXTERNAL 0 +#define HAVE_MIPS32R5_EXTERNAL 0 +#define HAVE_MIPS64R2_EXTERNAL 0 +#define HAVE_MIPS32R6_EXTERNAL 0 +#define HAVE_MIPS64R6_EXTERNAL 0 +#define HAVE_MIPSDSP_EXTERNAL 0 +#define HAVE_MIPSDSPR2_EXTERNAL 0 +#define HAVE_MSA_EXTERNAL 0 +#define HAVE_LOONGSON2_EXTERNAL 0 +#define HAVE_LOONGSON3_EXTERNAL 0 +#define 
HAVE_MMI_EXTERNAL 0 +#define HAVE_ARMV5TE_INLINE 0 +#define HAVE_ARMV6_INLINE 0 +#define HAVE_ARMV6T2_INLINE 0 +#define HAVE_ARMV8_INLINE 1 +#define HAVE_NEON_INLINE 1 +#define HAVE_VFP_INLINE 1 +#define HAVE_VFPV3_INLINE 0 +#define HAVE_SETEND_INLINE 0 +#define HAVE_ALTIVEC_INLINE 0 +#define HAVE_DCBZL_INLINE 0 +#define HAVE_LDBRX_INLINE 0 +#define HAVE_POWER8_INLINE 0 +#define HAVE_PPC4XX_INLINE 0 +#define HAVE_VSX_INLINE 0 +#define HAVE_AESNI_INLINE 0 +#define HAVE_AMD3DNOW_INLINE 0 +#define HAVE_AMD3DNOWEXT_INLINE 0 +#define HAVE_AVX_INLINE 0 +#define HAVE_AVX2_INLINE 0 +#define HAVE_FMA3_INLINE 0 +#define HAVE_FMA4_INLINE 0 +#define HAVE_MMX_INLINE 0 +#define HAVE_MMXEXT_INLINE 0 +#define HAVE_SSE_INLINE 0 +#define HAVE_SSE2_INLINE 0 +#define HAVE_SSE3_INLINE 0 +#define HAVE_SSE4_INLINE 0 +#define HAVE_SSE42_INLINE 0 +#define HAVE_SSSE3_INLINE 0 +#define HAVE_XOP_INLINE 0 +#define HAVE_CPUNOP_INLINE 0 +#define HAVE_I686_INLINE 0 +#define HAVE_MIPSFPU_INLINE 0 +#define HAVE_MIPS32R2_INLINE 0 +#define HAVE_MIPS32R5_INLINE 0 +#define HAVE_MIPS64R2_INLINE 0 +#define HAVE_MIPS32R6_INLINE 0 +#define HAVE_MIPS64R6_INLINE 0 +#define HAVE_MIPSDSP_INLINE 0 +#define HAVE_MIPSDSPR2_INLINE 0 +#define HAVE_MSA_INLINE 0 +#define HAVE_LOONGSON2_INLINE 0 +#define HAVE_LOONGSON3_INLINE 0 +#define HAVE_MMI_INLINE 0 +#define HAVE_ALIGNED_STACK 1 +#define HAVE_FAST_64BIT 1 +#define HAVE_FAST_CLZ 1 +#define HAVE_FAST_CMOV 0 +#define HAVE_LOCAL_ALIGNED_8 0 +#define HAVE_LOCAL_ALIGNED_16 0 +#define HAVE_LOCAL_ALIGNED_32 0 +#define HAVE_SIMD_ALIGN_16 1 +#define HAVE_SIMD_ALIGN_32 0 +#define HAVE_ATOMICS_GCC 1 +#define HAVE_ATOMICS_SUNCC 0 +#define HAVE_ATOMICS_WIN32 0 +#define HAVE_ATOMIC_CAS_PTR 0 +#define HAVE_MACHINE_RW_BARRIER 0 +#define HAVE_MEMORYBARRIER 0 +#define HAVE_MM_EMPTY 0 +#define HAVE_RDTSC 0 +#define HAVE_SARESTART 1 +#define HAVE_SEM_TIMEDWAIT 0 +#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1 +#define HAVE_CABS 1 +#define HAVE_CEXP 1 +#define HAVE_INLINE_ASM 1 +#define 
HAVE_SYMVER 1 +#define HAVE_YASM 0 +#define HAVE_BIGENDIAN 0 +#define HAVE_FAST_UNALIGNED 1 +#define HAVE_ALSA_ASOUNDLIB_H 0 +#define HAVE_ALTIVEC_H 0 +#define HAVE_ARPA_INET_H 1 +#define HAVE_ASM_TYPES_H 0 +#define HAVE_CDIO_PARANOIA_H 0 +#define HAVE_CDIO_PARANOIA_PARANOIA_H 0 +#define HAVE_CUDA_H 0 +#define HAVE_DISPATCH_DISPATCH_H 1 +#define HAVE_DEV_BKTR_IOCTL_BT848_H 0 +#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0 +#define HAVE_DEV_IC_BT8XX_H 0 +#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0 +#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0 +#define HAVE_DIRECT_H 0 +#define HAVE_DIRENT_H 1 +#define HAVE_DLFCN_H 1 +#define HAVE_D3D11_H 0 +#define HAVE_DXVA_H 0 +#define HAVE_ES2_GL_H 0 +#define HAVE_GSM_H 0 +#define HAVE_IO_H 0 +#define HAVE_MACH_MACH_TIME_H 1 +#define HAVE_MACHINE_IOCTL_BT848_H 0 +#define HAVE_MACHINE_IOCTL_METEOR_H 0 +#define HAVE_MALLOC_H 0 +#define HAVE_OPENCV2_CORE_CORE_C_H 0 +#define HAVE_OPENJPEG_2_1_OPENJPEG_H 0 +#define HAVE_OPENJPEG_2_0_OPENJPEG_H 0 +#define HAVE_OPENJPEG_1_5_OPENJPEG_H 0 +#define HAVE_OPENGL_GL3_H 0 +#define HAVE_POLL_H 1 +#define HAVE_SNDIO_H 0 +#define HAVE_SOUNDCARD_H 0 +#define HAVE_STDATOMIC_H 1 +#define HAVE_SYS_MMAN_H 1 +#define HAVE_SYS_PARAM_H 1 +#define HAVE_SYS_RESOURCE_H 1 +#define HAVE_SYS_SELECT_H 1 +#define HAVE_SYS_SOUNDCARD_H 0 +#define HAVE_SYS_TIME_H 1 +#define HAVE_SYS_UN_H 1 +#define HAVE_SYS_VIDEOIO_H 0 +#define HAVE_TERMIOS_H 1 +#define HAVE_UDPLITE_H 0 +#define HAVE_UNISTD_H 1 +#define HAVE_VALGRIND_VALGRIND_H 0 +#define HAVE_WINDOWS_H 0 +#define HAVE_WINSOCK2_H 0 +#define HAVE_INTRINSICS_NEON 1 +#define HAVE_ATANF 1 +#define HAVE_ATAN2F 1 +#define HAVE_CBRT 1 +#define HAVE_CBRTF 1 +#define HAVE_COPYSIGN 1 +#define HAVE_COSF 1 +#define HAVE_ERF 1 +#define HAVE_EXP2 1 +#define HAVE_EXP2F 1 +#define HAVE_EXPF 1 +#define HAVE_HYPOT 1 +#define HAVE_ISFINITE 1 +#define HAVE_ISINF 1 +#define HAVE_ISNAN 1 +#define HAVE_LDEXPF 1 +#define HAVE_LLRINT 1 +#define HAVE_LLRINTF 1 +#define HAVE_LOG2 1 +#define 
HAVE_LOG2F 1 +#define HAVE_LOG10F 1 +#define HAVE_LRINT 1 +#define HAVE_LRINTF 1 +#define HAVE_POWF 1 +#define HAVE_RINT 1 +#define HAVE_ROUND 1 +#define HAVE_ROUNDF 1 +#define HAVE_SINF 1 +#define HAVE_TRUNC 1 +#define HAVE_TRUNCF 1 +#define HAVE_ACCESS 1 +#define HAVE_ALIGNED_MALLOC 0 +#define HAVE_ARC4RANDOM 1 +#define HAVE_CLOCK_GETTIME 1 +#define HAVE_CLOSESOCKET 0 +#define HAVE_COMMANDLINETOARGVW 0 +#define HAVE_COTASKMEMFREE 0 +#define HAVE_CRYPTGENRANDOM 0 +#define HAVE_DLOPEN 1 +#define HAVE_FCNTL 1 +#define HAVE_FLT_LIM 1 +#define HAVE_FORK 1 +#define HAVE_GETADDRINFO 1 +#define HAVE_GETHRTIME 0 +#define HAVE_GETOPT 1 +#define HAVE_GETPROCESSAFFINITYMASK 0 +#define HAVE_GETPROCESSMEMORYINFO 0 +#define HAVE_GETPROCESSTIMES 0 +#define HAVE_GETRUSAGE 1 +#define HAVE_GETSYSTEMTIMEASFILETIME 0 +#define HAVE_GETTIMEOFDAY 1 +#define HAVE_GLOB 1 +#define HAVE_GLXGETPROCADDRESS 0 +#define HAVE_GMTIME_R 1 +#define HAVE_INET_ATON 1 +#define HAVE_ISATTY 1 +#define HAVE_JACK_PORT_GET_LATENCY_RANGE 0 +#define HAVE_KBHIT 0 +#define HAVE_LOADLIBRARY 0 +#define HAVE_LOCALTIME_R 1 +#define HAVE_LSTAT 1 +#define HAVE_LZO1X_999_COMPRESS 0 +#define HAVE_MACH_ABSOLUTE_TIME 1 +#define HAVE_MAPVIEWOFFILE 0 +#define HAVE_MEMALIGN 0 +#define HAVE_MKSTEMP 1 +#define HAVE_MMAP 1 +#define HAVE_MPROTECT 1 +#define HAVE_NANOSLEEP 1 +#define HAVE_PEEKNAMEDPIPE 0 +#define HAVE_POSIX_MEMALIGN 1 +#define HAVE_PTHREAD_CANCEL 1 +#define HAVE_SCHED_GETAFFINITY 0 +#define HAVE_SETCONSOLETEXTATTRIBUTE 0 +#define HAVE_SETCONSOLECTRLHANDLER 0 +#define HAVE_SETMODE 0 +#define HAVE_SETRLIMIT 1 +#define HAVE_SLEEP 0 +#define HAVE_STRERROR_R 1 +#define HAVE_SYSCONF 1 +#define HAVE_SYSCTL 1 +#define HAVE_USLEEP 1 +#define HAVE_UTGETOSTYPEFROMSTRING 0 +#define HAVE_VIRTUALALLOC 0 +#define HAVE_WGLGETPROCADDRESS 0 +#define HAVE_PTHREADS 1 +#define HAVE_OS2THREADS 0 +#define HAVE_W32THREADS 0 +#define HAVE_AS_DN_DIRECTIVE 0 +#define HAVE_AS_FPU_DIRECTIVE 0 +#define HAVE_AS_FUNC 0 +#define 
HAVE_AS_OBJECT_ARCH 0 +#define HAVE_ASM_MOD_Q 0 +#define HAVE_ATTRIBUTE_MAY_ALIAS 1 +#define HAVE_ATTRIBUTE_PACKED 1 +#define HAVE_EBP_AVAILABLE 0 +#define HAVE_EBX_AVAILABLE 0 +#define HAVE_GNU_AS 1 +#define HAVE_GNU_WINDRES 0 +#define HAVE_IBM_ASM 0 +#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0 +#define HAVE_INLINE_ASM_LABELS 1 +#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1 +#define HAVE_PRAGMA_DEPRECATED 1 +#define HAVE_RSYNC_CONTIMEOUT 0 +#define HAVE_SYMVER_ASM_LABEL 1 +#define HAVE_SYMVER_GNU_ASM 0 +#define HAVE_VFP_ARGS 0 +#define HAVE_XFORM_ASM 0 +#define HAVE_XMM_CLOBBERS 0 +#define HAVE_CONDITION_VARIABLE_PTR 0 +#define HAVE_SOCKLEN_T 1 +#define HAVE_STRUCT_ADDRINFO 1 +#define HAVE_STRUCT_GROUP_SOURCE_REQ 1 +#define HAVE_STRUCT_IP_MREQ_SOURCE 1 +#define HAVE_STRUCT_IPV6_MREQ 1 +#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 1 +#define HAVE_STRUCT_POLLFD 1 +#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1 +#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0 +#define HAVE_STRUCT_SOCKADDR_IN6 1 +#define HAVE_STRUCT_SOCKADDR_SA_LEN 1 +#define HAVE_STRUCT_SOCKADDR_STORAGE 1 +#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 0 +#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0 +#define HAVE_ATOMICS_NATIVE 1 +#define HAVE_DOS_PATHS 0 +#define HAVE_DXVA2_LIB 0 +#define HAVE_DXVA2API_COBJ 0 +#define HAVE_LIBC_MSVCRT 0 +#define HAVE_LIBDC1394_1 0 +#define HAVE_LIBDC1394_2 0 +#define HAVE_MAKEINFO 1 +#define HAVE_MAKEINFO_HTML 0 +#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0 +#define HAVE_PERL 1 +#define HAVE_POD2MAN 1 +#define HAVE_SDL2 0 +#define HAVE_SECTION_DATA_REL_RO 0 +#define HAVE_TEXI2HTML 0 +#define HAVE_THREADS 1 +#define HAVE_VAAPI_DRM 0 +#define HAVE_VAAPI_X11 0 +#define HAVE_VDPAU_X11 0 +#define HAVE_WINRT 0 +#define HAVE_XLIB 1 +#define CONFIG_BSFS 1 +#define CONFIG_DECODERS 1 +#define CONFIG_ENCODERS 1 +#define CONFIG_HWACCELS 0 +#define CONFIG_PARSERS 1 +#define CONFIG_INDEVS 0 +#define CONFIG_OUTDEVS 0 +#define CONFIG_FILTERS 0 +#define CONFIG_DEMUXERS 1 +#define CONFIG_MUXERS 1 
+#define CONFIG_PROTOCOLS 1 +#define CONFIG_DOC 0 +#define CONFIG_HTMLPAGES 0 +#define CONFIG_MANPAGES 0 +#define CONFIG_PODPAGES 0 +#define CONFIG_TXTPAGES 0 +#define CONFIG_AVIO_DIR_CMD_EXAMPLE 1 +#define CONFIG_AVIO_READING_EXAMPLE 1 +#define CONFIG_DECODE_AUDIO_EXAMPLE 1 +#define CONFIG_DECODE_VIDEO_EXAMPLE 1 +#define CONFIG_DEMUXING_DECODING_EXAMPLE 1 +#define CONFIG_ENCODE_AUDIO_EXAMPLE 1 +#define CONFIG_ENCODE_VIDEO_EXAMPLE 1 +#define CONFIG_EXTRACT_MVS_EXAMPLE 1 +#define CONFIG_FILTER_AUDIO_EXAMPLE 1 +#define CONFIG_FILTERING_AUDIO_EXAMPLE 1 +#define CONFIG_FILTERING_VIDEO_EXAMPLE 1 +#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 1 +#define CONFIG_METADATA_EXAMPLE 1 +#define CONFIG_MUXING_EXAMPLE 1 +#define CONFIG_QSVDEC_EXAMPLE 0 +#define CONFIG_REMUXING_EXAMPLE 1 +#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 1 +#define CONFIG_SCALING_VIDEO_EXAMPLE 1 +#define CONFIG_TRANSCODE_AAC_EXAMPLE 1 +#define CONFIG_TRANSCODING_EXAMPLE 1 +#define CONFIG_BZLIB 0 +#define CONFIG_ICONV 0 +#define CONFIG_LIBXCB 0 +#define CONFIG_LIBXCB_SHM 0 +#define CONFIG_LIBXCB_SHAPE 0 +#define CONFIG_LIBXCB_XFIXES 0 +#define CONFIG_LZMA 0 +#define CONFIG_SCHANNEL 0 +#define CONFIG_SDL 0 +#define CONFIG_SDL2 0 +#define CONFIG_SECURETRANSPORT 0 +#define CONFIG_XLIB 1 +#define CONFIG_ZLIB 1 +#define CONFIG_AVISYNTH 0 +#define CONFIG_FREI0R 0 +#define CONFIG_LIBCDIO 0 +#define CONFIG_LIBRUBBERBAND 0 +#define CONFIG_LIBVIDSTAB 0 +#define CONFIG_LIBX264 0 +#define CONFIG_LIBX265 0 +#define CONFIG_LIBXAVS 0 +#define CONFIG_LIBXVID 0 +#define CONFIG_DECKLINK 0 +#define CONFIG_LIBFDK_AAC 0 +#define CONFIG_OPENSSL 1 +#define CONFIG_GMP 0 +#define CONFIG_LIBOPENCORE_AMRNB 0 +#define CONFIG_LIBOPENCORE_AMRWB 0 +#define CONFIG_LIBVO_AMRWBENC 0 +#define CONFIG_LIBSMBCLIENT 0 +#define CONFIG_CHROMAPRINT 0 +#define CONFIG_CRYSTALHD 0 +#define CONFIG_GCRYPT 0 +#define CONFIG_GNUTLS 0 +#define CONFIG_JNI 0 +#define CONFIG_LADSPA 0 +#define CONFIG_LIBASS 0 +#define CONFIG_LIBBLURAY 0 +#define CONFIG_LIBBS2B 0 
+#define CONFIG_LIBCACA 0 +#define CONFIG_LIBCELT 0 +#define CONFIG_LIBDC1394 0 +#define CONFIG_LIBFLITE 0 +#define CONFIG_LIBFONTCONFIG 0 +#define CONFIG_LIBFREETYPE 0 +#define CONFIG_LIBFRIBIDI 0 +#define CONFIG_LIBGME 0 +#define CONFIG_LIBGSM 0 +#define CONFIG_LIBIEC61883 0 +#define CONFIG_LIBILBC 0 +#define CONFIG_LIBKVAZAAR 0 +#define CONFIG_LIBMODPLUG 0 +#define CONFIG_LIBMP3LAME 0 +#define CONFIG_LIBNUT 0 +#define CONFIG_LIBOPENCV 0 +#define CONFIG_LIBOPENH264 0 +#define CONFIG_LIBOPENJPEG 0 +#define CONFIG_LIBOPENMPT 0 +#define CONFIG_LIBOPUS 0 +#define CONFIG_LIBPULSE 0 +#define CONFIG_LIBRTMP 0 +#define CONFIG_LIBSCHROEDINGER 0 +#define CONFIG_LIBSHINE 0 +#define CONFIG_LIBSMBCLIENT 0 +#define CONFIG_LIBSNAPPY 0 +#define CONFIG_LIBSOXR 0 +#define CONFIG_LIBSPEEX 0 +#define CONFIG_LIBSSH 0 +#define CONFIG_LIBTESSERACT 0 +#define CONFIG_LIBTHEORA 0 +#define CONFIG_LIBTWOLAME 0 +#define CONFIG_LIBV4L2 0 +#define CONFIG_LIBVORBIS 0 +#define CONFIG_LIBVPX 0 +#define CONFIG_LIBWAVPACK 0 +#define CONFIG_LIBWEBP 0 +#define CONFIG_LIBZIMG 0 +#define CONFIG_LIBZMQ 0 +#define CONFIG_LIBZVBI 0 +#define CONFIG_MEDIACODEC 0 +#define CONFIG_NETCDF 0 +#define CONFIG_OPENAL 0 +#define CONFIG_OPENCL 0 +#define CONFIG_OPENGL 0 +#define CONFIG_VIDEOTOOLBOX 0 +#define CONFIG_AUDIOTOOLBOX 0 +#define CONFIG_CUDA 0 +#define CONFIG_CUVID 0 +#define CONFIG_D3D11VA 0 +#define CONFIG_DXVA2 0 +#define CONFIG_NVENC 0 +#define CONFIG_VAAPI 0 +#define CONFIG_VDA 0 +#define CONFIG_VDPAU 0 +#define CONFIG_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_XVMC 0 +#define CONFIG_LIBNPP 0 +#define CONFIG_LIBMFX 0 +#define CONFIG_MMAL 0 +#define CONFIG_OMX 0 +#define CONFIG_FTRAPV 0 +#define CONFIG_GRAY 0 +#define CONFIG_HARDCODED_TABLES 0 +#define CONFIG_OMX_RPI 0 +#define CONFIG_RUNTIME_CPUDETECT 1 +#define CONFIG_SAFE_BITSTREAM_READER 1 +#define CONFIG_SHARED 0 +#define CONFIG_SMALL 1 +#define CONFIG_STATIC 1 +#define CONFIG_SWSCALE_ALPHA 0 +#define CONFIG_GPL 0 +#define CONFIG_NONFREE 0 +#define 
CONFIG_VERSION3 0 +#define CONFIG_AVCODEC 1 +#define CONFIG_AVDEVICE 0 +#define CONFIG_AVFILTER 1 +#define CONFIG_AVFORMAT 1 +#define CONFIG_AVRESAMPLE 0 +#define CONFIG_AVUTIL 1 +#define CONFIG_POSTPROC 0 +#define CONFIG_SWRESAMPLE 1 +#define CONFIG_SWSCALE 1 +#define CONFIG_FFPLAY 0 +#define CONFIG_FFPROBE 0 +#define CONFIG_FFSERVER 0 +#define CONFIG_FFMPEG 0 +#define CONFIG_DCT 1 +#define CONFIG_DWT 0 +#define CONFIG_ERROR_RESILIENCE 1 +#define CONFIG_FAAN 1 +#define CONFIG_FAST_UNALIGNED 1 +#define CONFIG_FFT 1 +#define CONFIG_LSP 0 +#define CONFIG_LZO 0 +#define CONFIG_MDCT 1 +#define CONFIG_PIXELUTILS 0 +#define CONFIG_NETWORK 1 +#define CONFIG_RDFT 1 +#define CONFIG_FONTCONFIG 0 +#define CONFIG_MEMORY_POISONING 0 +#define CONFIG_NEON_CLOBBER_TEST 0 +#define CONFIG_PIC 1 +#define CONFIG_RAISE_MAJOR 0 +#define CONFIG_THUMB 0 +#define CONFIG_VALGRIND_BACKTRACE 0 +#define CONFIG_XMM_CLOBBER_TEST 0 +#define CONFIG_AANDCTTABLES 0 +#define CONFIG_AC3DSP 0 +#define CONFIG_AUDIO_FRAME_QUEUE 0 +#define CONFIG_AUDIODSP 0 +#define CONFIG_BLOCKDSP 1 +#define CONFIG_BSWAPDSP 0 +#define CONFIG_CABAC 1 +#define CONFIG_DIRAC_PARSE 0 +#define CONFIG_DVPROFILE 0 +#define CONFIG_EXIF 0 +#define CONFIG_FAANDCT 1 +#define CONFIG_FAANIDCT 1 +#define CONFIG_FDCTDSP 1 +#define CONFIG_FLACDSP 1 +#define CONFIG_FMTCONVERT 0 +#define CONFIG_FRAME_THREAD_ENCODER 1 +#define CONFIG_G722DSP 0 +#define CONFIG_GOLOMB 1 +#define CONFIG_GPLV3 0 +#define CONFIG_H263DSP 1 +#define CONFIG_H264CHROMA 1 +#define CONFIG_H264DSP 1 +#define CONFIG_H264PARSE 1 +#define CONFIG_H264PRED 1 +#define CONFIG_H264QPEL 1 +#define CONFIG_HPELDSP 1 +#define CONFIG_HUFFMAN 1 +#define CONFIG_HUFFYUVDSP 0 +#define CONFIG_HUFFYUVENCDSP 0 +#define CONFIG_IDCTDSP 1 +#define CONFIG_IIRFILTER 0 +#define CONFIG_MDCT15 1 +#define CONFIG_INTRAX8 0 +#define CONFIG_ISO_MEDIA 1 +#define CONFIG_IVIDSP 0 +#define CONFIG_JPEGTABLES 0 +#define CONFIG_LGPLV3 0 +#define CONFIG_LIBX262 0 +#define CONFIG_LLAUDDSP 0 +#define 
CONFIG_LLVIDDSP 0 +#define CONFIG_LLVIDENCDSP 1 +#define CONFIG_LPC 0 +#define CONFIG_LZF 0 +#define CONFIG_ME_CMP 1 +#define CONFIG_MPEG_ER 1 +#define CONFIG_MPEGAUDIO 1 +#define CONFIG_MPEGAUDIODSP 1 +#define CONFIG_MPEGVIDEO 1 +#define CONFIG_MPEGVIDEOENC 0 +#define CONFIG_MSS34DSP 0 +#define CONFIG_PIXBLOCKDSP 1 +#define CONFIG_QPELDSP 1 +#define CONFIG_QSV 0 +#define CONFIG_QSVDEC 0 +#define CONFIG_QSVENC 0 +#define CONFIG_RANGECODER 0 +#define CONFIG_RIFFDEC 1 +#define CONFIG_RIFFENC 1 +#define CONFIG_RTPDEC 0 +#define CONFIG_RTPENC_CHAIN 1 +#define CONFIG_RV34DSP 0 +#define CONFIG_SINEWIN 1 +#define CONFIG_SNAPPY 0 +#define CONFIG_SRTP 0 +#define CONFIG_STARTCODE 1 +#define CONFIG_TEXTUREDSP 0 +#define CONFIG_TEXTUREDSPENC 0 +#define CONFIG_TPELDSP 0 +#define CONFIG_VAAPI_ENCODE 0 +#define CONFIG_VC1DSP 0 +#define CONFIG_VIDEODSP 1 +#define CONFIG_VP3DSP 1 +#define CONFIG_VP56DSP 1 +#define CONFIG_VP8DSP 0 +#define CONFIG_VT_BT2020 0 +#define CONFIG_WMA_FREQS 0 +#define CONFIG_WMV2DSP 0 +#define CONFIG_AAC_ADTSTOASC_BSF 1 +#define CONFIG_CHOMP_BSF 0 +#define CONFIG_DUMP_EXTRADATA_BSF 0 +#define CONFIG_DCA_CORE_BSF 0 +#define CONFIG_EXTRACT_EXTRADATA_BSF 1 +#define CONFIG_H264_MP4TOANNEXB_BSF 1 +#define CONFIG_HEVC_MP4TOANNEXB_BSF 0 +#define CONFIG_IMX_DUMP_HEADER_BSF 0 +#define CONFIG_MJPEG2JPEG_BSF 0 +#define CONFIG_MJPEGA_DUMP_HEADER_BSF 0 +#define CONFIG_MP3_HEADER_DECOMPRESS_BSF 0 +#define CONFIG_MPEG4_UNPACK_BFRAMES_BSF 0 +#define CONFIG_MOV2TEXTSUB_BSF 0 +#define CONFIG_NOISE_BSF 0 +#define CONFIG_REMOVE_EXTRADATA_BSF 0 +#define CONFIG_TEXT2MOVSUB_BSF 0 +#define CONFIG_VP9_SUPERFRAME_BSF 0 +#define CONFIG_AASC_DECODER 0 +#define CONFIG_AIC_DECODER 0 +#define CONFIG_ALIAS_PIX_DECODER 0 +#define CONFIG_AMV_DECODER 0 +#define CONFIG_ANM_DECODER 0 +#define CONFIG_ANSI_DECODER 0 +#define CONFIG_APNG_DECODER 0 +#define CONFIG_ASV1_DECODER 0 +#define CONFIG_ASV2_DECODER 0 +#define CONFIG_AURA_DECODER 0 +#define CONFIG_AURA2_DECODER 0 +#define 
CONFIG_AVRP_DECODER 0 +#define CONFIG_AVRN_DECODER 0 +#define CONFIG_AVS_DECODER 0 +#define CONFIG_AVUI_DECODER 0 +#define CONFIG_AYUV_DECODER 0 +#define CONFIG_BETHSOFTVID_DECODER 0 +#define CONFIG_BFI_DECODER 0 +#define CONFIG_BINK_DECODER 0 +#define CONFIG_BMP_DECODER 0 +#define CONFIG_BMV_VIDEO_DECODER 0 +#define CONFIG_BRENDER_PIX_DECODER 0 +#define CONFIG_C93_DECODER 0 +#define CONFIG_CAVS_DECODER 0 +#define CONFIG_CDGRAPHICS_DECODER 0 +#define CONFIG_CDXL_DECODER 0 +#define CONFIG_CFHD_DECODER 0 +#define CONFIG_CINEPAK_DECODER 0 +#define CONFIG_CLEARVIDEO_DECODER 0 +#define CONFIG_CLJR_DECODER 0 +#define CONFIG_CLLC_DECODER 0 +#define CONFIG_COMFORTNOISE_DECODER 0 +#define CONFIG_CPIA_DECODER 0 +#define CONFIG_CSCD_DECODER 0 +#define CONFIG_CYUV_DECODER 0 +#define CONFIG_DDS_DECODER 0 +#define CONFIG_DFA_DECODER 0 +#define CONFIG_DIRAC_DECODER 0 +#define CONFIG_DNXHD_DECODER 0 +#define CONFIG_DPX_DECODER 0 +#define CONFIG_DSICINVIDEO_DECODER 0 +#define CONFIG_DVAUDIO_DECODER 0 +#define CONFIG_DVVIDEO_DECODER 0 +#define CONFIG_DXA_DECODER 0 +#define CONFIG_DXTORY_DECODER 0 +#define CONFIG_DXV_DECODER 0 +#define CONFIG_EACMV_DECODER 0 +#define CONFIG_EAMAD_DECODER 0 +#define CONFIG_EATGQ_DECODER 0 +#define CONFIG_EATGV_DECODER 0 +#define CONFIG_EATQI_DECODER 0 +#define CONFIG_EIGHTBPS_DECODER 0 +#define CONFIG_EIGHTSVX_EXP_DECODER 0 +#define CONFIG_EIGHTSVX_FIB_DECODER 0 +#define CONFIG_ESCAPE124_DECODER 0 +#define CONFIG_ESCAPE130_DECODER 0 +#define CONFIG_EXR_DECODER 0 +#define CONFIG_FFV1_DECODER 0 +#define CONFIG_FFVHUFF_DECODER 0 +#define CONFIG_FIC_DECODER 0 +#define CONFIG_FLASHSV_DECODER 0 +#define CONFIG_FLASHSV2_DECODER 0 +#define CONFIG_FLIC_DECODER 0 +#define CONFIG_FLV_DECODER 1 +#define CONFIG_FMVC_DECODER 0 +#define CONFIG_FOURXM_DECODER 0 +#define CONFIG_FRAPS_DECODER 0 +#define CONFIG_FRWU_DECODER 0 +#define CONFIG_G2M_DECODER 0 +#define CONFIG_GIF_DECODER 0 +#define CONFIG_H261_DECODER 0 +#define CONFIG_H263_DECODER 1 +#define 
CONFIG_H263I_DECODER 0 +#define CONFIG_H263P_DECODER 0 +#define CONFIG_H264_DECODER 1 +#define CONFIG_H264_CRYSTALHD_DECODER 0 +#define CONFIG_H264_MEDIACODEC_DECODER 0 +#define CONFIG_H264_MMAL_DECODER 0 +#define CONFIG_H264_QSV_DECODER 0 +#define CONFIG_H264_VDA_DECODER 0 +#define CONFIG_H264_VDPAU_DECODER 0 +#define CONFIG_HAP_DECODER 0 +#define CONFIG_HEVC_DECODER 0 +#define CONFIG_HEVC_QSV_DECODER 0 +#define CONFIG_HNM4_VIDEO_DECODER 0 +#define CONFIG_HQ_HQA_DECODER 0 +#define CONFIG_HQX_DECODER 0 +#define CONFIG_HUFFYUV_DECODER 0 +#define CONFIG_IDCIN_DECODER 0 +#define CONFIG_IFF_ILBM_DECODER 0 +#define CONFIG_INDEO2_DECODER 0 +#define CONFIG_INDEO3_DECODER 0 +#define CONFIG_INDEO4_DECODER 0 +#define CONFIG_INDEO5_DECODER 0 +#define CONFIG_INTERPLAY_VIDEO_DECODER 0 +#define CONFIG_JPEG2000_DECODER 0 +#define CONFIG_JPEGLS_DECODER 0 +#define CONFIG_JV_DECODER 0 +#define CONFIG_KGV1_DECODER 0 +#define CONFIG_KMVC_DECODER 0 +#define CONFIG_LAGARITH_DECODER 0 +#define CONFIG_LOCO_DECODER 0 +#define CONFIG_M101_DECODER 0 +#define CONFIG_MAGICYUV_DECODER 0 +#define CONFIG_MDEC_DECODER 0 +#define CONFIG_MIMIC_DECODER 0 +#define CONFIG_MJPEG_DECODER 0 +#define CONFIG_MJPEGB_DECODER 0 +#define CONFIG_MMVIDEO_DECODER 0 +#define CONFIG_MOTIONPIXELS_DECODER 0 +#define CONFIG_MPEG_XVMC_DECODER 0 +#define CONFIG_MPEG1VIDEO_DECODER 0 +#define CONFIG_MPEG2VIDEO_DECODER 0 +#define CONFIG_MPEG4_DECODER 1 +#define CONFIG_MPEG4_CRYSTALHD_DECODER 0 +#define CONFIG_MPEG4_MMAL_DECODER 0 +#define CONFIG_MPEG4_VDPAU_DECODER 0 +#define CONFIG_MPEGVIDEO_DECODER 0 +#define CONFIG_MPEG_VDPAU_DECODER 0 +#define CONFIG_MPEG1_VDPAU_DECODER 0 +#define CONFIG_MPEG2_MMAL_DECODER 0 +#define CONFIG_MPEG2_CRYSTALHD_DECODER 0 +#define CONFIG_MPEG2_QSV_DECODER 0 +#define CONFIG_MSA1_DECODER 0 +#define CONFIG_MSMPEG4V1_DECODER 0 +#define CONFIG_MSMPEG4V2_DECODER 0 +#define CONFIG_MSMPEG4V3_DECODER 0 +#define CONFIG_MSMPEG4_CRYSTALHD_DECODER 0 +#define CONFIG_MSRLE_DECODER 0 +#define 
CONFIG_MSS1_DECODER 0 +#define CONFIG_MSS2_DECODER 0 +#define CONFIG_MSVIDEO1_DECODER 0 +#define CONFIG_MSZH_DECODER 0 +#define CONFIG_MTS2_DECODER 0 +#define CONFIG_MVC1_DECODER 0 +#define CONFIG_MVC2_DECODER 0 +#define CONFIG_MXPEG_DECODER 0 +#define CONFIG_NUV_DECODER 0 +#define CONFIG_PAF_VIDEO_DECODER 0 +#define CONFIG_PAM_DECODER 0 +#define CONFIG_PBM_DECODER 0 +#define CONFIG_PCX_DECODER 0 +#define CONFIG_PGM_DECODER 0 +#define CONFIG_PGMYUV_DECODER 0 +#define CONFIG_PICTOR_DECODER 0 +#define CONFIG_PIXLET_DECODER 0 +#define CONFIG_PNG_DECODER 0 +#define CONFIG_PPM_DECODER 0 +#define CONFIG_PRORES_DECODER 0 +#define CONFIG_PRORES_LGPL_DECODER 0 +#define CONFIG_PSD_DECODER 0 +#define CONFIG_PTX_DECODER 0 +#define CONFIG_QDRAW_DECODER 0 +#define CONFIG_QPEG_DECODER 0 +#define CONFIG_QTRLE_DECODER 0 +#define CONFIG_R10K_DECODER 0 +#define CONFIG_R210_DECODER 0 +#define CONFIG_RAWVIDEO_DECODER 0 +#define CONFIG_RL2_DECODER 0 +#define CONFIG_ROQ_DECODER 0 +#define CONFIG_RPZA_DECODER 0 +#define CONFIG_RSCC_DECODER 0 +#define CONFIG_RV10_DECODER 0 +#define CONFIG_RV20_DECODER 0 +#define CONFIG_RV30_DECODER 0 +#define CONFIG_RV40_DECODER 0 +#define CONFIG_S302M_DECODER 0 +#define CONFIG_SANM_DECODER 0 +#define CONFIG_SCPR_DECODER 0 +#define CONFIG_SCREENPRESSO_DECODER 0 +#define CONFIG_SDX2_DPCM_DECODER 0 +#define CONFIG_SGI_DECODER 0 +#define CONFIG_SGIRLE_DECODER 0 +#define CONFIG_SHEERVIDEO_DECODER 0 +#define CONFIG_SMACKER_DECODER 0 +#define CONFIG_SMC_DECODER 0 +#define CONFIG_SMVJPEG_DECODER 0 +#define CONFIG_SNOW_DECODER 0 +#define CONFIG_SP5X_DECODER 0 +#define CONFIG_SPEEDHQ_DECODER 0 +#define CONFIG_SUNRAST_DECODER 0 +#define CONFIG_SVQ1_DECODER 0 +#define CONFIG_SVQ3_DECODER 0 +#define CONFIG_TARGA_DECODER 0 +#define CONFIG_TARGA_Y216_DECODER 0 +#define CONFIG_TDSC_DECODER 0 +#define CONFIG_THEORA_DECODER 0 +#define CONFIG_THP_DECODER 0 +#define CONFIG_TIERTEXSEQVIDEO_DECODER 0 +#define CONFIG_TIFF_DECODER 0 +#define CONFIG_TMV_DECODER 0 +#define 
CONFIG_TRUEMOTION1_DECODER 0 +#define CONFIG_TRUEMOTION2_DECODER 0 +#define CONFIG_TRUEMOTION2RT_DECODER 0 +#define CONFIG_TSCC_DECODER 0 +#define CONFIG_TSCC2_DECODER 0 +#define CONFIG_TXD_DECODER 0 +#define CONFIG_ULTI_DECODER 0 +#define CONFIG_UTVIDEO_DECODER 0 +#define CONFIG_V210_DECODER 0 +#define CONFIG_V210X_DECODER 0 +#define CONFIG_V308_DECODER 0 +#define CONFIG_V408_DECODER 0 +#define CONFIG_V410_DECODER 0 +#define CONFIG_VB_DECODER 0 +#define CONFIG_VBLE_DECODER 0 +#define CONFIG_VC1_DECODER 0 +#define CONFIG_VC1_CRYSTALHD_DECODER 0 +#define CONFIG_VC1_VDPAU_DECODER 0 +#define CONFIG_VC1IMAGE_DECODER 0 +#define CONFIG_VC1_MMAL_DECODER 0 +#define CONFIG_VC1_QSV_DECODER 0 +#define CONFIG_VCR1_DECODER 0 +#define CONFIG_VMDVIDEO_DECODER 0 +#define CONFIG_VMNC_DECODER 0 +#define CONFIG_VP3_DECODER 0 +#define CONFIG_VP5_DECODER 0 +#define CONFIG_VP6_DECODER 1 +#define CONFIG_VP6A_DECODER 0 +#define CONFIG_VP6F_DECODER 1 +#define CONFIG_VP7_DECODER 0 +#define CONFIG_VP8_DECODER 0 +#define CONFIG_VP9_DECODER 0 +#define CONFIG_VQA_DECODER 0 +#define CONFIG_WEBP_DECODER 0 +#define CONFIG_WMV1_DECODER 0 +#define CONFIG_WMV2_DECODER 0 +#define CONFIG_WMV3_DECODER 0 +#define CONFIG_WMV3_CRYSTALHD_DECODER 0 +#define CONFIG_WMV3_VDPAU_DECODER 0 +#define CONFIG_WMV3IMAGE_DECODER 0 +#define CONFIG_WNV1_DECODER 0 +#define CONFIG_XAN_WC3_DECODER 0 +#define CONFIG_XAN_WC4_DECODER 0 +#define CONFIG_XBM_DECODER 0 +#define CONFIG_XFACE_DECODER 0 +#define CONFIG_XL_DECODER 0 +#define CONFIG_XPM_DECODER 0 +#define CONFIG_XWD_DECODER 0 +#define CONFIG_Y41P_DECODER 0 +#define CONFIG_YLC_DECODER 0 +#define CONFIG_YOP_DECODER 0 +#define CONFIG_YUV4_DECODER 0 +#define CONFIG_ZERO12V_DECODER 0 +#define CONFIG_ZEROCODEC_DECODER 0 +#define CONFIG_ZLIB_DECODER 0 +#define CONFIG_ZMBV_DECODER 0 +#define CONFIG_AAC_DECODER 1 +#define CONFIG_AAC_FIXED_DECODER 0 +#define CONFIG_AAC_LATM_DECODER 1 +#define CONFIG_AC3_DECODER 0 +#define CONFIG_AC3_FIXED_DECODER 0 +#define CONFIG_ALAC_DECODER 0 
+#define CONFIG_ALS_DECODER 0 +#define CONFIG_AMRNB_DECODER 0 +#define CONFIG_AMRWB_DECODER 0 +#define CONFIG_APE_DECODER 0 +#define CONFIG_ATRAC1_DECODER 0 +#define CONFIG_ATRAC3_DECODER 0 +#define CONFIG_ATRAC3AL_DECODER 0 +#define CONFIG_ATRAC3P_DECODER 0 +#define CONFIG_ATRAC3PAL_DECODER 0 +#define CONFIG_BINKAUDIO_DCT_DECODER 0 +#define CONFIG_BINKAUDIO_RDFT_DECODER 0 +#define CONFIG_BMV_AUDIO_DECODER 0 +#define CONFIG_COOK_DECODER 0 +#define CONFIG_DCA_DECODER 0 +#define CONFIG_DSD_LSBF_DECODER 0 +#define CONFIG_DSD_MSBF_DECODER 0 +#define CONFIG_DSD_LSBF_PLANAR_DECODER 0 +#define CONFIG_DSD_MSBF_PLANAR_DECODER 0 +#define CONFIG_DSICINAUDIO_DECODER 0 +#define CONFIG_DSS_SP_DECODER 0 +#define CONFIG_DST_DECODER 0 +#define CONFIG_EAC3_DECODER 0 +#define CONFIG_EVRC_DECODER 0 +#define CONFIG_FFWAVESYNTH_DECODER 0 +#define CONFIG_FLAC_DECODER 1 +#define CONFIG_G723_1_DECODER 0 +#define CONFIG_G729_DECODER 0 +#define CONFIG_GSM_DECODER 0 +#define CONFIG_GSM_MS_DECODER 0 +#define CONFIG_IAC_DECODER 0 +#define CONFIG_IMC_DECODER 0 +#define CONFIG_INTERPLAY_ACM_DECODER 0 +#define CONFIG_MACE3_DECODER 0 +#define CONFIG_MACE6_DECODER 0 +#define CONFIG_METASOUND_DECODER 0 +#define CONFIG_MLP_DECODER 0 +#define CONFIG_MP1_DECODER 0 +#define CONFIG_MP1FLOAT_DECODER 0 +#define CONFIG_MP2_DECODER 0 +#define CONFIG_MP2FLOAT_DECODER 0 +#define CONFIG_MP3_DECODER 1 +#define CONFIG_MP3FLOAT_DECODER 1 +#define CONFIG_MP3ADU_DECODER 1 +#define CONFIG_MP3ADUFLOAT_DECODER 1 +#define CONFIG_MP3ON4_DECODER 1 +#define CONFIG_MP3ON4FLOAT_DECODER 1 +#define CONFIG_MPC7_DECODER 0 +#define CONFIG_MPC8_DECODER 0 +#define CONFIG_NELLYMOSER_DECODER 0 +#define CONFIG_ON2AVC_DECODER 0 +#define CONFIG_OPUS_DECODER 0 +#define CONFIG_PAF_AUDIO_DECODER 0 +#define CONFIG_QCELP_DECODER 0 +#define CONFIG_QDM2_DECODER 0 +#define CONFIG_QDMC_DECODER 0 +#define CONFIG_RA_144_DECODER 0 +#define CONFIG_RA_288_DECODER 0 +#define CONFIG_RALF_DECODER 0 +#define CONFIG_SHORTEN_DECODER 0 +#define 
CONFIG_SIPR_DECODER 0 +#define CONFIG_SMACKAUD_DECODER 0 +#define CONFIG_SONIC_DECODER 0 +#define CONFIG_TAK_DECODER 0 +#define CONFIG_TRUEHD_DECODER 0 +#define CONFIG_TRUESPEECH_DECODER 0 +#define CONFIG_TTA_DECODER 0 +#define CONFIG_TWINVQ_DECODER 0 +#define CONFIG_VMDAUDIO_DECODER 0 +#define CONFIG_VORBIS_DECODER 0 +#define CONFIG_WAVPACK_DECODER 1 +#define CONFIG_WMALOSSLESS_DECODER 0 +#define CONFIG_WMAPRO_DECODER 0 +#define CONFIG_WMAV1_DECODER 0 +#define CONFIG_WMAV2_DECODER 0 +#define CONFIG_WMAVOICE_DECODER 0 +#define CONFIG_WS_SND1_DECODER 0 +#define CONFIG_XMA1_DECODER 0 +#define CONFIG_XMA2_DECODER 0 +#define CONFIG_PCM_ALAW_DECODER 0 +#define CONFIG_PCM_BLURAY_DECODER 0 +#define CONFIG_PCM_DVD_DECODER 0 +#define CONFIG_PCM_F16LE_DECODER 0 +#define CONFIG_PCM_F24LE_DECODER 0 +#define CONFIG_PCM_F32BE_DECODER 0 +#define CONFIG_PCM_F32LE_DECODER 0 +#define CONFIG_PCM_F64BE_DECODER 0 +#define CONFIG_PCM_F64LE_DECODER 0 +#define CONFIG_PCM_LXF_DECODER 0 +#define CONFIG_PCM_MULAW_DECODER 0 +#define CONFIG_PCM_S8_DECODER 0 +#define CONFIG_PCM_S8_PLANAR_DECODER 0 +#define CONFIG_PCM_S16BE_DECODER 0 +#define CONFIG_PCM_S16BE_PLANAR_DECODER 0 +#define CONFIG_PCM_S16LE_DECODER 1 +#define CONFIG_PCM_S16LE_PLANAR_DECODER 0 +#define CONFIG_PCM_S24BE_DECODER 0 +#define CONFIG_PCM_S24DAUD_DECODER 0 +#define CONFIG_PCM_S24LE_DECODER 0 +#define CONFIG_PCM_S24LE_PLANAR_DECODER 0 +#define CONFIG_PCM_S32BE_DECODER 0 +#define CONFIG_PCM_S32LE_DECODER 0 +#define CONFIG_PCM_S32LE_PLANAR_DECODER 0 +#define CONFIG_PCM_S64BE_DECODER 0 +#define CONFIG_PCM_S64LE_DECODER 0 +#define CONFIG_PCM_U8_DECODER 0 +#define CONFIG_PCM_U16BE_DECODER 0 +#define CONFIG_PCM_U16LE_DECODER 0 +#define CONFIG_PCM_U24BE_DECODER 0 +#define CONFIG_PCM_U24LE_DECODER 0 +#define CONFIG_PCM_U32BE_DECODER 0 +#define CONFIG_PCM_U32LE_DECODER 0 +#define CONFIG_PCM_ZORK_DECODER 0 +#define CONFIG_INTERPLAY_DPCM_DECODER 0 +#define CONFIG_ROQ_DPCM_DECODER 0 +#define CONFIG_SOL_DPCM_DECODER 0 +#define 
CONFIG_XAN_DPCM_DECODER 0 +#define CONFIG_ADPCM_4XM_DECODER 0 +#define CONFIG_ADPCM_ADX_DECODER 0 +#define CONFIG_ADPCM_AFC_DECODER 0 +#define CONFIG_ADPCM_AICA_DECODER 0 +#define CONFIG_ADPCM_CT_DECODER 0 +#define CONFIG_ADPCM_DTK_DECODER 0 +#define CONFIG_ADPCM_EA_DECODER 0 +#define CONFIG_ADPCM_EA_MAXIS_XA_DECODER 0 +#define CONFIG_ADPCM_EA_R1_DECODER 0 +#define CONFIG_ADPCM_EA_R2_DECODER 0 +#define CONFIG_ADPCM_EA_R3_DECODER 0 +#define CONFIG_ADPCM_EA_XAS_DECODER 0 +#define CONFIG_ADPCM_G722_DECODER 0 +#define CONFIG_ADPCM_G726_DECODER 0 +#define CONFIG_ADPCM_G726LE_DECODER 0 +#define CONFIG_ADPCM_IMA_AMV_DECODER 0 +#define CONFIG_ADPCM_IMA_APC_DECODER 0 +#define CONFIG_ADPCM_IMA_DAT4_DECODER 0 +#define CONFIG_ADPCM_IMA_DK3_DECODER 0 +#define CONFIG_ADPCM_IMA_DK4_DECODER 0 +#define CONFIG_ADPCM_IMA_EA_EACS_DECODER 0 +#define CONFIG_ADPCM_IMA_EA_SEAD_DECODER 0 +#define CONFIG_ADPCM_IMA_ISS_DECODER 0 +#define CONFIG_ADPCM_IMA_OKI_DECODER 0 +#define CONFIG_ADPCM_IMA_QT_DECODER 0 +#define CONFIG_ADPCM_IMA_RAD_DECODER 0 +#define CONFIG_ADPCM_IMA_SMJPEG_DECODER 0 +#define CONFIG_ADPCM_IMA_WAV_DECODER 0 +#define CONFIG_ADPCM_IMA_WS_DECODER 0 +#define CONFIG_ADPCM_MS_DECODER 0 +#define CONFIG_ADPCM_MTAF_DECODER 0 +#define CONFIG_ADPCM_PSX_DECODER 0 +#define CONFIG_ADPCM_SBPRO_2_DECODER 0 +#define CONFIG_ADPCM_SBPRO_3_DECODER 0 +#define CONFIG_ADPCM_SBPRO_4_DECODER 0 +#define CONFIG_ADPCM_SWF_DECODER 0 +#define CONFIG_ADPCM_THP_DECODER 0 +#define CONFIG_ADPCM_THP_LE_DECODER 0 +#define CONFIG_ADPCM_VIMA_DECODER 0 +#define CONFIG_ADPCM_XA_DECODER 0 +#define CONFIG_ADPCM_YAMAHA_DECODER 0 +#define CONFIG_SSA_DECODER 0 +#define CONFIG_ASS_DECODER 0 +#define CONFIG_CCAPTION_DECODER 0 +#define CONFIG_DVBSUB_DECODER 0 +#define CONFIG_DVDSUB_DECODER 0 +#define CONFIG_JACOSUB_DECODER 0 +#define CONFIG_MICRODVD_DECODER 0 +#define CONFIG_MOVTEXT_DECODER 0 +#define CONFIG_MPL2_DECODER 0 +#define CONFIG_PGSSUB_DECODER 0 +#define CONFIG_PJS_DECODER 0 +#define CONFIG_REALTEXT_DECODER 0 
+#define CONFIG_SAMI_DECODER 0 +#define CONFIG_SRT_DECODER 0 +#define CONFIG_STL_DECODER 0 +#define CONFIG_SUBRIP_DECODER 0 +#define CONFIG_SUBVIEWER_DECODER 0 +#define CONFIG_SUBVIEWER1_DECODER 0 +#define CONFIG_TEXT_DECODER 0 +#define CONFIG_VPLAYER_DECODER 0 +#define CONFIG_WEBVTT_DECODER 0 +#define CONFIG_XSUB_DECODER 0 +#define CONFIG_AAC_AT_DECODER 0 +#define CONFIG_AC3_AT_DECODER 0 +#define CONFIG_ADPCM_IMA_QT_AT_DECODER 0 +#define CONFIG_ALAC_AT_DECODER 0 +#define CONFIG_AMR_NB_AT_DECODER 0 +#define CONFIG_EAC3_AT_DECODER 0 +#define CONFIG_GSM_MS_AT_DECODER 0 +#define CONFIG_ILBC_AT_DECODER 0 +#define CONFIG_MP1_AT_DECODER 0 +#define CONFIG_MP2_AT_DECODER 0 +#define CONFIG_MP3_AT_DECODER 0 +#define CONFIG_PCM_ALAW_AT_DECODER 0 +#define CONFIG_PCM_MULAW_AT_DECODER 0 +#define CONFIG_QDMC_AT_DECODER 0 +#define CONFIG_QDM2_AT_DECODER 0 +#define CONFIG_LIBCELT_DECODER 0 +#define CONFIG_LIBFDK_AAC_DECODER 0 +#define CONFIG_LIBGSM_DECODER 0 +#define CONFIG_LIBGSM_MS_DECODER 0 +#define CONFIG_LIBILBC_DECODER 0 +#define CONFIG_LIBOPENCORE_AMRNB_DECODER 0 +#define CONFIG_LIBOPENCORE_AMRWB_DECODER 0 +#define CONFIG_LIBOPENJPEG_DECODER 0 +#define CONFIG_LIBOPUS_DECODER 0 +#define CONFIG_LIBSCHROEDINGER_DECODER 0 +#define CONFIG_LIBSPEEX_DECODER 0 +#define CONFIG_LIBVORBIS_DECODER 0 +#define CONFIG_LIBVPX_VP8_DECODER 0 +#define CONFIG_LIBVPX_VP9_DECODER 0 +#define CONFIG_LIBZVBI_TELETEXT_DECODER 0 +#define CONFIG_BINTEXT_DECODER 0 +#define CONFIG_XBIN_DECODER 0 +#define CONFIG_IDF_DECODER 0 +#define CONFIG_LIBOPENH264_DECODER 0 +#define CONFIG_H264_CUVID_DECODER 0 +#define CONFIG_HEVC_CUVID_DECODER 0 +#define CONFIG_HEVC_MEDIACODEC_DECODER 0 +#define CONFIG_MJPEG_CUVID_DECODER 0 +#define CONFIG_MPEG1_CUVID_DECODER 0 +#define CONFIG_MPEG2_CUVID_DECODER 0 +#define CONFIG_MPEG4_CUVID_DECODER 0 +#define CONFIG_MPEG4_MEDIACODEC_DECODER 0 +#define CONFIG_VC1_CUVID_DECODER 0 +#define CONFIG_VP8_CUVID_DECODER 0 +#define CONFIG_VP8_MEDIACODEC_DECODER 0 +#define 
CONFIG_VP8_QSV_DECODER 0 +#define CONFIG_VP9_CUVID_DECODER 0 +#define CONFIG_VP9_MEDIACODEC_DECODER 0 +#define CONFIG_AA_DEMUXER 0 +#define CONFIG_AAC_DEMUXER 1 +#define CONFIG_AC3_DEMUXER 0 +#define CONFIG_ACM_DEMUXER 0 +#define CONFIG_ACT_DEMUXER 0 +#define CONFIG_ADF_DEMUXER 0 +#define CONFIG_ADP_DEMUXER 0 +#define CONFIG_ADS_DEMUXER 0 +#define CONFIG_ADX_DEMUXER 0 +#define CONFIG_AEA_DEMUXER 0 +#define CONFIG_AFC_DEMUXER 0 +#define CONFIG_AIFF_DEMUXER 0 +#define CONFIG_AIX_DEMUXER 0 +#define CONFIG_AMR_DEMUXER 0 +#define CONFIG_ANM_DEMUXER 0 +#define CONFIG_APC_DEMUXER 0 +#define CONFIG_APE_DEMUXER 0 +#define CONFIG_APNG_DEMUXER 0 +#define CONFIG_AQTITLE_DEMUXER 0 +#define CONFIG_ASF_DEMUXER 0 +#define CONFIG_ASF_O_DEMUXER 0 +#define CONFIG_ASS_DEMUXER 0 +#define CONFIG_AST_DEMUXER 0 +#define CONFIG_AU_DEMUXER 0 +#define CONFIG_AVI_DEMUXER 0 +#define CONFIG_AVISYNTH_DEMUXER 0 +#define CONFIG_AVR_DEMUXER 0 +#define CONFIG_AVS_DEMUXER 0 +#define CONFIG_BETHSOFTVID_DEMUXER 0 +#define CONFIG_BFI_DEMUXER 0 +#define CONFIG_BINTEXT_DEMUXER 0 +#define CONFIG_BINK_DEMUXER 0 +#define CONFIG_BIT_DEMUXER 0 +#define CONFIG_BMV_DEMUXER 0 +#define CONFIG_BFSTM_DEMUXER 0 +#define CONFIG_BRSTM_DEMUXER 0 +#define CONFIG_BOA_DEMUXER 0 +#define CONFIG_C93_DEMUXER 0 +#define CONFIG_CAF_DEMUXER 0 +#define CONFIG_CAVSVIDEO_DEMUXER 0 +#define CONFIG_CDG_DEMUXER 0 +#define CONFIG_CDXL_DEMUXER 0 +#define CONFIG_CINE_DEMUXER 0 +#define CONFIG_CONCAT_DEMUXER 1 +#define CONFIG_DATA_DEMUXER 1 +#define CONFIG_DAUD_DEMUXER 0 +#define CONFIG_DCSTR_DEMUXER 0 +#define CONFIG_DFA_DEMUXER 0 +#define CONFIG_DIRAC_DEMUXER 0 +#define CONFIG_DNXHD_DEMUXER 0 +#define CONFIG_DSF_DEMUXER 0 +#define CONFIG_DSICIN_DEMUXER 0 +#define CONFIG_DSS_DEMUXER 0 +#define CONFIG_DTS_DEMUXER 0 +#define CONFIG_DTSHD_DEMUXER 0 +#define CONFIG_DV_DEMUXER 0 +#define CONFIG_DVBSUB_DEMUXER 0 +#define CONFIG_DVBTXT_DEMUXER 0 +#define CONFIG_DXA_DEMUXER 0 +#define CONFIG_EA_DEMUXER 0 +#define CONFIG_EA_CDATA_DEMUXER 0 
+#define CONFIG_EAC3_DEMUXER 0 +#define CONFIG_EPAF_DEMUXER 0 +#define CONFIG_FFM_DEMUXER 0 +#define CONFIG_FFMETADATA_DEMUXER 0 +#define CONFIG_FILMSTRIP_DEMUXER 0 +#define CONFIG_FLAC_DEMUXER 1 +#define CONFIG_FLIC_DEMUXER 0 +#define CONFIG_FLV_DEMUXER 1 +#define CONFIG_LIVE_FLV_DEMUXER 1 +#define CONFIG_FOURXM_DEMUXER 0 +#define CONFIG_FRM_DEMUXER 0 +#define CONFIG_FSB_DEMUXER 0 +#define CONFIG_G722_DEMUXER 0 +#define CONFIG_G723_1_DEMUXER 0 +#define CONFIG_G729_DEMUXER 0 +#define CONFIG_GENH_DEMUXER 0 +#define CONFIG_GIF_DEMUXER 0 +#define CONFIG_GSM_DEMUXER 0 +#define CONFIG_GXF_DEMUXER 0 +#define CONFIG_H261_DEMUXER 0 +#define CONFIG_H263_DEMUXER 0 +#define CONFIG_H264_DEMUXER 0 +#define CONFIG_HEVC_DEMUXER 1 +#define CONFIG_HLS_DEMUXER 1 +#define CONFIG_HNM_DEMUXER 0 +#define CONFIG_ICO_DEMUXER 0 +#define CONFIG_IDCIN_DEMUXER 0 +#define CONFIG_IDF_DEMUXER 0 +#define CONFIG_IFF_DEMUXER 0 +#define CONFIG_ILBC_DEMUXER 0 +#define CONFIG_IMAGE2_DEMUXER 0 +#define CONFIG_IMAGE2PIPE_DEMUXER 0 +#define CONFIG_IMAGE2_ALIAS_PIX_DEMUXER 0 +#define CONFIG_IMAGE2_BRENDER_PIX_DEMUXER 0 +#define CONFIG_INGENIENT_DEMUXER 0 +#define CONFIG_IPMOVIE_DEMUXER 0 +#define CONFIG_IRCAM_DEMUXER 0 +#define CONFIG_ISS_DEMUXER 0 +#define CONFIG_IV8_DEMUXER 0 +#define CONFIG_IVF_DEMUXER 0 +#define CONFIG_IVR_DEMUXER 0 +#define CONFIG_JACOSUB_DEMUXER 0 +#define CONFIG_JV_DEMUXER 0 +#define CONFIG_LMLM4_DEMUXER 0 +#define CONFIG_LOAS_DEMUXER 0 +#define CONFIG_LRC_DEMUXER 0 +#define CONFIG_LVF_DEMUXER 0 +#define CONFIG_LXF_DEMUXER 0 +#define CONFIG_M4V_DEMUXER 0 +#define CONFIG_MATROSKA_DEMUXER 0 +#define CONFIG_MGSTS_DEMUXER 0 +#define CONFIG_MICRODVD_DEMUXER 0 +#define CONFIG_MJPEG_DEMUXER 0 +#define CONFIG_MJPEG_2000_DEMUXER 0 +#define CONFIG_MLP_DEMUXER 0 +#define CONFIG_MLV_DEMUXER 0 +#define CONFIG_MM_DEMUXER 0 +#define CONFIG_MMF_DEMUXER 0 +#define CONFIG_MOV_DEMUXER 1 +#define CONFIG_MP3_DEMUXER 1 +#define CONFIG_MPC_DEMUXER 0 +#define CONFIG_MPC8_DEMUXER 0 +#define 
CONFIG_MPEGPS_DEMUXER 1 +#define CONFIG_MPEGTS_DEMUXER 1 +#define CONFIG_MPEGTSRAW_DEMUXER 0 +#define CONFIG_MPEGVIDEO_DEMUXER 1 +#define CONFIG_MPJPEG_DEMUXER 0 +#define CONFIG_MPL2_DEMUXER 0 +#define CONFIG_MPSUB_DEMUXER 0 +#define CONFIG_MSF_DEMUXER 0 +#define CONFIG_MSNWC_TCP_DEMUXER 0 +#define CONFIG_MTAF_DEMUXER 0 +#define CONFIG_MTV_DEMUXER 0 +#define CONFIG_MUSX_DEMUXER 0 +#define CONFIG_MV_DEMUXER 0 +#define CONFIG_MVI_DEMUXER 0 +#define CONFIG_MXF_DEMUXER 0 +#define CONFIG_MXG_DEMUXER 0 +#define CONFIG_NC_DEMUXER 0 +#define CONFIG_NISTSPHERE_DEMUXER 0 +#define CONFIG_NSV_DEMUXER 0 +#define CONFIG_NUT_DEMUXER 0 +#define CONFIG_NUV_DEMUXER 0 +#define CONFIG_OGG_DEMUXER 0 +#define CONFIG_OMA_DEMUXER 0 +#define CONFIG_PAF_DEMUXER 0 +#define CONFIG_PCM_ALAW_DEMUXER 0 +#define CONFIG_PCM_MULAW_DEMUXER 0 +#define CONFIG_PCM_F64BE_DEMUXER 0 +#define CONFIG_PCM_F64LE_DEMUXER 0 +#define CONFIG_PCM_F32BE_DEMUXER 0 +#define CONFIG_PCM_F32LE_DEMUXER 0 +#define CONFIG_PCM_S32BE_DEMUXER 0 +#define CONFIG_PCM_S32LE_DEMUXER 0 +#define CONFIG_PCM_S24BE_DEMUXER 0 +#define CONFIG_PCM_S24LE_DEMUXER 0 +#define CONFIG_PCM_S16BE_DEMUXER 0 +#define CONFIG_PCM_S16LE_DEMUXER 0 +#define CONFIG_PCM_S8_DEMUXER 0 +#define CONFIG_PCM_U32BE_DEMUXER 0 +#define CONFIG_PCM_U32LE_DEMUXER 0 +#define CONFIG_PCM_U24BE_DEMUXER 0 +#define CONFIG_PCM_U24LE_DEMUXER 0 +#define CONFIG_PCM_U16BE_DEMUXER 0 +#define CONFIG_PCM_U16LE_DEMUXER 0 +#define CONFIG_PCM_U8_DEMUXER 0 +#define CONFIG_PJS_DEMUXER 0 +#define CONFIG_PMP_DEMUXER 0 +#define CONFIG_PVA_DEMUXER 0 +#define CONFIG_PVF_DEMUXER 0 +#define CONFIG_QCP_DEMUXER 0 +#define CONFIG_R3D_DEMUXER 0 +#define CONFIG_RAWVIDEO_DEMUXER 0 +#define CONFIG_REALTEXT_DEMUXER 0 +#define CONFIG_REDSPARK_DEMUXER 0 +#define CONFIG_RL2_DEMUXER 0 +#define CONFIG_RM_DEMUXER 0 +#define CONFIG_ROQ_DEMUXER 0 +#define CONFIG_RPL_DEMUXER 0 +#define CONFIG_RSD_DEMUXER 0 +#define CONFIG_RSO_DEMUXER 0 +#define CONFIG_RTP_DEMUXER 0 +#define CONFIG_RTSP_DEMUXER 0 +#define 
CONFIG_SAMI_DEMUXER 0 +#define CONFIG_SAP_DEMUXER 0 +#define CONFIG_SBG_DEMUXER 0 +#define CONFIG_SCC_DEMUXER 0 +#define CONFIG_SDP_DEMUXER 0 +#define CONFIG_SDR2_DEMUXER 0 +#define CONFIG_SDS_DEMUXER 0 +#define CONFIG_SDX_DEMUXER 0 +#define CONFIG_SEGAFILM_DEMUXER 0 +#define CONFIG_SHORTEN_DEMUXER 0 +#define CONFIG_SIFF_DEMUXER 0 +#define CONFIG_SLN_DEMUXER 0 +#define CONFIG_SMACKER_DEMUXER 0 +#define CONFIG_SMJPEG_DEMUXER 0 +#define CONFIG_SMUSH_DEMUXER 0 +#define CONFIG_SOL_DEMUXER 0 +#define CONFIG_SOX_DEMUXER 0 +#define CONFIG_SPDIF_DEMUXER 0 +#define CONFIG_SRT_DEMUXER 0 +#define CONFIG_STR_DEMUXER 0 +#define CONFIG_STL_DEMUXER 0 +#define CONFIG_SUBVIEWER1_DEMUXER 0 +#define CONFIG_SUBVIEWER_DEMUXER 0 +#define CONFIG_SUP_DEMUXER 0 +#define CONFIG_SVAG_DEMUXER 0 +#define CONFIG_SWF_DEMUXER 0 +#define CONFIG_TAK_DEMUXER 0 +#define CONFIG_TEDCAPTIONS_DEMUXER 0 +#define CONFIG_THP_DEMUXER 0 +#define CONFIG_THREEDOSTR_DEMUXER 0 +#define CONFIG_TIERTEXSEQ_DEMUXER 0 +#define CONFIG_TMV_DEMUXER 0 +#define CONFIG_TRUEHD_DEMUXER 0 +#define CONFIG_TTA_DEMUXER 0 +#define CONFIG_TXD_DEMUXER 0 +#define CONFIG_TTY_DEMUXER 0 +#define CONFIG_V210_DEMUXER 0 +#define CONFIG_V210X_DEMUXER 0 +#define CONFIG_VAG_DEMUXER 0 +#define CONFIG_VC1_DEMUXER 0 +#define CONFIG_VC1T_DEMUXER 0 +#define CONFIG_VIVO_DEMUXER 0 +#define CONFIG_VMD_DEMUXER 0 +#define CONFIG_VOBSUB_DEMUXER 0 +#define CONFIG_VOC_DEMUXER 0 +#define CONFIG_VPK_DEMUXER 0 +#define CONFIG_VPLAYER_DEMUXER 0 +#define CONFIG_VQF_DEMUXER 0 +#define CONFIG_W64_DEMUXER 0 +#define CONFIG_WAV_DEMUXER 1 +#define CONFIG_WC3_DEMUXER 0 +#define CONFIG_WEBM_DASH_MANIFEST_DEMUXER 0 +#define CONFIG_WEBVTT_DEMUXER 0 +#define CONFIG_WSAUD_DEMUXER 0 +#define CONFIG_WSD_DEMUXER 0 +#define CONFIG_WSVQA_DEMUXER 0 +#define CONFIG_WTV_DEMUXER 0 +#define CONFIG_WVE_DEMUXER 0 +#define CONFIG_WV_DEMUXER 0 +#define CONFIG_XA_DEMUXER 0 +#define CONFIG_XBIN_DEMUXER 0 +#define CONFIG_XMV_DEMUXER 0 +#define CONFIG_XVAG_DEMUXER 0 +#define 
CONFIG_XWMA_DEMUXER 0 +#define CONFIG_YOP_DEMUXER 0 +#define CONFIG_YUV4MPEGPIPE_DEMUXER 0 +#define CONFIG_IMAGE_BMP_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_DDS_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_DPX_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_EXR_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_J2K_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_JPEG_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_JPEGLS_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PAM_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PBM_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PCX_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PGMYUV_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PGM_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PICTOR_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PNG_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PPM_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PSD_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_QDRAW_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_SGI_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_SUNRAST_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_TIFF_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_WEBP_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_XPM_PIPE_DEMUXER 0 +#define CONFIG_LIBGME_DEMUXER 0 +#define CONFIG_LIBMODPLUG_DEMUXER 0 +#define CONFIG_LIBNUT_DEMUXER 0 +#define CONFIG_LIBOPENMPT_DEMUXER 0 +#define CONFIG_A64MULTI_ENCODER 0 +#define CONFIG_A64MULTI5_ENCODER 0 +#define CONFIG_ALIAS_PIX_ENCODER 0 +#define CONFIG_AMV_ENCODER 0 +#define CONFIG_APNG_ENCODER 0 +#define CONFIG_ASV1_ENCODER 0 +#define CONFIG_ASV2_ENCODER 0 +#define CONFIG_AVRP_ENCODER 0 +#define CONFIG_AVUI_ENCODER 0 +#define CONFIG_AYUV_ENCODER 0 +#define CONFIG_BMP_ENCODER 0 +#define CONFIG_CINEPAK_ENCODER 0 +#define CONFIG_CLJR_ENCODER 0 +#define CONFIG_COMFORTNOISE_ENCODER 0 +#define CONFIG_DNXHD_ENCODER 0 +#define CONFIG_DPX_ENCODER 0 +#define CONFIG_DVVIDEO_ENCODER 0 +#define CONFIG_FFV1_ENCODER 0 +#define CONFIG_FFVHUFF_ENCODER 0 +#define CONFIG_FLASHSV_ENCODER 0 +#define CONFIG_FLASHSV2_ENCODER 0 +#define CONFIG_FLV_ENCODER 0 +#define CONFIG_GIF_ENCODER 0 +#define CONFIG_H261_ENCODER 0 +#define CONFIG_H263_ENCODER 0 +#define CONFIG_H263P_ENCODER 0 
+#define CONFIG_HAP_ENCODER 0 +#define CONFIG_HUFFYUV_ENCODER 0 +#define CONFIG_JPEG2000_ENCODER 0 +#define CONFIG_JPEGLS_ENCODER 0 +#define CONFIG_LJPEG_ENCODER 0 +#define CONFIG_MJPEG_ENCODER 0 +#define CONFIG_MPEG1VIDEO_ENCODER 0 +#define CONFIG_MPEG2VIDEO_ENCODER 0 +#define CONFIG_MPEG4_ENCODER 0 +#define CONFIG_MSMPEG4V2_ENCODER 0 +#define CONFIG_MSMPEG4V3_ENCODER 0 +#define CONFIG_MSVIDEO1_ENCODER 0 +#define CONFIG_PAM_ENCODER 0 +#define CONFIG_PBM_ENCODER 0 +#define CONFIG_PCX_ENCODER 0 +#define CONFIG_PGM_ENCODER 0 +#define CONFIG_PGMYUV_ENCODER 0 +#define CONFIG_PNG_ENCODER 1 +#define CONFIG_PPM_ENCODER 0 +#define CONFIG_PRORES_ENCODER 0 +#define CONFIG_PRORES_AW_ENCODER 0 +#define CONFIG_PRORES_KS_ENCODER 0 +#define CONFIG_QTRLE_ENCODER 0 +#define CONFIG_R10K_ENCODER 0 +#define CONFIG_R210_ENCODER 0 +#define CONFIG_RAWVIDEO_ENCODER 0 +#define CONFIG_ROQ_ENCODER 0 +#define CONFIG_RV10_ENCODER 0 +#define CONFIG_RV20_ENCODER 0 +#define CONFIG_S302M_ENCODER 0 +#define CONFIG_SGI_ENCODER 0 +#define CONFIG_SNOW_ENCODER 0 +#define CONFIG_SUNRAST_ENCODER 0 +#define CONFIG_SVQ1_ENCODER 0 +#define CONFIG_TARGA_ENCODER 0 +#define CONFIG_TIFF_ENCODER 0 +#define CONFIG_UTVIDEO_ENCODER 0 +#define CONFIG_V210_ENCODER 0 +#define CONFIG_V308_ENCODER 0 +#define CONFIG_V408_ENCODER 0 +#define CONFIG_V410_ENCODER 0 +#define CONFIG_VC2_ENCODER 0 +#define CONFIG_WRAPPED_AVFRAME_ENCODER 0 +#define CONFIG_WMV1_ENCODER 0 +#define CONFIG_WMV2_ENCODER 0 +#define CONFIG_XBM_ENCODER 0 +#define CONFIG_XFACE_ENCODER 0 +#define CONFIG_XWD_ENCODER 0 +#define CONFIG_Y41P_ENCODER 0 +#define CONFIG_YUV4_ENCODER 0 +#define CONFIG_ZLIB_ENCODER 0 +#define CONFIG_ZMBV_ENCODER 0 +#define CONFIG_AAC_ENCODER 0 +#define CONFIG_AC3_ENCODER 0 +#define CONFIG_AC3_FIXED_ENCODER 0 +#define CONFIG_ALAC_ENCODER 0 +#define CONFIG_DCA_ENCODER 0 +#define CONFIG_EAC3_ENCODER 0 +#define CONFIG_FLAC_ENCODER 0 +#define CONFIG_G723_1_ENCODER 0 +#define CONFIG_MLP_ENCODER 0 +#define CONFIG_MP2_ENCODER 0 +#define 
CONFIG_MP2FIXED_ENCODER 0 +#define CONFIG_NELLYMOSER_ENCODER 0 +#define CONFIG_OPUS_ENCODER 0 +#define CONFIG_RA_144_ENCODER 0 +#define CONFIG_SONIC_ENCODER 0 +#define CONFIG_SONIC_LS_ENCODER 0 +#define CONFIG_TRUEHD_ENCODER 0 +#define CONFIG_TTA_ENCODER 0 +#define CONFIG_VORBIS_ENCODER 0 +#define CONFIG_WAVPACK_ENCODER 0 +#define CONFIG_WMAV1_ENCODER 0 +#define CONFIG_WMAV2_ENCODER 0 +#define CONFIG_PCM_ALAW_ENCODER 0 +#define CONFIG_PCM_F32BE_ENCODER 0 +#define CONFIG_PCM_F32LE_ENCODER 0 +#define CONFIG_PCM_F64BE_ENCODER 0 +#define CONFIG_PCM_F64LE_ENCODER 0 +#define CONFIG_PCM_MULAW_ENCODER 0 +#define CONFIG_PCM_S8_ENCODER 0 +#define CONFIG_PCM_S8_PLANAR_ENCODER 0 +#define CONFIG_PCM_S16BE_ENCODER 0 +#define CONFIG_PCM_S16BE_PLANAR_ENCODER 0 +#define CONFIG_PCM_S16LE_ENCODER 0 +#define CONFIG_PCM_S16LE_PLANAR_ENCODER 0 +#define CONFIG_PCM_S24BE_ENCODER 0 +#define CONFIG_PCM_S24DAUD_ENCODER 0 +#define CONFIG_PCM_S24LE_ENCODER 0 +#define CONFIG_PCM_S24LE_PLANAR_ENCODER 0 +#define CONFIG_PCM_S32BE_ENCODER 0 +#define CONFIG_PCM_S32LE_ENCODER 0 +#define CONFIG_PCM_S32LE_PLANAR_ENCODER 0 +#define CONFIG_PCM_S64BE_ENCODER 0 +#define CONFIG_PCM_S64LE_ENCODER 0 +#define CONFIG_PCM_U8_ENCODER 0 +#define CONFIG_PCM_U16BE_ENCODER 0 +#define CONFIG_PCM_U16LE_ENCODER 0 +#define CONFIG_PCM_U24BE_ENCODER 0 +#define CONFIG_PCM_U24LE_ENCODER 0 +#define CONFIG_PCM_U32BE_ENCODER 0 +#define CONFIG_PCM_U32LE_ENCODER 0 +#define CONFIG_ROQ_DPCM_ENCODER 0 +#define CONFIG_ADPCM_ADX_ENCODER 0 +#define CONFIG_ADPCM_G722_ENCODER 0 +#define CONFIG_ADPCM_G726_ENCODER 0 +#define CONFIG_ADPCM_IMA_QT_ENCODER 0 +#define CONFIG_ADPCM_IMA_WAV_ENCODER 0 +#define CONFIG_ADPCM_MS_ENCODER 0 +#define CONFIG_ADPCM_SWF_ENCODER 0 +#define CONFIG_ADPCM_YAMAHA_ENCODER 0 +#define CONFIG_SSA_ENCODER 0 +#define CONFIG_ASS_ENCODER 0 +#define CONFIG_DVBSUB_ENCODER 0 +#define CONFIG_DVDSUB_ENCODER 0 +#define CONFIG_MOVTEXT_ENCODER 0 +#define CONFIG_SRT_ENCODER 0 +#define CONFIG_SUBRIP_ENCODER 0 +#define 
CONFIG_TEXT_ENCODER 0 +#define CONFIG_WEBVTT_ENCODER 0 +#define CONFIG_XSUB_ENCODER 0 +#define CONFIG_AAC_AT_ENCODER 0 +#define CONFIG_ALAC_AT_ENCODER 0 +#define CONFIG_ILBC_AT_ENCODER 0 +#define CONFIG_PCM_ALAW_AT_ENCODER 0 +#define CONFIG_PCM_MULAW_AT_ENCODER 0 +#define CONFIG_LIBFDK_AAC_ENCODER 0 +#define CONFIG_LIBGSM_ENCODER 0 +#define CONFIG_LIBGSM_MS_ENCODER 0 +#define CONFIG_LIBILBC_ENCODER 0 +#define CONFIG_LIBMP3LAME_ENCODER 0 +#define CONFIG_LIBOPENCORE_AMRNB_ENCODER 0 +#define CONFIG_LIBOPENJPEG_ENCODER 0 +#define CONFIG_LIBOPUS_ENCODER 0 +#define CONFIG_LIBSCHROEDINGER_ENCODER 0 +#define CONFIG_LIBSHINE_ENCODER 0 +#define CONFIG_LIBSPEEX_ENCODER 0 +#define CONFIG_LIBTHEORA_ENCODER 0 +#define CONFIG_LIBTWOLAME_ENCODER 0 +#define CONFIG_LIBVO_AMRWBENC_ENCODER 0 +#define CONFIG_LIBVORBIS_ENCODER 0 +#define CONFIG_LIBVPX_VP8_ENCODER 0 +#define CONFIG_LIBVPX_VP9_ENCODER 0 +#define CONFIG_LIBWAVPACK_ENCODER 0 +#define CONFIG_LIBWEBP_ANIM_ENCODER 0 +#define CONFIG_LIBWEBP_ENCODER 0 +#define CONFIG_LIBX262_ENCODER 0 +#define CONFIG_LIBX264_ENCODER 0 +#define CONFIG_LIBX264RGB_ENCODER 0 +#define CONFIG_LIBX265_ENCODER 0 +#define CONFIG_LIBXAVS_ENCODER 0 +#define CONFIG_LIBXVID_ENCODER 0 +#define CONFIG_LIBOPENH264_ENCODER 0 +#define CONFIG_H264_NVENC_ENCODER 0 +#define CONFIG_H264_OMX_ENCODER 0 +#define CONFIG_H264_QSV_ENCODER 0 +#define CONFIG_H264_VAAPI_ENCODER 0 +#define CONFIG_H264_VIDEOTOOLBOX_ENCODER 0 +#define CONFIG_NVENC_ENCODER 0 +#define CONFIG_NVENC_H264_ENCODER 0 +#define CONFIG_NVENC_HEVC_ENCODER 0 +#define CONFIG_HEVC_NVENC_ENCODER 0 +#define CONFIG_HEVC_QSV_ENCODER 0 +#define CONFIG_HEVC_VAAPI_ENCODER 0 +#define CONFIG_LIBKVAZAAR_ENCODER 0 +#define CONFIG_MJPEG_VAAPI_ENCODER 0 +#define CONFIG_MPEG2_QSV_ENCODER 0 +#define CONFIG_MPEG2_VAAPI_ENCODER 0 +#define CONFIG_VP8_VAAPI_ENCODER 0 +#define CONFIG_ABENCH_FILTER 0 +#define CONFIG_ACOMPRESSOR_FILTER 0 +#define CONFIG_ACROSSFADE_FILTER 0 +#define CONFIG_ACRUSHER_FILTER 0 +#define 
CONFIG_ADELAY_FILTER 0 +#define CONFIG_AECHO_FILTER 0 +#define CONFIG_AEMPHASIS_FILTER 0 +#define CONFIG_AEVAL_FILTER 0 +#define CONFIG_AFADE_FILTER 0 +#define CONFIG_AFFTFILT_FILTER 0 +#define CONFIG_AFORMAT_FILTER 0 +#define CONFIG_AGATE_FILTER 0 +#define CONFIG_AINTERLEAVE_FILTER 0 +#define CONFIG_ALIMITER_FILTER 0 +#define CONFIG_ALLPASS_FILTER 0 +#define CONFIG_ALOOP_FILTER 0 +#define CONFIG_AMERGE_FILTER 0 +#define CONFIG_AMETADATA_FILTER 0 +#define CONFIG_AMIX_FILTER 0 +#define CONFIG_ANEQUALIZER_FILTER 0 +#define CONFIG_ANULL_FILTER 0 +#define CONFIG_APAD_FILTER 0 +#define CONFIG_APERMS_FILTER 0 +#define CONFIG_APHASER_FILTER 0 +#define CONFIG_APULSATOR_FILTER 0 +#define CONFIG_AREALTIME_FILTER 0 +#define CONFIG_ARESAMPLE_FILTER 0 +#define CONFIG_AREVERSE_FILTER 0 +#define CONFIG_ASELECT_FILTER 0 +#define CONFIG_ASENDCMD_FILTER 0 +#define CONFIG_ASETNSAMPLES_FILTER 0 +#define CONFIG_ASETPTS_FILTER 0 +#define CONFIG_ASETRATE_FILTER 0 +#define CONFIG_ASETTB_FILTER 0 +#define CONFIG_ASHOWINFO_FILTER 0 +#define CONFIG_ASIDEDATA_FILTER 0 +#define CONFIG_ASPLIT_FILTER 0 +#define CONFIG_ASTATS_FILTER 0 +#define CONFIG_ASTREAMSELECT_FILTER 0 +#define CONFIG_ATEMPO_FILTER 0 +#define CONFIG_ATRIM_FILTER 0 +#define CONFIG_AZMQ_FILTER 0 +#define CONFIG_BANDPASS_FILTER 0 +#define CONFIG_BANDREJECT_FILTER 0 +#define CONFIG_BASS_FILTER 0 +#define CONFIG_BIQUAD_FILTER 0 +#define CONFIG_BS2B_FILTER 0 +#define CONFIG_CHANNELMAP_FILTER 0 +#define CONFIG_CHANNELSPLIT_FILTER 0 +#define CONFIG_CHORUS_FILTER 0 +#define CONFIG_COMPAND_FILTER 0 +#define CONFIG_COMPENSATIONDELAY_FILTER 0 +#define CONFIG_CRYSTALIZER_FILTER 0 +#define CONFIG_DCSHIFT_FILTER 0 +#define CONFIG_DYNAUDNORM_FILTER 0 +#define CONFIG_EARWAX_FILTER 0 +#define CONFIG_EBUR128_FILTER 0 +#define CONFIG_EQUALIZER_FILTER 0 +#define CONFIG_EXTRASTEREO_FILTER 0 +#define CONFIG_FIREQUALIZER_FILTER 0 +#define CONFIG_FLANGER_FILTER 0 +#define CONFIG_HDCD_FILTER 0 +#define CONFIG_HIGHPASS_FILTER 0 +#define 
CONFIG_JOIN_FILTER 0 +#define CONFIG_LADSPA_FILTER 0 +#define CONFIG_LOUDNORM_FILTER 0 +#define CONFIG_LOWPASS_FILTER 0 +#define CONFIG_PAN_FILTER 0 +#define CONFIG_REPLAYGAIN_FILTER 0 +#define CONFIG_RESAMPLE_FILTER 0 +#define CONFIG_RUBBERBAND_FILTER 0 +#define CONFIG_SIDECHAINCOMPRESS_FILTER 0 +#define CONFIG_SIDECHAINGATE_FILTER 0 +#define CONFIG_SILENCEDETECT_FILTER 0 +#define CONFIG_SILENCEREMOVE_FILTER 0 +#define CONFIG_SOFALIZER_FILTER 0 +#define CONFIG_STEREOTOOLS_FILTER 0 +#define CONFIG_STEREOWIDEN_FILTER 0 +#define CONFIG_TREBLE_FILTER 0 +#define CONFIG_TREMOLO_FILTER 0 +#define CONFIG_VIBRATO_FILTER 0 +#define CONFIG_VOLUME_FILTER 0 +#define CONFIG_VOLUMEDETECT_FILTER 0 +#define CONFIG_AEVALSRC_FILTER 0 +#define CONFIG_ANOISESRC_FILTER 0 +#define CONFIG_ANULLSRC_FILTER 0 +#define CONFIG_FLITE_FILTER 0 +#define CONFIG_SINE_FILTER 0 +#define CONFIG_ANULLSINK_FILTER 0 +#define CONFIG_ALPHAEXTRACT_FILTER 0 +#define CONFIG_ALPHAMERGE_FILTER 0 +#define CONFIG_ASS_FILTER 0 +#define CONFIG_ATADENOISE_FILTER 0 +#define CONFIG_AVGBLUR_FILTER 0 +#define CONFIG_BBOX_FILTER 0 +#define CONFIG_BENCH_FILTER 0 +#define CONFIG_BITPLANENOISE_FILTER 0 +#define CONFIG_BLACKDETECT_FILTER 0 +#define CONFIG_BLACKFRAME_FILTER 0 +#define CONFIG_BLEND_FILTER 0 +#define CONFIG_BOXBLUR_FILTER 0 +#define CONFIG_BWDIF_FILTER 0 +#define CONFIG_CHROMAKEY_FILTER 0 +#define CONFIG_CIESCOPE_FILTER 0 +#define CONFIG_CODECVIEW_FILTER 0 +#define CONFIG_COLORBALANCE_FILTER 0 +#define CONFIG_COLORCHANNELMIXER_FILTER 0 +#define CONFIG_COLORKEY_FILTER 0 +#define CONFIG_COLORLEVELS_FILTER 0 +#define CONFIG_COLORMATRIX_FILTER 0 +#define CONFIG_COLORSPACE_FILTER 0 +#define CONFIG_CONVOLUTION_FILTER 0 +#define CONFIG_COPY_FILTER 0 +#define CONFIG_COREIMAGE_FILTER 0 +#define CONFIG_COVER_RECT_FILTER 0 +#define CONFIG_CROP_FILTER 0 +#define CONFIG_CROPDETECT_FILTER 0 +#define CONFIG_CURVES_FILTER 0 +#define CONFIG_DATASCOPE_FILTER 0 +#define CONFIG_DCTDNOIZ_FILTER 0 +#define CONFIG_DEBAND_FILTER 0 
+#define CONFIG_DECIMATE_FILTER 0 +#define CONFIG_DEFLATE_FILTER 0 +#define CONFIG_DEINTERLACE_QSV_FILTER 0 +#define CONFIG_DEINTERLACE_VAAPI_FILTER 0 +#define CONFIG_DEJUDDER_FILTER 0 +#define CONFIG_DELOGO_FILTER 0 +#define CONFIG_DESHAKE_FILTER 0 +#define CONFIG_DETELECINE_FILTER 0 +#define CONFIG_DILATION_FILTER 0 +#define CONFIG_DISPLACE_FILTER 0 +#define CONFIG_DRAWBOX_FILTER 0 +#define CONFIG_DRAWGRAPH_FILTER 0 +#define CONFIG_DRAWGRID_FILTER 0 +#define CONFIG_DRAWTEXT_FILTER 0 +#define CONFIG_EDGEDETECT_FILTER 0 +#define CONFIG_ELBG_FILTER 0 +#define CONFIG_EQ_FILTER 0 +#define CONFIG_EROSION_FILTER 0 +#define CONFIG_EXTRACTPLANES_FILTER 0 +#define CONFIG_FADE_FILTER 0 +#define CONFIG_FFTFILT_FILTER 0 +#define CONFIG_FIELD_FILTER 0 +#define CONFIG_FIELDHINT_FILTER 0 +#define CONFIG_FIELDMATCH_FILTER 0 +#define CONFIG_FIELDORDER_FILTER 0 +#define CONFIG_FIND_RECT_FILTER 0 +#define CONFIG_FORMAT_FILTER 0 +#define CONFIG_FPS_FILTER 0 +#define CONFIG_FRAMEPACK_FILTER 0 +#define CONFIG_FRAMERATE_FILTER 0 +#define CONFIG_FRAMESTEP_FILTER 0 +#define CONFIG_FREI0R_FILTER 0 +#define CONFIG_FSPP_FILTER 0 +#define CONFIG_GBLUR_FILTER 0 +#define CONFIG_GEQ_FILTER 0 +#define CONFIG_GRADFUN_FILTER 0 +#define CONFIG_HALDCLUT_FILTER 0 +#define CONFIG_HFLIP_FILTER 0 +#define CONFIG_HISTEQ_FILTER 0 +#define CONFIG_HISTOGRAM_FILTER 0 +#define CONFIG_HQDN3D_FILTER 0 +#define CONFIG_HQX_FILTER 0 +#define CONFIG_HSTACK_FILTER 0 +#define CONFIG_HUE_FILTER 0 +#define CONFIG_HWDOWNLOAD_FILTER 0 +#define CONFIG_HWMAP_FILTER 0 +#define CONFIG_HWUPLOAD_FILTER 0 +#define CONFIG_HWUPLOAD_CUDA_FILTER 0 +#define CONFIG_HYSTERESIS_FILTER 0 +#define CONFIG_IDET_FILTER 0 +#define CONFIG_IL_FILTER 0 +#define CONFIG_INFLATE_FILTER 0 +#define CONFIG_INTERLACE_FILTER 0 +#define CONFIG_INTERLEAVE_FILTER 0 +#define CONFIG_KERNDEINT_FILTER 0 +#define CONFIG_LENSCORRECTION_FILTER 0 +#define CONFIG_LOOP_FILTER 0 +#define CONFIG_LUT_FILTER 0 +#define CONFIG_LUT2_FILTER 0 +#define CONFIG_LUT3D_FILTER 0 
+#define CONFIG_LUTRGB_FILTER 0 +#define CONFIG_LUTYUV_FILTER 0 +#define CONFIG_MASKEDCLAMP_FILTER 0 +#define CONFIG_MASKEDMERGE_FILTER 0 +#define CONFIG_MCDEINT_FILTER 0 +#define CONFIG_MERGEPLANES_FILTER 0 +#define CONFIG_MESTIMATE_FILTER 0 +#define CONFIG_METADATA_FILTER 0 +#define CONFIG_MIDEQUALIZER_FILTER 0 +#define CONFIG_MINTERPOLATE_FILTER 0 +#define CONFIG_MPDECIMATE_FILTER 0 +#define CONFIG_NEGATE_FILTER 0 +#define CONFIG_NLMEANS_FILTER 0 +#define CONFIG_NNEDI_FILTER 0 +#define CONFIG_NOFORMAT_FILTER 0 +#define CONFIG_NOISE_FILTER 0 +#define CONFIG_NULL_FILTER 0 +#define CONFIG_OCR_FILTER 0 +#define CONFIG_OCV_FILTER 0 +#define CONFIG_OVERLAY_FILTER 0 +#define CONFIG_OWDENOISE_FILTER 0 +#define CONFIG_PAD_FILTER 0 +#define CONFIG_PALETTEGEN_FILTER 0 +#define CONFIG_PALETTEUSE_FILTER 0 +#define CONFIG_PERMS_FILTER 0 +#define CONFIG_PERSPECTIVE_FILTER 0 +#define CONFIG_PHASE_FILTER 0 +#define CONFIG_PIXDESCTEST_FILTER 0 +#define CONFIG_PP_FILTER 0 +#define CONFIG_PP7_FILTER 0 +#define CONFIG_PREMULTIPLY_FILTER 0 +#define CONFIG_PREWITT_FILTER 0 +#define CONFIG_PSNR_FILTER 0 +#define CONFIG_PULLUP_FILTER 0 +#define CONFIG_QP_FILTER 0 +#define CONFIG_RANDOM_FILTER 0 +#define CONFIG_READEIA608_FILTER 0 +#define CONFIG_READVITC_FILTER 0 +#define CONFIG_REALTIME_FILTER 0 +#define CONFIG_REMAP_FILTER 0 +#define CONFIG_REMOVEGRAIN_FILTER 0 +#define CONFIG_REMOVELOGO_FILTER 0 +#define CONFIG_REPEATFIELDS_FILTER 0 +#define CONFIG_REVERSE_FILTER 0 +#define CONFIG_ROTATE_FILTER 0 +#define CONFIG_SAB_FILTER 0 +#define CONFIG_SCALE_FILTER 0 +#define CONFIG_SCALE_NPP_FILTER 0 +#define CONFIG_SCALE_QSV_FILTER 0 +#define CONFIG_SCALE_VAAPI_FILTER 0 +#define CONFIG_SCALE2REF_FILTER 0 +#define CONFIG_SELECT_FILTER 0 +#define CONFIG_SELECTIVECOLOR_FILTER 0 +#define CONFIG_SENDCMD_FILTER 0 +#define CONFIG_SEPARATEFIELDS_FILTER 0 +#define CONFIG_SETDAR_FILTER 0 +#define CONFIG_SETFIELD_FILTER 0 +#define CONFIG_SETPTS_FILTER 0 +#define CONFIG_SETSAR_FILTER 0 +#define 
CONFIG_SETTB_FILTER 0 +#define CONFIG_SHOWINFO_FILTER 0 +#define CONFIG_SHOWPALETTE_FILTER 0 +#define CONFIG_SHUFFLEFRAMES_FILTER 0 +#define CONFIG_SHUFFLEPLANES_FILTER 0 +#define CONFIG_SIDEDATA_FILTER 0 +#define CONFIG_SIGNALSTATS_FILTER 0 +#define CONFIG_SIGNATURE_FILTER 0 +#define CONFIG_SMARTBLUR_FILTER 0 +#define CONFIG_SOBEL_FILTER 0 +#define CONFIG_SPLIT_FILTER 0 +#define CONFIG_SPP_FILTER 0 +#define CONFIG_SSIM_FILTER 0 +#define CONFIG_STEREO3D_FILTER 0 +#define CONFIG_STREAMSELECT_FILTER 0 +#define CONFIG_SUBTITLES_FILTER 0 +#define CONFIG_SUPER2XSAI_FILTER 0 +#define CONFIG_SWAPRECT_FILTER 0 +#define CONFIG_SWAPUV_FILTER 0 +#define CONFIG_TBLEND_FILTER 0 +#define CONFIG_TELECINE_FILTER 0 +#define CONFIG_THRESHOLD_FILTER 0 +#define CONFIG_THUMBNAIL_FILTER 0 +#define CONFIG_TILE_FILTER 0 +#define CONFIG_TINTERLACE_FILTER 0 +#define CONFIG_TRANSPOSE_FILTER 0 +#define CONFIG_TRIM_FILTER 0 +#define CONFIG_UNSHARP_FILTER 0 +#define CONFIG_USPP_FILTER 0 +#define CONFIG_VAGUEDENOISER_FILTER 0 +#define CONFIG_VECTORSCOPE_FILTER 0 +#define CONFIG_VFLIP_FILTER 0 +#define CONFIG_VIDSTABDETECT_FILTER 0 +#define CONFIG_VIDSTABTRANSFORM_FILTER 0 +#define CONFIG_VIGNETTE_FILTER 0 +#define CONFIG_VSTACK_FILTER 0 +#define CONFIG_W3FDIF_FILTER 0 +#define CONFIG_WAVEFORM_FILTER 0 +#define CONFIG_WEAVE_FILTER 0 +#define CONFIG_XBR_FILTER 0 +#define CONFIG_YADIF_FILTER 0 +#define CONFIG_ZMQ_FILTER 0 +#define CONFIG_ZOOMPAN_FILTER 0 +#define CONFIG_ZSCALE_FILTER 0 +#define CONFIG_ALLRGB_FILTER 0 +#define CONFIG_ALLYUV_FILTER 0 +#define CONFIG_CELLAUTO_FILTER 0 +#define CONFIG_COLOR_FILTER 0 +#define CONFIG_COREIMAGESRC_FILTER 0 +#define CONFIG_FREI0R_SRC_FILTER 0 +#define CONFIG_HALDCLUTSRC_FILTER 0 +#define CONFIG_LIFE_FILTER 0 +#define CONFIG_MANDELBROT_FILTER 0 +#define CONFIG_MPTESTSRC_FILTER 0 +#define CONFIG_NULLSRC_FILTER 0 +#define CONFIG_RGBTESTSRC_FILTER 0 +#define CONFIG_SMPTEBARS_FILTER 0 +#define CONFIG_SMPTEHDBARS_FILTER 0 +#define CONFIG_TESTSRC_FILTER 0 
+#define CONFIG_TESTSRC2_FILTER 0 +#define CONFIG_YUVTESTSRC_FILTER 0 +#define CONFIG_NULLSINK_FILTER 0 +#define CONFIG_ABITSCOPE_FILTER 0 +#define CONFIG_ADRAWGRAPH_FILTER 0 +#define CONFIG_AHISTOGRAM_FILTER 0 +#define CONFIG_APHASEMETER_FILTER 0 +#define CONFIG_AVECTORSCOPE_FILTER 0 +#define CONFIG_CONCAT_FILTER 0 +#define CONFIG_SHOWCQT_FILTER 0 +#define CONFIG_SHOWFREQS_FILTER 0 +#define CONFIG_SHOWSPECTRUM_FILTER 0 +#define CONFIG_SHOWSPECTRUMPIC_FILTER 0 +#define CONFIG_SHOWVOLUME_FILTER 0 +#define CONFIG_SHOWWAVES_FILTER 0 +#define CONFIG_SHOWWAVESPIC_FILTER 0 +#define CONFIG_SPECTRUMSYNTH_FILTER 0 +#define CONFIG_AMOVIE_FILTER 0 +#define CONFIG_MOVIE_FILTER 0 +#define CONFIG_H263_VAAPI_HWACCEL 0 +#define CONFIG_H263_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_H264_CUVID_HWACCEL 0 +#define CONFIG_H264_D3D11VA_HWACCEL 0 +#define CONFIG_H264_DXVA2_HWACCEL 0 +#define CONFIG_H264_MEDIACODEC_HWACCEL 0 +#define CONFIG_H264_MMAL_HWACCEL 0 +#define CONFIG_H264_QSV_HWACCEL 0 +#define CONFIG_H264_VAAPI_HWACCEL 0 +#define CONFIG_H264_VDA_HWACCEL 0 +#define CONFIG_H264_VDA_OLD_HWACCEL 0 +#define CONFIG_H264_VDPAU_HWACCEL 0 +#define CONFIG_H264_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_HEVC_CUVID_HWACCEL 0 +#define CONFIG_HEVC_D3D11VA_HWACCEL 0 +#define CONFIG_HEVC_DXVA2_HWACCEL 0 +#define CONFIG_HEVC_MEDIACODEC_HWACCEL 0 +#define CONFIG_HEVC_QSV_HWACCEL 0 +#define CONFIG_HEVC_VAAPI_HWACCEL 0 +#define CONFIG_HEVC_VDPAU_HWACCEL 0 +#define CONFIG_MJPEG_CUVID_HWACCEL 0 +#define CONFIG_MPEG1_CUVID_HWACCEL 0 +#define CONFIG_MPEG1_XVMC_HWACCEL 0 +#define CONFIG_MPEG1_VDPAU_HWACCEL 0 +#define CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_MPEG2_CUVID_HWACCEL 0 +#define CONFIG_MPEG2_XVMC_HWACCEL 0 +#define CONFIG_MPEG2_D3D11VA_HWACCEL 0 +#define CONFIG_MPEG2_DXVA2_HWACCEL 0 +#define CONFIG_MPEG2_MMAL_HWACCEL 0 +#define CONFIG_MPEG2_QSV_HWACCEL 0 +#define CONFIG_MPEG2_VAAPI_HWACCEL 0 +#define CONFIG_MPEG2_VDPAU_HWACCEL 0 +#define CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL 0 +#define 
CONFIG_MPEG4_CUVID_HWACCEL 0 +#define CONFIG_MPEG4_MEDIACODEC_HWACCEL 0 +#define CONFIG_MPEG4_MMAL_HWACCEL 0 +#define CONFIG_MPEG4_VAAPI_HWACCEL 0 +#define CONFIG_MPEG4_VDPAU_HWACCEL 0 +#define CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_VC1_CUVID_HWACCEL 0 +#define CONFIG_VC1_D3D11VA_HWACCEL 0 +#define CONFIG_VC1_DXVA2_HWACCEL 0 +#define CONFIG_VC1_VAAPI_HWACCEL 0 +#define CONFIG_VC1_VDPAU_HWACCEL 0 +#define CONFIG_VC1_MMAL_HWACCEL 0 +#define CONFIG_VC1_QSV_HWACCEL 0 +#define CONFIG_VP8_CUVID_HWACCEL 0 +#define CONFIG_VP8_MEDIACODEC_HWACCEL 0 +#define CONFIG_VP8_QSV_HWACCEL 0 +#define CONFIG_VP9_CUVID_HWACCEL 0 +#define CONFIG_VP9_D3D11VA_HWACCEL 0 +#define CONFIG_VP9_DXVA2_HWACCEL 0 +#define CONFIG_VP9_MEDIACODEC_HWACCEL 0 +#define CONFIG_VP9_VAAPI_HWACCEL 0 +#define CONFIG_WMV3_D3D11VA_HWACCEL 0 +#define CONFIG_WMV3_DXVA2_HWACCEL 0 +#define CONFIG_WMV3_VAAPI_HWACCEL 0 +#define CONFIG_WMV3_VDPAU_HWACCEL 0 +#define CONFIG_ALSA_INDEV 0 +#define CONFIG_AVFOUNDATION_INDEV 0 +#define CONFIG_BKTR_INDEV 0 +#define CONFIG_DECKLINK_INDEV 0 +#define CONFIG_DSHOW_INDEV 0 +#define CONFIG_DV1394_INDEV 0 +#define CONFIG_FBDEV_INDEV 0 +#define CONFIG_GDIGRAB_INDEV 0 +#define CONFIG_IEC61883_INDEV 0 +#define CONFIG_JACK_INDEV 0 +#define CONFIG_LAVFI_INDEV 0 +#define CONFIG_OPENAL_INDEV 0 +#define CONFIG_OSS_INDEV 0 +#define CONFIG_PULSE_INDEV 0 +#define CONFIG_QTKIT_INDEV 0 +#define CONFIG_SNDIO_INDEV 0 +#define CONFIG_V4L2_INDEV 0 +#define CONFIG_VFWCAP_INDEV 0 +#define CONFIG_XCBGRAB_INDEV 0 +#define CONFIG_LIBCDIO_INDEV 0 +#define CONFIG_LIBDC1394_INDEV 0 +#define CONFIG_A64_MUXER 0 +#define CONFIG_AC3_MUXER 0 +#define CONFIG_ADTS_MUXER 0 +#define CONFIG_ADX_MUXER 0 +#define CONFIG_AIFF_MUXER 0 +#define CONFIG_AMR_MUXER 0 +#define CONFIG_APNG_MUXER 0 +#define CONFIG_ASF_MUXER 0 +#define CONFIG_ASS_MUXER 0 +#define CONFIG_AST_MUXER 0 +#define CONFIG_ASF_STREAM_MUXER 0 +#define CONFIG_AU_MUXER 0 +#define CONFIG_AVI_MUXER 0 +#define CONFIG_AVM2_MUXER 0 +#define 
CONFIG_BIT_MUXER 0 +#define CONFIG_CAF_MUXER 0 +#define CONFIG_CAVSVIDEO_MUXER 0 +#define CONFIG_CRC_MUXER 0 +#define CONFIG_DASH_MUXER 0 +#define CONFIG_DATA_MUXER 0 +#define CONFIG_DAUD_MUXER 0 +#define CONFIG_DIRAC_MUXER 0 +#define CONFIG_DNXHD_MUXER 0 +#define CONFIG_DTS_MUXER 0 +#define CONFIG_DV_MUXER 0 +#define CONFIG_EAC3_MUXER 0 +#define CONFIG_F4V_MUXER 0 +#define CONFIG_FFM_MUXER 0 +#define CONFIG_FFMETADATA_MUXER 0 +#define CONFIG_FIFO_MUXER 0 +#define CONFIG_FILMSTRIP_MUXER 0 +#define CONFIG_FLAC_MUXER 0 +#define CONFIG_FLV_MUXER 0 +#define CONFIG_FRAMECRC_MUXER 0 +#define CONFIG_FRAMEHASH_MUXER 0 +#define CONFIG_FRAMEMD5_MUXER 0 +#define CONFIG_G722_MUXER 0 +#define CONFIG_G723_1_MUXER 0 +#define CONFIG_GIF_MUXER 0 +#define CONFIG_GSM_MUXER 0 +#define CONFIG_GXF_MUXER 0 +#define CONFIG_H261_MUXER 0 +#define CONFIG_H263_MUXER 0 +#define CONFIG_H264_MUXER 0 +#define CONFIG_HASH_MUXER 0 +#define CONFIG_HDS_MUXER 0 +#define CONFIG_HEVC_MUXER 0 +#define CONFIG_HLS_MUXER 0 +#define CONFIG_ICO_MUXER 0 +#define CONFIG_ILBC_MUXER 0 +#define CONFIG_IMAGE2_MUXER 0 +#define CONFIG_IMAGE2PIPE_MUXER 0 +#define CONFIG_IPOD_MUXER 0 +#define CONFIG_IRCAM_MUXER 0 +#define CONFIG_ISMV_MUXER 0 +#define CONFIG_IVF_MUXER 0 +#define CONFIG_JACOSUB_MUXER 0 +#define CONFIG_LATM_MUXER 0 +#define CONFIG_LRC_MUXER 0 +#define CONFIG_M4V_MUXER 0 +#define CONFIG_MD5_MUXER 0 +#define CONFIG_MATROSKA_MUXER 0 +#define CONFIG_MATROSKA_AUDIO_MUXER 0 +#define CONFIG_MICRODVD_MUXER 0 +#define CONFIG_MJPEG_MUXER 0 +#define CONFIG_MLP_MUXER 0 +#define CONFIG_MMF_MUXER 0 +#define CONFIG_MOV_MUXER 1 +#define CONFIG_MP2_MUXER 0 +#define CONFIG_MP3_MUXER 0 +#define CONFIG_MP4_MUXER 1 +#define CONFIG_MPEG1SYSTEM_MUXER 0 +#define CONFIG_MPEG1VCD_MUXER 0 +#define CONFIG_MPEG1VIDEO_MUXER 0 +#define CONFIG_MPEG2DVD_MUXER 0 +#define CONFIG_MPEG2SVCD_MUXER 0 +#define CONFIG_MPEG2VIDEO_MUXER 0 +#define CONFIG_MPEG2VOB_MUXER 0 +#define CONFIG_MPEGTS_MUXER 0 +#define CONFIG_MPJPEG_MUXER 0 +#define 
CONFIG_MXF_MUXER 0 +#define CONFIG_MXF_D10_MUXER 0 +#define CONFIG_MXF_OPATOM_MUXER 0 +#define CONFIG_NULL_MUXER 0 +#define CONFIG_NUT_MUXER 0 +#define CONFIG_OGA_MUXER 0 +#define CONFIG_OGG_MUXER 0 +#define CONFIG_OGV_MUXER 0 +#define CONFIG_OMA_MUXER 0 +#define CONFIG_OPUS_MUXER 0 +#define CONFIG_PCM_ALAW_MUXER 0 +#define CONFIG_PCM_MULAW_MUXER 0 +#define CONFIG_PCM_F64BE_MUXER 0 +#define CONFIG_PCM_F64LE_MUXER 0 +#define CONFIG_PCM_F32BE_MUXER 0 +#define CONFIG_PCM_F32LE_MUXER 0 +#define CONFIG_PCM_S32BE_MUXER 0 +#define CONFIG_PCM_S32LE_MUXER 0 +#define CONFIG_PCM_S24BE_MUXER 0 +#define CONFIG_PCM_S24LE_MUXER 0 +#define CONFIG_PCM_S16BE_MUXER 0 +#define CONFIG_PCM_S16LE_MUXER 0 +#define CONFIG_PCM_S8_MUXER 0 +#define CONFIG_PCM_U32BE_MUXER 0 +#define CONFIG_PCM_U32LE_MUXER 0 +#define CONFIG_PCM_U24BE_MUXER 0 +#define CONFIG_PCM_U24LE_MUXER 0 +#define CONFIG_PCM_U16BE_MUXER 0 +#define CONFIG_PCM_U16LE_MUXER 0 +#define CONFIG_PCM_U8_MUXER 0 +#define CONFIG_PSP_MUXER 0 +#define CONFIG_RAWVIDEO_MUXER 0 +#define CONFIG_RM_MUXER 0 +#define CONFIG_ROQ_MUXER 0 +#define CONFIG_RSO_MUXER 0 +#define CONFIG_RTP_MUXER 0 +#define CONFIG_RTP_MPEGTS_MUXER 0 +#define CONFIG_RTSP_MUXER 0 +#define CONFIG_SAP_MUXER 0 +#define CONFIG_SCC_MUXER 0 +#define CONFIG_SEGMENT_MUXER 0 +#define CONFIG_STREAM_SEGMENT_MUXER 0 +#define CONFIG_SINGLEJPEG_MUXER 0 +#define CONFIG_SMJPEG_MUXER 0 +#define CONFIG_SMOOTHSTREAMING_MUXER 0 +#define CONFIG_SOX_MUXER 0 +#define CONFIG_SPX_MUXER 0 +#define CONFIG_SPDIF_MUXER 0 +#define CONFIG_SRT_MUXER 0 +#define CONFIG_SWF_MUXER 0 +#define CONFIG_TEE_MUXER 0 +#define CONFIG_TG2_MUXER 0 +#define CONFIG_TGP_MUXER 0 +#define CONFIG_MKVTIMESTAMP_V2_MUXER 0 +#define CONFIG_TRUEHD_MUXER 0 +#define CONFIG_TTA_MUXER 0 +#define CONFIG_UNCODEDFRAMECRC_MUXER 0 +#define CONFIG_VC1_MUXER 0 +#define CONFIG_VC1T_MUXER 0 +#define CONFIG_VOC_MUXER 0 +#define CONFIG_W64_MUXER 0 +#define CONFIG_WAV_MUXER 0 +#define CONFIG_WEBM_MUXER 0 +#define 
CONFIG_WEBM_DASH_MANIFEST_MUXER 0 +#define CONFIG_WEBM_CHUNK_MUXER 0 +#define CONFIG_WEBP_MUXER 0 +#define CONFIG_WEBVTT_MUXER 0 +#define CONFIG_WTV_MUXER 0 +#define CONFIG_WV_MUXER 0 +#define CONFIG_YUV4MPEGPIPE_MUXER 0 +#define CONFIG_CHROMAPRINT_MUXER 0 +#define CONFIG_LIBNUT_MUXER 0 +#define CONFIG_ALSA_OUTDEV 0 +#define CONFIG_CACA_OUTDEV 0 +#define CONFIG_DECKLINK_OUTDEV 0 +#define CONFIG_FBDEV_OUTDEV 0 +#define CONFIG_OPENGL_OUTDEV 0 +#define CONFIG_OSS_OUTDEV 0 +#define CONFIG_PULSE_OUTDEV 0 +#define CONFIG_SDL2_OUTDEV 0 +#define CONFIG_SNDIO_OUTDEV 0 +#define CONFIG_V4L2_OUTDEV 0 +#define CONFIG_XV_OUTDEV 0 +#define CONFIG_AAC_PARSER 1 +#define CONFIG_AAC_LATM_PARSER 1 +#define CONFIG_AC3_PARSER 0 +#define CONFIG_ADX_PARSER 0 +#define CONFIG_BMP_PARSER 0 +#define CONFIG_CAVSVIDEO_PARSER 0 +#define CONFIG_COOK_PARSER 0 +#define CONFIG_DCA_PARSER 0 +#define CONFIG_DIRAC_PARSER 0 +#define CONFIG_DNXHD_PARSER 0 +#define CONFIG_DPX_PARSER 0 +#define CONFIG_DVAUDIO_PARSER 0 +#define CONFIG_DVBSUB_PARSER 0 +#define CONFIG_DVDSUB_PARSER 0 +#define CONFIG_DVD_NAV_PARSER 0 +#define CONFIG_FLAC_PARSER 1 +#define CONFIG_G729_PARSER 0 +#define CONFIG_GSM_PARSER 0 +#define CONFIG_H261_PARSER 0 +#define CONFIG_H263_PARSER 1 +#define CONFIG_H264_PARSER 1 +#define CONFIG_HEVC_PARSER 1 +#define CONFIG_MJPEG_PARSER 0 +#define CONFIG_MLP_PARSER 0 +#define CONFIG_MPEG4VIDEO_PARSER 1 +#define CONFIG_MPEGAUDIO_PARSER 1 +#define CONFIG_MPEGVIDEO_PARSER 0 +#define CONFIG_OPUS_PARSER 0 +#define CONFIG_PNG_PARSER 0 +#define CONFIG_PNM_PARSER 0 +#define CONFIG_RV30_PARSER 0 +#define CONFIG_RV40_PARSER 0 +#define CONFIG_SIPR_PARSER 0 +#define CONFIG_TAK_PARSER 0 +#define CONFIG_VC1_PARSER 0 +#define CONFIG_VORBIS_PARSER 0 +#define CONFIG_VP3_PARSER 0 +#define CONFIG_VP8_PARSER 0 +#define CONFIG_VP9_PARSER 0 +#define CONFIG_XMA_PARSER 0 +#define CONFIG_ASYNC_PROTOCOL 1 +#define CONFIG_BLURAY_PROTOCOL 0 +#define CONFIG_CACHE_PROTOCOL 1 +#define CONFIG_CONCAT_PROTOCOL 0 +#define 
CONFIG_CRYPTO_PROTOCOL 1 +#define CONFIG_DATA_PROTOCOL 1 +#define CONFIG_FFRTMPCRYPT_PROTOCOL 0 +#define CONFIG_FFRTMPHTTP_PROTOCOL 1 +#define CONFIG_FILE_PROTOCOL 1 +#define CONFIG_FTP_PROTOCOL 1 +#define CONFIG_GOPHER_PROTOCOL 0 +#define CONFIG_HLS_PROTOCOL 1 +#define CONFIG_HTTP_PROTOCOL 1 +#define CONFIG_HTTPPROXY_PROTOCOL 1 +#define CONFIG_HTTPS_PROTOCOL 1 +#define CONFIG_ICECAST_PROTOCOL 0 +#define CONFIG_IJKHTTPHOOK_PROTOCOL 1 +#define CONFIG_IJKHLSCACHE_PROTOCOL 1 +#define CONFIG_IJKLONGURL_PROTOCOL 1 +#define CONFIG_IJKMEDIADATASOURCE_PROTOCOL 1 +#define CONFIG_IJKSEGMENT_PROTOCOL 1 +#define CONFIG_IJKTCPHOOK_PROTOCOL 1 +#define CONFIG_IJKIO_PROTOCOL 1 +#define CONFIG_MMSH_PROTOCOL 0 +#define CONFIG_MMST_PROTOCOL 0 +#define CONFIG_MD5_PROTOCOL 0 +#define CONFIG_PIPE_PROTOCOL 1 +#define CONFIG_PROMPEG_PROTOCOL 1 +#define CONFIG_RTMP_PROTOCOL 1 +#define CONFIG_RTMPE_PROTOCOL 0 +#define CONFIG_RTMPS_PROTOCOL 0 +#define CONFIG_RTMPT_PROTOCOL 1 +#define CONFIG_RTMPTE_PROTOCOL 0 +#define CONFIG_RTMPTS_PROTOCOL 0 +#define CONFIG_RTP_PROTOCOL 0 +#define CONFIG_SCTP_PROTOCOL 0 +#define CONFIG_SRTP_PROTOCOL 0 +#define CONFIG_SUBFILE_PROTOCOL 0 +#define CONFIG_TEE_PROTOCOL 1 +#define CONFIG_TCP_PROTOCOL 1 +#define CONFIG_TLS_GNUTLS_PROTOCOL 0 +#define CONFIG_TLS_SCHANNEL_PROTOCOL 0 +#define CONFIG_TLS_SECURETRANSPORT_PROTOCOL 0 +#define CONFIG_TLS_OPENSSL_PROTOCOL 1 +#define CONFIG_UDP_PROTOCOL 1 +#define CONFIG_UDPLITE_PROTOCOL 1 +#define CONFIG_UNIX_PROTOCOL 0 +#define CONFIG_LIBRTMP_PROTOCOL 0 +#define CONFIG_LIBRTMPE_PROTOCOL 0 +#define CONFIG_LIBRTMPS_PROTOCOL 0 +#define CONFIG_LIBRTMPT_PROTOCOL 0 +#define CONFIG_LIBRTMPTE_PROTOCOL 0 +#define CONFIG_LIBSSH_PROTOCOL 0 +#define CONFIG_LIBSMBCLIENT_PROTOCOL 0 +#endif /* FFMPEG_CONFIG_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libffmpeg/armv7/config.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libffmpeg/armv7/config.h new file mode 100644 index 
0000000..76bf50e --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libffmpeg/armv7/config.h @@ -0,0 +1,2276 @@ +/* Automatically generated by configure - do not modify! */ +#ifndef FFMPEG_CONFIG_H +#define FFMPEG_CONFIG_H +#define FFMPEG_CONFIGURATION "--disable-gpl --disable-nonfree --enable-runtime-cpudetect --disable-gray --disable-swscale-alpha --disable-programs --disable-ffmpeg --disable-ffplay --disable-ffprobe --disable-ffserver --disable-doc --disable-htmlpages --disable-manpages --disable-podpages --disable-txtpages --disable-avdevice --enable-avcodec --enable-avformat --enable-avutil --enable-swresample --enable-swscale --disable-postproc --enable-avfilter --disable-avresample --enable-network --disable-d3d11va --disable-dxva2 --disable-vaapi --disable-vda --disable-vdpau --disable-videotoolbox --disable-encoders --enable-encoder=png --disable-decoders --enable-decoder=aac --enable-decoder=aac_latm --enable-decoder=flv --enable-decoder=h264 --enable-decoder='mp3*' --enable-decoder=vp6f --enable-decoder=flac --enable-decoder=mpeg4 --enable-decoder=wavpack --enable-decoder=wav --enable-decoder=pcm_s16le --disable-hwaccels --disable-muxers --enable-muxer=mp4 --disable-demuxers --enable-demuxer=aac --enable-demuxer=concat --enable-demuxer=data --enable-demuxer=flv --enable-demuxer=hls --enable-demuxer=live_flv --enable-demuxer=mov --enable-demuxer=mp3 --enable-demuxer=mpegps --enable-demuxer=mpegts --enable-demuxer=mpegvideo --enable-demuxer=flac --enable-demuxer=hevc --enable-demuxer=wav --disable-parsers --enable-parser=aac --enable-parser=aac_latm --enable-parser=h264 --enable-parser=flac --enable-parser=hevc --enable-bsfs --disable-bsf=chomp --disable-bsf=dca_core --disable-bsf=dump_extradata --disable-bsf=hevc_mp4toannexb --disable-bsf=imx_dump_header --disable-bsf=mjpeg2jpeg --disable-bsf=mjpega_dump_header --disable-bsf=mov2textsub --disable-bsf=mp3_header_decompress --disable-bsf=mpeg4_unpack_bframes 
--disable-bsf=noise --disable-bsf=remove_extradata --disable-bsf=text2movsub --disable-bsf=vp9_superframe --enable-protocols --enable-protocol=async --disable-protocol=bluray --disable-protocol=concat --disable-protocol=ffrtmpcrypt --enable-protocol=ffrtmphttp --disable-protocol=gopher --disable-protocol=icecast --disable-protocol='librtmp*' --disable-protocol=libssh --disable-protocol=md5 --disable-protocol=mmsh --disable-protocol=mmst --disable-protocol='rtmp*' --enable-protocol=rtmp --enable-protocol=rtmpt --disable-protocol=rtp --disable-protocol=sctp --disable-protocol=srtp --disable-protocol=subfile --disable-protocol=unix --disable-devices --disable-filters --disable-iconv --disable-audiotoolbox --disable-videotoolbox --enable-cross-compile --disable-stripping --arch=armv7 --target-os=darwin --enable-static --disable-shared --enable-pic --enable-neon --enable-optimizations --enable-debug --enable-small --disable-asm --prefix=/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/build/ffmpeg-armv7/output --enable-openssl --cc='xcrun -sdk iphoneos clang' --extra-cflags=' -arch armv7 -miphoneos-version-min=7.0 -I/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/boringssl/include -DQCLOUDSSL' --extra-cxxflags=' -arch armv7 -miphoneos-version-min=7.0 -I/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/boringssl/include -DQCLOUDSSL' --extra-ldflags=' -arch armv7 -miphoneos-version-min=7.0 -arch armv7 -miphoneos-version-min=7.0 -I/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/boringssl/include -DQCLOUDSSL -L/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/boringssl/lib -lboringssl'" +#define FFMPEG_LICENSE "LGPL version 2.1 or later" +#define CONFIG_THIS_YEAR 2017 +#define FFMPEG_DATADIR "/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/build/ffmpeg-armv7/output/share/ffmpeg" +#define 
AVCONV_DATADIR "/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/build/ffmpeg-armv7/output/share/ffmpeg" +#define CC_IDENT "Apple LLVM version 10.0.1 (clang-1001.0.46.4)" +#define av_restrict restrict +#define EXTERN_PREFIX "_" +#define EXTERN_ASM _ +#define BUILDSUF "" +#define SLIBSUF ".dylib" +#define HAVE_MMX2 HAVE_MMXEXT +#define SWS_MAX_FILTER_SIZE 256 +#define ARCH_AARCH64 0 +#define ARCH_ALPHA 0 +#define ARCH_ARM 0 +#define ARCH_AVR32 0 +#define ARCH_AVR32_AP 0 +#define ARCH_AVR32_UC 0 +#define ARCH_BFIN 0 +#define ARCH_IA64 0 +#define ARCH_M68K 0 +#define ARCH_MIPS 0 +#define ARCH_MIPS64 0 +#define ARCH_PARISC 0 +#define ARCH_PPC 0 +#define ARCH_PPC64 0 +#define ARCH_S390 0 +#define ARCH_SH4 0 +#define ARCH_SPARC 0 +#define ARCH_SPARC64 0 +#define ARCH_TILEGX 0 +#define ARCH_TILEPRO 0 +#define ARCH_TOMI 0 +#define ARCH_X86 0 +#define ARCH_X86_32 0 +#define ARCH_X86_64 0 +#define HAVE_ARMV5TE 0 +#define HAVE_ARMV6 0 +#define HAVE_ARMV6T2 0 +#define HAVE_ARMV8 0 +#define HAVE_NEON 0 +#define HAVE_VFP 0 +#define HAVE_VFPV3 0 +#define HAVE_SETEND 0 +#define HAVE_ALTIVEC 0 +#define HAVE_DCBZL 0 +#define HAVE_LDBRX 0 +#define HAVE_POWER8 0 +#define HAVE_PPC4XX 0 +#define HAVE_VSX 0 +#define HAVE_AESNI 0 +#define HAVE_AMD3DNOW 0 +#define HAVE_AMD3DNOWEXT 0 +#define HAVE_AVX 0 +#define HAVE_AVX2 0 +#define HAVE_FMA3 0 +#define HAVE_FMA4 0 +#define HAVE_MMX 0 +#define HAVE_MMXEXT 0 +#define HAVE_SSE 0 +#define HAVE_SSE2 0 +#define HAVE_SSE3 0 +#define HAVE_SSE4 0 +#define HAVE_SSE42 0 +#define HAVE_SSSE3 0 +#define HAVE_XOP 0 +#define HAVE_CPUNOP 0 +#define HAVE_I686 0 +#define HAVE_MIPSFPU 0 +#define HAVE_MIPS32R2 0 +#define HAVE_MIPS32R5 0 +#define HAVE_MIPS64R2 0 +#define HAVE_MIPS32R6 0 +#define HAVE_MIPS64R6 0 +#define HAVE_MIPSDSP 0 +#define HAVE_MIPSDSPR2 0 +#define HAVE_MSA 0 +#define HAVE_LOONGSON2 0 +#define HAVE_LOONGSON3 0 +#define HAVE_MMI 0 +#define HAVE_ARMV5TE_EXTERNAL 1 +#define HAVE_ARMV6_EXTERNAL 1 +#define 
HAVE_ARMV6T2_EXTERNAL 1 +#define HAVE_ARMV8_EXTERNAL 0 +#define HAVE_NEON_EXTERNAL 1 +#define HAVE_VFP_EXTERNAL 1 +#define HAVE_VFPV3_EXTERNAL 1 +#define HAVE_SETEND_EXTERNAL 1 +#define HAVE_ALTIVEC_EXTERNAL 0 +#define HAVE_DCBZL_EXTERNAL 0 +#define HAVE_LDBRX_EXTERNAL 0 +#define HAVE_POWER8_EXTERNAL 0 +#define HAVE_PPC4XX_EXTERNAL 0 +#define HAVE_VSX_EXTERNAL 0 +#define HAVE_AESNI_EXTERNAL 0 +#define HAVE_AMD3DNOW_EXTERNAL 0 +#define HAVE_AMD3DNOWEXT_EXTERNAL 0 +#define HAVE_AVX_EXTERNAL 0 +#define HAVE_AVX2_EXTERNAL 0 +#define HAVE_FMA3_EXTERNAL 0 +#define HAVE_FMA4_EXTERNAL 0 +#define HAVE_MMX_EXTERNAL 0 +#define HAVE_MMXEXT_EXTERNAL 0 +#define HAVE_SSE_EXTERNAL 0 +#define HAVE_SSE2_EXTERNAL 0 +#define HAVE_SSE3_EXTERNAL 0 +#define HAVE_SSE4_EXTERNAL 0 +#define HAVE_SSE42_EXTERNAL 0 +#define HAVE_SSSE3_EXTERNAL 0 +#define HAVE_XOP_EXTERNAL 0 +#define HAVE_CPUNOP_EXTERNAL 0 +#define HAVE_I686_EXTERNAL 0 +#define HAVE_MIPSFPU_EXTERNAL 0 +#define HAVE_MIPS32R2_EXTERNAL 0 +#define HAVE_MIPS32R5_EXTERNAL 0 +#define HAVE_MIPS64R2_EXTERNAL 0 +#define HAVE_MIPS32R6_EXTERNAL 0 +#define HAVE_MIPS64R6_EXTERNAL 0 +#define HAVE_MIPSDSP_EXTERNAL 0 +#define HAVE_MIPSDSPR2_EXTERNAL 0 +#define HAVE_MSA_EXTERNAL 0 +#define HAVE_LOONGSON2_EXTERNAL 0 +#define HAVE_LOONGSON3_EXTERNAL 0 +#define HAVE_MMI_EXTERNAL 0 +#define HAVE_ARMV5TE_INLINE 1 +#define HAVE_ARMV6_INLINE 1 +#define HAVE_ARMV6T2_INLINE 1 +#define HAVE_ARMV8_INLINE 0 +#define HAVE_NEON_INLINE 1 +#define HAVE_VFP_INLINE 1 +#define HAVE_VFPV3_INLINE 1 +#define HAVE_SETEND_INLINE 1 +#define HAVE_ALTIVEC_INLINE 0 +#define HAVE_DCBZL_INLINE 0 +#define HAVE_LDBRX_INLINE 0 +#define HAVE_POWER8_INLINE 0 +#define HAVE_PPC4XX_INLINE 0 +#define HAVE_VSX_INLINE 0 +#define HAVE_AESNI_INLINE 0 +#define HAVE_AMD3DNOW_INLINE 0 +#define HAVE_AMD3DNOWEXT_INLINE 0 +#define HAVE_AVX_INLINE 0 +#define HAVE_AVX2_INLINE 0 +#define HAVE_FMA3_INLINE 0 +#define HAVE_FMA4_INLINE 0 +#define HAVE_MMX_INLINE 0 +#define HAVE_MMXEXT_INLINE 0 
+#define HAVE_SSE_INLINE 0 +#define HAVE_SSE2_INLINE 0 +#define HAVE_SSE3_INLINE 0 +#define HAVE_SSE4_INLINE 0 +#define HAVE_SSE42_INLINE 0 +#define HAVE_SSSE3_INLINE 0 +#define HAVE_XOP_INLINE 0 +#define HAVE_CPUNOP_INLINE 0 +#define HAVE_I686_INLINE 0 +#define HAVE_MIPSFPU_INLINE 0 +#define HAVE_MIPS32R2_INLINE 0 +#define HAVE_MIPS32R5_INLINE 0 +#define HAVE_MIPS64R2_INLINE 0 +#define HAVE_MIPS32R6_INLINE 0 +#define HAVE_MIPS64R6_INLINE 0 +#define HAVE_MIPSDSP_INLINE 0 +#define HAVE_MIPSDSPR2_INLINE 0 +#define HAVE_MSA_INLINE 0 +#define HAVE_LOONGSON2_INLINE 0 +#define HAVE_LOONGSON3_INLINE 0 +#define HAVE_MMI_INLINE 0 +#define HAVE_ALIGNED_STACK 0 +#define HAVE_FAST_64BIT 0 +#define HAVE_FAST_CLZ 1 +#define HAVE_FAST_CMOV 0 +#define HAVE_LOCAL_ALIGNED_8 0 +#define HAVE_LOCAL_ALIGNED_16 0 +#define HAVE_LOCAL_ALIGNED_32 0 +#define HAVE_SIMD_ALIGN_16 0 +#define HAVE_SIMD_ALIGN_32 0 +#define HAVE_ATOMICS_GCC 1 +#define HAVE_ATOMICS_SUNCC 0 +#define HAVE_ATOMICS_WIN32 0 +#define HAVE_ATOMIC_CAS_PTR 0 +#define HAVE_MACHINE_RW_BARRIER 0 +#define HAVE_MEMORYBARRIER 0 +#define HAVE_MM_EMPTY 0 +#define HAVE_RDTSC 0 +#define HAVE_SARESTART 1 +#define HAVE_SEM_TIMEDWAIT 0 +#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1 +#define HAVE_CABS 1 +#define HAVE_CEXP 1 +#define HAVE_INLINE_ASM 1 +#define HAVE_SYMVER 1 +#define HAVE_YASM 0 +#define HAVE_BIGENDIAN 0 +#define HAVE_FAST_UNALIGNED 1 +#define HAVE_ALSA_ASOUNDLIB_H 0 +#define HAVE_ALTIVEC_H 0 +#define HAVE_ARPA_INET_H 1 +#define HAVE_ASM_TYPES_H 0 +#define HAVE_CDIO_PARANOIA_H 0 +#define HAVE_CDIO_PARANOIA_PARANOIA_H 0 +#define HAVE_CUDA_H 0 +#define HAVE_DISPATCH_DISPATCH_H 1 +#define HAVE_DEV_BKTR_IOCTL_BT848_H 0 +#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0 +#define HAVE_DEV_IC_BT8XX_H 0 +#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0 +#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0 +#define HAVE_DIRECT_H 0 +#define HAVE_DIRENT_H 1 +#define HAVE_DLFCN_H 1 +#define HAVE_D3D11_H 0 +#define HAVE_DXVA_H 0 +#define HAVE_ES2_GL_H 0 +#define 
HAVE_GSM_H 0 +#define HAVE_IO_H 0 +#define HAVE_MACH_MACH_TIME_H 1 +#define HAVE_MACHINE_IOCTL_BT848_H 0 +#define HAVE_MACHINE_IOCTL_METEOR_H 0 +#define HAVE_MALLOC_H 0 +#define HAVE_OPENCV2_CORE_CORE_C_H 0 +#define HAVE_OPENJPEG_2_1_OPENJPEG_H 0 +#define HAVE_OPENJPEG_2_0_OPENJPEG_H 0 +#define HAVE_OPENJPEG_1_5_OPENJPEG_H 0 +#define HAVE_OPENGL_GL3_H 0 +#define HAVE_POLL_H 1 +#define HAVE_SNDIO_H 0 +#define HAVE_SOUNDCARD_H 0 +#define HAVE_STDATOMIC_H 1 +#define HAVE_SYS_MMAN_H 1 +#define HAVE_SYS_PARAM_H 1 +#define HAVE_SYS_RESOURCE_H 1 +#define HAVE_SYS_SELECT_H 1 +#define HAVE_SYS_SOUNDCARD_H 0 +#define HAVE_SYS_TIME_H 1 +#define HAVE_SYS_UN_H 1 +#define HAVE_SYS_VIDEOIO_H 0 +#define HAVE_TERMIOS_H 1 +#define HAVE_UDPLITE_H 0 +#define HAVE_UNISTD_H 1 +#define HAVE_VALGRIND_VALGRIND_H 0 +#define HAVE_WINDOWS_H 0 +#define HAVE_WINSOCK2_H 0 +#define HAVE_INTRINSICS_NEON 0 +#define HAVE_ATANF 1 +#define HAVE_ATAN2F 1 +#define HAVE_CBRT 1 +#define HAVE_CBRTF 1 +#define HAVE_COPYSIGN 1 +#define HAVE_COSF 1 +#define HAVE_ERF 1 +#define HAVE_EXP2 1 +#define HAVE_EXP2F 1 +#define HAVE_EXPF 1 +#define HAVE_HYPOT 1 +#define HAVE_ISFINITE 1 +#define HAVE_ISINF 1 +#define HAVE_ISNAN 1 +#define HAVE_LDEXPF 1 +#define HAVE_LLRINT 1 +#define HAVE_LLRINTF 1 +#define HAVE_LOG2 1 +#define HAVE_LOG2F 1 +#define HAVE_LOG10F 1 +#define HAVE_LRINT 1 +#define HAVE_LRINTF 1 +#define HAVE_POWF 1 +#define HAVE_RINT 1 +#define HAVE_ROUND 1 +#define HAVE_ROUNDF 1 +#define HAVE_SINF 1 +#define HAVE_TRUNC 1 +#define HAVE_TRUNCF 1 +#define HAVE_ACCESS 1 +#define HAVE_ALIGNED_MALLOC 0 +#define HAVE_ARC4RANDOM 1 +#define HAVE_CLOCK_GETTIME 1 +#define HAVE_CLOSESOCKET 0 +#define HAVE_COMMANDLINETOARGVW 0 +#define HAVE_COTASKMEMFREE 0 +#define HAVE_CRYPTGENRANDOM 0 +#define HAVE_DLOPEN 1 +#define HAVE_FCNTL 1 +#define HAVE_FLT_LIM 1 +#define HAVE_FORK 1 +#define HAVE_GETADDRINFO 1 +#define HAVE_GETHRTIME 0 +#define HAVE_GETOPT 1 +#define HAVE_GETPROCESSAFFINITYMASK 0 +#define 
HAVE_GETPROCESSMEMORYINFO 0 +#define HAVE_GETPROCESSTIMES 0 +#define HAVE_GETRUSAGE 1 +#define HAVE_GETSYSTEMTIMEASFILETIME 0 +#define HAVE_GETTIMEOFDAY 1 +#define HAVE_GLOB 1 +#define HAVE_GLXGETPROCADDRESS 0 +#define HAVE_GMTIME_R 1 +#define HAVE_INET_ATON 1 +#define HAVE_ISATTY 1 +#define HAVE_JACK_PORT_GET_LATENCY_RANGE 0 +#define HAVE_KBHIT 0 +#define HAVE_LOADLIBRARY 0 +#define HAVE_LOCALTIME_R 1 +#define HAVE_LSTAT 1 +#define HAVE_LZO1X_999_COMPRESS 0 +#define HAVE_MACH_ABSOLUTE_TIME 1 +#define HAVE_MAPVIEWOFFILE 0 +#define HAVE_MEMALIGN 0 +#define HAVE_MKSTEMP 1 +#define HAVE_MMAP 1 +#define HAVE_MPROTECT 1 +#define HAVE_NANOSLEEP 1 +#define HAVE_PEEKNAMEDPIPE 0 +#define HAVE_POSIX_MEMALIGN 1 +#define HAVE_PTHREAD_CANCEL 1 +#define HAVE_SCHED_GETAFFINITY 0 +#define HAVE_SETCONSOLETEXTATTRIBUTE 0 +#define HAVE_SETCONSOLECTRLHANDLER 0 +#define HAVE_SETMODE 0 +#define HAVE_SETRLIMIT 1 +#define HAVE_SLEEP 0 +#define HAVE_STRERROR_R 1 +#define HAVE_SYSCONF 1 +#define HAVE_SYSCTL 1 +#define HAVE_USLEEP 1 +#define HAVE_UTGETOSTYPEFROMSTRING 0 +#define HAVE_VIRTUALALLOC 0 +#define HAVE_WGLGETPROCADDRESS 0 +#define HAVE_PTHREADS 1 +#define HAVE_OS2THREADS 0 +#define HAVE_W32THREADS 0 +#define HAVE_AS_DN_DIRECTIVE 0 +#define HAVE_AS_FPU_DIRECTIVE 0 +#define HAVE_AS_FUNC 0 +#define HAVE_AS_OBJECT_ARCH 0 +#define HAVE_ASM_MOD_Q 1 +#define HAVE_ATTRIBUTE_MAY_ALIAS 1 +#define HAVE_ATTRIBUTE_PACKED 1 +#define HAVE_EBP_AVAILABLE 0 +#define HAVE_EBX_AVAILABLE 0 +#define HAVE_GNU_AS 0 +#define HAVE_GNU_WINDRES 0 +#define HAVE_IBM_ASM 0 +#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 0 +#define HAVE_INLINE_ASM_LABELS 1 +#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1 +#define HAVE_PRAGMA_DEPRECATED 1 +#define HAVE_RSYNC_CONTIMEOUT 0 +#define HAVE_SYMVER_ASM_LABEL 1 +#define HAVE_SYMVER_GNU_ASM 0 +#define HAVE_VFP_ARGS 0 +#define HAVE_XFORM_ASM 0 +#define HAVE_XMM_CLOBBERS 0 +#define HAVE_CONDITION_VARIABLE_PTR 0 +#define HAVE_SOCKLEN_T 1 +#define HAVE_STRUCT_ADDRINFO 1 +#define 
HAVE_STRUCT_GROUP_SOURCE_REQ 1 +#define HAVE_STRUCT_IP_MREQ_SOURCE 1 +#define HAVE_STRUCT_IPV6_MREQ 1 +#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 1 +#define HAVE_STRUCT_POLLFD 1 +#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1 +#define HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0 +#define HAVE_STRUCT_SOCKADDR_IN6 1 +#define HAVE_STRUCT_SOCKADDR_SA_LEN 1 +#define HAVE_STRUCT_SOCKADDR_STORAGE 1 +#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 0 +#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0 +#define HAVE_ATOMICS_NATIVE 1 +#define HAVE_DOS_PATHS 0 +#define HAVE_DXVA2_LIB 0 +#define HAVE_DXVA2API_COBJ 0 +#define HAVE_LIBC_MSVCRT 0 +#define HAVE_LIBDC1394_1 0 +#define HAVE_LIBDC1394_2 0 +#define HAVE_MAKEINFO 1 +#define HAVE_MAKEINFO_HTML 0 +#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0 +#define HAVE_PERL 1 +#define HAVE_POD2MAN 1 +#define HAVE_SDL2 0 +#define HAVE_SECTION_DATA_REL_RO 0 +#define HAVE_TEXI2HTML 0 +#define HAVE_THREADS 1 +#define HAVE_VAAPI_DRM 0 +#define HAVE_VAAPI_X11 0 +#define HAVE_VDPAU_X11 0 +#define HAVE_WINRT 0 +#define HAVE_XLIB 1 +#define CONFIG_BSFS 1 +#define CONFIG_DECODERS 1 +#define CONFIG_ENCODERS 1 +#define CONFIG_HWACCELS 0 +#define CONFIG_PARSERS 1 +#define CONFIG_INDEVS 0 +#define CONFIG_OUTDEVS 0 +#define CONFIG_FILTERS 0 +#define CONFIG_DEMUXERS 1 +#define CONFIG_MUXERS 1 +#define CONFIG_PROTOCOLS 1 +#define CONFIG_DOC 0 +#define CONFIG_HTMLPAGES 0 +#define CONFIG_MANPAGES 0 +#define CONFIG_PODPAGES 0 +#define CONFIG_TXTPAGES 0 +#define CONFIG_AVIO_DIR_CMD_EXAMPLE 1 +#define CONFIG_AVIO_READING_EXAMPLE 1 +#define CONFIG_DECODE_AUDIO_EXAMPLE 1 +#define CONFIG_DECODE_VIDEO_EXAMPLE 1 +#define CONFIG_DEMUXING_DECODING_EXAMPLE 1 +#define CONFIG_ENCODE_AUDIO_EXAMPLE 1 +#define CONFIG_ENCODE_VIDEO_EXAMPLE 1 +#define CONFIG_EXTRACT_MVS_EXAMPLE 1 +#define CONFIG_FILTER_AUDIO_EXAMPLE 1 +#define CONFIG_FILTERING_AUDIO_EXAMPLE 1 +#define CONFIG_FILTERING_VIDEO_EXAMPLE 1 +#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 1 +#define CONFIG_METADATA_EXAMPLE 1 +#define 
CONFIG_MUXING_EXAMPLE 1 +#define CONFIG_QSVDEC_EXAMPLE 0 +#define CONFIG_REMUXING_EXAMPLE 1 +#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 1 +#define CONFIG_SCALING_VIDEO_EXAMPLE 1 +#define CONFIG_TRANSCODE_AAC_EXAMPLE 1 +#define CONFIG_TRANSCODING_EXAMPLE 1 +#define CONFIG_BZLIB 0 +#define CONFIG_ICONV 0 +#define CONFIG_LIBXCB 0 +#define CONFIG_LIBXCB_SHM 0 +#define CONFIG_LIBXCB_SHAPE 0 +#define CONFIG_LIBXCB_XFIXES 0 +#define CONFIG_LZMA 0 +#define CONFIG_SCHANNEL 0 +#define CONFIG_SDL 0 +#define CONFIG_SDL2 0 +#define CONFIG_SECURETRANSPORT 0 +#define CONFIG_XLIB 1 +#define CONFIG_ZLIB 1 +#define CONFIG_AVISYNTH 0 +#define CONFIG_FREI0R 0 +#define CONFIG_LIBCDIO 0 +#define CONFIG_LIBRUBBERBAND 0 +#define CONFIG_LIBVIDSTAB 0 +#define CONFIG_LIBX264 0 +#define CONFIG_LIBX265 0 +#define CONFIG_LIBXAVS 0 +#define CONFIG_LIBXVID 0 +#define CONFIG_DECKLINK 0 +#define CONFIG_LIBFDK_AAC 0 +#define CONFIG_OPENSSL 1 +#define CONFIG_GMP 0 +#define CONFIG_LIBOPENCORE_AMRNB 0 +#define CONFIG_LIBOPENCORE_AMRWB 0 +#define CONFIG_LIBVO_AMRWBENC 0 +#define CONFIG_LIBSMBCLIENT 0 +#define CONFIG_CHROMAPRINT 0 +#define CONFIG_CRYSTALHD 0 +#define CONFIG_GCRYPT 0 +#define CONFIG_GNUTLS 0 +#define CONFIG_JNI 0 +#define CONFIG_LADSPA 0 +#define CONFIG_LIBASS 0 +#define CONFIG_LIBBLURAY 0 +#define CONFIG_LIBBS2B 0 +#define CONFIG_LIBCACA 0 +#define CONFIG_LIBCELT 0 +#define CONFIG_LIBDC1394 0 +#define CONFIG_LIBFLITE 0 +#define CONFIG_LIBFONTCONFIG 0 +#define CONFIG_LIBFREETYPE 0 +#define CONFIG_LIBFRIBIDI 0 +#define CONFIG_LIBGME 0 +#define CONFIG_LIBGSM 0 +#define CONFIG_LIBIEC61883 0 +#define CONFIG_LIBILBC 0 +#define CONFIG_LIBKVAZAAR 0 +#define CONFIG_LIBMODPLUG 0 +#define CONFIG_LIBMP3LAME 0 +#define CONFIG_LIBNUT 0 +#define CONFIG_LIBOPENCV 0 +#define CONFIG_LIBOPENH264 0 +#define CONFIG_LIBOPENJPEG 0 +#define CONFIG_LIBOPENMPT 0 +#define CONFIG_LIBOPUS 0 +#define CONFIG_LIBPULSE 0 +#define CONFIG_LIBRTMP 0 +#define CONFIG_LIBSCHROEDINGER 0 +#define CONFIG_LIBSHINE 0 +#define 
CONFIG_LIBSMBCLIENT 0 +#define CONFIG_LIBSNAPPY 0 +#define CONFIG_LIBSOXR 0 +#define CONFIG_LIBSPEEX 0 +#define CONFIG_LIBSSH 0 +#define CONFIG_LIBTESSERACT 0 +#define CONFIG_LIBTHEORA 0 +#define CONFIG_LIBTWOLAME 0 +#define CONFIG_LIBV4L2 0 +#define CONFIG_LIBVORBIS 0 +#define CONFIG_LIBVPX 0 +#define CONFIG_LIBWAVPACK 0 +#define CONFIG_LIBWEBP 0 +#define CONFIG_LIBZIMG 0 +#define CONFIG_LIBZMQ 0 +#define CONFIG_LIBZVBI 0 +#define CONFIG_MEDIACODEC 0 +#define CONFIG_NETCDF 0 +#define CONFIG_OPENAL 0 +#define CONFIG_OPENCL 0 +#define CONFIG_OPENGL 0 +#define CONFIG_VIDEOTOOLBOX 0 +#define CONFIG_AUDIOTOOLBOX 0 +#define CONFIG_CUDA 0 +#define CONFIG_CUVID 0 +#define CONFIG_D3D11VA 0 +#define CONFIG_DXVA2 0 +#define CONFIG_NVENC 0 +#define CONFIG_VAAPI 0 +#define CONFIG_VDA 0 +#define CONFIG_VDPAU 0 +#define CONFIG_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_XVMC 0 +#define CONFIG_LIBNPP 0 +#define CONFIG_LIBMFX 0 +#define CONFIG_MMAL 0 +#define CONFIG_OMX 0 +#define CONFIG_FTRAPV 0 +#define CONFIG_GRAY 0 +#define CONFIG_HARDCODED_TABLES 0 +#define CONFIG_OMX_RPI 0 +#define CONFIG_RUNTIME_CPUDETECT 1 +#define CONFIG_SAFE_BITSTREAM_READER 1 +#define CONFIG_SHARED 0 +#define CONFIG_SMALL 1 +#define CONFIG_STATIC 1 +#define CONFIG_SWSCALE_ALPHA 0 +#define CONFIG_GPL 0 +#define CONFIG_NONFREE 0 +#define CONFIG_VERSION3 0 +#define CONFIG_AVCODEC 1 +#define CONFIG_AVDEVICE 0 +#define CONFIG_AVFILTER 1 +#define CONFIG_AVFORMAT 1 +#define CONFIG_AVRESAMPLE 0 +#define CONFIG_AVUTIL 1 +#define CONFIG_POSTPROC 0 +#define CONFIG_SWRESAMPLE 1 +#define CONFIG_SWSCALE 1 +#define CONFIG_FFPLAY 0 +#define CONFIG_FFPROBE 0 +#define CONFIG_FFSERVER 0 +#define CONFIG_FFMPEG 0 +#define CONFIG_DCT 1 +#define CONFIG_DWT 0 +#define CONFIG_ERROR_RESILIENCE 1 +#define CONFIG_FAAN 1 +#define CONFIG_FAST_UNALIGNED 1 +#define CONFIG_FFT 1 +#define CONFIG_LSP 0 +#define CONFIG_LZO 0 +#define CONFIG_MDCT 1 +#define CONFIG_PIXELUTILS 0 +#define CONFIG_NETWORK 1 +#define CONFIG_RDFT 1 +#define 
CONFIG_FONTCONFIG 0 +#define CONFIG_MEMORY_POISONING 0 +#define CONFIG_NEON_CLOBBER_TEST 0 +#define CONFIG_PIC 1 +#define CONFIG_RAISE_MAJOR 0 +#define CONFIG_THUMB 1 +#define CONFIG_VALGRIND_BACKTRACE 0 +#define CONFIG_XMM_CLOBBER_TEST 0 +#define CONFIG_AANDCTTABLES 0 +#define CONFIG_AC3DSP 0 +#define CONFIG_AUDIO_FRAME_QUEUE 0 +#define CONFIG_AUDIODSP 0 +#define CONFIG_BLOCKDSP 1 +#define CONFIG_BSWAPDSP 0 +#define CONFIG_CABAC 1 +#define CONFIG_DIRAC_PARSE 0 +#define CONFIG_DVPROFILE 0 +#define CONFIG_EXIF 0 +#define CONFIG_FAANDCT 1 +#define CONFIG_FAANIDCT 1 +#define CONFIG_FDCTDSP 1 +#define CONFIG_FLACDSP 1 +#define CONFIG_FMTCONVERT 0 +#define CONFIG_FRAME_THREAD_ENCODER 1 +#define CONFIG_G722DSP 0 +#define CONFIG_GOLOMB 1 +#define CONFIG_GPLV3 0 +#define CONFIG_H263DSP 1 +#define CONFIG_H264CHROMA 1 +#define CONFIG_H264DSP 1 +#define CONFIG_H264PARSE 1 +#define CONFIG_H264PRED 1 +#define CONFIG_H264QPEL 1 +#define CONFIG_HPELDSP 1 +#define CONFIG_HUFFMAN 1 +#define CONFIG_HUFFYUVDSP 0 +#define CONFIG_HUFFYUVENCDSP 0 +#define CONFIG_IDCTDSP 1 +#define CONFIG_IIRFILTER 0 +#define CONFIG_MDCT15 1 +#define CONFIG_INTRAX8 0 +#define CONFIG_ISO_MEDIA 1 +#define CONFIG_IVIDSP 0 +#define CONFIG_JPEGTABLES 0 +#define CONFIG_LGPLV3 0 +#define CONFIG_LIBX262 0 +#define CONFIG_LLAUDDSP 0 +#define CONFIG_LLVIDDSP 0 +#define CONFIG_LLVIDENCDSP 1 +#define CONFIG_LPC 0 +#define CONFIG_LZF 0 +#define CONFIG_ME_CMP 1 +#define CONFIG_MPEG_ER 1 +#define CONFIG_MPEGAUDIO 1 +#define CONFIG_MPEGAUDIODSP 1 +#define CONFIG_MPEGVIDEO 1 +#define CONFIG_MPEGVIDEOENC 0 +#define CONFIG_MSS34DSP 0 +#define CONFIG_PIXBLOCKDSP 1 +#define CONFIG_QPELDSP 1 +#define CONFIG_QSV 0 +#define CONFIG_QSVDEC 0 +#define CONFIG_QSVENC 0 +#define CONFIG_RANGECODER 0 +#define CONFIG_RIFFDEC 1 +#define CONFIG_RIFFENC 1 +#define CONFIG_RTPDEC 0 +#define CONFIG_RTPENC_CHAIN 1 +#define CONFIG_RV34DSP 0 +#define CONFIG_SINEWIN 1 +#define CONFIG_SNAPPY 0 +#define CONFIG_SRTP 0 +#define CONFIG_STARTCODE 1 
+#define CONFIG_TEXTUREDSP 0 +#define CONFIG_TEXTUREDSPENC 0 +#define CONFIG_TPELDSP 0 +#define CONFIG_VAAPI_ENCODE 0 +#define CONFIG_VC1DSP 0 +#define CONFIG_VIDEODSP 1 +#define CONFIG_VP3DSP 1 +#define CONFIG_VP56DSP 1 +#define CONFIG_VP8DSP 0 +#define CONFIG_VT_BT2020 0 +#define CONFIG_WMA_FREQS 0 +#define CONFIG_WMV2DSP 0 +#define CONFIG_AAC_ADTSTOASC_BSF 1 +#define CONFIG_CHOMP_BSF 0 +#define CONFIG_DUMP_EXTRADATA_BSF 0 +#define CONFIG_DCA_CORE_BSF 0 +#define CONFIG_EXTRACT_EXTRADATA_BSF 1 +#define CONFIG_H264_MP4TOANNEXB_BSF 1 +#define CONFIG_HEVC_MP4TOANNEXB_BSF 0 +#define CONFIG_IMX_DUMP_HEADER_BSF 0 +#define CONFIG_MJPEG2JPEG_BSF 0 +#define CONFIG_MJPEGA_DUMP_HEADER_BSF 0 +#define CONFIG_MP3_HEADER_DECOMPRESS_BSF 0 +#define CONFIG_MPEG4_UNPACK_BFRAMES_BSF 0 +#define CONFIG_MOV2TEXTSUB_BSF 0 +#define CONFIG_NOISE_BSF 0 +#define CONFIG_REMOVE_EXTRADATA_BSF 0 +#define CONFIG_TEXT2MOVSUB_BSF 0 +#define CONFIG_VP9_SUPERFRAME_BSF 0 +#define CONFIG_AASC_DECODER 0 +#define CONFIG_AIC_DECODER 0 +#define CONFIG_ALIAS_PIX_DECODER 0 +#define CONFIG_AMV_DECODER 0 +#define CONFIG_ANM_DECODER 0 +#define CONFIG_ANSI_DECODER 0 +#define CONFIG_APNG_DECODER 0 +#define CONFIG_ASV1_DECODER 0 +#define CONFIG_ASV2_DECODER 0 +#define CONFIG_AURA_DECODER 0 +#define CONFIG_AURA2_DECODER 0 +#define CONFIG_AVRP_DECODER 0 +#define CONFIG_AVRN_DECODER 0 +#define CONFIG_AVS_DECODER 0 +#define CONFIG_AVUI_DECODER 0 +#define CONFIG_AYUV_DECODER 0 +#define CONFIG_BETHSOFTVID_DECODER 0 +#define CONFIG_BFI_DECODER 0 +#define CONFIG_BINK_DECODER 0 +#define CONFIG_BMP_DECODER 0 +#define CONFIG_BMV_VIDEO_DECODER 0 +#define CONFIG_BRENDER_PIX_DECODER 0 +#define CONFIG_C93_DECODER 0 +#define CONFIG_CAVS_DECODER 0 +#define CONFIG_CDGRAPHICS_DECODER 0 +#define CONFIG_CDXL_DECODER 0 +#define CONFIG_CFHD_DECODER 0 +#define CONFIG_CINEPAK_DECODER 0 +#define CONFIG_CLEARVIDEO_DECODER 0 +#define CONFIG_CLJR_DECODER 0 +#define CONFIG_CLLC_DECODER 0 +#define CONFIG_COMFORTNOISE_DECODER 0 +#define 
CONFIG_CPIA_DECODER 0 +#define CONFIG_CSCD_DECODER 0 +#define CONFIG_CYUV_DECODER 0 +#define CONFIG_DDS_DECODER 0 +#define CONFIG_DFA_DECODER 0 +#define CONFIG_DIRAC_DECODER 0 +#define CONFIG_DNXHD_DECODER 0 +#define CONFIG_DPX_DECODER 0 +#define CONFIG_DSICINVIDEO_DECODER 0 +#define CONFIG_DVAUDIO_DECODER 0 +#define CONFIG_DVVIDEO_DECODER 0 +#define CONFIG_DXA_DECODER 0 +#define CONFIG_DXTORY_DECODER 0 +#define CONFIG_DXV_DECODER 0 +#define CONFIG_EACMV_DECODER 0 +#define CONFIG_EAMAD_DECODER 0 +#define CONFIG_EATGQ_DECODER 0 +#define CONFIG_EATGV_DECODER 0 +#define CONFIG_EATQI_DECODER 0 +#define CONFIG_EIGHTBPS_DECODER 0 +#define CONFIG_EIGHTSVX_EXP_DECODER 0 +#define CONFIG_EIGHTSVX_FIB_DECODER 0 +#define CONFIG_ESCAPE124_DECODER 0 +#define CONFIG_ESCAPE130_DECODER 0 +#define CONFIG_EXR_DECODER 0 +#define CONFIG_FFV1_DECODER 0 +#define CONFIG_FFVHUFF_DECODER 0 +#define CONFIG_FIC_DECODER 0 +#define CONFIG_FLASHSV_DECODER 0 +#define CONFIG_FLASHSV2_DECODER 0 +#define CONFIG_FLIC_DECODER 0 +#define CONFIG_FLV_DECODER 1 +#define CONFIG_FMVC_DECODER 0 +#define CONFIG_FOURXM_DECODER 0 +#define CONFIG_FRAPS_DECODER 0 +#define CONFIG_FRWU_DECODER 0 +#define CONFIG_G2M_DECODER 0 +#define CONFIG_GIF_DECODER 0 +#define CONFIG_H261_DECODER 0 +#define CONFIG_H263_DECODER 1 +#define CONFIG_H263I_DECODER 0 +#define CONFIG_H263P_DECODER 0 +#define CONFIG_H264_DECODER 1 +#define CONFIG_H264_CRYSTALHD_DECODER 0 +#define CONFIG_H264_MEDIACODEC_DECODER 0 +#define CONFIG_H264_MMAL_DECODER 0 +#define CONFIG_H264_QSV_DECODER 0 +#define CONFIG_H264_VDA_DECODER 0 +#define CONFIG_H264_VDPAU_DECODER 0 +#define CONFIG_HAP_DECODER 0 +#define CONFIG_HEVC_DECODER 0 +#define CONFIG_HEVC_QSV_DECODER 0 +#define CONFIG_HNM4_VIDEO_DECODER 0 +#define CONFIG_HQ_HQA_DECODER 0 +#define CONFIG_HQX_DECODER 0 +#define CONFIG_HUFFYUV_DECODER 0 +#define CONFIG_IDCIN_DECODER 0 +#define CONFIG_IFF_ILBM_DECODER 0 +#define CONFIG_INDEO2_DECODER 0 +#define CONFIG_INDEO3_DECODER 0 +#define 
CONFIG_INDEO4_DECODER 0 +#define CONFIG_INDEO5_DECODER 0 +#define CONFIG_INTERPLAY_VIDEO_DECODER 0 +#define CONFIG_JPEG2000_DECODER 0 +#define CONFIG_JPEGLS_DECODER 0 +#define CONFIG_JV_DECODER 0 +#define CONFIG_KGV1_DECODER 0 +#define CONFIG_KMVC_DECODER 0 +#define CONFIG_LAGARITH_DECODER 0 +#define CONFIG_LOCO_DECODER 0 +#define CONFIG_M101_DECODER 0 +#define CONFIG_MAGICYUV_DECODER 0 +#define CONFIG_MDEC_DECODER 0 +#define CONFIG_MIMIC_DECODER 0 +#define CONFIG_MJPEG_DECODER 0 +#define CONFIG_MJPEGB_DECODER 0 +#define CONFIG_MMVIDEO_DECODER 0 +#define CONFIG_MOTIONPIXELS_DECODER 0 +#define CONFIG_MPEG_XVMC_DECODER 0 +#define CONFIG_MPEG1VIDEO_DECODER 0 +#define CONFIG_MPEG2VIDEO_DECODER 0 +#define CONFIG_MPEG4_DECODER 1 +#define CONFIG_MPEG4_CRYSTALHD_DECODER 0 +#define CONFIG_MPEG4_MMAL_DECODER 0 +#define CONFIG_MPEG4_VDPAU_DECODER 0 +#define CONFIG_MPEGVIDEO_DECODER 0 +#define CONFIG_MPEG_VDPAU_DECODER 0 +#define CONFIG_MPEG1_VDPAU_DECODER 0 +#define CONFIG_MPEG2_MMAL_DECODER 0 +#define CONFIG_MPEG2_CRYSTALHD_DECODER 0 +#define CONFIG_MPEG2_QSV_DECODER 0 +#define CONFIG_MSA1_DECODER 0 +#define CONFIG_MSMPEG4V1_DECODER 0 +#define CONFIG_MSMPEG4V2_DECODER 0 +#define CONFIG_MSMPEG4V3_DECODER 0 +#define CONFIG_MSMPEG4_CRYSTALHD_DECODER 0 +#define CONFIG_MSRLE_DECODER 0 +#define CONFIG_MSS1_DECODER 0 +#define CONFIG_MSS2_DECODER 0 +#define CONFIG_MSVIDEO1_DECODER 0 +#define CONFIG_MSZH_DECODER 0 +#define CONFIG_MTS2_DECODER 0 +#define CONFIG_MVC1_DECODER 0 +#define CONFIG_MVC2_DECODER 0 +#define CONFIG_MXPEG_DECODER 0 +#define CONFIG_NUV_DECODER 0 +#define CONFIG_PAF_VIDEO_DECODER 0 +#define CONFIG_PAM_DECODER 0 +#define CONFIG_PBM_DECODER 0 +#define CONFIG_PCX_DECODER 0 +#define CONFIG_PGM_DECODER 0 +#define CONFIG_PGMYUV_DECODER 0 +#define CONFIG_PICTOR_DECODER 0 +#define CONFIG_PIXLET_DECODER 0 +#define CONFIG_PNG_DECODER 0 +#define CONFIG_PPM_DECODER 0 +#define CONFIG_PRORES_DECODER 0 +#define CONFIG_PRORES_LGPL_DECODER 0 +#define CONFIG_PSD_DECODER 0 +#define 
CONFIG_PTX_DECODER 0 +#define CONFIG_QDRAW_DECODER 0 +#define CONFIG_QPEG_DECODER 0 +#define CONFIG_QTRLE_DECODER 0 +#define CONFIG_R10K_DECODER 0 +#define CONFIG_R210_DECODER 0 +#define CONFIG_RAWVIDEO_DECODER 0 +#define CONFIG_RL2_DECODER 0 +#define CONFIG_ROQ_DECODER 0 +#define CONFIG_RPZA_DECODER 0 +#define CONFIG_RSCC_DECODER 0 +#define CONFIG_RV10_DECODER 0 +#define CONFIG_RV20_DECODER 0 +#define CONFIG_RV30_DECODER 0 +#define CONFIG_RV40_DECODER 0 +#define CONFIG_S302M_DECODER 0 +#define CONFIG_SANM_DECODER 0 +#define CONFIG_SCPR_DECODER 0 +#define CONFIG_SCREENPRESSO_DECODER 0 +#define CONFIG_SDX2_DPCM_DECODER 0 +#define CONFIG_SGI_DECODER 0 +#define CONFIG_SGIRLE_DECODER 0 +#define CONFIG_SHEERVIDEO_DECODER 0 +#define CONFIG_SMACKER_DECODER 0 +#define CONFIG_SMC_DECODER 0 +#define CONFIG_SMVJPEG_DECODER 0 +#define CONFIG_SNOW_DECODER 0 +#define CONFIG_SP5X_DECODER 0 +#define CONFIG_SPEEDHQ_DECODER 0 +#define CONFIG_SUNRAST_DECODER 0 +#define CONFIG_SVQ1_DECODER 0 +#define CONFIG_SVQ3_DECODER 0 +#define CONFIG_TARGA_DECODER 0 +#define CONFIG_TARGA_Y216_DECODER 0 +#define CONFIG_TDSC_DECODER 0 +#define CONFIG_THEORA_DECODER 0 +#define CONFIG_THP_DECODER 0 +#define CONFIG_TIERTEXSEQVIDEO_DECODER 0 +#define CONFIG_TIFF_DECODER 0 +#define CONFIG_TMV_DECODER 0 +#define CONFIG_TRUEMOTION1_DECODER 0 +#define CONFIG_TRUEMOTION2_DECODER 0 +#define CONFIG_TRUEMOTION2RT_DECODER 0 +#define CONFIG_TSCC_DECODER 0 +#define CONFIG_TSCC2_DECODER 0 +#define CONFIG_TXD_DECODER 0 +#define CONFIG_ULTI_DECODER 0 +#define CONFIG_UTVIDEO_DECODER 0 +#define CONFIG_V210_DECODER 0 +#define CONFIG_V210X_DECODER 0 +#define CONFIG_V308_DECODER 0 +#define CONFIG_V408_DECODER 0 +#define CONFIG_V410_DECODER 0 +#define CONFIG_VB_DECODER 0 +#define CONFIG_VBLE_DECODER 0 +#define CONFIG_VC1_DECODER 0 +#define CONFIG_VC1_CRYSTALHD_DECODER 0 +#define CONFIG_VC1_VDPAU_DECODER 0 +#define CONFIG_VC1IMAGE_DECODER 0 +#define CONFIG_VC1_MMAL_DECODER 0 +#define CONFIG_VC1_QSV_DECODER 0 +#define 
CONFIG_VCR1_DECODER 0 +#define CONFIG_VMDVIDEO_DECODER 0 +#define CONFIG_VMNC_DECODER 0 +#define CONFIG_VP3_DECODER 0 +#define CONFIG_VP5_DECODER 0 +#define CONFIG_VP6_DECODER 1 +#define CONFIG_VP6A_DECODER 0 +#define CONFIG_VP6F_DECODER 1 +#define CONFIG_VP7_DECODER 0 +#define CONFIG_VP8_DECODER 0 +#define CONFIG_VP9_DECODER 0 +#define CONFIG_VQA_DECODER 0 +#define CONFIG_WEBP_DECODER 0 +#define CONFIG_WMV1_DECODER 0 +#define CONFIG_WMV2_DECODER 0 +#define CONFIG_WMV3_DECODER 0 +#define CONFIG_WMV3_CRYSTALHD_DECODER 0 +#define CONFIG_WMV3_VDPAU_DECODER 0 +#define CONFIG_WMV3IMAGE_DECODER 0 +#define CONFIG_WNV1_DECODER 0 +#define CONFIG_XAN_WC3_DECODER 0 +#define CONFIG_XAN_WC4_DECODER 0 +#define CONFIG_XBM_DECODER 0 +#define CONFIG_XFACE_DECODER 0 +#define CONFIG_XL_DECODER 0 +#define CONFIG_XPM_DECODER 0 +#define CONFIG_XWD_DECODER 0 +#define CONFIG_Y41P_DECODER 0 +#define CONFIG_YLC_DECODER 0 +#define CONFIG_YOP_DECODER 0 +#define CONFIG_YUV4_DECODER 0 +#define CONFIG_ZERO12V_DECODER 0 +#define CONFIG_ZEROCODEC_DECODER 0 +#define CONFIG_ZLIB_DECODER 0 +#define CONFIG_ZMBV_DECODER 0 +#define CONFIG_AAC_DECODER 1 +#define CONFIG_AAC_FIXED_DECODER 0 +#define CONFIG_AAC_LATM_DECODER 1 +#define CONFIG_AC3_DECODER 0 +#define CONFIG_AC3_FIXED_DECODER 0 +#define CONFIG_ALAC_DECODER 0 +#define CONFIG_ALS_DECODER 0 +#define CONFIG_AMRNB_DECODER 0 +#define CONFIG_AMRWB_DECODER 0 +#define CONFIG_APE_DECODER 0 +#define CONFIG_ATRAC1_DECODER 0 +#define CONFIG_ATRAC3_DECODER 0 +#define CONFIG_ATRAC3AL_DECODER 0 +#define CONFIG_ATRAC3P_DECODER 0 +#define CONFIG_ATRAC3PAL_DECODER 0 +#define CONFIG_BINKAUDIO_DCT_DECODER 0 +#define CONFIG_BINKAUDIO_RDFT_DECODER 0 +#define CONFIG_BMV_AUDIO_DECODER 0 +#define CONFIG_COOK_DECODER 0 +#define CONFIG_DCA_DECODER 0 +#define CONFIG_DSD_LSBF_DECODER 0 +#define CONFIG_DSD_MSBF_DECODER 0 +#define CONFIG_DSD_LSBF_PLANAR_DECODER 0 +#define CONFIG_DSD_MSBF_PLANAR_DECODER 0 +#define CONFIG_DSICINAUDIO_DECODER 0 +#define CONFIG_DSS_SP_DECODER 0 
+#define CONFIG_DST_DECODER 0 +#define CONFIG_EAC3_DECODER 0 +#define CONFIG_EVRC_DECODER 0 +#define CONFIG_FFWAVESYNTH_DECODER 0 +#define CONFIG_FLAC_DECODER 1 +#define CONFIG_G723_1_DECODER 0 +#define CONFIG_G729_DECODER 0 +#define CONFIG_GSM_DECODER 0 +#define CONFIG_GSM_MS_DECODER 0 +#define CONFIG_IAC_DECODER 0 +#define CONFIG_IMC_DECODER 0 +#define CONFIG_INTERPLAY_ACM_DECODER 0 +#define CONFIG_MACE3_DECODER 0 +#define CONFIG_MACE6_DECODER 0 +#define CONFIG_METASOUND_DECODER 0 +#define CONFIG_MLP_DECODER 0 +#define CONFIG_MP1_DECODER 0 +#define CONFIG_MP1FLOAT_DECODER 0 +#define CONFIG_MP2_DECODER 0 +#define CONFIG_MP2FLOAT_DECODER 0 +#define CONFIG_MP3_DECODER 1 +#define CONFIG_MP3FLOAT_DECODER 1 +#define CONFIG_MP3ADU_DECODER 1 +#define CONFIG_MP3ADUFLOAT_DECODER 1 +#define CONFIG_MP3ON4_DECODER 1 +#define CONFIG_MP3ON4FLOAT_DECODER 1 +#define CONFIG_MPC7_DECODER 0 +#define CONFIG_MPC8_DECODER 0 +#define CONFIG_NELLYMOSER_DECODER 0 +#define CONFIG_ON2AVC_DECODER 0 +#define CONFIG_OPUS_DECODER 0 +#define CONFIG_PAF_AUDIO_DECODER 0 +#define CONFIG_QCELP_DECODER 0 +#define CONFIG_QDM2_DECODER 0 +#define CONFIG_QDMC_DECODER 0 +#define CONFIG_RA_144_DECODER 0 +#define CONFIG_RA_288_DECODER 0 +#define CONFIG_RALF_DECODER 0 +#define CONFIG_SHORTEN_DECODER 0 +#define CONFIG_SIPR_DECODER 0 +#define CONFIG_SMACKAUD_DECODER 0 +#define CONFIG_SONIC_DECODER 0 +#define CONFIG_TAK_DECODER 0 +#define CONFIG_TRUEHD_DECODER 0 +#define CONFIG_TRUESPEECH_DECODER 0 +#define CONFIG_TTA_DECODER 0 +#define CONFIG_TWINVQ_DECODER 0 +#define CONFIG_VMDAUDIO_DECODER 0 +#define CONFIG_VORBIS_DECODER 0 +#define CONFIG_WAVPACK_DECODER 1 +#define CONFIG_WMALOSSLESS_DECODER 0 +#define CONFIG_WMAPRO_DECODER 0 +#define CONFIG_WMAV1_DECODER 0 +#define CONFIG_WMAV2_DECODER 0 +#define CONFIG_WMAVOICE_DECODER 0 +#define CONFIG_WS_SND1_DECODER 0 +#define CONFIG_XMA1_DECODER 0 +#define CONFIG_XMA2_DECODER 0 +#define CONFIG_PCM_ALAW_DECODER 0 +#define CONFIG_PCM_BLURAY_DECODER 0 +#define 
CONFIG_PCM_DVD_DECODER 0 +#define CONFIG_PCM_F16LE_DECODER 0 +#define CONFIG_PCM_F24LE_DECODER 0 +#define CONFIG_PCM_F32BE_DECODER 0 +#define CONFIG_PCM_F32LE_DECODER 0 +#define CONFIG_PCM_F64BE_DECODER 0 +#define CONFIG_PCM_F64LE_DECODER 0 +#define CONFIG_PCM_LXF_DECODER 0 +#define CONFIG_PCM_MULAW_DECODER 0 +#define CONFIG_PCM_S8_DECODER 0 +#define CONFIG_PCM_S8_PLANAR_DECODER 0 +#define CONFIG_PCM_S16BE_DECODER 0 +#define CONFIG_PCM_S16BE_PLANAR_DECODER 0 +#define CONFIG_PCM_S16LE_DECODER 1 +#define CONFIG_PCM_S16LE_PLANAR_DECODER 0 +#define CONFIG_PCM_S24BE_DECODER 0 +#define CONFIG_PCM_S24DAUD_DECODER 0 +#define CONFIG_PCM_S24LE_DECODER 0 +#define CONFIG_PCM_S24LE_PLANAR_DECODER 0 +#define CONFIG_PCM_S32BE_DECODER 0 +#define CONFIG_PCM_S32LE_DECODER 0 +#define CONFIG_PCM_S32LE_PLANAR_DECODER 0 +#define CONFIG_PCM_S64BE_DECODER 0 +#define CONFIG_PCM_S64LE_DECODER 0 +#define CONFIG_PCM_U8_DECODER 0 +#define CONFIG_PCM_U16BE_DECODER 0 +#define CONFIG_PCM_U16LE_DECODER 0 +#define CONFIG_PCM_U24BE_DECODER 0 +#define CONFIG_PCM_U24LE_DECODER 0 +#define CONFIG_PCM_U32BE_DECODER 0 +#define CONFIG_PCM_U32LE_DECODER 0 +#define CONFIG_PCM_ZORK_DECODER 0 +#define CONFIG_INTERPLAY_DPCM_DECODER 0 +#define CONFIG_ROQ_DPCM_DECODER 0 +#define CONFIG_SOL_DPCM_DECODER 0 +#define CONFIG_XAN_DPCM_DECODER 0 +#define CONFIG_ADPCM_4XM_DECODER 0 +#define CONFIG_ADPCM_ADX_DECODER 0 +#define CONFIG_ADPCM_AFC_DECODER 0 +#define CONFIG_ADPCM_AICA_DECODER 0 +#define CONFIG_ADPCM_CT_DECODER 0 +#define CONFIG_ADPCM_DTK_DECODER 0 +#define CONFIG_ADPCM_EA_DECODER 0 +#define CONFIG_ADPCM_EA_MAXIS_XA_DECODER 0 +#define CONFIG_ADPCM_EA_R1_DECODER 0 +#define CONFIG_ADPCM_EA_R2_DECODER 0 +#define CONFIG_ADPCM_EA_R3_DECODER 0 +#define CONFIG_ADPCM_EA_XAS_DECODER 0 +#define CONFIG_ADPCM_G722_DECODER 0 +#define CONFIG_ADPCM_G726_DECODER 0 +#define CONFIG_ADPCM_G726LE_DECODER 0 +#define CONFIG_ADPCM_IMA_AMV_DECODER 0 +#define CONFIG_ADPCM_IMA_APC_DECODER 0 +#define CONFIG_ADPCM_IMA_DAT4_DECODER 0 
+#define CONFIG_ADPCM_IMA_DK3_DECODER 0 +#define CONFIG_ADPCM_IMA_DK4_DECODER 0 +#define CONFIG_ADPCM_IMA_EA_EACS_DECODER 0 +#define CONFIG_ADPCM_IMA_EA_SEAD_DECODER 0 +#define CONFIG_ADPCM_IMA_ISS_DECODER 0 +#define CONFIG_ADPCM_IMA_OKI_DECODER 0 +#define CONFIG_ADPCM_IMA_QT_DECODER 0 +#define CONFIG_ADPCM_IMA_RAD_DECODER 0 +#define CONFIG_ADPCM_IMA_SMJPEG_DECODER 0 +#define CONFIG_ADPCM_IMA_WAV_DECODER 0 +#define CONFIG_ADPCM_IMA_WS_DECODER 0 +#define CONFIG_ADPCM_MS_DECODER 0 +#define CONFIG_ADPCM_MTAF_DECODER 0 +#define CONFIG_ADPCM_PSX_DECODER 0 +#define CONFIG_ADPCM_SBPRO_2_DECODER 0 +#define CONFIG_ADPCM_SBPRO_3_DECODER 0 +#define CONFIG_ADPCM_SBPRO_4_DECODER 0 +#define CONFIG_ADPCM_SWF_DECODER 0 +#define CONFIG_ADPCM_THP_DECODER 0 +#define CONFIG_ADPCM_THP_LE_DECODER 0 +#define CONFIG_ADPCM_VIMA_DECODER 0 +#define CONFIG_ADPCM_XA_DECODER 0 +#define CONFIG_ADPCM_YAMAHA_DECODER 0 +#define CONFIG_SSA_DECODER 0 +#define CONFIG_ASS_DECODER 0 +#define CONFIG_CCAPTION_DECODER 0 +#define CONFIG_DVBSUB_DECODER 0 +#define CONFIG_DVDSUB_DECODER 0 +#define CONFIG_JACOSUB_DECODER 0 +#define CONFIG_MICRODVD_DECODER 0 +#define CONFIG_MOVTEXT_DECODER 0 +#define CONFIG_MPL2_DECODER 0 +#define CONFIG_PGSSUB_DECODER 0 +#define CONFIG_PJS_DECODER 0 +#define CONFIG_REALTEXT_DECODER 0 +#define CONFIG_SAMI_DECODER 0 +#define CONFIG_SRT_DECODER 0 +#define CONFIG_STL_DECODER 0 +#define CONFIG_SUBRIP_DECODER 0 +#define CONFIG_SUBVIEWER_DECODER 0 +#define CONFIG_SUBVIEWER1_DECODER 0 +#define CONFIG_TEXT_DECODER 0 +#define CONFIG_VPLAYER_DECODER 0 +#define CONFIG_WEBVTT_DECODER 0 +#define CONFIG_XSUB_DECODER 0 +#define CONFIG_AAC_AT_DECODER 0 +#define CONFIG_AC3_AT_DECODER 0 +#define CONFIG_ADPCM_IMA_QT_AT_DECODER 0 +#define CONFIG_ALAC_AT_DECODER 0 +#define CONFIG_AMR_NB_AT_DECODER 0 +#define CONFIG_EAC3_AT_DECODER 0 +#define CONFIG_GSM_MS_AT_DECODER 0 +#define CONFIG_ILBC_AT_DECODER 0 +#define CONFIG_MP1_AT_DECODER 0 +#define CONFIG_MP2_AT_DECODER 0 +#define CONFIG_MP3_AT_DECODER 0 
+#define CONFIG_PCM_ALAW_AT_DECODER 0 +#define CONFIG_PCM_MULAW_AT_DECODER 0 +#define CONFIG_QDMC_AT_DECODER 0 +#define CONFIG_QDM2_AT_DECODER 0 +#define CONFIG_LIBCELT_DECODER 0 +#define CONFIG_LIBFDK_AAC_DECODER 0 +#define CONFIG_LIBGSM_DECODER 0 +#define CONFIG_LIBGSM_MS_DECODER 0 +#define CONFIG_LIBILBC_DECODER 0 +#define CONFIG_LIBOPENCORE_AMRNB_DECODER 0 +#define CONFIG_LIBOPENCORE_AMRWB_DECODER 0 +#define CONFIG_LIBOPENJPEG_DECODER 0 +#define CONFIG_LIBOPUS_DECODER 0 +#define CONFIG_LIBSCHROEDINGER_DECODER 0 +#define CONFIG_LIBSPEEX_DECODER 0 +#define CONFIG_LIBVORBIS_DECODER 0 +#define CONFIG_LIBVPX_VP8_DECODER 0 +#define CONFIG_LIBVPX_VP9_DECODER 0 +#define CONFIG_LIBZVBI_TELETEXT_DECODER 0 +#define CONFIG_BINTEXT_DECODER 0 +#define CONFIG_XBIN_DECODER 0 +#define CONFIG_IDF_DECODER 0 +#define CONFIG_LIBOPENH264_DECODER 0 +#define CONFIG_H264_CUVID_DECODER 0 +#define CONFIG_HEVC_CUVID_DECODER 0 +#define CONFIG_HEVC_MEDIACODEC_DECODER 0 +#define CONFIG_MJPEG_CUVID_DECODER 0 +#define CONFIG_MPEG1_CUVID_DECODER 0 +#define CONFIG_MPEG2_CUVID_DECODER 0 +#define CONFIG_MPEG4_CUVID_DECODER 0 +#define CONFIG_MPEG4_MEDIACODEC_DECODER 0 +#define CONFIG_VC1_CUVID_DECODER 0 +#define CONFIG_VP8_CUVID_DECODER 0 +#define CONFIG_VP8_MEDIACODEC_DECODER 0 +#define CONFIG_VP8_QSV_DECODER 0 +#define CONFIG_VP9_CUVID_DECODER 0 +#define CONFIG_VP9_MEDIACODEC_DECODER 0 +#define CONFIG_AA_DEMUXER 0 +#define CONFIG_AAC_DEMUXER 1 +#define CONFIG_AC3_DEMUXER 0 +#define CONFIG_ACM_DEMUXER 0 +#define CONFIG_ACT_DEMUXER 0 +#define CONFIG_ADF_DEMUXER 0 +#define CONFIG_ADP_DEMUXER 0 +#define CONFIG_ADS_DEMUXER 0 +#define CONFIG_ADX_DEMUXER 0 +#define CONFIG_AEA_DEMUXER 0 +#define CONFIG_AFC_DEMUXER 0 +#define CONFIG_AIFF_DEMUXER 0 +#define CONFIG_AIX_DEMUXER 0 +#define CONFIG_AMR_DEMUXER 0 +#define CONFIG_ANM_DEMUXER 0 +#define CONFIG_APC_DEMUXER 0 +#define CONFIG_APE_DEMUXER 0 +#define CONFIG_APNG_DEMUXER 0 +#define CONFIG_AQTITLE_DEMUXER 0 +#define CONFIG_ASF_DEMUXER 0 +#define 
CONFIG_ASF_O_DEMUXER 0 +#define CONFIG_ASS_DEMUXER 0 +#define CONFIG_AST_DEMUXER 0 +#define CONFIG_AU_DEMUXER 0 +#define CONFIG_AVI_DEMUXER 0 +#define CONFIG_AVISYNTH_DEMUXER 0 +#define CONFIG_AVR_DEMUXER 0 +#define CONFIG_AVS_DEMUXER 0 +#define CONFIG_BETHSOFTVID_DEMUXER 0 +#define CONFIG_BFI_DEMUXER 0 +#define CONFIG_BINTEXT_DEMUXER 0 +#define CONFIG_BINK_DEMUXER 0 +#define CONFIG_BIT_DEMUXER 0 +#define CONFIG_BMV_DEMUXER 0 +#define CONFIG_BFSTM_DEMUXER 0 +#define CONFIG_BRSTM_DEMUXER 0 +#define CONFIG_BOA_DEMUXER 0 +#define CONFIG_C93_DEMUXER 0 +#define CONFIG_CAF_DEMUXER 0 +#define CONFIG_CAVSVIDEO_DEMUXER 0 +#define CONFIG_CDG_DEMUXER 0 +#define CONFIG_CDXL_DEMUXER 0 +#define CONFIG_CINE_DEMUXER 0 +#define CONFIG_CONCAT_DEMUXER 1 +#define CONFIG_DATA_DEMUXER 1 +#define CONFIG_DAUD_DEMUXER 0 +#define CONFIG_DCSTR_DEMUXER 0 +#define CONFIG_DFA_DEMUXER 0 +#define CONFIG_DIRAC_DEMUXER 0 +#define CONFIG_DNXHD_DEMUXER 0 +#define CONFIG_DSF_DEMUXER 0 +#define CONFIG_DSICIN_DEMUXER 0 +#define CONFIG_DSS_DEMUXER 0 +#define CONFIG_DTS_DEMUXER 0 +#define CONFIG_DTSHD_DEMUXER 0 +#define CONFIG_DV_DEMUXER 0 +#define CONFIG_DVBSUB_DEMUXER 0 +#define CONFIG_DVBTXT_DEMUXER 0 +#define CONFIG_DXA_DEMUXER 0 +#define CONFIG_EA_DEMUXER 0 +#define CONFIG_EA_CDATA_DEMUXER 0 +#define CONFIG_EAC3_DEMUXER 0 +#define CONFIG_EPAF_DEMUXER 0 +#define CONFIG_FFM_DEMUXER 0 +#define CONFIG_FFMETADATA_DEMUXER 0 +#define CONFIG_FILMSTRIP_DEMUXER 0 +#define CONFIG_FLAC_DEMUXER 1 +#define CONFIG_FLIC_DEMUXER 0 +#define CONFIG_FLV_DEMUXER 1 +#define CONFIG_LIVE_FLV_DEMUXER 1 +#define CONFIG_FOURXM_DEMUXER 0 +#define CONFIG_FRM_DEMUXER 0 +#define CONFIG_FSB_DEMUXER 0 +#define CONFIG_G722_DEMUXER 0 +#define CONFIG_G723_1_DEMUXER 0 +#define CONFIG_G729_DEMUXER 0 +#define CONFIG_GENH_DEMUXER 0 +#define CONFIG_GIF_DEMUXER 0 +#define CONFIG_GSM_DEMUXER 0 +#define CONFIG_GXF_DEMUXER 0 +#define CONFIG_H261_DEMUXER 0 +#define CONFIG_H263_DEMUXER 0 +#define CONFIG_H264_DEMUXER 0 +#define CONFIG_HEVC_DEMUXER 
1 +#define CONFIG_HLS_DEMUXER 1 +#define CONFIG_HNM_DEMUXER 0 +#define CONFIG_ICO_DEMUXER 0 +#define CONFIG_IDCIN_DEMUXER 0 +#define CONFIG_IDF_DEMUXER 0 +#define CONFIG_IFF_DEMUXER 0 +#define CONFIG_ILBC_DEMUXER 0 +#define CONFIG_IMAGE2_DEMUXER 0 +#define CONFIG_IMAGE2PIPE_DEMUXER 0 +#define CONFIG_IMAGE2_ALIAS_PIX_DEMUXER 0 +#define CONFIG_IMAGE2_BRENDER_PIX_DEMUXER 0 +#define CONFIG_INGENIENT_DEMUXER 0 +#define CONFIG_IPMOVIE_DEMUXER 0 +#define CONFIG_IRCAM_DEMUXER 0 +#define CONFIG_ISS_DEMUXER 0 +#define CONFIG_IV8_DEMUXER 0 +#define CONFIG_IVF_DEMUXER 0 +#define CONFIG_IVR_DEMUXER 0 +#define CONFIG_JACOSUB_DEMUXER 0 +#define CONFIG_JV_DEMUXER 0 +#define CONFIG_LMLM4_DEMUXER 0 +#define CONFIG_LOAS_DEMUXER 0 +#define CONFIG_LRC_DEMUXER 0 +#define CONFIG_LVF_DEMUXER 0 +#define CONFIG_LXF_DEMUXER 0 +#define CONFIG_M4V_DEMUXER 0 +#define CONFIG_MATROSKA_DEMUXER 0 +#define CONFIG_MGSTS_DEMUXER 0 +#define CONFIG_MICRODVD_DEMUXER 0 +#define CONFIG_MJPEG_DEMUXER 0 +#define CONFIG_MJPEG_2000_DEMUXER 0 +#define CONFIG_MLP_DEMUXER 0 +#define CONFIG_MLV_DEMUXER 0 +#define CONFIG_MM_DEMUXER 0 +#define CONFIG_MMF_DEMUXER 0 +#define CONFIG_MOV_DEMUXER 1 +#define CONFIG_MP3_DEMUXER 1 +#define CONFIG_MPC_DEMUXER 0 +#define CONFIG_MPC8_DEMUXER 0 +#define CONFIG_MPEGPS_DEMUXER 1 +#define CONFIG_MPEGTS_DEMUXER 1 +#define CONFIG_MPEGTSRAW_DEMUXER 0 +#define CONFIG_MPEGVIDEO_DEMUXER 1 +#define CONFIG_MPJPEG_DEMUXER 0 +#define CONFIG_MPL2_DEMUXER 0 +#define CONFIG_MPSUB_DEMUXER 0 +#define CONFIG_MSF_DEMUXER 0 +#define CONFIG_MSNWC_TCP_DEMUXER 0 +#define CONFIG_MTAF_DEMUXER 0 +#define CONFIG_MTV_DEMUXER 0 +#define CONFIG_MUSX_DEMUXER 0 +#define CONFIG_MV_DEMUXER 0 +#define CONFIG_MVI_DEMUXER 0 +#define CONFIG_MXF_DEMUXER 0 +#define CONFIG_MXG_DEMUXER 0 +#define CONFIG_NC_DEMUXER 0 +#define CONFIG_NISTSPHERE_DEMUXER 0 +#define CONFIG_NSV_DEMUXER 0 +#define CONFIG_NUT_DEMUXER 0 +#define CONFIG_NUV_DEMUXER 0 +#define CONFIG_OGG_DEMUXER 0 +#define CONFIG_OMA_DEMUXER 0 +#define 
CONFIG_PAF_DEMUXER 0 +#define CONFIG_PCM_ALAW_DEMUXER 0 +#define CONFIG_PCM_MULAW_DEMUXER 0 +#define CONFIG_PCM_F64BE_DEMUXER 0 +#define CONFIG_PCM_F64LE_DEMUXER 0 +#define CONFIG_PCM_F32BE_DEMUXER 0 +#define CONFIG_PCM_F32LE_DEMUXER 0 +#define CONFIG_PCM_S32BE_DEMUXER 0 +#define CONFIG_PCM_S32LE_DEMUXER 0 +#define CONFIG_PCM_S24BE_DEMUXER 0 +#define CONFIG_PCM_S24LE_DEMUXER 0 +#define CONFIG_PCM_S16BE_DEMUXER 0 +#define CONFIG_PCM_S16LE_DEMUXER 0 +#define CONFIG_PCM_S8_DEMUXER 0 +#define CONFIG_PCM_U32BE_DEMUXER 0 +#define CONFIG_PCM_U32LE_DEMUXER 0 +#define CONFIG_PCM_U24BE_DEMUXER 0 +#define CONFIG_PCM_U24LE_DEMUXER 0 +#define CONFIG_PCM_U16BE_DEMUXER 0 +#define CONFIG_PCM_U16LE_DEMUXER 0 +#define CONFIG_PCM_U8_DEMUXER 0 +#define CONFIG_PJS_DEMUXER 0 +#define CONFIG_PMP_DEMUXER 0 +#define CONFIG_PVA_DEMUXER 0 +#define CONFIG_PVF_DEMUXER 0 +#define CONFIG_QCP_DEMUXER 0 +#define CONFIG_R3D_DEMUXER 0 +#define CONFIG_RAWVIDEO_DEMUXER 0 +#define CONFIG_REALTEXT_DEMUXER 0 +#define CONFIG_REDSPARK_DEMUXER 0 +#define CONFIG_RL2_DEMUXER 0 +#define CONFIG_RM_DEMUXER 0 +#define CONFIG_ROQ_DEMUXER 0 +#define CONFIG_RPL_DEMUXER 0 +#define CONFIG_RSD_DEMUXER 0 +#define CONFIG_RSO_DEMUXER 0 +#define CONFIG_RTP_DEMUXER 0 +#define CONFIG_RTSP_DEMUXER 0 +#define CONFIG_SAMI_DEMUXER 0 +#define CONFIG_SAP_DEMUXER 0 +#define CONFIG_SBG_DEMUXER 0 +#define CONFIG_SCC_DEMUXER 0 +#define CONFIG_SDP_DEMUXER 0 +#define CONFIG_SDR2_DEMUXER 0 +#define CONFIG_SDS_DEMUXER 0 +#define CONFIG_SDX_DEMUXER 0 +#define CONFIG_SEGAFILM_DEMUXER 0 +#define CONFIG_SHORTEN_DEMUXER 0 +#define CONFIG_SIFF_DEMUXER 0 +#define CONFIG_SLN_DEMUXER 0 +#define CONFIG_SMACKER_DEMUXER 0 +#define CONFIG_SMJPEG_DEMUXER 0 +#define CONFIG_SMUSH_DEMUXER 0 +#define CONFIG_SOL_DEMUXER 0 +#define CONFIG_SOX_DEMUXER 0 +#define CONFIG_SPDIF_DEMUXER 0 +#define CONFIG_SRT_DEMUXER 0 +#define CONFIG_STR_DEMUXER 0 +#define CONFIG_STL_DEMUXER 0 +#define CONFIG_SUBVIEWER1_DEMUXER 0 +#define CONFIG_SUBVIEWER_DEMUXER 0 +#define 
CONFIG_SUP_DEMUXER 0 +#define CONFIG_SVAG_DEMUXER 0 +#define CONFIG_SWF_DEMUXER 0 +#define CONFIG_TAK_DEMUXER 0 +#define CONFIG_TEDCAPTIONS_DEMUXER 0 +#define CONFIG_THP_DEMUXER 0 +#define CONFIG_THREEDOSTR_DEMUXER 0 +#define CONFIG_TIERTEXSEQ_DEMUXER 0 +#define CONFIG_TMV_DEMUXER 0 +#define CONFIG_TRUEHD_DEMUXER 0 +#define CONFIG_TTA_DEMUXER 0 +#define CONFIG_TXD_DEMUXER 0 +#define CONFIG_TTY_DEMUXER 0 +#define CONFIG_V210_DEMUXER 0 +#define CONFIG_V210X_DEMUXER 0 +#define CONFIG_VAG_DEMUXER 0 +#define CONFIG_VC1_DEMUXER 0 +#define CONFIG_VC1T_DEMUXER 0 +#define CONFIG_VIVO_DEMUXER 0 +#define CONFIG_VMD_DEMUXER 0 +#define CONFIG_VOBSUB_DEMUXER 0 +#define CONFIG_VOC_DEMUXER 0 +#define CONFIG_VPK_DEMUXER 0 +#define CONFIG_VPLAYER_DEMUXER 0 +#define CONFIG_VQF_DEMUXER 0 +#define CONFIG_W64_DEMUXER 0 +#define CONFIG_WAV_DEMUXER 1 +#define CONFIG_WC3_DEMUXER 0 +#define CONFIG_WEBM_DASH_MANIFEST_DEMUXER 0 +#define CONFIG_WEBVTT_DEMUXER 0 +#define CONFIG_WSAUD_DEMUXER 0 +#define CONFIG_WSD_DEMUXER 0 +#define CONFIG_WSVQA_DEMUXER 0 +#define CONFIG_WTV_DEMUXER 0 +#define CONFIG_WVE_DEMUXER 0 +#define CONFIG_WV_DEMUXER 0 +#define CONFIG_XA_DEMUXER 0 +#define CONFIG_XBIN_DEMUXER 0 +#define CONFIG_XMV_DEMUXER 0 +#define CONFIG_XVAG_DEMUXER 0 +#define CONFIG_XWMA_DEMUXER 0 +#define CONFIG_YOP_DEMUXER 0 +#define CONFIG_YUV4MPEGPIPE_DEMUXER 0 +#define CONFIG_IMAGE_BMP_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_DDS_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_DPX_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_EXR_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_J2K_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_JPEG_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_JPEGLS_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PAM_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PBM_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PCX_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PGMYUV_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PGM_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PICTOR_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PNG_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PPM_PIPE_DEMUXER 0 +#define 
CONFIG_IMAGE_PSD_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_QDRAW_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_SGI_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_SUNRAST_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_TIFF_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_WEBP_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_XPM_PIPE_DEMUXER 0 +#define CONFIG_LIBGME_DEMUXER 0 +#define CONFIG_LIBMODPLUG_DEMUXER 0 +#define CONFIG_LIBNUT_DEMUXER 0 +#define CONFIG_LIBOPENMPT_DEMUXER 0 +#define CONFIG_A64MULTI_ENCODER 0 +#define CONFIG_A64MULTI5_ENCODER 0 +#define CONFIG_ALIAS_PIX_ENCODER 0 +#define CONFIG_AMV_ENCODER 0 +#define CONFIG_APNG_ENCODER 0 +#define CONFIG_ASV1_ENCODER 0 +#define CONFIG_ASV2_ENCODER 0 +#define CONFIG_AVRP_ENCODER 0 +#define CONFIG_AVUI_ENCODER 0 +#define CONFIG_AYUV_ENCODER 0 +#define CONFIG_BMP_ENCODER 0 +#define CONFIG_CINEPAK_ENCODER 0 +#define CONFIG_CLJR_ENCODER 0 +#define CONFIG_COMFORTNOISE_ENCODER 0 +#define CONFIG_DNXHD_ENCODER 0 +#define CONFIG_DPX_ENCODER 0 +#define CONFIG_DVVIDEO_ENCODER 0 +#define CONFIG_FFV1_ENCODER 0 +#define CONFIG_FFVHUFF_ENCODER 0 +#define CONFIG_FLASHSV_ENCODER 0 +#define CONFIG_FLASHSV2_ENCODER 0 +#define CONFIG_FLV_ENCODER 0 +#define CONFIG_GIF_ENCODER 0 +#define CONFIG_H261_ENCODER 0 +#define CONFIG_H263_ENCODER 0 +#define CONFIG_H263P_ENCODER 0 +#define CONFIG_HAP_ENCODER 0 +#define CONFIG_HUFFYUV_ENCODER 0 +#define CONFIG_JPEG2000_ENCODER 0 +#define CONFIG_JPEGLS_ENCODER 0 +#define CONFIG_LJPEG_ENCODER 0 +#define CONFIG_MJPEG_ENCODER 0 +#define CONFIG_MPEG1VIDEO_ENCODER 0 +#define CONFIG_MPEG2VIDEO_ENCODER 0 +#define CONFIG_MPEG4_ENCODER 0 +#define CONFIG_MSMPEG4V2_ENCODER 0 +#define CONFIG_MSMPEG4V3_ENCODER 0 +#define CONFIG_MSVIDEO1_ENCODER 0 +#define CONFIG_PAM_ENCODER 0 +#define CONFIG_PBM_ENCODER 0 +#define CONFIG_PCX_ENCODER 0 +#define CONFIG_PGM_ENCODER 0 +#define CONFIG_PGMYUV_ENCODER 0 +#define CONFIG_PNG_ENCODER 1 +#define CONFIG_PPM_ENCODER 0 +#define CONFIG_PRORES_ENCODER 0 +#define CONFIG_PRORES_AW_ENCODER 0 +#define CONFIG_PRORES_KS_ENCODER 0 
+#define CONFIG_QTRLE_ENCODER 0 +#define CONFIG_R10K_ENCODER 0 +#define CONFIG_R210_ENCODER 0 +#define CONFIG_RAWVIDEO_ENCODER 0 +#define CONFIG_ROQ_ENCODER 0 +#define CONFIG_RV10_ENCODER 0 +#define CONFIG_RV20_ENCODER 0 +#define CONFIG_S302M_ENCODER 0 +#define CONFIG_SGI_ENCODER 0 +#define CONFIG_SNOW_ENCODER 0 +#define CONFIG_SUNRAST_ENCODER 0 +#define CONFIG_SVQ1_ENCODER 0 +#define CONFIG_TARGA_ENCODER 0 +#define CONFIG_TIFF_ENCODER 0 +#define CONFIG_UTVIDEO_ENCODER 0 +#define CONFIG_V210_ENCODER 0 +#define CONFIG_V308_ENCODER 0 +#define CONFIG_V408_ENCODER 0 +#define CONFIG_V410_ENCODER 0 +#define CONFIG_VC2_ENCODER 0 +#define CONFIG_WRAPPED_AVFRAME_ENCODER 0 +#define CONFIG_WMV1_ENCODER 0 +#define CONFIG_WMV2_ENCODER 0 +#define CONFIG_XBM_ENCODER 0 +#define CONFIG_XFACE_ENCODER 0 +#define CONFIG_XWD_ENCODER 0 +#define CONFIG_Y41P_ENCODER 0 +#define CONFIG_YUV4_ENCODER 0 +#define CONFIG_ZLIB_ENCODER 0 +#define CONFIG_ZMBV_ENCODER 0 +#define CONFIG_AAC_ENCODER 0 +#define CONFIG_AC3_ENCODER 0 +#define CONFIG_AC3_FIXED_ENCODER 0 +#define CONFIG_ALAC_ENCODER 0 +#define CONFIG_DCA_ENCODER 0 +#define CONFIG_EAC3_ENCODER 0 +#define CONFIG_FLAC_ENCODER 0 +#define CONFIG_G723_1_ENCODER 0 +#define CONFIG_MLP_ENCODER 0 +#define CONFIG_MP2_ENCODER 0 +#define CONFIG_MP2FIXED_ENCODER 0 +#define CONFIG_NELLYMOSER_ENCODER 0 +#define CONFIG_OPUS_ENCODER 0 +#define CONFIG_RA_144_ENCODER 0 +#define CONFIG_SONIC_ENCODER 0 +#define CONFIG_SONIC_LS_ENCODER 0 +#define CONFIG_TRUEHD_ENCODER 0 +#define CONFIG_TTA_ENCODER 0 +#define CONFIG_VORBIS_ENCODER 0 +#define CONFIG_WAVPACK_ENCODER 0 +#define CONFIG_WMAV1_ENCODER 0 +#define CONFIG_WMAV2_ENCODER 0 +#define CONFIG_PCM_ALAW_ENCODER 0 +#define CONFIG_PCM_F32BE_ENCODER 0 +#define CONFIG_PCM_F32LE_ENCODER 0 +#define CONFIG_PCM_F64BE_ENCODER 0 +#define CONFIG_PCM_F64LE_ENCODER 0 +#define CONFIG_PCM_MULAW_ENCODER 0 +#define CONFIG_PCM_S8_ENCODER 0 +#define CONFIG_PCM_S8_PLANAR_ENCODER 0 +#define CONFIG_PCM_S16BE_ENCODER 0 +#define 
CONFIG_PCM_S16BE_PLANAR_ENCODER 0 +#define CONFIG_PCM_S16LE_ENCODER 0 +#define CONFIG_PCM_S16LE_PLANAR_ENCODER 0 +#define CONFIG_PCM_S24BE_ENCODER 0 +#define CONFIG_PCM_S24DAUD_ENCODER 0 +#define CONFIG_PCM_S24LE_ENCODER 0 +#define CONFIG_PCM_S24LE_PLANAR_ENCODER 0 +#define CONFIG_PCM_S32BE_ENCODER 0 +#define CONFIG_PCM_S32LE_ENCODER 0 +#define CONFIG_PCM_S32LE_PLANAR_ENCODER 0 +#define CONFIG_PCM_S64BE_ENCODER 0 +#define CONFIG_PCM_S64LE_ENCODER 0 +#define CONFIG_PCM_U8_ENCODER 0 +#define CONFIG_PCM_U16BE_ENCODER 0 +#define CONFIG_PCM_U16LE_ENCODER 0 +#define CONFIG_PCM_U24BE_ENCODER 0 +#define CONFIG_PCM_U24LE_ENCODER 0 +#define CONFIG_PCM_U32BE_ENCODER 0 +#define CONFIG_PCM_U32LE_ENCODER 0 +#define CONFIG_ROQ_DPCM_ENCODER 0 +#define CONFIG_ADPCM_ADX_ENCODER 0 +#define CONFIG_ADPCM_G722_ENCODER 0 +#define CONFIG_ADPCM_G726_ENCODER 0 +#define CONFIG_ADPCM_IMA_QT_ENCODER 0 +#define CONFIG_ADPCM_IMA_WAV_ENCODER 0 +#define CONFIG_ADPCM_MS_ENCODER 0 +#define CONFIG_ADPCM_SWF_ENCODER 0 +#define CONFIG_ADPCM_YAMAHA_ENCODER 0 +#define CONFIG_SSA_ENCODER 0 +#define CONFIG_ASS_ENCODER 0 +#define CONFIG_DVBSUB_ENCODER 0 +#define CONFIG_DVDSUB_ENCODER 0 +#define CONFIG_MOVTEXT_ENCODER 0 +#define CONFIG_SRT_ENCODER 0 +#define CONFIG_SUBRIP_ENCODER 0 +#define CONFIG_TEXT_ENCODER 0 +#define CONFIG_WEBVTT_ENCODER 0 +#define CONFIG_XSUB_ENCODER 0 +#define CONFIG_AAC_AT_ENCODER 0 +#define CONFIG_ALAC_AT_ENCODER 0 +#define CONFIG_ILBC_AT_ENCODER 0 +#define CONFIG_PCM_ALAW_AT_ENCODER 0 +#define CONFIG_PCM_MULAW_AT_ENCODER 0 +#define CONFIG_LIBFDK_AAC_ENCODER 0 +#define CONFIG_LIBGSM_ENCODER 0 +#define CONFIG_LIBGSM_MS_ENCODER 0 +#define CONFIG_LIBILBC_ENCODER 0 +#define CONFIG_LIBMP3LAME_ENCODER 0 +#define CONFIG_LIBOPENCORE_AMRNB_ENCODER 0 +#define CONFIG_LIBOPENJPEG_ENCODER 0 +#define CONFIG_LIBOPUS_ENCODER 0 +#define CONFIG_LIBSCHROEDINGER_ENCODER 0 +#define CONFIG_LIBSHINE_ENCODER 0 +#define CONFIG_LIBSPEEX_ENCODER 0 +#define CONFIG_LIBTHEORA_ENCODER 0 +#define 
CONFIG_LIBTWOLAME_ENCODER 0 +#define CONFIG_LIBVO_AMRWBENC_ENCODER 0 +#define CONFIG_LIBVORBIS_ENCODER 0 +#define CONFIG_LIBVPX_VP8_ENCODER 0 +#define CONFIG_LIBVPX_VP9_ENCODER 0 +#define CONFIG_LIBWAVPACK_ENCODER 0 +#define CONFIG_LIBWEBP_ANIM_ENCODER 0 +#define CONFIG_LIBWEBP_ENCODER 0 +#define CONFIG_LIBX262_ENCODER 0 +#define CONFIG_LIBX264_ENCODER 0 +#define CONFIG_LIBX264RGB_ENCODER 0 +#define CONFIG_LIBX265_ENCODER 0 +#define CONFIG_LIBXAVS_ENCODER 0 +#define CONFIG_LIBXVID_ENCODER 0 +#define CONFIG_LIBOPENH264_ENCODER 0 +#define CONFIG_H264_NVENC_ENCODER 0 +#define CONFIG_H264_OMX_ENCODER 0 +#define CONFIG_H264_QSV_ENCODER 0 +#define CONFIG_H264_VAAPI_ENCODER 0 +#define CONFIG_H264_VIDEOTOOLBOX_ENCODER 0 +#define CONFIG_NVENC_ENCODER 0 +#define CONFIG_NVENC_H264_ENCODER 0 +#define CONFIG_NVENC_HEVC_ENCODER 0 +#define CONFIG_HEVC_NVENC_ENCODER 0 +#define CONFIG_HEVC_QSV_ENCODER 0 +#define CONFIG_HEVC_VAAPI_ENCODER 0 +#define CONFIG_LIBKVAZAAR_ENCODER 0 +#define CONFIG_MJPEG_VAAPI_ENCODER 0 +#define CONFIG_MPEG2_QSV_ENCODER 0 +#define CONFIG_MPEG2_VAAPI_ENCODER 0 +#define CONFIG_VP8_VAAPI_ENCODER 0 +#define CONFIG_ABENCH_FILTER 0 +#define CONFIG_ACOMPRESSOR_FILTER 0 +#define CONFIG_ACROSSFADE_FILTER 0 +#define CONFIG_ACRUSHER_FILTER 0 +#define CONFIG_ADELAY_FILTER 0 +#define CONFIG_AECHO_FILTER 0 +#define CONFIG_AEMPHASIS_FILTER 0 +#define CONFIG_AEVAL_FILTER 0 +#define CONFIG_AFADE_FILTER 0 +#define CONFIG_AFFTFILT_FILTER 0 +#define CONFIG_AFORMAT_FILTER 0 +#define CONFIG_AGATE_FILTER 0 +#define CONFIG_AINTERLEAVE_FILTER 0 +#define CONFIG_ALIMITER_FILTER 0 +#define CONFIG_ALLPASS_FILTER 0 +#define CONFIG_ALOOP_FILTER 0 +#define CONFIG_AMERGE_FILTER 0 +#define CONFIG_AMETADATA_FILTER 0 +#define CONFIG_AMIX_FILTER 0 +#define CONFIG_ANEQUALIZER_FILTER 0 +#define CONFIG_ANULL_FILTER 0 +#define CONFIG_APAD_FILTER 0 +#define CONFIG_APERMS_FILTER 0 +#define CONFIG_APHASER_FILTER 0 +#define CONFIG_APULSATOR_FILTER 0 +#define CONFIG_AREALTIME_FILTER 0 +#define 
CONFIG_ARESAMPLE_FILTER 0 +#define CONFIG_AREVERSE_FILTER 0 +#define CONFIG_ASELECT_FILTER 0 +#define CONFIG_ASENDCMD_FILTER 0 +#define CONFIG_ASETNSAMPLES_FILTER 0 +#define CONFIG_ASETPTS_FILTER 0 +#define CONFIG_ASETRATE_FILTER 0 +#define CONFIG_ASETTB_FILTER 0 +#define CONFIG_ASHOWINFO_FILTER 0 +#define CONFIG_ASIDEDATA_FILTER 0 +#define CONFIG_ASPLIT_FILTER 0 +#define CONFIG_ASTATS_FILTER 0 +#define CONFIG_ASTREAMSELECT_FILTER 0 +#define CONFIG_ATEMPO_FILTER 0 +#define CONFIG_ATRIM_FILTER 0 +#define CONFIG_AZMQ_FILTER 0 +#define CONFIG_BANDPASS_FILTER 0 +#define CONFIG_BANDREJECT_FILTER 0 +#define CONFIG_BASS_FILTER 0 +#define CONFIG_BIQUAD_FILTER 0 +#define CONFIG_BS2B_FILTER 0 +#define CONFIG_CHANNELMAP_FILTER 0 +#define CONFIG_CHANNELSPLIT_FILTER 0 +#define CONFIG_CHORUS_FILTER 0 +#define CONFIG_COMPAND_FILTER 0 +#define CONFIG_COMPENSATIONDELAY_FILTER 0 +#define CONFIG_CRYSTALIZER_FILTER 0 +#define CONFIG_DCSHIFT_FILTER 0 +#define CONFIG_DYNAUDNORM_FILTER 0 +#define CONFIG_EARWAX_FILTER 0 +#define CONFIG_EBUR128_FILTER 0 +#define CONFIG_EQUALIZER_FILTER 0 +#define CONFIG_EXTRASTEREO_FILTER 0 +#define CONFIG_FIREQUALIZER_FILTER 0 +#define CONFIG_FLANGER_FILTER 0 +#define CONFIG_HDCD_FILTER 0 +#define CONFIG_HIGHPASS_FILTER 0 +#define CONFIG_JOIN_FILTER 0 +#define CONFIG_LADSPA_FILTER 0 +#define CONFIG_LOUDNORM_FILTER 0 +#define CONFIG_LOWPASS_FILTER 0 +#define CONFIG_PAN_FILTER 0 +#define CONFIG_REPLAYGAIN_FILTER 0 +#define CONFIG_RESAMPLE_FILTER 0 +#define CONFIG_RUBBERBAND_FILTER 0 +#define CONFIG_SIDECHAINCOMPRESS_FILTER 0 +#define CONFIG_SIDECHAINGATE_FILTER 0 +#define CONFIG_SILENCEDETECT_FILTER 0 +#define CONFIG_SILENCEREMOVE_FILTER 0 +#define CONFIG_SOFALIZER_FILTER 0 +#define CONFIG_STEREOTOOLS_FILTER 0 +#define CONFIG_STEREOWIDEN_FILTER 0 +#define CONFIG_TREBLE_FILTER 0 +#define CONFIG_TREMOLO_FILTER 0 +#define CONFIG_VIBRATO_FILTER 0 +#define CONFIG_VOLUME_FILTER 0 +#define CONFIG_VOLUMEDETECT_FILTER 0 +#define CONFIG_AEVALSRC_FILTER 0 +#define 
CONFIG_ANOISESRC_FILTER 0 +#define CONFIG_ANULLSRC_FILTER 0 +#define CONFIG_FLITE_FILTER 0 +#define CONFIG_SINE_FILTER 0 +#define CONFIG_ANULLSINK_FILTER 0 +#define CONFIG_ALPHAEXTRACT_FILTER 0 +#define CONFIG_ALPHAMERGE_FILTER 0 +#define CONFIG_ASS_FILTER 0 +#define CONFIG_ATADENOISE_FILTER 0 +#define CONFIG_AVGBLUR_FILTER 0 +#define CONFIG_BBOX_FILTER 0 +#define CONFIG_BENCH_FILTER 0 +#define CONFIG_BITPLANENOISE_FILTER 0 +#define CONFIG_BLACKDETECT_FILTER 0 +#define CONFIG_BLACKFRAME_FILTER 0 +#define CONFIG_BLEND_FILTER 0 +#define CONFIG_BOXBLUR_FILTER 0 +#define CONFIG_BWDIF_FILTER 0 +#define CONFIG_CHROMAKEY_FILTER 0 +#define CONFIG_CIESCOPE_FILTER 0 +#define CONFIG_CODECVIEW_FILTER 0 +#define CONFIG_COLORBALANCE_FILTER 0 +#define CONFIG_COLORCHANNELMIXER_FILTER 0 +#define CONFIG_COLORKEY_FILTER 0 +#define CONFIG_COLORLEVELS_FILTER 0 +#define CONFIG_COLORMATRIX_FILTER 0 +#define CONFIG_COLORSPACE_FILTER 0 +#define CONFIG_CONVOLUTION_FILTER 0 +#define CONFIG_COPY_FILTER 0 +#define CONFIG_COREIMAGE_FILTER 0 +#define CONFIG_COVER_RECT_FILTER 0 +#define CONFIG_CROP_FILTER 0 +#define CONFIG_CROPDETECT_FILTER 0 +#define CONFIG_CURVES_FILTER 0 +#define CONFIG_DATASCOPE_FILTER 0 +#define CONFIG_DCTDNOIZ_FILTER 0 +#define CONFIG_DEBAND_FILTER 0 +#define CONFIG_DECIMATE_FILTER 0 +#define CONFIG_DEFLATE_FILTER 0 +#define CONFIG_DEINTERLACE_QSV_FILTER 0 +#define CONFIG_DEINTERLACE_VAAPI_FILTER 0 +#define CONFIG_DEJUDDER_FILTER 0 +#define CONFIG_DELOGO_FILTER 0 +#define CONFIG_DESHAKE_FILTER 0 +#define CONFIG_DETELECINE_FILTER 0 +#define CONFIG_DILATION_FILTER 0 +#define CONFIG_DISPLACE_FILTER 0 +#define CONFIG_DRAWBOX_FILTER 0 +#define CONFIG_DRAWGRAPH_FILTER 0 +#define CONFIG_DRAWGRID_FILTER 0 +#define CONFIG_DRAWTEXT_FILTER 0 +#define CONFIG_EDGEDETECT_FILTER 0 +#define CONFIG_ELBG_FILTER 0 +#define CONFIG_EQ_FILTER 0 +#define CONFIG_EROSION_FILTER 0 +#define CONFIG_EXTRACTPLANES_FILTER 0 +#define CONFIG_FADE_FILTER 0 +#define CONFIG_FFTFILT_FILTER 0 +#define 
CONFIG_FIELD_FILTER 0 +#define CONFIG_FIELDHINT_FILTER 0 +#define CONFIG_FIELDMATCH_FILTER 0 +#define CONFIG_FIELDORDER_FILTER 0 +#define CONFIG_FIND_RECT_FILTER 0 +#define CONFIG_FORMAT_FILTER 0 +#define CONFIG_FPS_FILTER 0 +#define CONFIG_FRAMEPACK_FILTER 0 +#define CONFIG_FRAMERATE_FILTER 0 +#define CONFIG_FRAMESTEP_FILTER 0 +#define CONFIG_FREI0R_FILTER 0 +#define CONFIG_FSPP_FILTER 0 +#define CONFIG_GBLUR_FILTER 0 +#define CONFIG_GEQ_FILTER 0 +#define CONFIG_GRADFUN_FILTER 0 +#define CONFIG_HALDCLUT_FILTER 0 +#define CONFIG_HFLIP_FILTER 0 +#define CONFIG_HISTEQ_FILTER 0 +#define CONFIG_HISTOGRAM_FILTER 0 +#define CONFIG_HQDN3D_FILTER 0 +#define CONFIG_HQX_FILTER 0 +#define CONFIG_HSTACK_FILTER 0 +#define CONFIG_HUE_FILTER 0 +#define CONFIG_HWDOWNLOAD_FILTER 0 +#define CONFIG_HWMAP_FILTER 0 +#define CONFIG_HWUPLOAD_FILTER 0 +#define CONFIG_HWUPLOAD_CUDA_FILTER 0 +#define CONFIG_HYSTERESIS_FILTER 0 +#define CONFIG_IDET_FILTER 0 +#define CONFIG_IL_FILTER 0 +#define CONFIG_INFLATE_FILTER 0 +#define CONFIG_INTERLACE_FILTER 0 +#define CONFIG_INTERLEAVE_FILTER 0 +#define CONFIG_KERNDEINT_FILTER 0 +#define CONFIG_LENSCORRECTION_FILTER 0 +#define CONFIG_LOOP_FILTER 0 +#define CONFIG_LUT_FILTER 0 +#define CONFIG_LUT2_FILTER 0 +#define CONFIG_LUT3D_FILTER 0 +#define CONFIG_LUTRGB_FILTER 0 +#define CONFIG_LUTYUV_FILTER 0 +#define CONFIG_MASKEDCLAMP_FILTER 0 +#define CONFIG_MASKEDMERGE_FILTER 0 +#define CONFIG_MCDEINT_FILTER 0 +#define CONFIG_MERGEPLANES_FILTER 0 +#define CONFIG_MESTIMATE_FILTER 0 +#define CONFIG_METADATA_FILTER 0 +#define CONFIG_MIDEQUALIZER_FILTER 0 +#define CONFIG_MINTERPOLATE_FILTER 0 +#define CONFIG_MPDECIMATE_FILTER 0 +#define CONFIG_NEGATE_FILTER 0 +#define CONFIG_NLMEANS_FILTER 0 +#define CONFIG_NNEDI_FILTER 0 +#define CONFIG_NOFORMAT_FILTER 0 +#define CONFIG_NOISE_FILTER 0 +#define CONFIG_NULL_FILTER 0 +#define CONFIG_OCR_FILTER 0 +#define CONFIG_OCV_FILTER 0 +#define CONFIG_OVERLAY_FILTER 0 +#define CONFIG_OWDENOISE_FILTER 0 +#define 
CONFIG_PAD_FILTER 0 +#define CONFIG_PALETTEGEN_FILTER 0 +#define CONFIG_PALETTEUSE_FILTER 0 +#define CONFIG_PERMS_FILTER 0 +#define CONFIG_PERSPECTIVE_FILTER 0 +#define CONFIG_PHASE_FILTER 0 +#define CONFIG_PIXDESCTEST_FILTER 0 +#define CONFIG_PP_FILTER 0 +#define CONFIG_PP7_FILTER 0 +#define CONFIG_PREMULTIPLY_FILTER 0 +#define CONFIG_PREWITT_FILTER 0 +#define CONFIG_PSNR_FILTER 0 +#define CONFIG_PULLUP_FILTER 0 +#define CONFIG_QP_FILTER 0 +#define CONFIG_RANDOM_FILTER 0 +#define CONFIG_READEIA608_FILTER 0 +#define CONFIG_READVITC_FILTER 0 +#define CONFIG_REALTIME_FILTER 0 +#define CONFIG_REMAP_FILTER 0 +#define CONFIG_REMOVEGRAIN_FILTER 0 +#define CONFIG_REMOVELOGO_FILTER 0 +#define CONFIG_REPEATFIELDS_FILTER 0 +#define CONFIG_REVERSE_FILTER 0 +#define CONFIG_ROTATE_FILTER 0 +#define CONFIG_SAB_FILTER 0 +#define CONFIG_SCALE_FILTER 0 +#define CONFIG_SCALE_NPP_FILTER 0 +#define CONFIG_SCALE_QSV_FILTER 0 +#define CONFIG_SCALE_VAAPI_FILTER 0 +#define CONFIG_SCALE2REF_FILTER 0 +#define CONFIG_SELECT_FILTER 0 +#define CONFIG_SELECTIVECOLOR_FILTER 0 +#define CONFIG_SENDCMD_FILTER 0 +#define CONFIG_SEPARATEFIELDS_FILTER 0 +#define CONFIG_SETDAR_FILTER 0 +#define CONFIG_SETFIELD_FILTER 0 +#define CONFIG_SETPTS_FILTER 0 +#define CONFIG_SETSAR_FILTER 0 +#define CONFIG_SETTB_FILTER 0 +#define CONFIG_SHOWINFO_FILTER 0 +#define CONFIG_SHOWPALETTE_FILTER 0 +#define CONFIG_SHUFFLEFRAMES_FILTER 0 +#define CONFIG_SHUFFLEPLANES_FILTER 0 +#define CONFIG_SIDEDATA_FILTER 0 +#define CONFIG_SIGNALSTATS_FILTER 0 +#define CONFIG_SIGNATURE_FILTER 0 +#define CONFIG_SMARTBLUR_FILTER 0 +#define CONFIG_SOBEL_FILTER 0 +#define CONFIG_SPLIT_FILTER 0 +#define CONFIG_SPP_FILTER 0 +#define CONFIG_SSIM_FILTER 0 +#define CONFIG_STEREO3D_FILTER 0 +#define CONFIG_STREAMSELECT_FILTER 0 +#define CONFIG_SUBTITLES_FILTER 0 +#define CONFIG_SUPER2XSAI_FILTER 0 +#define CONFIG_SWAPRECT_FILTER 0 +#define CONFIG_SWAPUV_FILTER 0 +#define CONFIG_TBLEND_FILTER 0 +#define CONFIG_TELECINE_FILTER 0 +#define 
CONFIG_THRESHOLD_FILTER 0 +#define CONFIG_THUMBNAIL_FILTER 0 +#define CONFIG_TILE_FILTER 0 +#define CONFIG_TINTERLACE_FILTER 0 +#define CONFIG_TRANSPOSE_FILTER 0 +#define CONFIG_TRIM_FILTER 0 +#define CONFIG_UNSHARP_FILTER 0 +#define CONFIG_USPP_FILTER 0 +#define CONFIG_VAGUEDENOISER_FILTER 0 +#define CONFIG_VECTORSCOPE_FILTER 0 +#define CONFIG_VFLIP_FILTER 0 +#define CONFIG_VIDSTABDETECT_FILTER 0 +#define CONFIG_VIDSTABTRANSFORM_FILTER 0 +#define CONFIG_VIGNETTE_FILTER 0 +#define CONFIG_VSTACK_FILTER 0 +#define CONFIG_W3FDIF_FILTER 0 +#define CONFIG_WAVEFORM_FILTER 0 +#define CONFIG_WEAVE_FILTER 0 +#define CONFIG_XBR_FILTER 0 +#define CONFIG_YADIF_FILTER 0 +#define CONFIG_ZMQ_FILTER 0 +#define CONFIG_ZOOMPAN_FILTER 0 +#define CONFIG_ZSCALE_FILTER 0 +#define CONFIG_ALLRGB_FILTER 0 +#define CONFIG_ALLYUV_FILTER 0 +#define CONFIG_CELLAUTO_FILTER 0 +#define CONFIG_COLOR_FILTER 0 +#define CONFIG_COREIMAGESRC_FILTER 0 +#define CONFIG_FREI0R_SRC_FILTER 0 +#define CONFIG_HALDCLUTSRC_FILTER 0 +#define CONFIG_LIFE_FILTER 0 +#define CONFIG_MANDELBROT_FILTER 0 +#define CONFIG_MPTESTSRC_FILTER 0 +#define CONFIG_NULLSRC_FILTER 0 +#define CONFIG_RGBTESTSRC_FILTER 0 +#define CONFIG_SMPTEBARS_FILTER 0 +#define CONFIG_SMPTEHDBARS_FILTER 0 +#define CONFIG_TESTSRC_FILTER 0 +#define CONFIG_TESTSRC2_FILTER 0 +#define CONFIG_YUVTESTSRC_FILTER 0 +#define CONFIG_NULLSINK_FILTER 0 +#define CONFIG_ABITSCOPE_FILTER 0 +#define CONFIG_ADRAWGRAPH_FILTER 0 +#define CONFIG_AHISTOGRAM_FILTER 0 +#define CONFIG_APHASEMETER_FILTER 0 +#define CONFIG_AVECTORSCOPE_FILTER 0 +#define CONFIG_CONCAT_FILTER 0 +#define CONFIG_SHOWCQT_FILTER 0 +#define CONFIG_SHOWFREQS_FILTER 0 +#define CONFIG_SHOWSPECTRUM_FILTER 0 +#define CONFIG_SHOWSPECTRUMPIC_FILTER 0 +#define CONFIG_SHOWVOLUME_FILTER 0 +#define CONFIG_SHOWWAVES_FILTER 0 +#define CONFIG_SHOWWAVESPIC_FILTER 0 +#define CONFIG_SPECTRUMSYNTH_FILTER 0 +#define CONFIG_AMOVIE_FILTER 0 +#define CONFIG_MOVIE_FILTER 0 +#define CONFIG_H263_VAAPI_HWACCEL 0 +#define 
CONFIG_H263_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_H264_CUVID_HWACCEL 0 +#define CONFIG_H264_D3D11VA_HWACCEL 0 +#define CONFIG_H264_DXVA2_HWACCEL 0 +#define CONFIG_H264_MEDIACODEC_HWACCEL 0 +#define CONFIG_H264_MMAL_HWACCEL 0 +#define CONFIG_H264_QSV_HWACCEL 0 +#define CONFIG_H264_VAAPI_HWACCEL 0 +#define CONFIG_H264_VDA_HWACCEL 0 +#define CONFIG_H264_VDA_OLD_HWACCEL 0 +#define CONFIG_H264_VDPAU_HWACCEL 0 +#define CONFIG_H264_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_HEVC_CUVID_HWACCEL 0 +#define CONFIG_HEVC_D3D11VA_HWACCEL 0 +#define CONFIG_HEVC_DXVA2_HWACCEL 0 +#define CONFIG_HEVC_MEDIACODEC_HWACCEL 0 +#define CONFIG_HEVC_QSV_HWACCEL 0 +#define CONFIG_HEVC_VAAPI_HWACCEL 0 +#define CONFIG_HEVC_VDPAU_HWACCEL 0 +#define CONFIG_MJPEG_CUVID_HWACCEL 0 +#define CONFIG_MPEG1_CUVID_HWACCEL 0 +#define CONFIG_MPEG1_XVMC_HWACCEL 0 +#define CONFIG_MPEG1_VDPAU_HWACCEL 0 +#define CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_MPEG2_CUVID_HWACCEL 0 +#define CONFIG_MPEG2_XVMC_HWACCEL 0 +#define CONFIG_MPEG2_D3D11VA_HWACCEL 0 +#define CONFIG_MPEG2_DXVA2_HWACCEL 0 +#define CONFIG_MPEG2_MMAL_HWACCEL 0 +#define CONFIG_MPEG2_QSV_HWACCEL 0 +#define CONFIG_MPEG2_VAAPI_HWACCEL 0 +#define CONFIG_MPEG2_VDPAU_HWACCEL 0 +#define CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_MPEG4_CUVID_HWACCEL 0 +#define CONFIG_MPEG4_MEDIACODEC_HWACCEL 0 +#define CONFIG_MPEG4_MMAL_HWACCEL 0 +#define CONFIG_MPEG4_VAAPI_HWACCEL 0 +#define CONFIG_MPEG4_VDPAU_HWACCEL 0 +#define CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_VC1_CUVID_HWACCEL 0 +#define CONFIG_VC1_D3D11VA_HWACCEL 0 +#define CONFIG_VC1_DXVA2_HWACCEL 0 +#define CONFIG_VC1_VAAPI_HWACCEL 0 +#define CONFIG_VC1_VDPAU_HWACCEL 0 +#define CONFIG_VC1_MMAL_HWACCEL 0 +#define CONFIG_VC1_QSV_HWACCEL 0 +#define CONFIG_VP8_CUVID_HWACCEL 0 +#define CONFIG_VP8_MEDIACODEC_HWACCEL 0 +#define CONFIG_VP8_QSV_HWACCEL 0 +#define CONFIG_VP9_CUVID_HWACCEL 0 +#define CONFIG_VP9_D3D11VA_HWACCEL 0 +#define CONFIG_VP9_DXVA2_HWACCEL 0 +#define 
CONFIG_VP9_MEDIACODEC_HWACCEL 0 +#define CONFIG_VP9_VAAPI_HWACCEL 0 +#define CONFIG_WMV3_D3D11VA_HWACCEL 0 +#define CONFIG_WMV3_DXVA2_HWACCEL 0 +#define CONFIG_WMV3_VAAPI_HWACCEL 0 +#define CONFIG_WMV3_VDPAU_HWACCEL 0 +#define CONFIG_ALSA_INDEV 0 +#define CONFIG_AVFOUNDATION_INDEV 0 +#define CONFIG_BKTR_INDEV 0 +#define CONFIG_DECKLINK_INDEV 0 +#define CONFIG_DSHOW_INDEV 0 +#define CONFIG_DV1394_INDEV 0 +#define CONFIG_FBDEV_INDEV 0 +#define CONFIG_GDIGRAB_INDEV 0 +#define CONFIG_IEC61883_INDEV 0 +#define CONFIG_JACK_INDEV 0 +#define CONFIG_LAVFI_INDEV 0 +#define CONFIG_OPENAL_INDEV 0 +#define CONFIG_OSS_INDEV 0 +#define CONFIG_PULSE_INDEV 0 +#define CONFIG_QTKIT_INDEV 0 +#define CONFIG_SNDIO_INDEV 0 +#define CONFIG_V4L2_INDEV 0 +#define CONFIG_VFWCAP_INDEV 0 +#define CONFIG_XCBGRAB_INDEV 0 +#define CONFIG_LIBCDIO_INDEV 0 +#define CONFIG_LIBDC1394_INDEV 0 +#define CONFIG_A64_MUXER 0 +#define CONFIG_AC3_MUXER 0 +#define CONFIG_ADTS_MUXER 0 +#define CONFIG_ADX_MUXER 0 +#define CONFIG_AIFF_MUXER 0 +#define CONFIG_AMR_MUXER 0 +#define CONFIG_APNG_MUXER 0 +#define CONFIG_ASF_MUXER 0 +#define CONFIG_ASS_MUXER 0 +#define CONFIG_AST_MUXER 0 +#define CONFIG_ASF_STREAM_MUXER 0 +#define CONFIG_AU_MUXER 0 +#define CONFIG_AVI_MUXER 0 +#define CONFIG_AVM2_MUXER 0 +#define CONFIG_BIT_MUXER 0 +#define CONFIG_CAF_MUXER 0 +#define CONFIG_CAVSVIDEO_MUXER 0 +#define CONFIG_CRC_MUXER 0 +#define CONFIG_DASH_MUXER 0 +#define CONFIG_DATA_MUXER 0 +#define CONFIG_DAUD_MUXER 0 +#define CONFIG_DIRAC_MUXER 0 +#define CONFIG_DNXHD_MUXER 0 +#define CONFIG_DTS_MUXER 0 +#define CONFIG_DV_MUXER 0 +#define CONFIG_EAC3_MUXER 0 +#define CONFIG_F4V_MUXER 0 +#define CONFIG_FFM_MUXER 0 +#define CONFIG_FFMETADATA_MUXER 0 +#define CONFIG_FIFO_MUXER 0 +#define CONFIG_FILMSTRIP_MUXER 0 +#define CONFIG_FLAC_MUXER 0 +#define CONFIG_FLV_MUXER 0 +#define CONFIG_FRAMECRC_MUXER 0 +#define CONFIG_FRAMEHASH_MUXER 0 +#define CONFIG_FRAMEMD5_MUXER 0 +#define CONFIG_G722_MUXER 0 +#define CONFIG_G723_1_MUXER 0 +#define 
CONFIG_GIF_MUXER 0 +#define CONFIG_GSM_MUXER 0 +#define CONFIG_GXF_MUXER 0 +#define CONFIG_H261_MUXER 0 +#define CONFIG_H263_MUXER 0 +#define CONFIG_H264_MUXER 0 +#define CONFIG_HASH_MUXER 0 +#define CONFIG_HDS_MUXER 0 +#define CONFIG_HEVC_MUXER 0 +#define CONFIG_HLS_MUXER 0 +#define CONFIG_ICO_MUXER 0 +#define CONFIG_ILBC_MUXER 0 +#define CONFIG_IMAGE2_MUXER 0 +#define CONFIG_IMAGE2PIPE_MUXER 0 +#define CONFIG_IPOD_MUXER 0 +#define CONFIG_IRCAM_MUXER 0 +#define CONFIG_ISMV_MUXER 0 +#define CONFIG_IVF_MUXER 0 +#define CONFIG_JACOSUB_MUXER 0 +#define CONFIG_LATM_MUXER 0 +#define CONFIG_LRC_MUXER 0 +#define CONFIG_M4V_MUXER 0 +#define CONFIG_MD5_MUXER 0 +#define CONFIG_MATROSKA_MUXER 0 +#define CONFIG_MATROSKA_AUDIO_MUXER 0 +#define CONFIG_MICRODVD_MUXER 0 +#define CONFIG_MJPEG_MUXER 0 +#define CONFIG_MLP_MUXER 0 +#define CONFIG_MMF_MUXER 0 +#define CONFIG_MOV_MUXER 1 +#define CONFIG_MP2_MUXER 0 +#define CONFIG_MP3_MUXER 0 +#define CONFIG_MP4_MUXER 1 +#define CONFIG_MPEG1SYSTEM_MUXER 0 +#define CONFIG_MPEG1VCD_MUXER 0 +#define CONFIG_MPEG1VIDEO_MUXER 0 +#define CONFIG_MPEG2DVD_MUXER 0 +#define CONFIG_MPEG2SVCD_MUXER 0 +#define CONFIG_MPEG2VIDEO_MUXER 0 +#define CONFIG_MPEG2VOB_MUXER 0 +#define CONFIG_MPEGTS_MUXER 0 +#define CONFIG_MPJPEG_MUXER 0 +#define CONFIG_MXF_MUXER 0 +#define CONFIG_MXF_D10_MUXER 0 +#define CONFIG_MXF_OPATOM_MUXER 0 +#define CONFIG_NULL_MUXER 0 +#define CONFIG_NUT_MUXER 0 +#define CONFIG_OGA_MUXER 0 +#define CONFIG_OGG_MUXER 0 +#define CONFIG_OGV_MUXER 0 +#define CONFIG_OMA_MUXER 0 +#define CONFIG_OPUS_MUXER 0 +#define CONFIG_PCM_ALAW_MUXER 0 +#define CONFIG_PCM_MULAW_MUXER 0 +#define CONFIG_PCM_F64BE_MUXER 0 +#define CONFIG_PCM_F64LE_MUXER 0 +#define CONFIG_PCM_F32BE_MUXER 0 +#define CONFIG_PCM_F32LE_MUXER 0 +#define CONFIG_PCM_S32BE_MUXER 0 +#define CONFIG_PCM_S32LE_MUXER 0 +#define CONFIG_PCM_S24BE_MUXER 0 +#define CONFIG_PCM_S24LE_MUXER 0 +#define CONFIG_PCM_S16BE_MUXER 0 +#define CONFIG_PCM_S16LE_MUXER 0 +#define CONFIG_PCM_S8_MUXER 0 
+#define CONFIG_PCM_U32BE_MUXER 0 +#define CONFIG_PCM_U32LE_MUXER 0 +#define CONFIG_PCM_U24BE_MUXER 0 +#define CONFIG_PCM_U24LE_MUXER 0 +#define CONFIG_PCM_U16BE_MUXER 0 +#define CONFIG_PCM_U16LE_MUXER 0 +#define CONFIG_PCM_U8_MUXER 0 +#define CONFIG_PSP_MUXER 0 +#define CONFIG_RAWVIDEO_MUXER 0 +#define CONFIG_RM_MUXER 0 +#define CONFIG_ROQ_MUXER 0 +#define CONFIG_RSO_MUXER 0 +#define CONFIG_RTP_MUXER 0 +#define CONFIG_RTP_MPEGTS_MUXER 0 +#define CONFIG_RTSP_MUXER 0 +#define CONFIG_SAP_MUXER 0 +#define CONFIG_SCC_MUXER 0 +#define CONFIG_SEGMENT_MUXER 0 +#define CONFIG_STREAM_SEGMENT_MUXER 0 +#define CONFIG_SINGLEJPEG_MUXER 0 +#define CONFIG_SMJPEG_MUXER 0 +#define CONFIG_SMOOTHSTREAMING_MUXER 0 +#define CONFIG_SOX_MUXER 0 +#define CONFIG_SPX_MUXER 0 +#define CONFIG_SPDIF_MUXER 0 +#define CONFIG_SRT_MUXER 0 +#define CONFIG_SWF_MUXER 0 +#define CONFIG_TEE_MUXER 0 +#define CONFIG_TG2_MUXER 0 +#define CONFIG_TGP_MUXER 0 +#define CONFIG_MKVTIMESTAMP_V2_MUXER 0 +#define CONFIG_TRUEHD_MUXER 0 +#define CONFIG_TTA_MUXER 0 +#define CONFIG_UNCODEDFRAMECRC_MUXER 0 +#define CONFIG_VC1_MUXER 0 +#define CONFIG_VC1T_MUXER 0 +#define CONFIG_VOC_MUXER 0 +#define CONFIG_W64_MUXER 0 +#define CONFIG_WAV_MUXER 0 +#define CONFIG_WEBM_MUXER 0 +#define CONFIG_WEBM_DASH_MANIFEST_MUXER 0 +#define CONFIG_WEBM_CHUNK_MUXER 0 +#define CONFIG_WEBP_MUXER 0 +#define CONFIG_WEBVTT_MUXER 0 +#define CONFIG_WTV_MUXER 0 +#define CONFIG_WV_MUXER 0 +#define CONFIG_YUV4MPEGPIPE_MUXER 0 +#define CONFIG_CHROMAPRINT_MUXER 0 +#define CONFIG_LIBNUT_MUXER 0 +#define CONFIG_ALSA_OUTDEV 0 +#define CONFIG_CACA_OUTDEV 0 +#define CONFIG_DECKLINK_OUTDEV 0 +#define CONFIG_FBDEV_OUTDEV 0 +#define CONFIG_OPENGL_OUTDEV 0 +#define CONFIG_OSS_OUTDEV 0 +#define CONFIG_PULSE_OUTDEV 0 +#define CONFIG_SDL2_OUTDEV 0 +#define CONFIG_SNDIO_OUTDEV 0 +#define CONFIG_V4L2_OUTDEV 0 +#define CONFIG_XV_OUTDEV 0 +#define CONFIG_AAC_PARSER 1 +#define CONFIG_AAC_LATM_PARSER 1 +#define CONFIG_AC3_PARSER 0 +#define CONFIG_ADX_PARSER 0 
+#define CONFIG_BMP_PARSER 0 +#define CONFIG_CAVSVIDEO_PARSER 0 +#define CONFIG_COOK_PARSER 0 +#define CONFIG_DCA_PARSER 0 +#define CONFIG_DIRAC_PARSER 0 +#define CONFIG_DNXHD_PARSER 0 +#define CONFIG_DPX_PARSER 0 +#define CONFIG_DVAUDIO_PARSER 0 +#define CONFIG_DVBSUB_PARSER 0 +#define CONFIG_DVDSUB_PARSER 0 +#define CONFIG_DVD_NAV_PARSER 0 +#define CONFIG_FLAC_PARSER 1 +#define CONFIG_G729_PARSER 0 +#define CONFIG_GSM_PARSER 0 +#define CONFIG_H261_PARSER 0 +#define CONFIG_H263_PARSER 1 +#define CONFIG_H264_PARSER 1 +#define CONFIG_HEVC_PARSER 1 +#define CONFIG_MJPEG_PARSER 0 +#define CONFIG_MLP_PARSER 0 +#define CONFIG_MPEG4VIDEO_PARSER 1 +#define CONFIG_MPEGAUDIO_PARSER 1 +#define CONFIG_MPEGVIDEO_PARSER 0 +#define CONFIG_OPUS_PARSER 0 +#define CONFIG_PNG_PARSER 0 +#define CONFIG_PNM_PARSER 0 +#define CONFIG_RV30_PARSER 0 +#define CONFIG_RV40_PARSER 0 +#define CONFIG_SIPR_PARSER 0 +#define CONFIG_TAK_PARSER 0 +#define CONFIG_VC1_PARSER 0 +#define CONFIG_VORBIS_PARSER 0 +#define CONFIG_VP3_PARSER 0 +#define CONFIG_VP8_PARSER 0 +#define CONFIG_VP9_PARSER 0 +#define CONFIG_XMA_PARSER 0 +#define CONFIG_ASYNC_PROTOCOL 1 +#define CONFIG_BLURAY_PROTOCOL 0 +#define CONFIG_CACHE_PROTOCOL 1 +#define CONFIG_CONCAT_PROTOCOL 0 +#define CONFIG_CRYPTO_PROTOCOL 1 +#define CONFIG_DATA_PROTOCOL 1 +#define CONFIG_FFRTMPCRYPT_PROTOCOL 0 +#define CONFIG_FFRTMPHTTP_PROTOCOL 1 +#define CONFIG_FILE_PROTOCOL 1 +#define CONFIG_FTP_PROTOCOL 1 +#define CONFIG_GOPHER_PROTOCOL 0 +#define CONFIG_HLS_PROTOCOL 1 +#define CONFIG_HTTP_PROTOCOL 1 +#define CONFIG_HTTPPROXY_PROTOCOL 1 +#define CONFIG_HTTPS_PROTOCOL 1 +#define CONFIG_ICECAST_PROTOCOL 0 +#define CONFIG_IJKHTTPHOOK_PROTOCOL 1 +#define CONFIG_IJKHLSCACHE_PROTOCOL 1 +#define CONFIG_IJKLONGURL_PROTOCOL 1 +#define CONFIG_IJKMEDIADATASOURCE_PROTOCOL 1 +#define CONFIG_IJKSEGMENT_PROTOCOL 1 +#define CONFIG_IJKTCPHOOK_PROTOCOL 1 +#define CONFIG_IJKIO_PROTOCOL 1 +#define CONFIG_MMSH_PROTOCOL 0 +#define CONFIG_MMST_PROTOCOL 0 +#define 
CONFIG_MD5_PROTOCOL 0 +#define CONFIG_PIPE_PROTOCOL 1 +#define CONFIG_PROMPEG_PROTOCOL 1 +#define CONFIG_RTMP_PROTOCOL 1 +#define CONFIG_RTMPE_PROTOCOL 0 +#define CONFIG_RTMPS_PROTOCOL 0 +#define CONFIG_RTMPT_PROTOCOL 1 +#define CONFIG_RTMPTE_PROTOCOL 0 +#define CONFIG_RTMPTS_PROTOCOL 0 +#define CONFIG_RTP_PROTOCOL 0 +#define CONFIG_SCTP_PROTOCOL 0 +#define CONFIG_SRTP_PROTOCOL 0 +#define CONFIG_SUBFILE_PROTOCOL 0 +#define CONFIG_TEE_PROTOCOL 1 +#define CONFIG_TCP_PROTOCOL 1 +#define CONFIG_TLS_GNUTLS_PROTOCOL 0 +#define CONFIG_TLS_SCHANNEL_PROTOCOL 0 +#define CONFIG_TLS_SECURETRANSPORT_PROTOCOL 0 +#define CONFIG_TLS_OPENSSL_PROTOCOL 1 +#define CONFIG_UDP_PROTOCOL 1 +#define CONFIG_UDPLITE_PROTOCOL 1 +#define CONFIG_UNIX_PROTOCOL 0 +#define CONFIG_LIBRTMP_PROTOCOL 0 +#define CONFIG_LIBRTMPE_PROTOCOL 0 +#define CONFIG_LIBRTMPS_PROTOCOL 0 +#define CONFIG_LIBRTMPT_PROTOCOL 0 +#define CONFIG_LIBRTMPTE_PROTOCOL 0 +#define CONFIG_LIBSSH_PROTOCOL 0 +#define CONFIG_LIBSMBCLIENT_PROTOCOL 0 +#endif /* FFMPEG_CONFIG_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libffmpeg/config.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libffmpeg/config.h new file mode 100644 index 0000000..de2d861 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libffmpeg/config.h @@ -0,0 +1,46 @@ +/* + * config.h + * + * Copyright (c) 2013 Bilibili + * Copyright (c) 2013 Zhang Rui + * + * This file is part of ijkPlayer. + * + * ijkPlayer is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * ijkPlayer is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with ijkPlayer; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#if defined(__aarch64__) +# include "arm64/config.h" +#elif defined(__x86_64__) +# include "x86_64/config.h" +#elif defined(__arm__) + +# if defined(__ARM_ARCH_7S__) +# include "armv7s/config.h" +# elif defined(__ARM_ARCH) +# if __ARM_ARCH == 7 +# include "armv7/config.h" +# else +# error Unsupport ARM architecture +# endif +# else +# error Unsupport ARM architecture +# endif + +#elif defined(__i386__) +# include "i386/config.h" +#else +# error Unsupport architecture +#endif diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libffmpeg/i386/config.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libffmpeg/i386/config.h new file mode 100644 index 0000000..6cb7a05 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libffmpeg/i386/config.h @@ -0,0 +1,2277 @@ +/* Automatically generated by configure - do not modify! 
*/ +#ifndef FFMPEG_CONFIG_H +#define FFMPEG_CONFIG_H +#define FFMPEG_CONFIGURATION "--disable-gpl --disable-nonfree --enable-runtime-cpudetect --disable-gray --disable-swscale-alpha --disable-programs --disable-ffmpeg --disable-ffplay --disable-ffprobe --disable-ffserver --disable-doc --disable-htmlpages --disable-manpages --disable-podpages --disable-txtpages --disable-avdevice --enable-avcodec --enable-avformat --enable-avutil --enable-swresample --enable-swscale --disable-postproc --enable-avfilter --disable-avresample --enable-network --disable-d3d11va --disable-dxva2 --disable-vaapi --disable-vda --disable-vdpau --disable-videotoolbox --disable-encoders --enable-encoder=png --disable-decoders --enable-decoder=aac --enable-decoder=aac_latm --enable-decoder=flv --enable-decoder=h264 --enable-decoder='mp3*' --enable-decoder=vp6f --enable-decoder=flac --enable-decoder=mpeg4 --enable-decoder=wavpack --enable-decoder=wav --enable-decoder=pcm_s16le --disable-hwaccels --disable-muxers --enable-muxer=mp4 --disable-demuxers --enable-demuxer=aac --enable-demuxer=concat --enable-demuxer=data --enable-demuxer=flv --enable-demuxer=hls --enable-demuxer=live_flv --enable-demuxer=mov --enable-demuxer=mp3 --enable-demuxer=mpegps --enable-demuxer=mpegts --enable-demuxer=mpegvideo --enable-demuxer=flac --enable-demuxer=hevc --enable-demuxer=wav --disable-parsers --enable-parser=aac --enable-parser=aac_latm --enable-parser=h264 --enable-parser=flac --enable-parser=hevc --enable-bsfs --disable-bsf=chomp --disable-bsf=dca_core --disable-bsf=dump_extradata --disable-bsf=hevc_mp4toannexb --disable-bsf=imx_dump_header --disable-bsf=mjpeg2jpeg --disable-bsf=mjpega_dump_header --disable-bsf=mov2textsub --disable-bsf=mp3_header_decompress --disable-bsf=mpeg4_unpack_bframes --disable-bsf=noise --disable-bsf=remove_extradata --disable-bsf=text2movsub --disable-bsf=vp9_superframe --enable-protocols --enable-protocol=async --disable-protocol=bluray --disable-protocol=concat 
--disable-protocol=ffrtmpcrypt --enable-protocol=ffrtmphttp --disable-protocol=gopher --disable-protocol=icecast --disable-protocol='librtmp*' --disable-protocol=libssh --disable-protocol=md5 --disable-protocol=mmsh --disable-protocol=mmst --disable-protocol='rtmp*' --enable-protocol=rtmp --enable-protocol=rtmpt --disable-protocol=rtp --disable-protocol=sctp --disable-protocol=srtp --disable-protocol=subfile --disable-protocol=unix --disable-devices --disable-filters --disable-iconv --disable-audiotoolbox --disable-videotoolbox --enable-cross-compile --disable-stripping --arch=i386 --target-os=darwin --enable-static --disable-shared --disable-asm --disable-mmx --assert-level=2 --prefix=/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/build/ffmpeg-i386/output --enable-openssl --cc='xcrun -sdk iphonesimulator clang' --extra-cflags=' -arch i386 -mios-simulator-version-min=6.0 -I/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/boringssl/include -DQCLOUDSSL' --extra-cxxflags=' -arch i386 -mios-simulator-version-min=6.0 -I/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/boringssl/include -DQCLOUDSSL' --extra-ldflags=' -arch i386 -mios-simulator-version-min=6.0 -arch i386 -mios-simulator-version-min=6.0 -I/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/boringssl/include -DQCLOUDSSL -L/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/boringssl/lib -lboringssl'" +#define FFMPEG_LICENSE "LGPL version 2.1 or later" +#define CONFIG_THIS_YEAR 2017 +#define FFMPEG_DATADIR "/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/build/ffmpeg-i386/output/share/ffmpeg" +#define AVCONV_DATADIR "/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/build/ffmpeg-i386/output/share/ffmpeg" +#define CC_IDENT "Apple LLVM version 10.0.1 (clang-1001.0.46.4)" +#define av_restrict restrict +#define 
EXTERN_PREFIX "_" +#define EXTERN_ASM _ +#define BUILDSUF "" +#define SLIBSUF ".dylib" +#define HAVE_MMX2 HAVE_MMXEXT +#define SWS_MAX_FILTER_SIZE 256 +#define ASSERT_LEVEL 2 +#define ARCH_AARCH64 0 +#define ARCH_ALPHA 0 +#define ARCH_ARM 0 +#define ARCH_AVR32 0 +#define ARCH_AVR32_AP 0 +#define ARCH_AVR32_UC 0 +#define ARCH_BFIN 0 +#define ARCH_IA64 0 +#define ARCH_M68K 0 +#define ARCH_MIPS 0 +#define ARCH_MIPS64 0 +#define ARCH_PARISC 0 +#define ARCH_PPC 0 +#define ARCH_PPC64 0 +#define ARCH_S390 0 +#define ARCH_SH4 0 +#define ARCH_SPARC 0 +#define ARCH_SPARC64 0 +#define ARCH_TILEGX 0 +#define ARCH_TILEPRO 0 +#define ARCH_TOMI 0 +#define ARCH_X86 0 +#define ARCH_X86_32 0 +#define ARCH_X86_64 0 +#define HAVE_ARMV5TE 0 +#define HAVE_ARMV6 0 +#define HAVE_ARMV6T2 0 +#define HAVE_ARMV8 0 +#define HAVE_NEON 0 +#define HAVE_VFP 0 +#define HAVE_VFPV3 0 +#define HAVE_SETEND 0 +#define HAVE_ALTIVEC 0 +#define HAVE_DCBZL 0 +#define HAVE_LDBRX 0 +#define HAVE_POWER8 0 +#define HAVE_PPC4XX 0 +#define HAVE_VSX 0 +#define HAVE_AESNI 0 +#define HAVE_AMD3DNOW 0 +#define HAVE_AMD3DNOWEXT 0 +#define HAVE_AVX 0 +#define HAVE_AVX2 0 +#define HAVE_FMA3 0 +#define HAVE_FMA4 0 +#define HAVE_MMX 0 +#define HAVE_MMXEXT 0 +#define HAVE_SSE 0 +#define HAVE_SSE2 0 +#define HAVE_SSE3 0 +#define HAVE_SSE4 0 +#define HAVE_SSE42 0 +#define HAVE_SSSE3 0 +#define HAVE_XOP 0 +#define HAVE_CPUNOP 0 +#define HAVE_I686 0 +#define HAVE_MIPSFPU 0 +#define HAVE_MIPS32R2 0 +#define HAVE_MIPS32R5 0 +#define HAVE_MIPS64R2 0 +#define HAVE_MIPS32R6 0 +#define HAVE_MIPS64R6 0 +#define HAVE_MIPSDSP 0 +#define HAVE_MIPSDSPR2 0 +#define HAVE_MSA 0 +#define HAVE_LOONGSON2 0 +#define HAVE_LOONGSON3 0 +#define HAVE_MMI 0 +#define HAVE_ARMV5TE_EXTERNAL 0 +#define HAVE_ARMV6_EXTERNAL 0 +#define HAVE_ARMV6T2_EXTERNAL 0 +#define HAVE_ARMV8_EXTERNAL 0 +#define HAVE_NEON_EXTERNAL 0 +#define HAVE_VFP_EXTERNAL 0 +#define HAVE_VFPV3_EXTERNAL 0 +#define HAVE_SETEND_EXTERNAL 0 +#define HAVE_ALTIVEC_EXTERNAL 0 +#define 
HAVE_DCBZL_EXTERNAL 0 +#define HAVE_LDBRX_EXTERNAL 0 +#define HAVE_POWER8_EXTERNAL 0 +#define HAVE_PPC4XX_EXTERNAL 0 +#define HAVE_VSX_EXTERNAL 0 +#define HAVE_AESNI_EXTERNAL 0 +#define HAVE_AMD3DNOW_EXTERNAL 0 +#define HAVE_AMD3DNOWEXT_EXTERNAL 0 +#define HAVE_AVX_EXTERNAL 0 +#define HAVE_AVX2_EXTERNAL 0 +#define HAVE_FMA3_EXTERNAL 0 +#define HAVE_FMA4_EXTERNAL 0 +#define HAVE_MMX_EXTERNAL 0 +#define HAVE_MMXEXT_EXTERNAL 0 +#define HAVE_SSE_EXTERNAL 0 +#define HAVE_SSE2_EXTERNAL 0 +#define HAVE_SSE3_EXTERNAL 0 +#define HAVE_SSE4_EXTERNAL 0 +#define HAVE_SSE42_EXTERNAL 0 +#define HAVE_SSSE3_EXTERNAL 0 +#define HAVE_XOP_EXTERNAL 0 +#define HAVE_CPUNOP_EXTERNAL 0 +#define HAVE_I686_EXTERNAL 0 +#define HAVE_MIPSFPU_EXTERNAL 0 +#define HAVE_MIPS32R2_EXTERNAL 0 +#define HAVE_MIPS32R5_EXTERNAL 0 +#define HAVE_MIPS64R2_EXTERNAL 0 +#define HAVE_MIPS32R6_EXTERNAL 0 +#define HAVE_MIPS64R6_EXTERNAL 0 +#define HAVE_MIPSDSP_EXTERNAL 0 +#define HAVE_MIPSDSPR2_EXTERNAL 0 +#define HAVE_MSA_EXTERNAL 0 +#define HAVE_LOONGSON2_EXTERNAL 0 +#define HAVE_LOONGSON3_EXTERNAL 0 +#define HAVE_MMI_EXTERNAL 0 +#define HAVE_ARMV5TE_INLINE 0 +#define HAVE_ARMV6_INLINE 0 +#define HAVE_ARMV6T2_INLINE 0 +#define HAVE_ARMV8_INLINE 0 +#define HAVE_NEON_INLINE 0 +#define HAVE_VFP_INLINE 0 +#define HAVE_VFPV3_INLINE 0 +#define HAVE_SETEND_INLINE 0 +#define HAVE_ALTIVEC_INLINE 0 +#define HAVE_DCBZL_INLINE 0 +#define HAVE_LDBRX_INLINE 0 +#define HAVE_POWER8_INLINE 0 +#define HAVE_PPC4XX_INLINE 0 +#define HAVE_VSX_INLINE 0 +#define HAVE_AESNI_INLINE 0 +#define HAVE_AMD3DNOW_INLINE 0 +#define HAVE_AMD3DNOWEXT_INLINE 0 +#define HAVE_AVX_INLINE 0 +#define HAVE_AVX2_INLINE 0 +#define HAVE_FMA3_INLINE 0 +#define HAVE_FMA4_INLINE 0 +#define HAVE_MMX_INLINE 0 +#define HAVE_MMXEXT_INLINE 0 +#define HAVE_SSE_INLINE 0 +#define HAVE_SSE2_INLINE 0 +#define HAVE_SSE3_INLINE 0 +#define HAVE_SSE4_INLINE 0 +#define HAVE_SSE42_INLINE 0 +#define HAVE_SSSE3_INLINE 0 +#define HAVE_XOP_INLINE 0 +#define HAVE_CPUNOP_INLINE 0 
+#define HAVE_I686_INLINE 0 +#define HAVE_MIPSFPU_INLINE 0 +#define HAVE_MIPS32R2_INLINE 0 +#define HAVE_MIPS32R5_INLINE 0 +#define HAVE_MIPS64R2_INLINE 0 +#define HAVE_MIPS32R6_INLINE 0 +#define HAVE_MIPS64R6_INLINE 0 +#define HAVE_MIPSDSP_INLINE 0 +#define HAVE_MIPSDSPR2_INLINE 0 +#define HAVE_MSA_INLINE 0 +#define HAVE_LOONGSON2_INLINE 0 +#define HAVE_LOONGSON3_INLINE 0 +#define HAVE_MMI_INLINE 0 +#define HAVE_ALIGNED_STACK 0 +#define HAVE_FAST_64BIT 0 +#define HAVE_FAST_CLZ 0 +#define HAVE_FAST_CMOV 0 +#define HAVE_LOCAL_ALIGNED_8 1 +#define HAVE_LOCAL_ALIGNED_16 1 +#define HAVE_LOCAL_ALIGNED_32 1 +#define HAVE_SIMD_ALIGN_16 0 +#define HAVE_SIMD_ALIGN_32 0 +#define HAVE_ATOMICS_GCC 1 +#define HAVE_ATOMICS_SUNCC 0 +#define HAVE_ATOMICS_WIN32 0 +#define HAVE_ATOMIC_CAS_PTR 0 +#define HAVE_MACHINE_RW_BARRIER 0 +#define HAVE_MEMORYBARRIER 0 +#define HAVE_MM_EMPTY 1 +#define HAVE_RDTSC 0 +#define HAVE_SARESTART 1 +#define HAVE_SEM_TIMEDWAIT 0 +#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1 +#define HAVE_CABS 1 +#define HAVE_CEXP 1 +#define HAVE_INLINE_ASM 1 +#define HAVE_SYMVER 1 +#define HAVE_YASM 0 +#define HAVE_BIGENDIAN 0 +#define HAVE_FAST_UNALIGNED 0 +#define HAVE_ALSA_ASOUNDLIB_H 0 +#define HAVE_ALTIVEC_H 0 +#define HAVE_ARPA_INET_H 1 +#define HAVE_ASM_TYPES_H 0 +#define HAVE_CDIO_PARANOIA_H 0 +#define HAVE_CDIO_PARANOIA_PARANOIA_H 0 +#define HAVE_CUDA_H 0 +#define HAVE_DISPATCH_DISPATCH_H 1 +#define HAVE_DEV_BKTR_IOCTL_BT848_H 0 +#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0 +#define HAVE_DEV_IC_BT8XX_H 0 +#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0 +#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0 +#define HAVE_DIRECT_H 0 +#define HAVE_DIRENT_H 1 +#define HAVE_DLFCN_H 1 +#define HAVE_D3D11_H 0 +#define HAVE_DXVA_H 0 +#define HAVE_ES2_GL_H 0 +#define HAVE_GSM_H 0 +#define HAVE_IO_H 0 +#define HAVE_MACH_MACH_TIME_H 1 +#define HAVE_MACHINE_IOCTL_BT848_H 0 +#define HAVE_MACHINE_IOCTL_METEOR_H 0 +#define HAVE_MALLOC_H 0 +#define HAVE_OPENCV2_CORE_CORE_C_H 0 +#define 
HAVE_OPENJPEG_2_1_OPENJPEG_H 0 +#define HAVE_OPENJPEG_2_0_OPENJPEG_H 0 +#define HAVE_OPENJPEG_1_5_OPENJPEG_H 0 +#define HAVE_OPENGL_GL3_H 0 +#define HAVE_POLL_H 1 +#define HAVE_SNDIO_H 0 +#define HAVE_SOUNDCARD_H 0 +#define HAVE_STDATOMIC_H 1 +#define HAVE_SYS_MMAN_H 1 +#define HAVE_SYS_PARAM_H 1 +#define HAVE_SYS_RESOURCE_H 1 +#define HAVE_SYS_SELECT_H 1 +#define HAVE_SYS_SOUNDCARD_H 0 +#define HAVE_SYS_TIME_H 1 +#define HAVE_SYS_UN_H 1 +#define HAVE_SYS_VIDEOIO_H 0 +#define HAVE_TERMIOS_H 1 +#define HAVE_UDPLITE_H 0 +#define HAVE_UNISTD_H 1 +#define HAVE_VALGRIND_VALGRIND_H 0 +#define HAVE_WINDOWS_H 0 +#define HAVE_WINSOCK2_H 0 +#define HAVE_INTRINSICS_NEON 0 +#define HAVE_ATANF 1 +#define HAVE_ATAN2F 1 +#define HAVE_CBRT 1 +#define HAVE_CBRTF 1 +#define HAVE_COPYSIGN 1 +#define HAVE_COSF 1 +#define HAVE_ERF 1 +#define HAVE_EXP2 1 +#define HAVE_EXP2F 1 +#define HAVE_EXPF 1 +#define HAVE_HYPOT 1 +#define HAVE_ISFINITE 1 +#define HAVE_ISINF 1 +#define HAVE_ISNAN 1 +#define HAVE_LDEXPF 1 +#define HAVE_LLRINT 1 +#define HAVE_LLRINTF 1 +#define HAVE_LOG2 1 +#define HAVE_LOG2F 1 +#define HAVE_LOG10F 1 +#define HAVE_LRINT 1 +#define HAVE_LRINTF 1 +#define HAVE_POWF 1 +#define HAVE_RINT 1 +#define HAVE_ROUND 1 +#define HAVE_ROUNDF 1 +#define HAVE_SINF 1 +#define HAVE_TRUNC 1 +#define HAVE_TRUNCF 1 +#define HAVE_ACCESS 1 +#define HAVE_ALIGNED_MALLOC 0 +#define HAVE_ARC4RANDOM 1 +#define HAVE_CLOCK_GETTIME 1 +#define HAVE_CLOSESOCKET 0 +#define HAVE_COMMANDLINETOARGVW 0 +#define HAVE_COTASKMEMFREE 0 +#define HAVE_CRYPTGENRANDOM 0 +#define HAVE_DLOPEN 1 +#define HAVE_FCNTL 1 +#define HAVE_FLT_LIM 1 +#define HAVE_FORK 1 +#define HAVE_GETADDRINFO 1 +#define HAVE_GETHRTIME 0 +#define HAVE_GETOPT 1 +#define HAVE_GETPROCESSAFFINITYMASK 0 +#define HAVE_GETPROCESSMEMORYINFO 0 +#define HAVE_GETPROCESSTIMES 0 +#define HAVE_GETRUSAGE 1 +#define HAVE_GETSYSTEMTIMEASFILETIME 0 +#define HAVE_GETTIMEOFDAY 1 +#define HAVE_GLOB 1 +#define HAVE_GLXGETPROCADDRESS 0 +#define HAVE_GMTIME_R 1 
+#define HAVE_INET_ATON 1 +#define HAVE_ISATTY 1 +#define HAVE_JACK_PORT_GET_LATENCY_RANGE 0 +#define HAVE_KBHIT 0 +#define HAVE_LOADLIBRARY 0 +#define HAVE_LOCALTIME_R 1 +#define HAVE_LSTAT 1 +#define HAVE_LZO1X_999_COMPRESS 0 +#define HAVE_MACH_ABSOLUTE_TIME 1 +#define HAVE_MAPVIEWOFFILE 0 +#define HAVE_MEMALIGN 0 +#define HAVE_MKSTEMP 1 +#define HAVE_MMAP 1 +#define HAVE_MPROTECT 1 +#define HAVE_NANOSLEEP 1 +#define HAVE_PEEKNAMEDPIPE 0 +#define HAVE_POSIX_MEMALIGN 1 +#define HAVE_PTHREAD_CANCEL 1 +#define HAVE_SCHED_GETAFFINITY 0 +#define HAVE_SETCONSOLETEXTATTRIBUTE 0 +#define HAVE_SETCONSOLECTRLHANDLER 0 +#define HAVE_SETMODE 0 +#define HAVE_SETRLIMIT 1 +#define HAVE_SLEEP 0 +#define HAVE_STRERROR_R 1 +#define HAVE_SYSCONF 1 +#define HAVE_SYSCTL 1 +#define HAVE_USLEEP 1 +#define HAVE_UTGETOSTYPEFROMSTRING 0 +#define HAVE_VIRTUALALLOC 0 +#define HAVE_WGLGETPROCADDRESS 0 +#define HAVE_PTHREADS 1 +#define HAVE_OS2THREADS 0 +#define HAVE_W32THREADS 0 +#define HAVE_AS_DN_DIRECTIVE 0 +#define HAVE_AS_FPU_DIRECTIVE 0 +#define HAVE_AS_FUNC 0 +#define HAVE_AS_OBJECT_ARCH 0 +#define HAVE_ASM_MOD_Q 0 +#define HAVE_ATTRIBUTE_MAY_ALIAS 1 +#define HAVE_ATTRIBUTE_PACKED 1 +#define HAVE_EBP_AVAILABLE 1 +#define HAVE_EBX_AVAILABLE 1 +#define HAVE_GNU_AS 0 +#define HAVE_GNU_WINDRES 0 +#define HAVE_IBM_ASM 0 +#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 1 +#define HAVE_INLINE_ASM_LABELS 1 +#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1 +#define HAVE_PRAGMA_DEPRECATED 1 +#define HAVE_RSYNC_CONTIMEOUT 0 +#define HAVE_SYMVER_ASM_LABEL 1 +#define HAVE_SYMVER_GNU_ASM 0 +#define HAVE_VFP_ARGS 0 +#define HAVE_XFORM_ASM 0 +#define HAVE_XMM_CLOBBERS 1 +#define HAVE_CONDITION_VARIABLE_PTR 0 +#define HAVE_SOCKLEN_T 1 +#define HAVE_STRUCT_ADDRINFO 1 +#define HAVE_STRUCT_GROUP_SOURCE_REQ 1 +#define HAVE_STRUCT_IP_MREQ_SOURCE 1 +#define HAVE_STRUCT_IPV6_MREQ 1 +#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 1 +#define HAVE_STRUCT_POLLFD 1 +#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1 +#define 
HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0 +#define HAVE_STRUCT_SOCKADDR_IN6 1 +#define HAVE_STRUCT_SOCKADDR_SA_LEN 1 +#define HAVE_STRUCT_SOCKADDR_STORAGE 1 +#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 0 +#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0 +#define HAVE_ATOMICS_NATIVE 1 +#define HAVE_DOS_PATHS 0 +#define HAVE_DXVA2_LIB 0 +#define HAVE_DXVA2API_COBJ 0 +#define HAVE_LIBC_MSVCRT 0 +#define HAVE_LIBDC1394_1 0 +#define HAVE_LIBDC1394_2 0 +#define HAVE_MAKEINFO 1 +#define HAVE_MAKEINFO_HTML 0 +#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0 +#define HAVE_PERL 1 +#define HAVE_POD2MAN 1 +#define HAVE_SDL2 0 +#define HAVE_SECTION_DATA_REL_RO 0 +#define HAVE_TEXI2HTML 0 +#define HAVE_THREADS 1 +#define HAVE_VAAPI_DRM 0 +#define HAVE_VAAPI_X11 0 +#define HAVE_VDPAU_X11 0 +#define HAVE_WINRT 0 +#define HAVE_XLIB 1 +#define CONFIG_BSFS 1 +#define CONFIG_DECODERS 1 +#define CONFIG_ENCODERS 1 +#define CONFIG_HWACCELS 0 +#define CONFIG_PARSERS 1 +#define CONFIG_INDEVS 0 +#define CONFIG_OUTDEVS 0 +#define CONFIG_FILTERS 0 +#define CONFIG_DEMUXERS 1 +#define CONFIG_MUXERS 1 +#define CONFIG_PROTOCOLS 1 +#define CONFIG_DOC 0 +#define CONFIG_HTMLPAGES 0 +#define CONFIG_MANPAGES 0 +#define CONFIG_PODPAGES 0 +#define CONFIG_TXTPAGES 0 +#define CONFIG_AVIO_DIR_CMD_EXAMPLE 1 +#define CONFIG_AVIO_READING_EXAMPLE 1 +#define CONFIG_DECODE_AUDIO_EXAMPLE 1 +#define CONFIG_DECODE_VIDEO_EXAMPLE 1 +#define CONFIG_DEMUXING_DECODING_EXAMPLE 1 +#define CONFIG_ENCODE_AUDIO_EXAMPLE 1 +#define CONFIG_ENCODE_VIDEO_EXAMPLE 1 +#define CONFIG_EXTRACT_MVS_EXAMPLE 1 +#define CONFIG_FILTER_AUDIO_EXAMPLE 1 +#define CONFIG_FILTERING_AUDIO_EXAMPLE 1 +#define CONFIG_FILTERING_VIDEO_EXAMPLE 1 +#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 1 +#define CONFIG_METADATA_EXAMPLE 1 +#define CONFIG_MUXING_EXAMPLE 1 +#define CONFIG_QSVDEC_EXAMPLE 0 +#define CONFIG_REMUXING_EXAMPLE 1 +#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 1 +#define CONFIG_SCALING_VIDEO_EXAMPLE 1 +#define CONFIG_TRANSCODE_AAC_EXAMPLE 1 +#define 
CONFIG_TRANSCODING_EXAMPLE 1 +#define CONFIG_BZLIB 0 +#define CONFIG_ICONV 0 +#define CONFIG_LIBXCB 0 +#define CONFIG_LIBXCB_SHM 0 +#define CONFIG_LIBXCB_SHAPE 0 +#define CONFIG_LIBXCB_XFIXES 0 +#define CONFIG_LZMA 0 +#define CONFIG_SCHANNEL 0 +#define CONFIG_SDL 0 +#define CONFIG_SDL2 0 +#define CONFIG_SECURETRANSPORT 0 +#define CONFIG_XLIB 1 +#define CONFIG_ZLIB 1 +#define CONFIG_AVISYNTH 0 +#define CONFIG_FREI0R 0 +#define CONFIG_LIBCDIO 0 +#define CONFIG_LIBRUBBERBAND 0 +#define CONFIG_LIBVIDSTAB 0 +#define CONFIG_LIBX264 0 +#define CONFIG_LIBX265 0 +#define CONFIG_LIBXAVS 0 +#define CONFIG_LIBXVID 0 +#define CONFIG_DECKLINK 0 +#define CONFIG_LIBFDK_AAC 0 +#define CONFIG_OPENSSL 1 +#define CONFIG_GMP 0 +#define CONFIG_LIBOPENCORE_AMRNB 0 +#define CONFIG_LIBOPENCORE_AMRWB 0 +#define CONFIG_LIBVO_AMRWBENC 0 +#define CONFIG_LIBSMBCLIENT 0 +#define CONFIG_CHROMAPRINT 0 +#define CONFIG_CRYSTALHD 0 +#define CONFIG_GCRYPT 0 +#define CONFIG_GNUTLS 0 +#define CONFIG_JNI 0 +#define CONFIG_LADSPA 0 +#define CONFIG_LIBASS 0 +#define CONFIG_LIBBLURAY 0 +#define CONFIG_LIBBS2B 0 +#define CONFIG_LIBCACA 0 +#define CONFIG_LIBCELT 0 +#define CONFIG_LIBDC1394 0 +#define CONFIG_LIBFLITE 0 +#define CONFIG_LIBFONTCONFIG 0 +#define CONFIG_LIBFREETYPE 0 +#define CONFIG_LIBFRIBIDI 0 +#define CONFIG_LIBGME 0 +#define CONFIG_LIBGSM 0 +#define CONFIG_LIBIEC61883 0 +#define CONFIG_LIBILBC 0 +#define CONFIG_LIBKVAZAAR 0 +#define CONFIG_LIBMODPLUG 0 +#define CONFIG_LIBMP3LAME 0 +#define CONFIG_LIBNUT 0 +#define CONFIG_LIBOPENCV 0 +#define CONFIG_LIBOPENH264 0 +#define CONFIG_LIBOPENJPEG 0 +#define CONFIG_LIBOPENMPT 0 +#define CONFIG_LIBOPUS 0 +#define CONFIG_LIBPULSE 0 +#define CONFIG_LIBRTMP 0 +#define CONFIG_LIBSCHROEDINGER 0 +#define CONFIG_LIBSHINE 0 +#define CONFIG_LIBSMBCLIENT 0 +#define CONFIG_LIBSNAPPY 0 +#define CONFIG_LIBSOXR 0 +#define CONFIG_LIBSPEEX 0 +#define CONFIG_LIBSSH 0 +#define CONFIG_LIBTESSERACT 0 +#define CONFIG_LIBTHEORA 0 +#define CONFIG_LIBTWOLAME 0 +#define 
CONFIG_LIBV4L2 0 +#define CONFIG_LIBVORBIS 0 +#define CONFIG_LIBVPX 0 +#define CONFIG_LIBWAVPACK 0 +#define CONFIG_LIBWEBP 0 +#define CONFIG_LIBZIMG 0 +#define CONFIG_LIBZMQ 0 +#define CONFIG_LIBZVBI 0 +#define CONFIG_MEDIACODEC 0 +#define CONFIG_NETCDF 0 +#define CONFIG_OPENAL 0 +#define CONFIG_OPENCL 0 +#define CONFIG_OPENGL 0 +#define CONFIG_VIDEOTOOLBOX 0 +#define CONFIG_AUDIOTOOLBOX 0 +#define CONFIG_CUDA 0 +#define CONFIG_CUVID 0 +#define CONFIG_D3D11VA 0 +#define CONFIG_DXVA2 0 +#define CONFIG_NVENC 0 +#define CONFIG_VAAPI 0 +#define CONFIG_VDA 0 +#define CONFIG_VDPAU 0 +#define CONFIG_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_XVMC 0 +#define CONFIG_LIBNPP 0 +#define CONFIG_LIBMFX 0 +#define CONFIG_MMAL 0 +#define CONFIG_OMX 0 +#define CONFIG_FTRAPV 0 +#define CONFIG_GRAY 0 +#define CONFIG_HARDCODED_TABLES 0 +#define CONFIG_OMX_RPI 0 +#define CONFIG_RUNTIME_CPUDETECT 1 +#define CONFIG_SAFE_BITSTREAM_READER 1 +#define CONFIG_SHARED 0 +#define CONFIG_SMALL 0 +#define CONFIG_STATIC 1 +#define CONFIG_SWSCALE_ALPHA 0 +#define CONFIG_GPL 0 +#define CONFIG_NONFREE 0 +#define CONFIG_VERSION3 0 +#define CONFIG_AVCODEC 1 +#define CONFIG_AVDEVICE 0 +#define CONFIG_AVFILTER 1 +#define CONFIG_AVFORMAT 1 +#define CONFIG_AVRESAMPLE 0 +#define CONFIG_AVUTIL 1 +#define CONFIG_POSTPROC 0 +#define CONFIG_SWRESAMPLE 1 +#define CONFIG_SWSCALE 1 +#define CONFIG_FFPLAY 0 +#define CONFIG_FFPROBE 0 +#define CONFIG_FFSERVER 0 +#define CONFIG_FFMPEG 0 +#define CONFIG_DCT 1 +#define CONFIG_DWT 0 +#define CONFIG_ERROR_RESILIENCE 1 +#define CONFIG_FAAN 1 +#define CONFIG_FAST_UNALIGNED 0 +#define CONFIG_FFT 1 +#define CONFIG_LSP 0 +#define CONFIG_LZO 0 +#define CONFIG_MDCT 1 +#define CONFIG_PIXELUTILS 0 +#define CONFIG_NETWORK 1 +#define CONFIG_RDFT 1 +#define CONFIG_FONTCONFIG 0 +#define CONFIG_MEMORY_POISONING 0 +#define CONFIG_NEON_CLOBBER_TEST 0 +#define CONFIG_PIC 0 +#define CONFIG_RAISE_MAJOR 0 +#define CONFIG_THUMB 0 +#define CONFIG_VALGRIND_BACKTRACE 0 +#define 
CONFIG_XMM_CLOBBER_TEST 0 +#define CONFIG_AANDCTTABLES 0 +#define CONFIG_AC3DSP 0 +#define CONFIG_AUDIO_FRAME_QUEUE 0 +#define CONFIG_AUDIODSP 0 +#define CONFIG_BLOCKDSP 1 +#define CONFIG_BSWAPDSP 0 +#define CONFIG_CABAC 1 +#define CONFIG_DIRAC_PARSE 0 +#define CONFIG_DVPROFILE 0 +#define CONFIG_EXIF 0 +#define CONFIG_FAANDCT 1 +#define CONFIG_FAANIDCT 1 +#define CONFIG_FDCTDSP 1 +#define CONFIG_FLACDSP 1 +#define CONFIG_FMTCONVERT 0 +#define CONFIG_FRAME_THREAD_ENCODER 1 +#define CONFIG_G722DSP 0 +#define CONFIG_GOLOMB 1 +#define CONFIG_GPLV3 0 +#define CONFIG_H263DSP 1 +#define CONFIG_H264CHROMA 1 +#define CONFIG_H264DSP 1 +#define CONFIG_H264PARSE 1 +#define CONFIG_H264PRED 1 +#define CONFIG_H264QPEL 1 +#define CONFIG_HPELDSP 1 +#define CONFIG_HUFFMAN 1 +#define CONFIG_HUFFYUVDSP 0 +#define CONFIG_HUFFYUVENCDSP 0 +#define CONFIG_IDCTDSP 1 +#define CONFIG_IIRFILTER 0 +#define CONFIG_MDCT15 1 +#define CONFIG_INTRAX8 0 +#define CONFIG_ISO_MEDIA 1 +#define CONFIG_IVIDSP 0 +#define CONFIG_JPEGTABLES 0 +#define CONFIG_LGPLV3 0 +#define CONFIG_LIBX262 0 +#define CONFIG_LLAUDDSP 0 +#define CONFIG_LLVIDDSP 0 +#define CONFIG_LLVIDENCDSP 1 +#define CONFIG_LPC 0 +#define CONFIG_LZF 0 +#define CONFIG_ME_CMP 1 +#define CONFIG_MPEG_ER 1 +#define CONFIG_MPEGAUDIO 1 +#define CONFIG_MPEGAUDIODSP 1 +#define CONFIG_MPEGVIDEO 1 +#define CONFIG_MPEGVIDEOENC 0 +#define CONFIG_MSS34DSP 0 +#define CONFIG_PIXBLOCKDSP 1 +#define CONFIG_QPELDSP 1 +#define CONFIG_QSV 0 +#define CONFIG_QSVDEC 0 +#define CONFIG_QSVENC 0 +#define CONFIG_RANGECODER 0 +#define CONFIG_RIFFDEC 1 +#define CONFIG_RIFFENC 1 +#define CONFIG_RTPDEC 0 +#define CONFIG_RTPENC_CHAIN 1 +#define CONFIG_RV34DSP 0 +#define CONFIG_SINEWIN 1 +#define CONFIG_SNAPPY 0 +#define CONFIG_SRTP 0 +#define CONFIG_STARTCODE 1 +#define CONFIG_TEXTUREDSP 0 +#define CONFIG_TEXTUREDSPENC 0 +#define CONFIG_TPELDSP 0 +#define CONFIG_VAAPI_ENCODE 0 +#define CONFIG_VC1DSP 0 +#define CONFIG_VIDEODSP 1 +#define CONFIG_VP3DSP 1 +#define 
CONFIG_VP56DSP 1 +#define CONFIG_VP8DSP 0 +#define CONFIG_VT_BT2020 0 +#define CONFIG_WMA_FREQS 0 +#define CONFIG_WMV2DSP 0 +#define CONFIG_AAC_ADTSTOASC_BSF 1 +#define CONFIG_CHOMP_BSF 0 +#define CONFIG_DUMP_EXTRADATA_BSF 0 +#define CONFIG_DCA_CORE_BSF 0 +#define CONFIG_EXTRACT_EXTRADATA_BSF 1 +#define CONFIG_H264_MP4TOANNEXB_BSF 1 +#define CONFIG_HEVC_MP4TOANNEXB_BSF 0 +#define CONFIG_IMX_DUMP_HEADER_BSF 0 +#define CONFIG_MJPEG2JPEG_BSF 0 +#define CONFIG_MJPEGA_DUMP_HEADER_BSF 0 +#define CONFIG_MP3_HEADER_DECOMPRESS_BSF 0 +#define CONFIG_MPEG4_UNPACK_BFRAMES_BSF 0 +#define CONFIG_MOV2TEXTSUB_BSF 0 +#define CONFIG_NOISE_BSF 0 +#define CONFIG_REMOVE_EXTRADATA_BSF 0 +#define CONFIG_TEXT2MOVSUB_BSF 0 +#define CONFIG_VP9_SUPERFRAME_BSF 0 +#define CONFIG_AASC_DECODER 0 +#define CONFIG_AIC_DECODER 0 +#define CONFIG_ALIAS_PIX_DECODER 0 +#define CONFIG_AMV_DECODER 0 +#define CONFIG_ANM_DECODER 0 +#define CONFIG_ANSI_DECODER 0 +#define CONFIG_APNG_DECODER 0 +#define CONFIG_ASV1_DECODER 0 +#define CONFIG_ASV2_DECODER 0 +#define CONFIG_AURA_DECODER 0 +#define CONFIG_AURA2_DECODER 0 +#define CONFIG_AVRP_DECODER 0 +#define CONFIG_AVRN_DECODER 0 +#define CONFIG_AVS_DECODER 0 +#define CONFIG_AVUI_DECODER 0 +#define CONFIG_AYUV_DECODER 0 +#define CONFIG_BETHSOFTVID_DECODER 0 +#define CONFIG_BFI_DECODER 0 +#define CONFIG_BINK_DECODER 0 +#define CONFIG_BMP_DECODER 0 +#define CONFIG_BMV_VIDEO_DECODER 0 +#define CONFIG_BRENDER_PIX_DECODER 0 +#define CONFIG_C93_DECODER 0 +#define CONFIG_CAVS_DECODER 0 +#define CONFIG_CDGRAPHICS_DECODER 0 +#define CONFIG_CDXL_DECODER 0 +#define CONFIG_CFHD_DECODER 0 +#define CONFIG_CINEPAK_DECODER 0 +#define CONFIG_CLEARVIDEO_DECODER 0 +#define CONFIG_CLJR_DECODER 0 +#define CONFIG_CLLC_DECODER 0 +#define CONFIG_COMFORTNOISE_DECODER 0 +#define CONFIG_CPIA_DECODER 0 +#define CONFIG_CSCD_DECODER 0 +#define CONFIG_CYUV_DECODER 0 +#define CONFIG_DDS_DECODER 0 +#define CONFIG_DFA_DECODER 0 +#define CONFIG_DIRAC_DECODER 0 +#define CONFIG_DNXHD_DECODER 0 
+#define CONFIG_DPX_DECODER 0 +#define CONFIG_DSICINVIDEO_DECODER 0 +#define CONFIG_DVAUDIO_DECODER 0 +#define CONFIG_DVVIDEO_DECODER 0 +#define CONFIG_DXA_DECODER 0 +#define CONFIG_DXTORY_DECODER 0 +#define CONFIG_DXV_DECODER 0 +#define CONFIG_EACMV_DECODER 0 +#define CONFIG_EAMAD_DECODER 0 +#define CONFIG_EATGQ_DECODER 0 +#define CONFIG_EATGV_DECODER 0 +#define CONFIG_EATQI_DECODER 0 +#define CONFIG_EIGHTBPS_DECODER 0 +#define CONFIG_EIGHTSVX_EXP_DECODER 0 +#define CONFIG_EIGHTSVX_FIB_DECODER 0 +#define CONFIG_ESCAPE124_DECODER 0 +#define CONFIG_ESCAPE130_DECODER 0 +#define CONFIG_EXR_DECODER 0 +#define CONFIG_FFV1_DECODER 0 +#define CONFIG_FFVHUFF_DECODER 0 +#define CONFIG_FIC_DECODER 0 +#define CONFIG_FLASHSV_DECODER 0 +#define CONFIG_FLASHSV2_DECODER 0 +#define CONFIG_FLIC_DECODER 0 +#define CONFIG_FLV_DECODER 1 +#define CONFIG_FMVC_DECODER 0 +#define CONFIG_FOURXM_DECODER 0 +#define CONFIG_FRAPS_DECODER 0 +#define CONFIG_FRWU_DECODER 0 +#define CONFIG_G2M_DECODER 0 +#define CONFIG_GIF_DECODER 0 +#define CONFIG_H261_DECODER 0 +#define CONFIG_H263_DECODER 1 +#define CONFIG_H263I_DECODER 0 +#define CONFIG_H263P_DECODER 0 +#define CONFIG_H264_DECODER 1 +#define CONFIG_H264_CRYSTALHD_DECODER 0 +#define CONFIG_H264_MEDIACODEC_DECODER 0 +#define CONFIG_H264_MMAL_DECODER 0 +#define CONFIG_H264_QSV_DECODER 0 +#define CONFIG_H264_VDA_DECODER 0 +#define CONFIG_H264_VDPAU_DECODER 0 +#define CONFIG_HAP_DECODER 0 +#define CONFIG_HEVC_DECODER 0 +#define CONFIG_HEVC_QSV_DECODER 0 +#define CONFIG_HNM4_VIDEO_DECODER 0 +#define CONFIG_HQ_HQA_DECODER 0 +#define CONFIG_HQX_DECODER 0 +#define CONFIG_HUFFYUV_DECODER 0 +#define CONFIG_IDCIN_DECODER 0 +#define CONFIG_IFF_ILBM_DECODER 0 +#define CONFIG_INDEO2_DECODER 0 +#define CONFIG_INDEO3_DECODER 0 +#define CONFIG_INDEO4_DECODER 0 +#define CONFIG_INDEO5_DECODER 0 +#define CONFIG_INTERPLAY_VIDEO_DECODER 0 +#define CONFIG_JPEG2000_DECODER 0 +#define CONFIG_JPEGLS_DECODER 0 +#define CONFIG_JV_DECODER 0 +#define CONFIG_KGV1_DECODER 0 
+#define CONFIG_KMVC_DECODER 0 +#define CONFIG_LAGARITH_DECODER 0 +#define CONFIG_LOCO_DECODER 0 +#define CONFIG_M101_DECODER 0 +#define CONFIG_MAGICYUV_DECODER 0 +#define CONFIG_MDEC_DECODER 0 +#define CONFIG_MIMIC_DECODER 0 +#define CONFIG_MJPEG_DECODER 0 +#define CONFIG_MJPEGB_DECODER 0 +#define CONFIG_MMVIDEO_DECODER 0 +#define CONFIG_MOTIONPIXELS_DECODER 0 +#define CONFIG_MPEG_XVMC_DECODER 0 +#define CONFIG_MPEG1VIDEO_DECODER 0 +#define CONFIG_MPEG2VIDEO_DECODER 0 +#define CONFIG_MPEG4_DECODER 1 +#define CONFIG_MPEG4_CRYSTALHD_DECODER 0 +#define CONFIG_MPEG4_MMAL_DECODER 0 +#define CONFIG_MPEG4_VDPAU_DECODER 0 +#define CONFIG_MPEGVIDEO_DECODER 0 +#define CONFIG_MPEG_VDPAU_DECODER 0 +#define CONFIG_MPEG1_VDPAU_DECODER 0 +#define CONFIG_MPEG2_MMAL_DECODER 0 +#define CONFIG_MPEG2_CRYSTALHD_DECODER 0 +#define CONFIG_MPEG2_QSV_DECODER 0 +#define CONFIG_MSA1_DECODER 0 +#define CONFIG_MSMPEG4V1_DECODER 0 +#define CONFIG_MSMPEG4V2_DECODER 0 +#define CONFIG_MSMPEG4V3_DECODER 0 +#define CONFIG_MSMPEG4_CRYSTALHD_DECODER 0 +#define CONFIG_MSRLE_DECODER 0 +#define CONFIG_MSS1_DECODER 0 +#define CONFIG_MSS2_DECODER 0 +#define CONFIG_MSVIDEO1_DECODER 0 +#define CONFIG_MSZH_DECODER 0 +#define CONFIG_MTS2_DECODER 0 +#define CONFIG_MVC1_DECODER 0 +#define CONFIG_MVC2_DECODER 0 +#define CONFIG_MXPEG_DECODER 0 +#define CONFIG_NUV_DECODER 0 +#define CONFIG_PAF_VIDEO_DECODER 0 +#define CONFIG_PAM_DECODER 0 +#define CONFIG_PBM_DECODER 0 +#define CONFIG_PCX_DECODER 0 +#define CONFIG_PGM_DECODER 0 +#define CONFIG_PGMYUV_DECODER 0 +#define CONFIG_PICTOR_DECODER 0 +#define CONFIG_PIXLET_DECODER 0 +#define CONFIG_PNG_DECODER 0 +#define CONFIG_PPM_DECODER 0 +#define CONFIG_PRORES_DECODER 0 +#define CONFIG_PRORES_LGPL_DECODER 0 +#define CONFIG_PSD_DECODER 0 +#define CONFIG_PTX_DECODER 0 +#define CONFIG_QDRAW_DECODER 0 +#define CONFIG_QPEG_DECODER 0 +#define CONFIG_QTRLE_DECODER 0 +#define CONFIG_R10K_DECODER 0 +#define CONFIG_R210_DECODER 0 +#define CONFIG_RAWVIDEO_DECODER 0 +#define 
CONFIG_RL2_DECODER 0 +#define CONFIG_ROQ_DECODER 0 +#define CONFIG_RPZA_DECODER 0 +#define CONFIG_RSCC_DECODER 0 +#define CONFIG_RV10_DECODER 0 +#define CONFIG_RV20_DECODER 0 +#define CONFIG_RV30_DECODER 0 +#define CONFIG_RV40_DECODER 0 +#define CONFIG_S302M_DECODER 0 +#define CONFIG_SANM_DECODER 0 +#define CONFIG_SCPR_DECODER 0 +#define CONFIG_SCREENPRESSO_DECODER 0 +#define CONFIG_SDX2_DPCM_DECODER 0 +#define CONFIG_SGI_DECODER 0 +#define CONFIG_SGIRLE_DECODER 0 +#define CONFIG_SHEERVIDEO_DECODER 0 +#define CONFIG_SMACKER_DECODER 0 +#define CONFIG_SMC_DECODER 0 +#define CONFIG_SMVJPEG_DECODER 0 +#define CONFIG_SNOW_DECODER 0 +#define CONFIG_SP5X_DECODER 0 +#define CONFIG_SPEEDHQ_DECODER 0 +#define CONFIG_SUNRAST_DECODER 0 +#define CONFIG_SVQ1_DECODER 0 +#define CONFIG_SVQ3_DECODER 0 +#define CONFIG_TARGA_DECODER 0 +#define CONFIG_TARGA_Y216_DECODER 0 +#define CONFIG_TDSC_DECODER 0 +#define CONFIG_THEORA_DECODER 0 +#define CONFIG_THP_DECODER 0 +#define CONFIG_TIERTEXSEQVIDEO_DECODER 0 +#define CONFIG_TIFF_DECODER 0 +#define CONFIG_TMV_DECODER 0 +#define CONFIG_TRUEMOTION1_DECODER 0 +#define CONFIG_TRUEMOTION2_DECODER 0 +#define CONFIG_TRUEMOTION2RT_DECODER 0 +#define CONFIG_TSCC_DECODER 0 +#define CONFIG_TSCC2_DECODER 0 +#define CONFIG_TXD_DECODER 0 +#define CONFIG_ULTI_DECODER 0 +#define CONFIG_UTVIDEO_DECODER 0 +#define CONFIG_V210_DECODER 0 +#define CONFIG_V210X_DECODER 0 +#define CONFIG_V308_DECODER 0 +#define CONFIG_V408_DECODER 0 +#define CONFIG_V410_DECODER 0 +#define CONFIG_VB_DECODER 0 +#define CONFIG_VBLE_DECODER 0 +#define CONFIG_VC1_DECODER 0 +#define CONFIG_VC1_CRYSTALHD_DECODER 0 +#define CONFIG_VC1_VDPAU_DECODER 0 +#define CONFIG_VC1IMAGE_DECODER 0 +#define CONFIG_VC1_MMAL_DECODER 0 +#define CONFIG_VC1_QSV_DECODER 0 +#define CONFIG_VCR1_DECODER 0 +#define CONFIG_VMDVIDEO_DECODER 0 +#define CONFIG_VMNC_DECODER 0 +#define CONFIG_VP3_DECODER 0 +#define CONFIG_VP5_DECODER 0 +#define CONFIG_VP6_DECODER 1 +#define CONFIG_VP6A_DECODER 0 +#define 
CONFIG_VP6F_DECODER 1 +#define CONFIG_VP7_DECODER 0 +#define CONFIG_VP8_DECODER 0 +#define CONFIG_VP9_DECODER 0 +#define CONFIG_VQA_DECODER 0 +#define CONFIG_WEBP_DECODER 0 +#define CONFIG_WMV1_DECODER 0 +#define CONFIG_WMV2_DECODER 0 +#define CONFIG_WMV3_DECODER 0 +#define CONFIG_WMV3_CRYSTALHD_DECODER 0 +#define CONFIG_WMV3_VDPAU_DECODER 0 +#define CONFIG_WMV3IMAGE_DECODER 0 +#define CONFIG_WNV1_DECODER 0 +#define CONFIG_XAN_WC3_DECODER 0 +#define CONFIG_XAN_WC4_DECODER 0 +#define CONFIG_XBM_DECODER 0 +#define CONFIG_XFACE_DECODER 0 +#define CONFIG_XL_DECODER 0 +#define CONFIG_XPM_DECODER 0 +#define CONFIG_XWD_DECODER 0 +#define CONFIG_Y41P_DECODER 0 +#define CONFIG_YLC_DECODER 0 +#define CONFIG_YOP_DECODER 0 +#define CONFIG_YUV4_DECODER 0 +#define CONFIG_ZERO12V_DECODER 0 +#define CONFIG_ZEROCODEC_DECODER 0 +#define CONFIG_ZLIB_DECODER 0 +#define CONFIG_ZMBV_DECODER 0 +#define CONFIG_AAC_DECODER 1 +#define CONFIG_AAC_FIXED_DECODER 0 +#define CONFIG_AAC_LATM_DECODER 1 +#define CONFIG_AC3_DECODER 0 +#define CONFIG_AC3_FIXED_DECODER 0 +#define CONFIG_ALAC_DECODER 0 +#define CONFIG_ALS_DECODER 0 +#define CONFIG_AMRNB_DECODER 0 +#define CONFIG_AMRWB_DECODER 0 +#define CONFIG_APE_DECODER 0 +#define CONFIG_ATRAC1_DECODER 0 +#define CONFIG_ATRAC3_DECODER 0 +#define CONFIG_ATRAC3AL_DECODER 0 +#define CONFIG_ATRAC3P_DECODER 0 +#define CONFIG_ATRAC3PAL_DECODER 0 +#define CONFIG_BINKAUDIO_DCT_DECODER 0 +#define CONFIG_BINKAUDIO_RDFT_DECODER 0 +#define CONFIG_BMV_AUDIO_DECODER 0 +#define CONFIG_COOK_DECODER 0 +#define CONFIG_DCA_DECODER 0 +#define CONFIG_DSD_LSBF_DECODER 0 +#define CONFIG_DSD_MSBF_DECODER 0 +#define CONFIG_DSD_LSBF_PLANAR_DECODER 0 +#define CONFIG_DSD_MSBF_PLANAR_DECODER 0 +#define CONFIG_DSICINAUDIO_DECODER 0 +#define CONFIG_DSS_SP_DECODER 0 +#define CONFIG_DST_DECODER 0 +#define CONFIG_EAC3_DECODER 0 +#define CONFIG_EVRC_DECODER 0 +#define CONFIG_FFWAVESYNTH_DECODER 0 +#define CONFIG_FLAC_DECODER 1 +#define CONFIG_G723_1_DECODER 0 +#define 
CONFIG_G729_DECODER 0 +#define CONFIG_GSM_DECODER 0 +#define CONFIG_GSM_MS_DECODER 0 +#define CONFIG_IAC_DECODER 0 +#define CONFIG_IMC_DECODER 0 +#define CONFIG_INTERPLAY_ACM_DECODER 0 +#define CONFIG_MACE3_DECODER 0 +#define CONFIG_MACE6_DECODER 0 +#define CONFIG_METASOUND_DECODER 0 +#define CONFIG_MLP_DECODER 0 +#define CONFIG_MP1_DECODER 0 +#define CONFIG_MP1FLOAT_DECODER 0 +#define CONFIG_MP2_DECODER 0 +#define CONFIG_MP2FLOAT_DECODER 0 +#define CONFIG_MP3_DECODER 1 +#define CONFIG_MP3FLOAT_DECODER 1 +#define CONFIG_MP3ADU_DECODER 1 +#define CONFIG_MP3ADUFLOAT_DECODER 1 +#define CONFIG_MP3ON4_DECODER 1 +#define CONFIG_MP3ON4FLOAT_DECODER 1 +#define CONFIG_MPC7_DECODER 0 +#define CONFIG_MPC8_DECODER 0 +#define CONFIG_NELLYMOSER_DECODER 0 +#define CONFIG_ON2AVC_DECODER 0 +#define CONFIG_OPUS_DECODER 0 +#define CONFIG_PAF_AUDIO_DECODER 0 +#define CONFIG_QCELP_DECODER 0 +#define CONFIG_QDM2_DECODER 0 +#define CONFIG_QDMC_DECODER 0 +#define CONFIG_RA_144_DECODER 0 +#define CONFIG_RA_288_DECODER 0 +#define CONFIG_RALF_DECODER 0 +#define CONFIG_SHORTEN_DECODER 0 +#define CONFIG_SIPR_DECODER 0 +#define CONFIG_SMACKAUD_DECODER 0 +#define CONFIG_SONIC_DECODER 0 +#define CONFIG_TAK_DECODER 0 +#define CONFIG_TRUEHD_DECODER 0 +#define CONFIG_TRUESPEECH_DECODER 0 +#define CONFIG_TTA_DECODER 0 +#define CONFIG_TWINVQ_DECODER 0 +#define CONFIG_VMDAUDIO_DECODER 0 +#define CONFIG_VORBIS_DECODER 0 +#define CONFIG_WAVPACK_DECODER 1 +#define CONFIG_WMALOSSLESS_DECODER 0 +#define CONFIG_WMAPRO_DECODER 0 +#define CONFIG_WMAV1_DECODER 0 +#define CONFIG_WMAV2_DECODER 0 +#define CONFIG_WMAVOICE_DECODER 0 +#define CONFIG_WS_SND1_DECODER 0 +#define CONFIG_XMA1_DECODER 0 +#define CONFIG_XMA2_DECODER 0 +#define CONFIG_PCM_ALAW_DECODER 0 +#define CONFIG_PCM_BLURAY_DECODER 0 +#define CONFIG_PCM_DVD_DECODER 0 +#define CONFIG_PCM_F16LE_DECODER 0 +#define CONFIG_PCM_F24LE_DECODER 0 +#define CONFIG_PCM_F32BE_DECODER 0 +#define CONFIG_PCM_F32LE_DECODER 0 +#define CONFIG_PCM_F64BE_DECODER 0 +#define 
CONFIG_PCM_F64LE_DECODER 0 +#define CONFIG_PCM_LXF_DECODER 0 +#define CONFIG_PCM_MULAW_DECODER 0 +#define CONFIG_PCM_S8_DECODER 0 +#define CONFIG_PCM_S8_PLANAR_DECODER 0 +#define CONFIG_PCM_S16BE_DECODER 0 +#define CONFIG_PCM_S16BE_PLANAR_DECODER 0 +#define CONFIG_PCM_S16LE_DECODER 1 +#define CONFIG_PCM_S16LE_PLANAR_DECODER 0 +#define CONFIG_PCM_S24BE_DECODER 0 +#define CONFIG_PCM_S24DAUD_DECODER 0 +#define CONFIG_PCM_S24LE_DECODER 0 +#define CONFIG_PCM_S24LE_PLANAR_DECODER 0 +#define CONFIG_PCM_S32BE_DECODER 0 +#define CONFIG_PCM_S32LE_DECODER 0 +#define CONFIG_PCM_S32LE_PLANAR_DECODER 0 +#define CONFIG_PCM_S64BE_DECODER 0 +#define CONFIG_PCM_S64LE_DECODER 0 +#define CONFIG_PCM_U8_DECODER 0 +#define CONFIG_PCM_U16BE_DECODER 0 +#define CONFIG_PCM_U16LE_DECODER 0 +#define CONFIG_PCM_U24BE_DECODER 0 +#define CONFIG_PCM_U24LE_DECODER 0 +#define CONFIG_PCM_U32BE_DECODER 0 +#define CONFIG_PCM_U32LE_DECODER 0 +#define CONFIG_PCM_ZORK_DECODER 0 +#define CONFIG_INTERPLAY_DPCM_DECODER 0 +#define CONFIG_ROQ_DPCM_DECODER 0 +#define CONFIG_SOL_DPCM_DECODER 0 +#define CONFIG_XAN_DPCM_DECODER 0 +#define CONFIG_ADPCM_4XM_DECODER 0 +#define CONFIG_ADPCM_ADX_DECODER 0 +#define CONFIG_ADPCM_AFC_DECODER 0 +#define CONFIG_ADPCM_AICA_DECODER 0 +#define CONFIG_ADPCM_CT_DECODER 0 +#define CONFIG_ADPCM_DTK_DECODER 0 +#define CONFIG_ADPCM_EA_DECODER 0 +#define CONFIG_ADPCM_EA_MAXIS_XA_DECODER 0 +#define CONFIG_ADPCM_EA_R1_DECODER 0 +#define CONFIG_ADPCM_EA_R2_DECODER 0 +#define CONFIG_ADPCM_EA_R3_DECODER 0 +#define CONFIG_ADPCM_EA_XAS_DECODER 0 +#define CONFIG_ADPCM_G722_DECODER 0 +#define CONFIG_ADPCM_G726_DECODER 0 +#define CONFIG_ADPCM_G726LE_DECODER 0 +#define CONFIG_ADPCM_IMA_AMV_DECODER 0 +#define CONFIG_ADPCM_IMA_APC_DECODER 0 +#define CONFIG_ADPCM_IMA_DAT4_DECODER 0 +#define CONFIG_ADPCM_IMA_DK3_DECODER 0 +#define CONFIG_ADPCM_IMA_DK4_DECODER 0 +#define CONFIG_ADPCM_IMA_EA_EACS_DECODER 0 +#define CONFIG_ADPCM_IMA_EA_SEAD_DECODER 0 +#define CONFIG_ADPCM_IMA_ISS_DECODER 0 +#define 
CONFIG_ADPCM_IMA_OKI_DECODER 0 +#define CONFIG_ADPCM_IMA_QT_DECODER 0 +#define CONFIG_ADPCM_IMA_RAD_DECODER 0 +#define CONFIG_ADPCM_IMA_SMJPEG_DECODER 0 +#define CONFIG_ADPCM_IMA_WAV_DECODER 0 +#define CONFIG_ADPCM_IMA_WS_DECODER 0 +#define CONFIG_ADPCM_MS_DECODER 0 +#define CONFIG_ADPCM_MTAF_DECODER 0 +#define CONFIG_ADPCM_PSX_DECODER 0 +#define CONFIG_ADPCM_SBPRO_2_DECODER 0 +#define CONFIG_ADPCM_SBPRO_3_DECODER 0 +#define CONFIG_ADPCM_SBPRO_4_DECODER 0 +#define CONFIG_ADPCM_SWF_DECODER 0 +#define CONFIG_ADPCM_THP_DECODER 0 +#define CONFIG_ADPCM_THP_LE_DECODER 0 +#define CONFIG_ADPCM_VIMA_DECODER 0 +#define CONFIG_ADPCM_XA_DECODER 0 +#define CONFIG_ADPCM_YAMAHA_DECODER 0 +#define CONFIG_SSA_DECODER 0 +#define CONFIG_ASS_DECODER 0 +#define CONFIG_CCAPTION_DECODER 0 +#define CONFIG_DVBSUB_DECODER 0 +#define CONFIG_DVDSUB_DECODER 0 +#define CONFIG_JACOSUB_DECODER 0 +#define CONFIG_MICRODVD_DECODER 0 +#define CONFIG_MOVTEXT_DECODER 0 +#define CONFIG_MPL2_DECODER 0 +#define CONFIG_PGSSUB_DECODER 0 +#define CONFIG_PJS_DECODER 0 +#define CONFIG_REALTEXT_DECODER 0 +#define CONFIG_SAMI_DECODER 0 +#define CONFIG_SRT_DECODER 0 +#define CONFIG_STL_DECODER 0 +#define CONFIG_SUBRIP_DECODER 0 +#define CONFIG_SUBVIEWER_DECODER 0 +#define CONFIG_SUBVIEWER1_DECODER 0 +#define CONFIG_TEXT_DECODER 0 +#define CONFIG_VPLAYER_DECODER 0 +#define CONFIG_WEBVTT_DECODER 0 +#define CONFIG_XSUB_DECODER 0 +#define CONFIG_AAC_AT_DECODER 0 +#define CONFIG_AC3_AT_DECODER 0 +#define CONFIG_ADPCM_IMA_QT_AT_DECODER 0 +#define CONFIG_ALAC_AT_DECODER 0 +#define CONFIG_AMR_NB_AT_DECODER 0 +#define CONFIG_EAC3_AT_DECODER 0 +#define CONFIG_GSM_MS_AT_DECODER 0 +#define CONFIG_ILBC_AT_DECODER 0 +#define CONFIG_MP1_AT_DECODER 0 +#define CONFIG_MP2_AT_DECODER 0 +#define CONFIG_MP3_AT_DECODER 0 +#define CONFIG_PCM_ALAW_AT_DECODER 0 +#define CONFIG_PCM_MULAW_AT_DECODER 0 +#define CONFIG_QDMC_AT_DECODER 0 +#define CONFIG_QDM2_AT_DECODER 0 +#define CONFIG_LIBCELT_DECODER 0 +#define CONFIG_LIBFDK_AAC_DECODER 0 
+#define CONFIG_LIBGSM_DECODER 0 +#define CONFIG_LIBGSM_MS_DECODER 0 +#define CONFIG_LIBILBC_DECODER 0 +#define CONFIG_LIBOPENCORE_AMRNB_DECODER 0 +#define CONFIG_LIBOPENCORE_AMRWB_DECODER 0 +#define CONFIG_LIBOPENJPEG_DECODER 0 +#define CONFIG_LIBOPUS_DECODER 0 +#define CONFIG_LIBSCHROEDINGER_DECODER 0 +#define CONFIG_LIBSPEEX_DECODER 0 +#define CONFIG_LIBVORBIS_DECODER 0 +#define CONFIG_LIBVPX_VP8_DECODER 0 +#define CONFIG_LIBVPX_VP9_DECODER 0 +#define CONFIG_LIBZVBI_TELETEXT_DECODER 0 +#define CONFIG_BINTEXT_DECODER 0 +#define CONFIG_XBIN_DECODER 0 +#define CONFIG_IDF_DECODER 0 +#define CONFIG_LIBOPENH264_DECODER 0 +#define CONFIG_H264_CUVID_DECODER 0 +#define CONFIG_HEVC_CUVID_DECODER 0 +#define CONFIG_HEVC_MEDIACODEC_DECODER 0 +#define CONFIG_MJPEG_CUVID_DECODER 0 +#define CONFIG_MPEG1_CUVID_DECODER 0 +#define CONFIG_MPEG2_CUVID_DECODER 0 +#define CONFIG_MPEG4_CUVID_DECODER 0 +#define CONFIG_MPEG4_MEDIACODEC_DECODER 0 +#define CONFIG_VC1_CUVID_DECODER 0 +#define CONFIG_VP8_CUVID_DECODER 0 +#define CONFIG_VP8_MEDIACODEC_DECODER 0 +#define CONFIG_VP8_QSV_DECODER 0 +#define CONFIG_VP9_CUVID_DECODER 0 +#define CONFIG_VP9_MEDIACODEC_DECODER 0 +#define CONFIG_AA_DEMUXER 0 +#define CONFIG_AAC_DEMUXER 1 +#define CONFIG_AC3_DEMUXER 0 +#define CONFIG_ACM_DEMUXER 0 +#define CONFIG_ACT_DEMUXER 0 +#define CONFIG_ADF_DEMUXER 0 +#define CONFIG_ADP_DEMUXER 0 +#define CONFIG_ADS_DEMUXER 0 +#define CONFIG_ADX_DEMUXER 0 +#define CONFIG_AEA_DEMUXER 0 +#define CONFIG_AFC_DEMUXER 0 +#define CONFIG_AIFF_DEMUXER 0 +#define CONFIG_AIX_DEMUXER 0 +#define CONFIG_AMR_DEMUXER 0 +#define CONFIG_ANM_DEMUXER 0 +#define CONFIG_APC_DEMUXER 0 +#define CONFIG_APE_DEMUXER 0 +#define CONFIG_APNG_DEMUXER 0 +#define CONFIG_AQTITLE_DEMUXER 0 +#define CONFIG_ASF_DEMUXER 0 +#define CONFIG_ASF_O_DEMUXER 0 +#define CONFIG_ASS_DEMUXER 0 +#define CONFIG_AST_DEMUXER 0 +#define CONFIG_AU_DEMUXER 0 +#define CONFIG_AVI_DEMUXER 0 +#define CONFIG_AVISYNTH_DEMUXER 0 +#define CONFIG_AVR_DEMUXER 0 +#define 
CONFIG_AVS_DEMUXER 0 +#define CONFIG_BETHSOFTVID_DEMUXER 0 +#define CONFIG_BFI_DEMUXER 0 +#define CONFIG_BINTEXT_DEMUXER 0 +#define CONFIG_BINK_DEMUXER 0 +#define CONFIG_BIT_DEMUXER 0 +#define CONFIG_BMV_DEMUXER 0 +#define CONFIG_BFSTM_DEMUXER 0 +#define CONFIG_BRSTM_DEMUXER 0 +#define CONFIG_BOA_DEMUXER 0 +#define CONFIG_C93_DEMUXER 0 +#define CONFIG_CAF_DEMUXER 0 +#define CONFIG_CAVSVIDEO_DEMUXER 0 +#define CONFIG_CDG_DEMUXER 0 +#define CONFIG_CDXL_DEMUXER 0 +#define CONFIG_CINE_DEMUXER 0 +#define CONFIG_CONCAT_DEMUXER 1 +#define CONFIG_DATA_DEMUXER 1 +#define CONFIG_DAUD_DEMUXER 0 +#define CONFIG_DCSTR_DEMUXER 0 +#define CONFIG_DFA_DEMUXER 0 +#define CONFIG_DIRAC_DEMUXER 0 +#define CONFIG_DNXHD_DEMUXER 0 +#define CONFIG_DSF_DEMUXER 0 +#define CONFIG_DSICIN_DEMUXER 0 +#define CONFIG_DSS_DEMUXER 0 +#define CONFIG_DTS_DEMUXER 0 +#define CONFIG_DTSHD_DEMUXER 0 +#define CONFIG_DV_DEMUXER 0 +#define CONFIG_DVBSUB_DEMUXER 0 +#define CONFIG_DVBTXT_DEMUXER 0 +#define CONFIG_DXA_DEMUXER 0 +#define CONFIG_EA_DEMUXER 0 +#define CONFIG_EA_CDATA_DEMUXER 0 +#define CONFIG_EAC3_DEMUXER 0 +#define CONFIG_EPAF_DEMUXER 0 +#define CONFIG_FFM_DEMUXER 0 +#define CONFIG_FFMETADATA_DEMUXER 0 +#define CONFIG_FILMSTRIP_DEMUXER 0 +#define CONFIG_FLAC_DEMUXER 1 +#define CONFIG_FLIC_DEMUXER 0 +#define CONFIG_FLV_DEMUXER 1 +#define CONFIG_LIVE_FLV_DEMUXER 1 +#define CONFIG_FOURXM_DEMUXER 0 +#define CONFIG_FRM_DEMUXER 0 +#define CONFIG_FSB_DEMUXER 0 +#define CONFIG_G722_DEMUXER 0 +#define CONFIG_G723_1_DEMUXER 0 +#define CONFIG_G729_DEMUXER 0 +#define CONFIG_GENH_DEMUXER 0 +#define CONFIG_GIF_DEMUXER 0 +#define CONFIG_GSM_DEMUXER 0 +#define CONFIG_GXF_DEMUXER 0 +#define CONFIG_H261_DEMUXER 0 +#define CONFIG_H263_DEMUXER 0 +#define CONFIG_H264_DEMUXER 0 +#define CONFIG_HEVC_DEMUXER 1 +#define CONFIG_HLS_DEMUXER 1 +#define CONFIG_HNM_DEMUXER 0 +#define CONFIG_ICO_DEMUXER 0 +#define CONFIG_IDCIN_DEMUXER 0 +#define CONFIG_IDF_DEMUXER 0 +#define CONFIG_IFF_DEMUXER 0 +#define CONFIG_ILBC_DEMUXER 0 
+#define CONFIG_IMAGE2_DEMUXER 0 +#define CONFIG_IMAGE2PIPE_DEMUXER 0 +#define CONFIG_IMAGE2_ALIAS_PIX_DEMUXER 0 +#define CONFIG_IMAGE2_BRENDER_PIX_DEMUXER 0 +#define CONFIG_INGENIENT_DEMUXER 0 +#define CONFIG_IPMOVIE_DEMUXER 0 +#define CONFIG_IRCAM_DEMUXER 0 +#define CONFIG_ISS_DEMUXER 0 +#define CONFIG_IV8_DEMUXER 0 +#define CONFIG_IVF_DEMUXER 0 +#define CONFIG_IVR_DEMUXER 0 +#define CONFIG_JACOSUB_DEMUXER 0 +#define CONFIG_JV_DEMUXER 0 +#define CONFIG_LMLM4_DEMUXER 0 +#define CONFIG_LOAS_DEMUXER 0 +#define CONFIG_LRC_DEMUXER 0 +#define CONFIG_LVF_DEMUXER 0 +#define CONFIG_LXF_DEMUXER 0 +#define CONFIG_M4V_DEMUXER 0 +#define CONFIG_MATROSKA_DEMUXER 0 +#define CONFIG_MGSTS_DEMUXER 0 +#define CONFIG_MICRODVD_DEMUXER 0 +#define CONFIG_MJPEG_DEMUXER 0 +#define CONFIG_MJPEG_2000_DEMUXER 0 +#define CONFIG_MLP_DEMUXER 0 +#define CONFIG_MLV_DEMUXER 0 +#define CONFIG_MM_DEMUXER 0 +#define CONFIG_MMF_DEMUXER 0 +#define CONFIG_MOV_DEMUXER 1 +#define CONFIG_MP3_DEMUXER 1 +#define CONFIG_MPC_DEMUXER 0 +#define CONFIG_MPC8_DEMUXER 0 +#define CONFIG_MPEGPS_DEMUXER 1 +#define CONFIG_MPEGTS_DEMUXER 1 +#define CONFIG_MPEGTSRAW_DEMUXER 0 +#define CONFIG_MPEGVIDEO_DEMUXER 1 +#define CONFIG_MPJPEG_DEMUXER 0 +#define CONFIG_MPL2_DEMUXER 0 +#define CONFIG_MPSUB_DEMUXER 0 +#define CONFIG_MSF_DEMUXER 0 +#define CONFIG_MSNWC_TCP_DEMUXER 0 +#define CONFIG_MTAF_DEMUXER 0 +#define CONFIG_MTV_DEMUXER 0 +#define CONFIG_MUSX_DEMUXER 0 +#define CONFIG_MV_DEMUXER 0 +#define CONFIG_MVI_DEMUXER 0 +#define CONFIG_MXF_DEMUXER 0 +#define CONFIG_MXG_DEMUXER 0 +#define CONFIG_NC_DEMUXER 0 +#define CONFIG_NISTSPHERE_DEMUXER 0 +#define CONFIG_NSV_DEMUXER 0 +#define CONFIG_NUT_DEMUXER 0 +#define CONFIG_NUV_DEMUXER 0 +#define CONFIG_OGG_DEMUXER 0 +#define CONFIG_OMA_DEMUXER 0 +#define CONFIG_PAF_DEMUXER 0 +#define CONFIG_PCM_ALAW_DEMUXER 0 +#define CONFIG_PCM_MULAW_DEMUXER 0 +#define CONFIG_PCM_F64BE_DEMUXER 0 +#define CONFIG_PCM_F64LE_DEMUXER 0 +#define CONFIG_PCM_F32BE_DEMUXER 0 +#define 
CONFIG_PCM_F32LE_DEMUXER 0 +#define CONFIG_PCM_S32BE_DEMUXER 0 +#define CONFIG_PCM_S32LE_DEMUXER 0 +#define CONFIG_PCM_S24BE_DEMUXER 0 +#define CONFIG_PCM_S24LE_DEMUXER 0 +#define CONFIG_PCM_S16BE_DEMUXER 0 +#define CONFIG_PCM_S16LE_DEMUXER 0 +#define CONFIG_PCM_S8_DEMUXER 0 +#define CONFIG_PCM_U32BE_DEMUXER 0 +#define CONFIG_PCM_U32LE_DEMUXER 0 +#define CONFIG_PCM_U24BE_DEMUXER 0 +#define CONFIG_PCM_U24LE_DEMUXER 0 +#define CONFIG_PCM_U16BE_DEMUXER 0 +#define CONFIG_PCM_U16LE_DEMUXER 0 +#define CONFIG_PCM_U8_DEMUXER 0 +#define CONFIG_PJS_DEMUXER 0 +#define CONFIG_PMP_DEMUXER 0 +#define CONFIG_PVA_DEMUXER 0 +#define CONFIG_PVF_DEMUXER 0 +#define CONFIG_QCP_DEMUXER 0 +#define CONFIG_R3D_DEMUXER 0 +#define CONFIG_RAWVIDEO_DEMUXER 0 +#define CONFIG_REALTEXT_DEMUXER 0 +#define CONFIG_REDSPARK_DEMUXER 0 +#define CONFIG_RL2_DEMUXER 0 +#define CONFIG_RM_DEMUXER 0 +#define CONFIG_ROQ_DEMUXER 0 +#define CONFIG_RPL_DEMUXER 0 +#define CONFIG_RSD_DEMUXER 0 +#define CONFIG_RSO_DEMUXER 0 +#define CONFIG_RTP_DEMUXER 0 +#define CONFIG_RTSP_DEMUXER 0 +#define CONFIG_SAMI_DEMUXER 0 +#define CONFIG_SAP_DEMUXER 0 +#define CONFIG_SBG_DEMUXER 0 +#define CONFIG_SCC_DEMUXER 0 +#define CONFIG_SDP_DEMUXER 0 +#define CONFIG_SDR2_DEMUXER 0 +#define CONFIG_SDS_DEMUXER 0 +#define CONFIG_SDX_DEMUXER 0 +#define CONFIG_SEGAFILM_DEMUXER 0 +#define CONFIG_SHORTEN_DEMUXER 0 +#define CONFIG_SIFF_DEMUXER 0 +#define CONFIG_SLN_DEMUXER 0 +#define CONFIG_SMACKER_DEMUXER 0 +#define CONFIG_SMJPEG_DEMUXER 0 +#define CONFIG_SMUSH_DEMUXER 0 +#define CONFIG_SOL_DEMUXER 0 +#define CONFIG_SOX_DEMUXER 0 +#define CONFIG_SPDIF_DEMUXER 0 +#define CONFIG_SRT_DEMUXER 0 +#define CONFIG_STR_DEMUXER 0 +#define CONFIG_STL_DEMUXER 0 +#define CONFIG_SUBVIEWER1_DEMUXER 0 +#define CONFIG_SUBVIEWER_DEMUXER 0 +#define CONFIG_SUP_DEMUXER 0 +#define CONFIG_SVAG_DEMUXER 0 +#define CONFIG_SWF_DEMUXER 0 +#define CONFIG_TAK_DEMUXER 0 +#define CONFIG_TEDCAPTIONS_DEMUXER 0 +#define CONFIG_THP_DEMUXER 0 +#define CONFIG_THREEDOSTR_DEMUXER 
0 +#define CONFIG_TIERTEXSEQ_DEMUXER 0 +#define CONFIG_TMV_DEMUXER 0 +#define CONFIG_TRUEHD_DEMUXER 0 +#define CONFIG_TTA_DEMUXER 0 +#define CONFIG_TXD_DEMUXER 0 +#define CONFIG_TTY_DEMUXER 0 +#define CONFIG_V210_DEMUXER 0 +#define CONFIG_V210X_DEMUXER 0 +#define CONFIG_VAG_DEMUXER 0 +#define CONFIG_VC1_DEMUXER 0 +#define CONFIG_VC1T_DEMUXER 0 +#define CONFIG_VIVO_DEMUXER 0 +#define CONFIG_VMD_DEMUXER 0 +#define CONFIG_VOBSUB_DEMUXER 0 +#define CONFIG_VOC_DEMUXER 0 +#define CONFIG_VPK_DEMUXER 0 +#define CONFIG_VPLAYER_DEMUXER 0 +#define CONFIG_VQF_DEMUXER 0 +#define CONFIG_W64_DEMUXER 0 +#define CONFIG_WAV_DEMUXER 1 +#define CONFIG_WC3_DEMUXER 0 +#define CONFIG_WEBM_DASH_MANIFEST_DEMUXER 0 +#define CONFIG_WEBVTT_DEMUXER 0 +#define CONFIG_WSAUD_DEMUXER 0 +#define CONFIG_WSD_DEMUXER 0 +#define CONFIG_WSVQA_DEMUXER 0 +#define CONFIG_WTV_DEMUXER 0 +#define CONFIG_WVE_DEMUXER 0 +#define CONFIG_WV_DEMUXER 0 +#define CONFIG_XA_DEMUXER 0 +#define CONFIG_XBIN_DEMUXER 0 +#define CONFIG_XMV_DEMUXER 0 +#define CONFIG_XVAG_DEMUXER 0 +#define CONFIG_XWMA_DEMUXER 0 +#define CONFIG_YOP_DEMUXER 0 +#define CONFIG_YUV4MPEGPIPE_DEMUXER 0 +#define CONFIG_IMAGE_BMP_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_DDS_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_DPX_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_EXR_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_J2K_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_JPEG_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_JPEGLS_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PAM_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PBM_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PCX_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PGMYUV_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PGM_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PICTOR_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PNG_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PPM_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PSD_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_QDRAW_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_SGI_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_SUNRAST_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_TIFF_PIPE_DEMUXER 0 +#define 
CONFIG_IMAGE_WEBP_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_XPM_PIPE_DEMUXER 0 +#define CONFIG_LIBGME_DEMUXER 0 +#define CONFIG_LIBMODPLUG_DEMUXER 0 +#define CONFIG_LIBNUT_DEMUXER 0 +#define CONFIG_LIBOPENMPT_DEMUXER 0 +#define CONFIG_A64MULTI_ENCODER 0 +#define CONFIG_A64MULTI5_ENCODER 0 +#define CONFIG_ALIAS_PIX_ENCODER 0 +#define CONFIG_AMV_ENCODER 0 +#define CONFIG_APNG_ENCODER 0 +#define CONFIG_ASV1_ENCODER 0 +#define CONFIG_ASV2_ENCODER 0 +#define CONFIG_AVRP_ENCODER 0 +#define CONFIG_AVUI_ENCODER 0 +#define CONFIG_AYUV_ENCODER 0 +#define CONFIG_BMP_ENCODER 0 +#define CONFIG_CINEPAK_ENCODER 0 +#define CONFIG_CLJR_ENCODER 0 +#define CONFIG_COMFORTNOISE_ENCODER 0 +#define CONFIG_DNXHD_ENCODER 0 +#define CONFIG_DPX_ENCODER 0 +#define CONFIG_DVVIDEO_ENCODER 0 +#define CONFIG_FFV1_ENCODER 0 +#define CONFIG_FFVHUFF_ENCODER 0 +#define CONFIG_FLASHSV_ENCODER 0 +#define CONFIG_FLASHSV2_ENCODER 0 +#define CONFIG_FLV_ENCODER 0 +#define CONFIG_GIF_ENCODER 0 +#define CONFIG_H261_ENCODER 0 +#define CONFIG_H263_ENCODER 0 +#define CONFIG_H263P_ENCODER 0 +#define CONFIG_HAP_ENCODER 0 +#define CONFIG_HUFFYUV_ENCODER 0 +#define CONFIG_JPEG2000_ENCODER 0 +#define CONFIG_JPEGLS_ENCODER 0 +#define CONFIG_LJPEG_ENCODER 0 +#define CONFIG_MJPEG_ENCODER 0 +#define CONFIG_MPEG1VIDEO_ENCODER 0 +#define CONFIG_MPEG2VIDEO_ENCODER 0 +#define CONFIG_MPEG4_ENCODER 0 +#define CONFIG_MSMPEG4V2_ENCODER 0 +#define CONFIG_MSMPEG4V3_ENCODER 0 +#define CONFIG_MSVIDEO1_ENCODER 0 +#define CONFIG_PAM_ENCODER 0 +#define CONFIG_PBM_ENCODER 0 +#define CONFIG_PCX_ENCODER 0 +#define CONFIG_PGM_ENCODER 0 +#define CONFIG_PGMYUV_ENCODER 0 +#define CONFIG_PNG_ENCODER 1 +#define CONFIG_PPM_ENCODER 0 +#define CONFIG_PRORES_ENCODER 0 +#define CONFIG_PRORES_AW_ENCODER 0 +#define CONFIG_PRORES_KS_ENCODER 0 +#define CONFIG_QTRLE_ENCODER 0 +#define CONFIG_R10K_ENCODER 0 +#define CONFIG_R210_ENCODER 0 +#define CONFIG_RAWVIDEO_ENCODER 0 +#define CONFIG_ROQ_ENCODER 0 +#define CONFIG_RV10_ENCODER 0 +#define 
CONFIG_RV20_ENCODER 0 +#define CONFIG_S302M_ENCODER 0 +#define CONFIG_SGI_ENCODER 0 +#define CONFIG_SNOW_ENCODER 0 +#define CONFIG_SUNRAST_ENCODER 0 +#define CONFIG_SVQ1_ENCODER 0 +#define CONFIG_TARGA_ENCODER 0 +#define CONFIG_TIFF_ENCODER 0 +#define CONFIG_UTVIDEO_ENCODER 0 +#define CONFIG_V210_ENCODER 0 +#define CONFIG_V308_ENCODER 0 +#define CONFIG_V408_ENCODER 0 +#define CONFIG_V410_ENCODER 0 +#define CONFIG_VC2_ENCODER 0 +#define CONFIG_WRAPPED_AVFRAME_ENCODER 0 +#define CONFIG_WMV1_ENCODER 0 +#define CONFIG_WMV2_ENCODER 0 +#define CONFIG_XBM_ENCODER 0 +#define CONFIG_XFACE_ENCODER 0 +#define CONFIG_XWD_ENCODER 0 +#define CONFIG_Y41P_ENCODER 0 +#define CONFIG_YUV4_ENCODER 0 +#define CONFIG_ZLIB_ENCODER 0 +#define CONFIG_ZMBV_ENCODER 0 +#define CONFIG_AAC_ENCODER 0 +#define CONFIG_AC3_ENCODER 0 +#define CONFIG_AC3_FIXED_ENCODER 0 +#define CONFIG_ALAC_ENCODER 0 +#define CONFIG_DCA_ENCODER 0 +#define CONFIG_EAC3_ENCODER 0 +#define CONFIG_FLAC_ENCODER 0 +#define CONFIG_G723_1_ENCODER 0 +#define CONFIG_MLP_ENCODER 0 +#define CONFIG_MP2_ENCODER 0 +#define CONFIG_MP2FIXED_ENCODER 0 +#define CONFIG_NELLYMOSER_ENCODER 0 +#define CONFIG_OPUS_ENCODER 0 +#define CONFIG_RA_144_ENCODER 0 +#define CONFIG_SONIC_ENCODER 0 +#define CONFIG_SONIC_LS_ENCODER 0 +#define CONFIG_TRUEHD_ENCODER 0 +#define CONFIG_TTA_ENCODER 0 +#define CONFIG_VORBIS_ENCODER 0 +#define CONFIG_WAVPACK_ENCODER 0 +#define CONFIG_WMAV1_ENCODER 0 +#define CONFIG_WMAV2_ENCODER 0 +#define CONFIG_PCM_ALAW_ENCODER 0 +#define CONFIG_PCM_F32BE_ENCODER 0 +#define CONFIG_PCM_F32LE_ENCODER 0 +#define CONFIG_PCM_F64BE_ENCODER 0 +#define CONFIG_PCM_F64LE_ENCODER 0 +#define CONFIG_PCM_MULAW_ENCODER 0 +#define CONFIG_PCM_S8_ENCODER 0 +#define CONFIG_PCM_S8_PLANAR_ENCODER 0 +#define CONFIG_PCM_S16BE_ENCODER 0 +#define CONFIG_PCM_S16BE_PLANAR_ENCODER 0 +#define CONFIG_PCM_S16LE_ENCODER 0 +#define CONFIG_PCM_S16LE_PLANAR_ENCODER 0 +#define CONFIG_PCM_S24BE_ENCODER 0 +#define CONFIG_PCM_S24DAUD_ENCODER 0 +#define 
CONFIG_PCM_S24LE_ENCODER 0 +#define CONFIG_PCM_S24LE_PLANAR_ENCODER 0 +#define CONFIG_PCM_S32BE_ENCODER 0 +#define CONFIG_PCM_S32LE_ENCODER 0 +#define CONFIG_PCM_S32LE_PLANAR_ENCODER 0 +#define CONFIG_PCM_S64BE_ENCODER 0 +#define CONFIG_PCM_S64LE_ENCODER 0 +#define CONFIG_PCM_U8_ENCODER 0 +#define CONFIG_PCM_U16BE_ENCODER 0 +#define CONFIG_PCM_U16LE_ENCODER 0 +#define CONFIG_PCM_U24BE_ENCODER 0 +#define CONFIG_PCM_U24LE_ENCODER 0 +#define CONFIG_PCM_U32BE_ENCODER 0 +#define CONFIG_PCM_U32LE_ENCODER 0 +#define CONFIG_ROQ_DPCM_ENCODER 0 +#define CONFIG_ADPCM_ADX_ENCODER 0 +#define CONFIG_ADPCM_G722_ENCODER 0 +#define CONFIG_ADPCM_G726_ENCODER 0 +#define CONFIG_ADPCM_IMA_QT_ENCODER 0 +#define CONFIG_ADPCM_IMA_WAV_ENCODER 0 +#define CONFIG_ADPCM_MS_ENCODER 0 +#define CONFIG_ADPCM_SWF_ENCODER 0 +#define CONFIG_ADPCM_YAMAHA_ENCODER 0 +#define CONFIG_SSA_ENCODER 0 +#define CONFIG_ASS_ENCODER 0 +#define CONFIG_DVBSUB_ENCODER 0 +#define CONFIG_DVDSUB_ENCODER 0 +#define CONFIG_MOVTEXT_ENCODER 0 +#define CONFIG_SRT_ENCODER 0 +#define CONFIG_SUBRIP_ENCODER 0 +#define CONFIG_TEXT_ENCODER 0 +#define CONFIG_WEBVTT_ENCODER 0 +#define CONFIG_XSUB_ENCODER 0 +#define CONFIG_AAC_AT_ENCODER 0 +#define CONFIG_ALAC_AT_ENCODER 0 +#define CONFIG_ILBC_AT_ENCODER 0 +#define CONFIG_PCM_ALAW_AT_ENCODER 0 +#define CONFIG_PCM_MULAW_AT_ENCODER 0 +#define CONFIG_LIBFDK_AAC_ENCODER 0 +#define CONFIG_LIBGSM_ENCODER 0 +#define CONFIG_LIBGSM_MS_ENCODER 0 +#define CONFIG_LIBILBC_ENCODER 0 +#define CONFIG_LIBMP3LAME_ENCODER 0 +#define CONFIG_LIBOPENCORE_AMRNB_ENCODER 0 +#define CONFIG_LIBOPENJPEG_ENCODER 0 +#define CONFIG_LIBOPUS_ENCODER 0 +#define CONFIG_LIBSCHROEDINGER_ENCODER 0 +#define CONFIG_LIBSHINE_ENCODER 0 +#define CONFIG_LIBSPEEX_ENCODER 0 +#define CONFIG_LIBTHEORA_ENCODER 0 +#define CONFIG_LIBTWOLAME_ENCODER 0 +#define CONFIG_LIBVO_AMRWBENC_ENCODER 0 +#define CONFIG_LIBVORBIS_ENCODER 0 +#define CONFIG_LIBVPX_VP8_ENCODER 0 +#define CONFIG_LIBVPX_VP9_ENCODER 0 +#define CONFIG_LIBWAVPACK_ENCODER 
0 +#define CONFIG_LIBWEBP_ANIM_ENCODER 0 +#define CONFIG_LIBWEBP_ENCODER 0 +#define CONFIG_LIBX262_ENCODER 0 +#define CONFIG_LIBX264_ENCODER 0 +#define CONFIG_LIBX264RGB_ENCODER 0 +#define CONFIG_LIBX265_ENCODER 0 +#define CONFIG_LIBXAVS_ENCODER 0 +#define CONFIG_LIBXVID_ENCODER 0 +#define CONFIG_LIBOPENH264_ENCODER 0 +#define CONFIG_H264_NVENC_ENCODER 0 +#define CONFIG_H264_OMX_ENCODER 0 +#define CONFIG_H264_QSV_ENCODER 0 +#define CONFIG_H264_VAAPI_ENCODER 0 +#define CONFIG_H264_VIDEOTOOLBOX_ENCODER 0 +#define CONFIG_NVENC_ENCODER 0 +#define CONFIG_NVENC_H264_ENCODER 0 +#define CONFIG_NVENC_HEVC_ENCODER 0 +#define CONFIG_HEVC_NVENC_ENCODER 0 +#define CONFIG_HEVC_QSV_ENCODER 0 +#define CONFIG_HEVC_VAAPI_ENCODER 0 +#define CONFIG_LIBKVAZAAR_ENCODER 0 +#define CONFIG_MJPEG_VAAPI_ENCODER 0 +#define CONFIG_MPEG2_QSV_ENCODER 0 +#define CONFIG_MPEG2_VAAPI_ENCODER 0 +#define CONFIG_VP8_VAAPI_ENCODER 0 +#define CONFIG_ABENCH_FILTER 0 +#define CONFIG_ACOMPRESSOR_FILTER 0 +#define CONFIG_ACROSSFADE_FILTER 0 +#define CONFIG_ACRUSHER_FILTER 0 +#define CONFIG_ADELAY_FILTER 0 +#define CONFIG_AECHO_FILTER 0 +#define CONFIG_AEMPHASIS_FILTER 0 +#define CONFIG_AEVAL_FILTER 0 +#define CONFIG_AFADE_FILTER 0 +#define CONFIG_AFFTFILT_FILTER 0 +#define CONFIG_AFORMAT_FILTER 0 +#define CONFIG_AGATE_FILTER 0 +#define CONFIG_AINTERLEAVE_FILTER 0 +#define CONFIG_ALIMITER_FILTER 0 +#define CONFIG_ALLPASS_FILTER 0 +#define CONFIG_ALOOP_FILTER 0 +#define CONFIG_AMERGE_FILTER 0 +#define CONFIG_AMETADATA_FILTER 0 +#define CONFIG_AMIX_FILTER 0 +#define CONFIG_ANEQUALIZER_FILTER 0 +#define CONFIG_ANULL_FILTER 0 +#define CONFIG_APAD_FILTER 0 +#define CONFIG_APERMS_FILTER 0 +#define CONFIG_APHASER_FILTER 0 +#define CONFIG_APULSATOR_FILTER 0 +#define CONFIG_AREALTIME_FILTER 0 +#define CONFIG_ARESAMPLE_FILTER 0 +#define CONFIG_AREVERSE_FILTER 0 +#define CONFIG_ASELECT_FILTER 0 +#define CONFIG_ASENDCMD_FILTER 0 +#define CONFIG_ASETNSAMPLES_FILTER 0 +#define CONFIG_ASETPTS_FILTER 0 +#define 
CONFIG_ASETRATE_FILTER 0 +#define CONFIG_ASETTB_FILTER 0 +#define CONFIG_ASHOWINFO_FILTER 0 +#define CONFIG_ASIDEDATA_FILTER 0 +#define CONFIG_ASPLIT_FILTER 0 +#define CONFIG_ASTATS_FILTER 0 +#define CONFIG_ASTREAMSELECT_FILTER 0 +#define CONFIG_ATEMPO_FILTER 0 +#define CONFIG_ATRIM_FILTER 0 +#define CONFIG_AZMQ_FILTER 0 +#define CONFIG_BANDPASS_FILTER 0 +#define CONFIG_BANDREJECT_FILTER 0 +#define CONFIG_BASS_FILTER 0 +#define CONFIG_BIQUAD_FILTER 0 +#define CONFIG_BS2B_FILTER 0 +#define CONFIG_CHANNELMAP_FILTER 0 +#define CONFIG_CHANNELSPLIT_FILTER 0 +#define CONFIG_CHORUS_FILTER 0 +#define CONFIG_COMPAND_FILTER 0 +#define CONFIG_COMPENSATIONDELAY_FILTER 0 +#define CONFIG_CRYSTALIZER_FILTER 0 +#define CONFIG_DCSHIFT_FILTER 0 +#define CONFIG_DYNAUDNORM_FILTER 0 +#define CONFIG_EARWAX_FILTER 0 +#define CONFIG_EBUR128_FILTER 0 +#define CONFIG_EQUALIZER_FILTER 0 +#define CONFIG_EXTRASTEREO_FILTER 0 +#define CONFIG_FIREQUALIZER_FILTER 0 +#define CONFIG_FLANGER_FILTER 0 +#define CONFIG_HDCD_FILTER 0 +#define CONFIG_HIGHPASS_FILTER 0 +#define CONFIG_JOIN_FILTER 0 +#define CONFIG_LADSPA_FILTER 0 +#define CONFIG_LOUDNORM_FILTER 0 +#define CONFIG_LOWPASS_FILTER 0 +#define CONFIG_PAN_FILTER 0 +#define CONFIG_REPLAYGAIN_FILTER 0 +#define CONFIG_RESAMPLE_FILTER 0 +#define CONFIG_RUBBERBAND_FILTER 0 +#define CONFIG_SIDECHAINCOMPRESS_FILTER 0 +#define CONFIG_SIDECHAINGATE_FILTER 0 +#define CONFIG_SILENCEDETECT_FILTER 0 +#define CONFIG_SILENCEREMOVE_FILTER 0 +#define CONFIG_SOFALIZER_FILTER 0 +#define CONFIG_STEREOTOOLS_FILTER 0 +#define CONFIG_STEREOWIDEN_FILTER 0 +#define CONFIG_TREBLE_FILTER 0 +#define CONFIG_TREMOLO_FILTER 0 +#define CONFIG_VIBRATO_FILTER 0 +#define CONFIG_VOLUME_FILTER 0 +#define CONFIG_VOLUMEDETECT_FILTER 0 +#define CONFIG_AEVALSRC_FILTER 0 +#define CONFIG_ANOISESRC_FILTER 0 +#define CONFIG_ANULLSRC_FILTER 0 +#define CONFIG_FLITE_FILTER 0 +#define CONFIG_SINE_FILTER 0 +#define CONFIG_ANULLSINK_FILTER 0 +#define CONFIG_ALPHAEXTRACT_FILTER 0 +#define 
CONFIG_ALPHAMERGE_FILTER 0 +#define CONFIG_ASS_FILTER 0 +#define CONFIG_ATADENOISE_FILTER 0 +#define CONFIG_AVGBLUR_FILTER 0 +#define CONFIG_BBOX_FILTER 0 +#define CONFIG_BENCH_FILTER 0 +#define CONFIG_BITPLANENOISE_FILTER 0 +#define CONFIG_BLACKDETECT_FILTER 0 +#define CONFIG_BLACKFRAME_FILTER 0 +#define CONFIG_BLEND_FILTER 0 +#define CONFIG_BOXBLUR_FILTER 0 +#define CONFIG_BWDIF_FILTER 0 +#define CONFIG_CHROMAKEY_FILTER 0 +#define CONFIG_CIESCOPE_FILTER 0 +#define CONFIG_CODECVIEW_FILTER 0 +#define CONFIG_COLORBALANCE_FILTER 0 +#define CONFIG_COLORCHANNELMIXER_FILTER 0 +#define CONFIG_COLORKEY_FILTER 0 +#define CONFIG_COLORLEVELS_FILTER 0 +#define CONFIG_COLORMATRIX_FILTER 0 +#define CONFIG_COLORSPACE_FILTER 0 +#define CONFIG_CONVOLUTION_FILTER 0 +#define CONFIG_COPY_FILTER 0 +#define CONFIG_COREIMAGE_FILTER 0 +#define CONFIG_COVER_RECT_FILTER 0 +#define CONFIG_CROP_FILTER 0 +#define CONFIG_CROPDETECT_FILTER 0 +#define CONFIG_CURVES_FILTER 0 +#define CONFIG_DATASCOPE_FILTER 0 +#define CONFIG_DCTDNOIZ_FILTER 0 +#define CONFIG_DEBAND_FILTER 0 +#define CONFIG_DECIMATE_FILTER 0 +#define CONFIG_DEFLATE_FILTER 0 +#define CONFIG_DEINTERLACE_QSV_FILTER 0 +#define CONFIG_DEINTERLACE_VAAPI_FILTER 0 +#define CONFIG_DEJUDDER_FILTER 0 +#define CONFIG_DELOGO_FILTER 0 +#define CONFIG_DESHAKE_FILTER 0 +#define CONFIG_DETELECINE_FILTER 0 +#define CONFIG_DILATION_FILTER 0 +#define CONFIG_DISPLACE_FILTER 0 +#define CONFIG_DRAWBOX_FILTER 0 +#define CONFIG_DRAWGRAPH_FILTER 0 +#define CONFIG_DRAWGRID_FILTER 0 +#define CONFIG_DRAWTEXT_FILTER 0 +#define CONFIG_EDGEDETECT_FILTER 0 +#define CONFIG_ELBG_FILTER 0 +#define CONFIG_EQ_FILTER 0 +#define CONFIG_EROSION_FILTER 0 +#define CONFIG_EXTRACTPLANES_FILTER 0 +#define CONFIG_FADE_FILTER 0 +#define CONFIG_FFTFILT_FILTER 0 +#define CONFIG_FIELD_FILTER 0 +#define CONFIG_FIELDHINT_FILTER 0 +#define CONFIG_FIELDMATCH_FILTER 0 +#define CONFIG_FIELDORDER_FILTER 0 +#define CONFIG_FIND_RECT_FILTER 0 +#define CONFIG_FORMAT_FILTER 0 +#define 
CONFIG_FPS_FILTER 0 +#define CONFIG_FRAMEPACK_FILTER 0 +#define CONFIG_FRAMERATE_FILTER 0 +#define CONFIG_FRAMESTEP_FILTER 0 +#define CONFIG_FREI0R_FILTER 0 +#define CONFIG_FSPP_FILTER 0 +#define CONFIG_GBLUR_FILTER 0 +#define CONFIG_GEQ_FILTER 0 +#define CONFIG_GRADFUN_FILTER 0 +#define CONFIG_HALDCLUT_FILTER 0 +#define CONFIG_HFLIP_FILTER 0 +#define CONFIG_HISTEQ_FILTER 0 +#define CONFIG_HISTOGRAM_FILTER 0 +#define CONFIG_HQDN3D_FILTER 0 +#define CONFIG_HQX_FILTER 0 +#define CONFIG_HSTACK_FILTER 0 +#define CONFIG_HUE_FILTER 0 +#define CONFIG_HWDOWNLOAD_FILTER 0 +#define CONFIG_HWMAP_FILTER 0 +#define CONFIG_HWUPLOAD_FILTER 0 +#define CONFIG_HWUPLOAD_CUDA_FILTER 0 +#define CONFIG_HYSTERESIS_FILTER 0 +#define CONFIG_IDET_FILTER 0 +#define CONFIG_IL_FILTER 0 +#define CONFIG_INFLATE_FILTER 0 +#define CONFIG_INTERLACE_FILTER 0 +#define CONFIG_INTERLEAVE_FILTER 0 +#define CONFIG_KERNDEINT_FILTER 0 +#define CONFIG_LENSCORRECTION_FILTER 0 +#define CONFIG_LOOP_FILTER 0 +#define CONFIG_LUT_FILTER 0 +#define CONFIG_LUT2_FILTER 0 +#define CONFIG_LUT3D_FILTER 0 +#define CONFIG_LUTRGB_FILTER 0 +#define CONFIG_LUTYUV_FILTER 0 +#define CONFIG_MASKEDCLAMP_FILTER 0 +#define CONFIG_MASKEDMERGE_FILTER 0 +#define CONFIG_MCDEINT_FILTER 0 +#define CONFIG_MERGEPLANES_FILTER 0 +#define CONFIG_MESTIMATE_FILTER 0 +#define CONFIG_METADATA_FILTER 0 +#define CONFIG_MIDEQUALIZER_FILTER 0 +#define CONFIG_MINTERPOLATE_FILTER 0 +#define CONFIG_MPDECIMATE_FILTER 0 +#define CONFIG_NEGATE_FILTER 0 +#define CONFIG_NLMEANS_FILTER 0 +#define CONFIG_NNEDI_FILTER 0 +#define CONFIG_NOFORMAT_FILTER 0 +#define CONFIG_NOISE_FILTER 0 +#define CONFIG_NULL_FILTER 0 +#define CONFIG_OCR_FILTER 0 +#define CONFIG_OCV_FILTER 0 +#define CONFIG_OVERLAY_FILTER 0 +#define CONFIG_OWDENOISE_FILTER 0 +#define CONFIG_PAD_FILTER 0 +#define CONFIG_PALETTEGEN_FILTER 0 +#define CONFIG_PALETTEUSE_FILTER 0 +#define CONFIG_PERMS_FILTER 0 +#define CONFIG_PERSPECTIVE_FILTER 0 +#define CONFIG_PHASE_FILTER 0 +#define 
CONFIG_PIXDESCTEST_FILTER 0 +#define CONFIG_PP_FILTER 0 +#define CONFIG_PP7_FILTER 0 +#define CONFIG_PREMULTIPLY_FILTER 0 +#define CONFIG_PREWITT_FILTER 0 +#define CONFIG_PSNR_FILTER 0 +#define CONFIG_PULLUP_FILTER 0 +#define CONFIG_QP_FILTER 0 +#define CONFIG_RANDOM_FILTER 0 +#define CONFIG_READEIA608_FILTER 0 +#define CONFIG_READVITC_FILTER 0 +#define CONFIG_REALTIME_FILTER 0 +#define CONFIG_REMAP_FILTER 0 +#define CONFIG_REMOVEGRAIN_FILTER 0 +#define CONFIG_REMOVELOGO_FILTER 0 +#define CONFIG_REPEATFIELDS_FILTER 0 +#define CONFIG_REVERSE_FILTER 0 +#define CONFIG_ROTATE_FILTER 0 +#define CONFIG_SAB_FILTER 0 +#define CONFIG_SCALE_FILTER 0 +#define CONFIG_SCALE_NPP_FILTER 0 +#define CONFIG_SCALE_QSV_FILTER 0 +#define CONFIG_SCALE_VAAPI_FILTER 0 +#define CONFIG_SCALE2REF_FILTER 0 +#define CONFIG_SELECT_FILTER 0 +#define CONFIG_SELECTIVECOLOR_FILTER 0 +#define CONFIG_SENDCMD_FILTER 0 +#define CONFIG_SEPARATEFIELDS_FILTER 0 +#define CONFIG_SETDAR_FILTER 0 +#define CONFIG_SETFIELD_FILTER 0 +#define CONFIG_SETPTS_FILTER 0 +#define CONFIG_SETSAR_FILTER 0 +#define CONFIG_SETTB_FILTER 0 +#define CONFIG_SHOWINFO_FILTER 0 +#define CONFIG_SHOWPALETTE_FILTER 0 +#define CONFIG_SHUFFLEFRAMES_FILTER 0 +#define CONFIG_SHUFFLEPLANES_FILTER 0 +#define CONFIG_SIDEDATA_FILTER 0 +#define CONFIG_SIGNALSTATS_FILTER 0 +#define CONFIG_SIGNATURE_FILTER 0 +#define CONFIG_SMARTBLUR_FILTER 0 +#define CONFIG_SOBEL_FILTER 0 +#define CONFIG_SPLIT_FILTER 0 +#define CONFIG_SPP_FILTER 0 +#define CONFIG_SSIM_FILTER 0 +#define CONFIG_STEREO3D_FILTER 0 +#define CONFIG_STREAMSELECT_FILTER 0 +#define CONFIG_SUBTITLES_FILTER 0 +#define CONFIG_SUPER2XSAI_FILTER 0 +#define CONFIG_SWAPRECT_FILTER 0 +#define CONFIG_SWAPUV_FILTER 0 +#define CONFIG_TBLEND_FILTER 0 +#define CONFIG_TELECINE_FILTER 0 +#define CONFIG_THRESHOLD_FILTER 0 +#define CONFIG_THUMBNAIL_FILTER 0 +#define CONFIG_TILE_FILTER 0 +#define CONFIG_TINTERLACE_FILTER 0 +#define CONFIG_TRANSPOSE_FILTER 0 +#define CONFIG_TRIM_FILTER 0 +#define 
CONFIG_UNSHARP_FILTER 0 +#define CONFIG_USPP_FILTER 0 +#define CONFIG_VAGUEDENOISER_FILTER 0 +#define CONFIG_VECTORSCOPE_FILTER 0 +#define CONFIG_VFLIP_FILTER 0 +#define CONFIG_VIDSTABDETECT_FILTER 0 +#define CONFIG_VIDSTABTRANSFORM_FILTER 0 +#define CONFIG_VIGNETTE_FILTER 0 +#define CONFIG_VSTACK_FILTER 0 +#define CONFIG_W3FDIF_FILTER 0 +#define CONFIG_WAVEFORM_FILTER 0 +#define CONFIG_WEAVE_FILTER 0 +#define CONFIG_XBR_FILTER 0 +#define CONFIG_YADIF_FILTER 0 +#define CONFIG_ZMQ_FILTER 0 +#define CONFIG_ZOOMPAN_FILTER 0 +#define CONFIG_ZSCALE_FILTER 0 +#define CONFIG_ALLRGB_FILTER 0 +#define CONFIG_ALLYUV_FILTER 0 +#define CONFIG_CELLAUTO_FILTER 0 +#define CONFIG_COLOR_FILTER 0 +#define CONFIG_COREIMAGESRC_FILTER 0 +#define CONFIG_FREI0R_SRC_FILTER 0 +#define CONFIG_HALDCLUTSRC_FILTER 0 +#define CONFIG_LIFE_FILTER 0 +#define CONFIG_MANDELBROT_FILTER 0 +#define CONFIG_MPTESTSRC_FILTER 0 +#define CONFIG_NULLSRC_FILTER 0 +#define CONFIG_RGBTESTSRC_FILTER 0 +#define CONFIG_SMPTEBARS_FILTER 0 +#define CONFIG_SMPTEHDBARS_FILTER 0 +#define CONFIG_TESTSRC_FILTER 0 +#define CONFIG_TESTSRC2_FILTER 0 +#define CONFIG_YUVTESTSRC_FILTER 0 +#define CONFIG_NULLSINK_FILTER 0 +#define CONFIG_ABITSCOPE_FILTER 0 +#define CONFIG_ADRAWGRAPH_FILTER 0 +#define CONFIG_AHISTOGRAM_FILTER 0 +#define CONFIG_APHASEMETER_FILTER 0 +#define CONFIG_AVECTORSCOPE_FILTER 0 +#define CONFIG_CONCAT_FILTER 0 +#define CONFIG_SHOWCQT_FILTER 0 +#define CONFIG_SHOWFREQS_FILTER 0 +#define CONFIG_SHOWSPECTRUM_FILTER 0 +#define CONFIG_SHOWSPECTRUMPIC_FILTER 0 +#define CONFIG_SHOWVOLUME_FILTER 0 +#define CONFIG_SHOWWAVES_FILTER 0 +#define CONFIG_SHOWWAVESPIC_FILTER 0 +#define CONFIG_SPECTRUMSYNTH_FILTER 0 +#define CONFIG_AMOVIE_FILTER 0 +#define CONFIG_MOVIE_FILTER 0 +#define CONFIG_H263_VAAPI_HWACCEL 0 +#define CONFIG_H263_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_H264_CUVID_HWACCEL 0 +#define CONFIG_H264_D3D11VA_HWACCEL 0 +#define CONFIG_H264_DXVA2_HWACCEL 0 +#define CONFIG_H264_MEDIACODEC_HWACCEL 0 +#define 
CONFIG_H264_MMAL_HWACCEL 0 +#define CONFIG_H264_QSV_HWACCEL 0 +#define CONFIG_H264_VAAPI_HWACCEL 0 +#define CONFIG_H264_VDA_HWACCEL 0 +#define CONFIG_H264_VDA_OLD_HWACCEL 0 +#define CONFIG_H264_VDPAU_HWACCEL 0 +#define CONFIG_H264_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_HEVC_CUVID_HWACCEL 0 +#define CONFIG_HEVC_D3D11VA_HWACCEL 0 +#define CONFIG_HEVC_DXVA2_HWACCEL 0 +#define CONFIG_HEVC_MEDIACODEC_HWACCEL 0 +#define CONFIG_HEVC_QSV_HWACCEL 0 +#define CONFIG_HEVC_VAAPI_HWACCEL 0 +#define CONFIG_HEVC_VDPAU_HWACCEL 0 +#define CONFIG_MJPEG_CUVID_HWACCEL 0 +#define CONFIG_MPEG1_CUVID_HWACCEL 0 +#define CONFIG_MPEG1_XVMC_HWACCEL 0 +#define CONFIG_MPEG1_VDPAU_HWACCEL 0 +#define CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_MPEG2_CUVID_HWACCEL 0 +#define CONFIG_MPEG2_XVMC_HWACCEL 0 +#define CONFIG_MPEG2_D3D11VA_HWACCEL 0 +#define CONFIG_MPEG2_DXVA2_HWACCEL 0 +#define CONFIG_MPEG2_MMAL_HWACCEL 0 +#define CONFIG_MPEG2_QSV_HWACCEL 0 +#define CONFIG_MPEG2_VAAPI_HWACCEL 0 +#define CONFIG_MPEG2_VDPAU_HWACCEL 0 +#define CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_MPEG4_CUVID_HWACCEL 0 +#define CONFIG_MPEG4_MEDIACODEC_HWACCEL 0 +#define CONFIG_MPEG4_MMAL_HWACCEL 0 +#define CONFIG_MPEG4_VAAPI_HWACCEL 0 +#define CONFIG_MPEG4_VDPAU_HWACCEL 0 +#define CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_VC1_CUVID_HWACCEL 0 +#define CONFIG_VC1_D3D11VA_HWACCEL 0 +#define CONFIG_VC1_DXVA2_HWACCEL 0 +#define CONFIG_VC1_VAAPI_HWACCEL 0 +#define CONFIG_VC1_VDPAU_HWACCEL 0 +#define CONFIG_VC1_MMAL_HWACCEL 0 +#define CONFIG_VC1_QSV_HWACCEL 0 +#define CONFIG_VP8_CUVID_HWACCEL 0 +#define CONFIG_VP8_MEDIACODEC_HWACCEL 0 +#define CONFIG_VP8_QSV_HWACCEL 0 +#define CONFIG_VP9_CUVID_HWACCEL 0 +#define CONFIG_VP9_D3D11VA_HWACCEL 0 +#define CONFIG_VP9_DXVA2_HWACCEL 0 +#define CONFIG_VP9_MEDIACODEC_HWACCEL 0 +#define CONFIG_VP9_VAAPI_HWACCEL 0 +#define CONFIG_WMV3_D3D11VA_HWACCEL 0 +#define CONFIG_WMV3_DXVA2_HWACCEL 0 +#define CONFIG_WMV3_VAAPI_HWACCEL 0 +#define CONFIG_WMV3_VDPAU_HWACCEL 0 
+#define CONFIG_ALSA_INDEV 0 +#define CONFIG_AVFOUNDATION_INDEV 0 +#define CONFIG_BKTR_INDEV 0 +#define CONFIG_DECKLINK_INDEV 0 +#define CONFIG_DSHOW_INDEV 0 +#define CONFIG_DV1394_INDEV 0 +#define CONFIG_FBDEV_INDEV 0 +#define CONFIG_GDIGRAB_INDEV 0 +#define CONFIG_IEC61883_INDEV 0 +#define CONFIG_JACK_INDEV 0 +#define CONFIG_LAVFI_INDEV 0 +#define CONFIG_OPENAL_INDEV 0 +#define CONFIG_OSS_INDEV 0 +#define CONFIG_PULSE_INDEV 0 +#define CONFIG_QTKIT_INDEV 0 +#define CONFIG_SNDIO_INDEV 0 +#define CONFIG_V4L2_INDEV 0 +#define CONFIG_VFWCAP_INDEV 0 +#define CONFIG_XCBGRAB_INDEV 0 +#define CONFIG_LIBCDIO_INDEV 0 +#define CONFIG_LIBDC1394_INDEV 0 +#define CONFIG_A64_MUXER 0 +#define CONFIG_AC3_MUXER 0 +#define CONFIG_ADTS_MUXER 0 +#define CONFIG_ADX_MUXER 0 +#define CONFIG_AIFF_MUXER 0 +#define CONFIG_AMR_MUXER 0 +#define CONFIG_APNG_MUXER 0 +#define CONFIG_ASF_MUXER 0 +#define CONFIG_ASS_MUXER 0 +#define CONFIG_AST_MUXER 0 +#define CONFIG_ASF_STREAM_MUXER 0 +#define CONFIG_AU_MUXER 0 +#define CONFIG_AVI_MUXER 0 +#define CONFIG_AVM2_MUXER 0 +#define CONFIG_BIT_MUXER 0 +#define CONFIG_CAF_MUXER 0 +#define CONFIG_CAVSVIDEO_MUXER 0 +#define CONFIG_CRC_MUXER 0 +#define CONFIG_DASH_MUXER 0 +#define CONFIG_DATA_MUXER 0 +#define CONFIG_DAUD_MUXER 0 +#define CONFIG_DIRAC_MUXER 0 +#define CONFIG_DNXHD_MUXER 0 +#define CONFIG_DTS_MUXER 0 +#define CONFIG_DV_MUXER 0 +#define CONFIG_EAC3_MUXER 0 +#define CONFIG_F4V_MUXER 0 +#define CONFIG_FFM_MUXER 0 +#define CONFIG_FFMETADATA_MUXER 0 +#define CONFIG_FIFO_MUXER 0 +#define CONFIG_FILMSTRIP_MUXER 0 +#define CONFIG_FLAC_MUXER 0 +#define CONFIG_FLV_MUXER 0 +#define CONFIG_FRAMECRC_MUXER 0 +#define CONFIG_FRAMEHASH_MUXER 0 +#define CONFIG_FRAMEMD5_MUXER 0 +#define CONFIG_G722_MUXER 0 +#define CONFIG_G723_1_MUXER 0 +#define CONFIG_GIF_MUXER 0 +#define CONFIG_GSM_MUXER 0 +#define CONFIG_GXF_MUXER 0 +#define CONFIG_H261_MUXER 0 +#define CONFIG_H263_MUXER 0 +#define CONFIG_H264_MUXER 0 +#define CONFIG_HASH_MUXER 0 +#define CONFIG_HDS_MUXER 0 
+#define CONFIG_HEVC_MUXER 0 +#define CONFIG_HLS_MUXER 0 +#define CONFIG_ICO_MUXER 0 +#define CONFIG_ILBC_MUXER 0 +#define CONFIG_IMAGE2_MUXER 0 +#define CONFIG_IMAGE2PIPE_MUXER 0 +#define CONFIG_IPOD_MUXER 0 +#define CONFIG_IRCAM_MUXER 0 +#define CONFIG_ISMV_MUXER 0 +#define CONFIG_IVF_MUXER 0 +#define CONFIG_JACOSUB_MUXER 0 +#define CONFIG_LATM_MUXER 0 +#define CONFIG_LRC_MUXER 0 +#define CONFIG_M4V_MUXER 0 +#define CONFIG_MD5_MUXER 0 +#define CONFIG_MATROSKA_MUXER 0 +#define CONFIG_MATROSKA_AUDIO_MUXER 0 +#define CONFIG_MICRODVD_MUXER 0 +#define CONFIG_MJPEG_MUXER 0 +#define CONFIG_MLP_MUXER 0 +#define CONFIG_MMF_MUXER 0 +#define CONFIG_MOV_MUXER 1 +#define CONFIG_MP2_MUXER 0 +#define CONFIG_MP3_MUXER 0 +#define CONFIG_MP4_MUXER 1 +#define CONFIG_MPEG1SYSTEM_MUXER 0 +#define CONFIG_MPEG1VCD_MUXER 0 +#define CONFIG_MPEG1VIDEO_MUXER 0 +#define CONFIG_MPEG2DVD_MUXER 0 +#define CONFIG_MPEG2SVCD_MUXER 0 +#define CONFIG_MPEG2VIDEO_MUXER 0 +#define CONFIG_MPEG2VOB_MUXER 0 +#define CONFIG_MPEGTS_MUXER 0 +#define CONFIG_MPJPEG_MUXER 0 +#define CONFIG_MXF_MUXER 0 +#define CONFIG_MXF_D10_MUXER 0 +#define CONFIG_MXF_OPATOM_MUXER 0 +#define CONFIG_NULL_MUXER 0 +#define CONFIG_NUT_MUXER 0 +#define CONFIG_OGA_MUXER 0 +#define CONFIG_OGG_MUXER 0 +#define CONFIG_OGV_MUXER 0 +#define CONFIG_OMA_MUXER 0 +#define CONFIG_OPUS_MUXER 0 +#define CONFIG_PCM_ALAW_MUXER 0 +#define CONFIG_PCM_MULAW_MUXER 0 +#define CONFIG_PCM_F64BE_MUXER 0 +#define CONFIG_PCM_F64LE_MUXER 0 +#define CONFIG_PCM_F32BE_MUXER 0 +#define CONFIG_PCM_F32LE_MUXER 0 +#define CONFIG_PCM_S32BE_MUXER 0 +#define CONFIG_PCM_S32LE_MUXER 0 +#define CONFIG_PCM_S24BE_MUXER 0 +#define CONFIG_PCM_S24LE_MUXER 0 +#define CONFIG_PCM_S16BE_MUXER 0 +#define CONFIG_PCM_S16LE_MUXER 0 +#define CONFIG_PCM_S8_MUXER 0 +#define CONFIG_PCM_U32BE_MUXER 0 +#define CONFIG_PCM_U32LE_MUXER 0 +#define CONFIG_PCM_U24BE_MUXER 0 +#define CONFIG_PCM_U24LE_MUXER 0 +#define CONFIG_PCM_U16BE_MUXER 0 +#define CONFIG_PCM_U16LE_MUXER 0 +#define 
CONFIG_PCM_U8_MUXER 0 +#define CONFIG_PSP_MUXER 0 +#define CONFIG_RAWVIDEO_MUXER 0 +#define CONFIG_RM_MUXER 0 +#define CONFIG_ROQ_MUXER 0 +#define CONFIG_RSO_MUXER 0 +#define CONFIG_RTP_MUXER 0 +#define CONFIG_RTP_MPEGTS_MUXER 0 +#define CONFIG_RTSP_MUXER 0 +#define CONFIG_SAP_MUXER 0 +#define CONFIG_SCC_MUXER 0 +#define CONFIG_SEGMENT_MUXER 0 +#define CONFIG_STREAM_SEGMENT_MUXER 0 +#define CONFIG_SINGLEJPEG_MUXER 0 +#define CONFIG_SMJPEG_MUXER 0 +#define CONFIG_SMOOTHSTREAMING_MUXER 0 +#define CONFIG_SOX_MUXER 0 +#define CONFIG_SPX_MUXER 0 +#define CONFIG_SPDIF_MUXER 0 +#define CONFIG_SRT_MUXER 0 +#define CONFIG_SWF_MUXER 0 +#define CONFIG_TEE_MUXER 0 +#define CONFIG_TG2_MUXER 0 +#define CONFIG_TGP_MUXER 0 +#define CONFIG_MKVTIMESTAMP_V2_MUXER 0 +#define CONFIG_TRUEHD_MUXER 0 +#define CONFIG_TTA_MUXER 0 +#define CONFIG_UNCODEDFRAMECRC_MUXER 0 +#define CONFIG_VC1_MUXER 0 +#define CONFIG_VC1T_MUXER 0 +#define CONFIG_VOC_MUXER 0 +#define CONFIG_W64_MUXER 0 +#define CONFIG_WAV_MUXER 0 +#define CONFIG_WEBM_MUXER 0 +#define CONFIG_WEBM_DASH_MANIFEST_MUXER 0 +#define CONFIG_WEBM_CHUNK_MUXER 0 +#define CONFIG_WEBP_MUXER 0 +#define CONFIG_WEBVTT_MUXER 0 +#define CONFIG_WTV_MUXER 0 +#define CONFIG_WV_MUXER 0 +#define CONFIG_YUV4MPEGPIPE_MUXER 0 +#define CONFIG_CHROMAPRINT_MUXER 0 +#define CONFIG_LIBNUT_MUXER 0 +#define CONFIG_ALSA_OUTDEV 0 +#define CONFIG_CACA_OUTDEV 0 +#define CONFIG_DECKLINK_OUTDEV 0 +#define CONFIG_FBDEV_OUTDEV 0 +#define CONFIG_OPENGL_OUTDEV 0 +#define CONFIG_OSS_OUTDEV 0 +#define CONFIG_PULSE_OUTDEV 0 +#define CONFIG_SDL2_OUTDEV 0 +#define CONFIG_SNDIO_OUTDEV 0 +#define CONFIG_V4L2_OUTDEV 0 +#define CONFIG_XV_OUTDEV 0 +#define CONFIG_AAC_PARSER 1 +#define CONFIG_AAC_LATM_PARSER 1 +#define CONFIG_AC3_PARSER 0 +#define CONFIG_ADX_PARSER 0 +#define CONFIG_BMP_PARSER 0 +#define CONFIG_CAVSVIDEO_PARSER 0 +#define CONFIG_COOK_PARSER 0 +#define CONFIG_DCA_PARSER 0 +#define CONFIG_DIRAC_PARSER 0 +#define CONFIG_DNXHD_PARSER 0 +#define CONFIG_DPX_PARSER 0 
+#define CONFIG_DVAUDIO_PARSER 0 +#define CONFIG_DVBSUB_PARSER 0 +#define CONFIG_DVDSUB_PARSER 0 +#define CONFIG_DVD_NAV_PARSER 0 +#define CONFIG_FLAC_PARSER 1 +#define CONFIG_G729_PARSER 0 +#define CONFIG_GSM_PARSER 0 +#define CONFIG_H261_PARSER 0 +#define CONFIG_H263_PARSER 1 +#define CONFIG_H264_PARSER 1 +#define CONFIG_HEVC_PARSER 1 +#define CONFIG_MJPEG_PARSER 0 +#define CONFIG_MLP_PARSER 0 +#define CONFIG_MPEG4VIDEO_PARSER 1 +#define CONFIG_MPEGAUDIO_PARSER 1 +#define CONFIG_MPEGVIDEO_PARSER 0 +#define CONFIG_OPUS_PARSER 0 +#define CONFIG_PNG_PARSER 0 +#define CONFIG_PNM_PARSER 0 +#define CONFIG_RV30_PARSER 0 +#define CONFIG_RV40_PARSER 0 +#define CONFIG_SIPR_PARSER 0 +#define CONFIG_TAK_PARSER 0 +#define CONFIG_VC1_PARSER 0 +#define CONFIG_VORBIS_PARSER 0 +#define CONFIG_VP3_PARSER 0 +#define CONFIG_VP8_PARSER 0 +#define CONFIG_VP9_PARSER 0 +#define CONFIG_XMA_PARSER 0 +#define CONFIG_ASYNC_PROTOCOL 1 +#define CONFIG_BLURAY_PROTOCOL 0 +#define CONFIG_CACHE_PROTOCOL 1 +#define CONFIG_CONCAT_PROTOCOL 0 +#define CONFIG_CRYPTO_PROTOCOL 1 +#define CONFIG_DATA_PROTOCOL 1 +#define CONFIG_FFRTMPCRYPT_PROTOCOL 0 +#define CONFIG_FFRTMPHTTP_PROTOCOL 1 +#define CONFIG_FILE_PROTOCOL 1 +#define CONFIG_FTP_PROTOCOL 1 +#define CONFIG_GOPHER_PROTOCOL 0 +#define CONFIG_HLS_PROTOCOL 1 +#define CONFIG_HTTP_PROTOCOL 1 +#define CONFIG_HTTPPROXY_PROTOCOL 1 +#define CONFIG_HTTPS_PROTOCOL 1 +#define CONFIG_ICECAST_PROTOCOL 0 +#define CONFIG_IJKHTTPHOOK_PROTOCOL 1 +#define CONFIG_IJKHLSCACHE_PROTOCOL 1 +#define CONFIG_IJKLONGURL_PROTOCOL 1 +#define CONFIG_IJKMEDIADATASOURCE_PROTOCOL 1 +#define CONFIG_IJKSEGMENT_PROTOCOL 1 +#define CONFIG_IJKTCPHOOK_PROTOCOL 1 +#define CONFIG_IJKIO_PROTOCOL 1 +#define CONFIG_MMSH_PROTOCOL 0 +#define CONFIG_MMST_PROTOCOL 0 +#define CONFIG_MD5_PROTOCOL 0 +#define CONFIG_PIPE_PROTOCOL 1 +#define CONFIG_PROMPEG_PROTOCOL 1 +#define CONFIG_RTMP_PROTOCOL 1 +#define CONFIG_RTMPE_PROTOCOL 0 +#define CONFIG_RTMPS_PROTOCOL 0 +#define CONFIG_RTMPT_PROTOCOL 1 
+#define CONFIG_RTMPTE_PROTOCOL 0 +#define CONFIG_RTMPTS_PROTOCOL 0 +#define CONFIG_RTP_PROTOCOL 0 +#define CONFIG_SCTP_PROTOCOL 0 +#define CONFIG_SRTP_PROTOCOL 0 +#define CONFIG_SUBFILE_PROTOCOL 0 +#define CONFIG_TEE_PROTOCOL 1 +#define CONFIG_TCP_PROTOCOL 1 +#define CONFIG_TLS_GNUTLS_PROTOCOL 0 +#define CONFIG_TLS_SCHANNEL_PROTOCOL 0 +#define CONFIG_TLS_SECURETRANSPORT_PROTOCOL 0 +#define CONFIG_TLS_OPENSSL_PROTOCOL 1 +#define CONFIG_UDP_PROTOCOL 1 +#define CONFIG_UDPLITE_PROTOCOL 1 +#define CONFIG_UNIX_PROTOCOL 0 +#define CONFIG_LIBRTMP_PROTOCOL 0 +#define CONFIG_LIBRTMPE_PROTOCOL 0 +#define CONFIG_LIBRTMPS_PROTOCOL 0 +#define CONFIG_LIBRTMPT_PROTOCOL 0 +#define CONFIG_LIBRTMPTE_PROTOCOL 0 +#define CONFIG_LIBSSH_PROTOCOL 0 +#define CONFIG_LIBSMBCLIENT_PROTOCOL 0 +#endif /* FFMPEG_CONFIG_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libffmpeg/x86_64/config.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libffmpeg/x86_64/config.h new file mode 100644 index 0000000..e530ee9 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libffmpeg/x86_64/config.h @@ -0,0 +1,2277 @@ +/* Automatically generated by configure - do not modify! 
*/ +#ifndef FFMPEG_CONFIG_H +#define FFMPEG_CONFIG_H +#define FFMPEG_CONFIGURATION "--disable-gpl --disable-nonfree --enable-runtime-cpudetect --disable-gray --disable-swscale-alpha --disable-programs --disable-ffmpeg --disable-ffplay --disable-ffprobe --disable-ffserver --disable-doc --disable-htmlpages --disable-manpages --disable-podpages --disable-txtpages --disable-avdevice --enable-avcodec --enable-avformat --enable-avutil --enable-swresample --enable-swscale --disable-postproc --enable-avfilter --disable-avresample --enable-network --disable-d3d11va --disable-dxva2 --disable-vaapi --disable-vda --disable-vdpau --disable-videotoolbox --disable-encoders --enable-encoder=png --disable-decoders --enable-decoder=aac --enable-decoder=aac_latm --enable-decoder=flv --enable-decoder=h264 --enable-decoder='mp3*' --enable-decoder=vp6f --enable-decoder=flac --enable-decoder=mpeg4 --enable-decoder=wavpack --enable-decoder=wav --enable-decoder=pcm_s16le --disable-hwaccels --disable-muxers --enable-muxer=mp4 --disable-demuxers --enable-demuxer=aac --enable-demuxer=concat --enable-demuxer=data --enable-demuxer=flv --enable-demuxer=hls --enable-demuxer=live_flv --enable-demuxer=mov --enable-demuxer=mp3 --enable-demuxer=mpegps --enable-demuxer=mpegts --enable-demuxer=mpegvideo --enable-demuxer=flac --enable-demuxer=hevc --enable-demuxer=wav --disable-parsers --enable-parser=aac --enable-parser=aac_latm --enable-parser=h264 --enable-parser=flac --enable-parser=hevc --enable-bsfs --disable-bsf=chomp --disable-bsf=dca_core --disable-bsf=dump_extradata --disable-bsf=hevc_mp4toannexb --disable-bsf=imx_dump_header --disable-bsf=mjpeg2jpeg --disable-bsf=mjpega_dump_header --disable-bsf=mov2textsub --disable-bsf=mp3_header_decompress --disable-bsf=mpeg4_unpack_bframes --disable-bsf=noise --disable-bsf=remove_extradata --disable-bsf=text2movsub --disable-bsf=vp9_superframe --enable-protocols --enable-protocol=async --disable-protocol=bluray --disable-protocol=concat 
--disable-protocol=ffrtmpcrypt --enable-protocol=ffrtmphttp --disable-protocol=gopher --disable-protocol=icecast --disable-protocol='librtmp*' --disable-protocol=libssh --disable-protocol=md5 --disable-protocol=mmsh --disable-protocol=mmst --disable-protocol='rtmp*' --enable-protocol=rtmp --enable-protocol=rtmpt --disable-protocol=rtp --disable-protocol=sctp --disable-protocol=srtp --disable-protocol=subfile --disable-protocol=unix --disable-devices --disable-filters --disable-iconv --disable-audiotoolbox --disable-videotoolbox --enable-cross-compile --disable-stripping --arch=x86_64 --target-os=darwin --enable-static --disable-shared --disable-asm --disable-mmx --assert-level=2 --prefix=/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/build/ffmpeg-x86_64/output --enable-openssl --cc='xcrun -sdk iphonesimulator clang' --extra-cflags=' -arch x86_64 -mios-simulator-version-min=7.0 -I/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/boringssl/include -DQCLOUDSSL' --extra-cxxflags=' -arch x86_64 -mios-simulator-version-min=7.0 -I/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/boringssl/include -DQCLOUDSSL' --extra-ldflags=' -arch x86_64 -mios-simulator-version-min=7.0 -arch x86_64 -mios-simulator-version-min=7.0 -I/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/boringssl/include -DQCLOUDSSL -L/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/boringssl/lib -lboringssl'" +#define FFMPEG_LICENSE "LGPL version 2.1 or later" +#define CONFIG_THIS_YEAR 2017 +#define FFMPEG_DATADIR "/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/build/ffmpeg-x86_64/output/share/ffmpeg" +#define AVCONV_DATADIR "/Users/aazgulhuang/landun/workspace/p-d49e1b3ea73f4e5a814be5e51260f68f/src/ios/build/ffmpeg-x86_64/output/share/ffmpeg" +#define CC_IDENT "Apple LLVM version 10.0.1 (clang-1001.0.46.4)" +#define av_restrict 
restrict +#define EXTERN_PREFIX "_" +#define EXTERN_ASM _ +#define BUILDSUF "" +#define SLIBSUF ".dylib" +#define HAVE_MMX2 HAVE_MMXEXT +#define SWS_MAX_FILTER_SIZE 256 +#define ASSERT_LEVEL 2 +#define ARCH_AARCH64 0 +#define ARCH_ALPHA 0 +#define ARCH_ARM 0 +#define ARCH_AVR32 0 +#define ARCH_AVR32_AP 0 +#define ARCH_AVR32_UC 0 +#define ARCH_BFIN 0 +#define ARCH_IA64 0 +#define ARCH_M68K 0 +#define ARCH_MIPS 0 +#define ARCH_MIPS64 0 +#define ARCH_PARISC 0 +#define ARCH_PPC 0 +#define ARCH_PPC64 0 +#define ARCH_S390 0 +#define ARCH_SH4 0 +#define ARCH_SPARC 0 +#define ARCH_SPARC64 0 +#define ARCH_TILEGX 0 +#define ARCH_TILEPRO 0 +#define ARCH_TOMI 0 +#define ARCH_X86 0 +#define ARCH_X86_32 0 +#define ARCH_X86_64 0 +#define HAVE_ARMV5TE 0 +#define HAVE_ARMV6 0 +#define HAVE_ARMV6T2 0 +#define HAVE_ARMV8 0 +#define HAVE_NEON 0 +#define HAVE_VFP 0 +#define HAVE_VFPV3 0 +#define HAVE_SETEND 0 +#define HAVE_ALTIVEC 0 +#define HAVE_DCBZL 0 +#define HAVE_LDBRX 0 +#define HAVE_POWER8 0 +#define HAVE_PPC4XX 0 +#define HAVE_VSX 0 +#define HAVE_AESNI 0 +#define HAVE_AMD3DNOW 0 +#define HAVE_AMD3DNOWEXT 0 +#define HAVE_AVX 0 +#define HAVE_AVX2 0 +#define HAVE_FMA3 0 +#define HAVE_FMA4 0 +#define HAVE_MMX 0 +#define HAVE_MMXEXT 0 +#define HAVE_SSE 0 +#define HAVE_SSE2 0 +#define HAVE_SSE3 0 +#define HAVE_SSE4 0 +#define HAVE_SSE42 0 +#define HAVE_SSSE3 0 +#define HAVE_XOP 0 +#define HAVE_CPUNOP 0 +#define HAVE_I686 0 +#define HAVE_MIPSFPU 0 +#define HAVE_MIPS32R2 0 +#define HAVE_MIPS32R5 0 +#define HAVE_MIPS64R2 0 +#define HAVE_MIPS32R6 0 +#define HAVE_MIPS64R6 0 +#define HAVE_MIPSDSP 0 +#define HAVE_MIPSDSPR2 0 +#define HAVE_MSA 0 +#define HAVE_LOONGSON2 0 +#define HAVE_LOONGSON3 0 +#define HAVE_MMI 0 +#define HAVE_ARMV5TE_EXTERNAL 0 +#define HAVE_ARMV6_EXTERNAL 0 +#define HAVE_ARMV6T2_EXTERNAL 0 +#define HAVE_ARMV8_EXTERNAL 0 +#define HAVE_NEON_EXTERNAL 0 +#define HAVE_VFP_EXTERNAL 0 +#define HAVE_VFPV3_EXTERNAL 0 +#define HAVE_SETEND_EXTERNAL 0 +#define HAVE_ALTIVEC_EXTERNAL 
0 +#define HAVE_DCBZL_EXTERNAL 0 +#define HAVE_LDBRX_EXTERNAL 0 +#define HAVE_POWER8_EXTERNAL 0 +#define HAVE_PPC4XX_EXTERNAL 0 +#define HAVE_VSX_EXTERNAL 0 +#define HAVE_AESNI_EXTERNAL 0 +#define HAVE_AMD3DNOW_EXTERNAL 0 +#define HAVE_AMD3DNOWEXT_EXTERNAL 0 +#define HAVE_AVX_EXTERNAL 0 +#define HAVE_AVX2_EXTERNAL 0 +#define HAVE_FMA3_EXTERNAL 0 +#define HAVE_FMA4_EXTERNAL 0 +#define HAVE_MMX_EXTERNAL 0 +#define HAVE_MMXEXT_EXTERNAL 0 +#define HAVE_SSE_EXTERNAL 0 +#define HAVE_SSE2_EXTERNAL 0 +#define HAVE_SSE3_EXTERNAL 0 +#define HAVE_SSE4_EXTERNAL 0 +#define HAVE_SSE42_EXTERNAL 0 +#define HAVE_SSSE3_EXTERNAL 0 +#define HAVE_XOP_EXTERNAL 0 +#define HAVE_CPUNOP_EXTERNAL 0 +#define HAVE_I686_EXTERNAL 0 +#define HAVE_MIPSFPU_EXTERNAL 0 +#define HAVE_MIPS32R2_EXTERNAL 0 +#define HAVE_MIPS32R5_EXTERNAL 0 +#define HAVE_MIPS64R2_EXTERNAL 0 +#define HAVE_MIPS32R6_EXTERNAL 0 +#define HAVE_MIPS64R6_EXTERNAL 0 +#define HAVE_MIPSDSP_EXTERNAL 0 +#define HAVE_MIPSDSPR2_EXTERNAL 0 +#define HAVE_MSA_EXTERNAL 0 +#define HAVE_LOONGSON2_EXTERNAL 0 +#define HAVE_LOONGSON3_EXTERNAL 0 +#define HAVE_MMI_EXTERNAL 0 +#define HAVE_ARMV5TE_INLINE 0 +#define HAVE_ARMV6_INLINE 0 +#define HAVE_ARMV6T2_INLINE 0 +#define HAVE_ARMV8_INLINE 0 +#define HAVE_NEON_INLINE 0 +#define HAVE_VFP_INLINE 0 +#define HAVE_VFPV3_INLINE 0 +#define HAVE_SETEND_INLINE 0 +#define HAVE_ALTIVEC_INLINE 0 +#define HAVE_DCBZL_INLINE 0 +#define HAVE_LDBRX_INLINE 0 +#define HAVE_POWER8_INLINE 0 +#define HAVE_PPC4XX_INLINE 0 +#define HAVE_VSX_INLINE 0 +#define HAVE_AESNI_INLINE 0 +#define HAVE_AMD3DNOW_INLINE 0 +#define HAVE_AMD3DNOWEXT_INLINE 0 +#define HAVE_AVX_INLINE 0 +#define HAVE_AVX2_INLINE 0 +#define HAVE_FMA3_INLINE 0 +#define HAVE_FMA4_INLINE 0 +#define HAVE_MMX_INLINE 0 +#define HAVE_MMXEXT_INLINE 0 +#define HAVE_SSE_INLINE 0 +#define HAVE_SSE2_INLINE 0 +#define HAVE_SSE3_INLINE 0 +#define HAVE_SSE4_INLINE 0 +#define HAVE_SSE42_INLINE 0 +#define HAVE_SSSE3_INLINE 0 +#define HAVE_XOP_INLINE 0 +#define 
HAVE_CPUNOP_INLINE 0 +#define HAVE_I686_INLINE 0 +#define HAVE_MIPSFPU_INLINE 0 +#define HAVE_MIPS32R2_INLINE 0 +#define HAVE_MIPS32R5_INLINE 0 +#define HAVE_MIPS64R2_INLINE 0 +#define HAVE_MIPS32R6_INLINE 0 +#define HAVE_MIPS64R6_INLINE 0 +#define HAVE_MIPSDSP_INLINE 0 +#define HAVE_MIPSDSPR2_INLINE 0 +#define HAVE_MSA_INLINE 0 +#define HAVE_LOONGSON2_INLINE 0 +#define HAVE_LOONGSON3_INLINE 0 +#define HAVE_MMI_INLINE 0 +#define HAVE_ALIGNED_STACK 0 +#define HAVE_FAST_64BIT 0 +#define HAVE_FAST_CLZ 0 +#define HAVE_FAST_CMOV 0 +#define HAVE_LOCAL_ALIGNED_8 1 +#define HAVE_LOCAL_ALIGNED_16 1 +#define HAVE_LOCAL_ALIGNED_32 1 +#define HAVE_SIMD_ALIGN_16 0 +#define HAVE_SIMD_ALIGN_32 0 +#define HAVE_ATOMICS_GCC 1 +#define HAVE_ATOMICS_SUNCC 0 +#define HAVE_ATOMICS_WIN32 0 +#define HAVE_ATOMIC_CAS_PTR 0 +#define HAVE_MACHINE_RW_BARRIER 0 +#define HAVE_MEMORYBARRIER 0 +#define HAVE_MM_EMPTY 1 +#define HAVE_RDTSC 0 +#define HAVE_SARESTART 1 +#define HAVE_SEM_TIMEDWAIT 0 +#define HAVE_SYNC_VAL_COMPARE_AND_SWAP 1 +#define HAVE_CABS 1 +#define HAVE_CEXP 1 +#define HAVE_INLINE_ASM 1 +#define HAVE_SYMVER 1 +#define HAVE_YASM 0 +#define HAVE_BIGENDIAN 0 +#define HAVE_FAST_UNALIGNED 0 +#define HAVE_ALSA_ASOUNDLIB_H 0 +#define HAVE_ALTIVEC_H 0 +#define HAVE_ARPA_INET_H 1 +#define HAVE_ASM_TYPES_H 0 +#define HAVE_CDIO_PARANOIA_H 0 +#define HAVE_CDIO_PARANOIA_PARANOIA_H 0 +#define HAVE_CUDA_H 0 +#define HAVE_DISPATCH_DISPATCH_H 1 +#define HAVE_DEV_BKTR_IOCTL_BT848_H 0 +#define HAVE_DEV_BKTR_IOCTL_METEOR_H 0 +#define HAVE_DEV_IC_BT8XX_H 0 +#define HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H 0 +#define HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H 0 +#define HAVE_DIRECT_H 0 +#define HAVE_DIRENT_H 1 +#define HAVE_DLFCN_H 1 +#define HAVE_D3D11_H 0 +#define HAVE_DXVA_H 0 +#define HAVE_ES2_GL_H 0 +#define HAVE_GSM_H 0 +#define HAVE_IO_H 0 +#define HAVE_MACH_MACH_TIME_H 1 +#define HAVE_MACHINE_IOCTL_BT848_H 0 +#define HAVE_MACHINE_IOCTL_METEOR_H 0 +#define HAVE_MALLOC_H 0 +#define HAVE_OPENCV2_CORE_CORE_C_H 
0 +#define HAVE_OPENJPEG_2_1_OPENJPEG_H 0 +#define HAVE_OPENJPEG_2_0_OPENJPEG_H 0 +#define HAVE_OPENJPEG_1_5_OPENJPEG_H 0 +#define HAVE_OPENGL_GL3_H 0 +#define HAVE_POLL_H 1 +#define HAVE_SNDIO_H 0 +#define HAVE_SOUNDCARD_H 0 +#define HAVE_STDATOMIC_H 1 +#define HAVE_SYS_MMAN_H 1 +#define HAVE_SYS_PARAM_H 1 +#define HAVE_SYS_RESOURCE_H 1 +#define HAVE_SYS_SELECT_H 1 +#define HAVE_SYS_SOUNDCARD_H 0 +#define HAVE_SYS_TIME_H 1 +#define HAVE_SYS_UN_H 1 +#define HAVE_SYS_VIDEOIO_H 0 +#define HAVE_TERMIOS_H 1 +#define HAVE_UDPLITE_H 0 +#define HAVE_UNISTD_H 1 +#define HAVE_VALGRIND_VALGRIND_H 0 +#define HAVE_WINDOWS_H 0 +#define HAVE_WINSOCK2_H 0 +#define HAVE_INTRINSICS_NEON 0 +#define HAVE_ATANF 1 +#define HAVE_ATAN2F 1 +#define HAVE_CBRT 1 +#define HAVE_CBRTF 1 +#define HAVE_COPYSIGN 1 +#define HAVE_COSF 1 +#define HAVE_ERF 1 +#define HAVE_EXP2 1 +#define HAVE_EXP2F 1 +#define HAVE_EXPF 1 +#define HAVE_HYPOT 1 +#define HAVE_ISFINITE 1 +#define HAVE_ISINF 1 +#define HAVE_ISNAN 1 +#define HAVE_LDEXPF 1 +#define HAVE_LLRINT 1 +#define HAVE_LLRINTF 1 +#define HAVE_LOG2 1 +#define HAVE_LOG2F 1 +#define HAVE_LOG10F 1 +#define HAVE_LRINT 1 +#define HAVE_LRINTF 1 +#define HAVE_POWF 1 +#define HAVE_RINT 1 +#define HAVE_ROUND 1 +#define HAVE_ROUNDF 1 +#define HAVE_SINF 1 +#define HAVE_TRUNC 1 +#define HAVE_TRUNCF 1 +#define HAVE_ACCESS 1 +#define HAVE_ALIGNED_MALLOC 0 +#define HAVE_ARC4RANDOM 1 +#define HAVE_CLOCK_GETTIME 1 +#define HAVE_CLOSESOCKET 0 +#define HAVE_COMMANDLINETOARGVW 0 +#define HAVE_COTASKMEMFREE 0 +#define HAVE_CRYPTGENRANDOM 0 +#define HAVE_DLOPEN 1 +#define HAVE_FCNTL 1 +#define HAVE_FLT_LIM 1 +#define HAVE_FORK 1 +#define HAVE_GETADDRINFO 1 +#define HAVE_GETHRTIME 0 +#define HAVE_GETOPT 1 +#define HAVE_GETPROCESSAFFINITYMASK 0 +#define HAVE_GETPROCESSMEMORYINFO 0 +#define HAVE_GETPROCESSTIMES 0 +#define HAVE_GETRUSAGE 1 +#define HAVE_GETSYSTEMTIMEASFILETIME 0 +#define HAVE_GETTIMEOFDAY 1 +#define HAVE_GLOB 1 +#define HAVE_GLXGETPROCADDRESS 0 +#define 
HAVE_GMTIME_R 1 +#define HAVE_INET_ATON 1 +#define HAVE_ISATTY 1 +#define HAVE_JACK_PORT_GET_LATENCY_RANGE 0 +#define HAVE_KBHIT 0 +#define HAVE_LOADLIBRARY 0 +#define HAVE_LOCALTIME_R 1 +#define HAVE_LSTAT 1 +#define HAVE_LZO1X_999_COMPRESS 0 +#define HAVE_MACH_ABSOLUTE_TIME 1 +#define HAVE_MAPVIEWOFFILE 0 +#define HAVE_MEMALIGN 0 +#define HAVE_MKSTEMP 1 +#define HAVE_MMAP 1 +#define HAVE_MPROTECT 1 +#define HAVE_NANOSLEEP 1 +#define HAVE_PEEKNAMEDPIPE 0 +#define HAVE_POSIX_MEMALIGN 1 +#define HAVE_PTHREAD_CANCEL 1 +#define HAVE_SCHED_GETAFFINITY 0 +#define HAVE_SETCONSOLETEXTATTRIBUTE 0 +#define HAVE_SETCONSOLECTRLHANDLER 0 +#define HAVE_SETMODE 0 +#define HAVE_SETRLIMIT 1 +#define HAVE_SLEEP 0 +#define HAVE_STRERROR_R 1 +#define HAVE_SYSCONF 1 +#define HAVE_SYSCTL 1 +#define HAVE_USLEEP 1 +#define HAVE_UTGETOSTYPEFROMSTRING 0 +#define HAVE_VIRTUALALLOC 0 +#define HAVE_WGLGETPROCADDRESS 0 +#define HAVE_PTHREADS 1 +#define HAVE_OS2THREADS 0 +#define HAVE_W32THREADS 0 +#define HAVE_AS_DN_DIRECTIVE 0 +#define HAVE_AS_FPU_DIRECTIVE 0 +#define HAVE_AS_FUNC 0 +#define HAVE_AS_OBJECT_ARCH 0 +#define HAVE_ASM_MOD_Q 0 +#define HAVE_ATTRIBUTE_MAY_ALIAS 1 +#define HAVE_ATTRIBUTE_PACKED 1 +#define HAVE_EBP_AVAILABLE 1 +#define HAVE_EBX_AVAILABLE 1 +#define HAVE_GNU_AS 0 +#define HAVE_GNU_WINDRES 0 +#define HAVE_IBM_ASM 0 +#define HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS 1 +#define HAVE_INLINE_ASM_LABELS 1 +#define HAVE_INLINE_ASM_NONLOCAL_LABELS 1 +#define HAVE_PRAGMA_DEPRECATED 1 +#define HAVE_RSYNC_CONTIMEOUT 0 +#define HAVE_SYMVER_ASM_LABEL 1 +#define HAVE_SYMVER_GNU_ASM 0 +#define HAVE_VFP_ARGS 0 +#define HAVE_XFORM_ASM 0 +#define HAVE_XMM_CLOBBERS 1 +#define HAVE_CONDITION_VARIABLE_PTR 0 +#define HAVE_SOCKLEN_T 1 +#define HAVE_STRUCT_ADDRINFO 1 +#define HAVE_STRUCT_GROUP_SOURCE_REQ 1 +#define HAVE_STRUCT_IP_MREQ_SOURCE 1 +#define HAVE_STRUCT_IPV6_MREQ 1 +#define HAVE_STRUCT_MSGHDR_MSG_FLAGS 1 +#define HAVE_STRUCT_POLLFD 1 +#define HAVE_STRUCT_RUSAGE_RU_MAXRSS 1 +#define 
HAVE_STRUCT_SCTP_EVENT_SUBSCRIBE 0 +#define HAVE_STRUCT_SOCKADDR_IN6 1 +#define HAVE_STRUCT_SOCKADDR_SA_LEN 1 +#define HAVE_STRUCT_SOCKADDR_STORAGE 1 +#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 0 +#define HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE 0 +#define HAVE_ATOMICS_NATIVE 1 +#define HAVE_DOS_PATHS 0 +#define HAVE_DXVA2_LIB 0 +#define HAVE_DXVA2API_COBJ 0 +#define HAVE_LIBC_MSVCRT 0 +#define HAVE_LIBDC1394_1 0 +#define HAVE_LIBDC1394_2 0 +#define HAVE_MAKEINFO 1 +#define HAVE_MAKEINFO_HTML 0 +#define HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS 0 +#define HAVE_PERL 1 +#define HAVE_POD2MAN 1 +#define HAVE_SDL2 0 +#define HAVE_SECTION_DATA_REL_RO 0 +#define HAVE_TEXI2HTML 0 +#define HAVE_THREADS 1 +#define HAVE_VAAPI_DRM 0 +#define HAVE_VAAPI_X11 0 +#define HAVE_VDPAU_X11 0 +#define HAVE_WINRT 0 +#define HAVE_XLIB 1 +#define CONFIG_BSFS 1 +#define CONFIG_DECODERS 1 +#define CONFIG_ENCODERS 1 +#define CONFIG_HWACCELS 0 +#define CONFIG_PARSERS 1 +#define CONFIG_INDEVS 0 +#define CONFIG_OUTDEVS 0 +#define CONFIG_FILTERS 0 +#define CONFIG_DEMUXERS 1 +#define CONFIG_MUXERS 1 +#define CONFIG_PROTOCOLS 1 +#define CONFIG_DOC 0 +#define CONFIG_HTMLPAGES 0 +#define CONFIG_MANPAGES 0 +#define CONFIG_PODPAGES 0 +#define CONFIG_TXTPAGES 0 +#define CONFIG_AVIO_DIR_CMD_EXAMPLE 1 +#define CONFIG_AVIO_READING_EXAMPLE 1 +#define CONFIG_DECODE_AUDIO_EXAMPLE 1 +#define CONFIG_DECODE_VIDEO_EXAMPLE 1 +#define CONFIG_DEMUXING_DECODING_EXAMPLE 1 +#define CONFIG_ENCODE_AUDIO_EXAMPLE 1 +#define CONFIG_ENCODE_VIDEO_EXAMPLE 1 +#define CONFIG_EXTRACT_MVS_EXAMPLE 1 +#define CONFIG_FILTER_AUDIO_EXAMPLE 1 +#define CONFIG_FILTERING_AUDIO_EXAMPLE 1 +#define CONFIG_FILTERING_VIDEO_EXAMPLE 1 +#define CONFIG_HTTP_MULTICLIENT_EXAMPLE 1 +#define CONFIG_METADATA_EXAMPLE 1 +#define CONFIG_MUXING_EXAMPLE 1 +#define CONFIG_QSVDEC_EXAMPLE 0 +#define CONFIG_REMUXING_EXAMPLE 1 +#define CONFIG_RESAMPLING_AUDIO_EXAMPLE 1 +#define CONFIG_SCALING_VIDEO_EXAMPLE 1 +#define CONFIG_TRANSCODE_AAC_EXAMPLE 1 +#define 
CONFIG_TRANSCODING_EXAMPLE 1 +#define CONFIG_BZLIB 0 +#define CONFIG_ICONV 0 +#define CONFIG_LIBXCB 0 +#define CONFIG_LIBXCB_SHM 0 +#define CONFIG_LIBXCB_SHAPE 0 +#define CONFIG_LIBXCB_XFIXES 0 +#define CONFIG_LZMA 0 +#define CONFIG_SCHANNEL 0 +#define CONFIG_SDL 0 +#define CONFIG_SDL2 0 +#define CONFIG_SECURETRANSPORT 0 +#define CONFIG_XLIB 1 +#define CONFIG_ZLIB 1 +#define CONFIG_AVISYNTH 0 +#define CONFIG_FREI0R 0 +#define CONFIG_LIBCDIO 0 +#define CONFIG_LIBRUBBERBAND 0 +#define CONFIG_LIBVIDSTAB 0 +#define CONFIG_LIBX264 0 +#define CONFIG_LIBX265 0 +#define CONFIG_LIBXAVS 0 +#define CONFIG_LIBXVID 0 +#define CONFIG_DECKLINK 0 +#define CONFIG_LIBFDK_AAC 0 +#define CONFIG_OPENSSL 1 +#define CONFIG_GMP 0 +#define CONFIG_LIBOPENCORE_AMRNB 0 +#define CONFIG_LIBOPENCORE_AMRWB 0 +#define CONFIG_LIBVO_AMRWBENC 0 +#define CONFIG_LIBSMBCLIENT 0 +#define CONFIG_CHROMAPRINT 0 +#define CONFIG_CRYSTALHD 0 +#define CONFIG_GCRYPT 0 +#define CONFIG_GNUTLS 0 +#define CONFIG_JNI 0 +#define CONFIG_LADSPA 0 +#define CONFIG_LIBASS 0 +#define CONFIG_LIBBLURAY 0 +#define CONFIG_LIBBS2B 0 +#define CONFIG_LIBCACA 0 +#define CONFIG_LIBCELT 0 +#define CONFIG_LIBDC1394 0 +#define CONFIG_LIBFLITE 0 +#define CONFIG_LIBFONTCONFIG 0 +#define CONFIG_LIBFREETYPE 0 +#define CONFIG_LIBFRIBIDI 0 +#define CONFIG_LIBGME 0 +#define CONFIG_LIBGSM 0 +#define CONFIG_LIBIEC61883 0 +#define CONFIG_LIBILBC 0 +#define CONFIG_LIBKVAZAAR 0 +#define CONFIG_LIBMODPLUG 0 +#define CONFIG_LIBMP3LAME 0 +#define CONFIG_LIBNUT 0 +#define CONFIG_LIBOPENCV 0 +#define CONFIG_LIBOPENH264 0 +#define CONFIG_LIBOPENJPEG 0 +#define CONFIG_LIBOPENMPT 0 +#define CONFIG_LIBOPUS 0 +#define CONFIG_LIBPULSE 0 +#define CONFIG_LIBRTMP 0 +#define CONFIG_LIBSCHROEDINGER 0 +#define CONFIG_LIBSHINE 0 +#define CONFIG_LIBSMBCLIENT 0 +#define CONFIG_LIBSNAPPY 0 +#define CONFIG_LIBSOXR 0 +#define CONFIG_LIBSPEEX 0 +#define CONFIG_LIBSSH 0 +#define CONFIG_LIBTESSERACT 0 +#define CONFIG_LIBTHEORA 0 +#define CONFIG_LIBTWOLAME 0 +#define 
CONFIG_LIBV4L2 0 +#define CONFIG_LIBVORBIS 0 +#define CONFIG_LIBVPX 0 +#define CONFIG_LIBWAVPACK 0 +#define CONFIG_LIBWEBP 0 +#define CONFIG_LIBZIMG 0 +#define CONFIG_LIBZMQ 0 +#define CONFIG_LIBZVBI 0 +#define CONFIG_MEDIACODEC 0 +#define CONFIG_NETCDF 0 +#define CONFIG_OPENAL 0 +#define CONFIG_OPENCL 0 +#define CONFIG_OPENGL 0 +#define CONFIG_VIDEOTOOLBOX 0 +#define CONFIG_AUDIOTOOLBOX 0 +#define CONFIG_CUDA 0 +#define CONFIG_CUVID 0 +#define CONFIG_D3D11VA 0 +#define CONFIG_DXVA2 0 +#define CONFIG_NVENC 0 +#define CONFIG_VAAPI 0 +#define CONFIG_VDA 0 +#define CONFIG_VDPAU 0 +#define CONFIG_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_XVMC 0 +#define CONFIG_LIBNPP 0 +#define CONFIG_LIBMFX 0 +#define CONFIG_MMAL 0 +#define CONFIG_OMX 0 +#define CONFIG_FTRAPV 0 +#define CONFIG_GRAY 0 +#define CONFIG_HARDCODED_TABLES 0 +#define CONFIG_OMX_RPI 0 +#define CONFIG_RUNTIME_CPUDETECT 1 +#define CONFIG_SAFE_BITSTREAM_READER 1 +#define CONFIG_SHARED 0 +#define CONFIG_SMALL 0 +#define CONFIG_STATIC 1 +#define CONFIG_SWSCALE_ALPHA 0 +#define CONFIG_GPL 0 +#define CONFIG_NONFREE 0 +#define CONFIG_VERSION3 0 +#define CONFIG_AVCODEC 1 +#define CONFIG_AVDEVICE 0 +#define CONFIG_AVFILTER 1 +#define CONFIG_AVFORMAT 1 +#define CONFIG_AVRESAMPLE 0 +#define CONFIG_AVUTIL 1 +#define CONFIG_POSTPROC 0 +#define CONFIG_SWRESAMPLE 1 +#define CONFIG_SWSCALE 1 +#define CONFIG_FFPLAY 0 +#define CONFIG_FFPROBE 0 +#define CONFIG_FFSERVER 0 +#define CONFIG_FFMPEG 0 +#define CONFIG_DCT 1 +#define CONFIG_DWT 0 +#define CONFIG_ERROR_RESILIENCE 1 +#define CONFIG_FAAN 1 +#define CONFIG_FAST_UNALIGNED 0 +#define CONFIG_FFT 1 +#define CONFIG_LSP 0 +#define CONFIG_LZO 0 +#define CONFIG_MDCT 1 +#define CONFIG_PIXELUTILS 0 +#define CONFIG_NETWORK 1 +#define CONFIG_RDFT 1 +#define CONFIG_FONTCONFIG 0 +#define CONFIG_MEMORY_POISONING 0 +#define CONFIG_NEON_CLOBBER_TEST 0 +#define CONFIG_PIC 1 +#define CONFIG_RAISE_MAJOR 0 +#define CONFIG_THUMB 0 +#define CONFIG_VALGRIND_BACKTRACE 0 +#define 
CONFIG_XMM_CLOBBER_TEST 0 +#define CONFIG_AANDCTTABLES 0 +#define CONFIG_AC3DSP 0 +#define CONFIG_AUDIO_FRAME_QUEUE 0 +#define CONFIG_AUDIODSP 0 +#define CONFIG_BLOCKDSP 1 +#define CONFIG_BSWAPDSP 0 +#define CONFIG_CABAC 1 +#define CONFIG_DIRAC_PARSE 0 +#define CONFIG_DVPROFILE 0 +#define CONFIG_EXIF 0 +#define CONFIG_FAANDCT 1 +#define CONFIG_FAANIDCT 1 +#define CONFIG_FDCTDSP 1 +#define CONFIG_FLACDSP 1 +#define CONFIG_FMTCONVERT 0 +#define CONFIG_FRAME_THREAD_ENCODER 1 +#define CONFIG_G722DSP 0 +#define CONFIG_GOLOMB 1 +#define CONFIG_GPLV3 0 +#define CONFIG_H263DSP 1 +#define CONFIG_H264CHROMA 1 +#define CONFIG_H264DSP 1 +#define CONFIG_H264PARSE 1 +#define CONFIG_H264PRED 1 +#define CONFIG_H264QPEL 1 +#define CONFIG_HPELDSP 1 +#define CONFIG_HUFFMAN 1 +#define CONFIG_HUFFYUVDSP 0 +#define CONFIG_HUFFYUVENCDSP 0 +#define CONFIG_IDCTDSP 1 +#define CONFIG_IIRFILTER 0 +#define CONFIG_MDCT15 1 +#define CONFIG_INTRAX8 0 +#define CONFIG_ISO_MEDIA 1 +#define CONFIG_IVIDSP 0 +#define CONFIG_JPEGTABLES 0 +#define CONFIG_LGPLV3 0 +#define CONFIG_LIBX262 0 +#define CONFIG_LLAUDDSP 0 +#define CONFIG_LLVIDDSP 0 +#define CONFIG_LLVIDENCDSP 1 +#define CONFIG_LPC 0 +#define CONFIG_LZF 0 +#define CONFIG_ME_CMP 1 +#define CONFIG_MPEG_ER 1 +#define CONFIG_MPEGAUDIO 1 +#define CONFIG_MPEGAUDIODSP 1 +#define CONFIG_MPEGVIDEO 1 +#define CONFIG_MPEGVIDEOENC 0 +#define CONFIG_MSS34DSP 0 +#define CONFIG_PIXBLOCKDSP 1 +#define CONFIG_QPELDSP 1 +#define CONFIG_QSV 0 +#define CONFIG_QSVDEC 0 +#define CONFIG_QSVENC 0 +#define CONFIG_RANGECODER 0 +#define CONFIG_RIFFDEC 1 +#define CONFIG_RIFFENC 1 +#define CONFIG_RTPDEC 0 +#define CONFIG_RTPENC_CHAIN 1 +#define CONFIG_RV34DSP 0 +#define CONFIG_SINEWIN 1 +#define CONFIG_SNAPPY 0 +#define CONFIG_SRTP 0 +#define CONFIG_STARTCODE 1 +#define CONFIG_TEXTUREDSP 0 +#define CONFIG_TEXTUREDSPENC 0 +#define CONFIG_TPELDSP 0 +#define CONFIG_VAAPI_ENCODE 0 +#define CONFIG_VC1DSP 0 +#define CONFIG_VIDEODSP 1 +#define CONFIG_VP3DSP 1 +#define 
CONFIG_VP56DSP 1 +#define CONFIG_VP8DSP 0 +#define CONFIG_VT_BT2020 0 +#define CONFIG_WMA_FREQS 0 +#define CONFIG_WMV2DSP 0 +#define CONFIG_AAC_ADTSTOASC_BSF 1 +#define CONFIG_CHOMP_BSF 0 +#define CONFIG_DUMP_EXTRADATA_BSF 0 +#define CONFIG_DCA_CORE_BSF 0 +#define CONFIG_EXTRACT_EXTRADATA_BSF 1 +#define CONFIG_H264_MP4TOANNEXB_BSF 1 +#define CONFIG_HEVC_MP4TOANNEXB_BSF 0 +#define CONFIG_IMX_DUMP_HEADER_BSF 0 +#define CONFIG_MJPEG2JPEG_BSF 0 +#define CONFIG_MJPEGA_DUMP_HEADER_BSF 0 +#define CONFIG_MP3_HEADER_DECOMPRESS_BSF 0 +#define CONFIG_MPEG4_UNPACK_BFRAMES_BSF 0 +#define CONFIG_MOV2TEXTSUB_BSF 0 +#define CONFIG_NOISE_BSF 0 +#define CONFIG_REMOVE_EXTRADATA_BSF 0 +#define CONFIG_TEXT2MOVSUB_BSF 0 +#define CONFIG_VP9_SUPERFRAME_BSF 0 +#define CONFIG_AASC_DECODER 0 +#define CONFIG_AIC_DECODER 0 +#define CONFIG_ALIAS_PIX_DECODER 0 +#define CONFIG_AMV_DECODER 0 +#define CONFIG_ANM_DECODER 0 +#define CONFIG_ANSI_DECODER 0 +#define CONFIG_APNG_DECODER 0 +#define CONFIG_ASV1_DECODER 0 +#define CONFIG_ASV2_DECODER 0 +#define CONFIG_AURA_DECODER 0 +#define CONFIG_AURA2_DECODER 0 +#define CONFIG_AVRP_DECODER 0 +#define CONFIG_AVRN_DECODER 0 +#define CONFIG_AVS_DECODER 0 +#define CONFIG_AVUI_DECODER 0 +#define CONFIG_AYUV_DECODER 0 +#define CONFIG_BETHSOFTVID_DECODER 0 +#define CONFIG_BFI_DECODER 0 +#define CONFIG_BINK_DECODER 0 +#define CONFIG_BMP_DECODER 0 +#define CONFIG_BMV_VIDEO_DECODER 0 +#define CONFIG_BRENDER_PIX_DECODER 0 +#define CONFIG_C93_DECODER 0 +#define CONFIG_CAVS_DECODER 0 +#define CONFIG_CDGRAPHICS_DECODER 0 +#define CONFIG_CDXL_DECODER 0 +#define CONFIG_CFHD_DECODER 0 +#define CONFIG_CINEPAK_DECODER 0 +#define CONFIG_CLEARVIDEO_DECODER 0 +#define CONFIG_CLJR_DECODER 0 +#define CONFIG_CLLC_DECODER 0 +#define CONFIG_COMFORTNOISE_DECODER 0 +#define CONFIG_CPIA_DECODER 0 +#define CONFIG_CSCD_DECODER 0 +#define CONFIG_CYUV_DECODER 0 +#define CONFIG_DDS_DECODER 0 +#define CONFIG_DFA_DECODER 0 +#define CONFIG_DIRAC_DECODER 0 +#define CONFIG_DNXHD_DECODER 0 
+#define CONFIG_DPX_DECODER 0 +#define CONFIG_DSICINVIDEO_DECODER 0 +#define CONFIG_DVAUDIO_DECODER 0 +#define CONFIG_DVVIDEO_DECODER 0 +#define CONFIG_DXA_DECODER 0 +#define CONFIG_DXTORY_DECODER 0 +#define CONFIG_DXV_DECODER 0 +#define CONFIG_EACMV_DECODER 0 +#define CONFIG_EAMAD_DECODER 0 +#define CONFIG_EATGQ_DECODER 0 +#define CONFIG_EATGV_DECODER 0 +#define CONFIG_EATQI_DECODER 0 +#define CONFIG_EIGHTBPS_DECODER 0 +#define CONFIG_EIGHTSVX_EXP_DECODER 0 +#define CONFIG_EIGHTSVX_FIB_DECODER 0 +#define CONFIG_ESCAPE124_DECODER 0 +#define CONFIG_ESCAPE130_DECODER 0 +#define CONFIG_EXR_DECODER 0 +#define CONFIG_FFV1_DECODER 0 +#define CONFIG_FFVHUFF_DECODER 0 +#define CONFIG_FIC_DECODER 0 +#define CONFIG_FLASHSV_DECODER 0 +#define CONFIG_FLASHSV2_DECODER 0 +#define CONFIG_FLIC_DECODER 0 +#define CONFIG_FLV_DECODER 1 +#define CONFIG_FMVC_DECODER 0 +#define CONFIG_FOURXM_DECODER 0 +#define CONFIG_FRAPS_DECODER 0 +#define CONFIG_FRWU_DECODER 0 +#define CONFIG_G2M_DECODER 0 +#define CONFIG_GIF_DECODER 0 +#define CONFIG_H261_DECODER 0 +#define CONFIG_H263_DECODER 1 +#define CONFIG_H263I_DECODER 0 +#define CONFIG_H263P_DECODER 0 +#define CONFIG_H264_DECODER 1 +#define CONFIG_H264_CRYSTALHD_DECODER 0 +#define CONFIG_H264_MEDIACODEC_DECODER 0 +#define CONFIG_H264_MMAL_DECODER 0 +#define CONFIG_H264_QSV_DECODER 0 +#define CONFIG_H264_VDA_DECODER 0 +#define CONFIG_H264_VDPAU_DECODER 0 +#define CONFIG_HAP_DECODER 0 +#define CONFIG_HEVC_DECODER 0 +#define CONFIG_HEVC_QSV_DECODER 0 +#define CONFIG_HNM4_VIDEO_DECODER 0 +#define CONFIG_HQ_HQA_DECODER 0 +#define CONFIG_HQX_DECODER 0 +#define CONFIG_HUFFYUV_DECODER 0 +#define CONFIG_IDCIN_DECODER 0 +#define CONFIG_IFF_ILBM_DECODER 0 +#define CONFIG_INDEO2_DECODER 0 +#define CONFIG_INDEO3_DECODER 0 +#define CONFIG_INDEO4_DECODER 0 +#define CONFIG_INDEO5_DECODER 0 +#define CONFIG_INTERPLAY_VIDEO_DECODER 0 +#define CONFIG_JPEG2000_DECODER 0 +#define CONFIG_JPEGLS_DECODER 0 +#define CONFIG_JV_DECODER 0 +#define CONFIG_KGV1_DECODER 0 
+#define CONFIG_KMVC_DECODER 0 +#define CONFIG_LAGARITH_DECODER 0 +#define CONFIG_LOCO_DECODER 0 +#define CONFIG_M101_DECODER 0 +#define CONFIG_MAGICYUV_DECODER 0 +#define CONFIG_MDEC_DECODER 0 +#define CONFIG_MIMIC_DECODER 0 +#define CONFIG_MJPEG_DECODER 0 +#define CONFIG_MJPEGB_DECODER 0 +#define CONFIG_MMVIDEO_DECODER 0 +#define CONFIG_MOTIONPIXELS_DECODER 0 +#define CONFIG_MPEG_XVMC_DECODER 0 +#define CONFIG_MPEG1VIDEO_DECODER 0 +#define CONFIG_MPEG2VIDEO_DECODER 0 +#define CONFIG_MPEG4_DECODER 1 +#define CONFIG_MPEG4_CRYSTALHD_DECODER 0 +#define CONFIG_MPEG4_MMAL_DECODER 0 +#define CONFIG_MPEG4_VDPAU_DECODER 0 +#define CONFIG_MPEGVIDEO_DECODER 0 +#define CONFIG_MPEG_VDPAU_DECODER 0 +#define CONFIG_MPEG1_VDPAU_DECODER 0 +#define CONFIG_MPEG2_MMAL_DECODER 0 +#define CONFIG_MPEG2_CRYSTALHD_DECODER 0 +#define CONFIG_MPEG2_QSV_DECODER 0 +#define CONFIG_MSA1_DECODER 0 +#define CONFIG_MSMPEG4V1_DECODER 0 +#define CONFIG_MSMPEG4V2_DECODER 0 +#define CONFIG_MSMPEG4V3_DECODER 0 +#define CONFIG_MSMPEG4_CRYSTALHD_DECODER 0 +#define CONFIG_MSRLE_DECODER 0 +#define CONFIG_MSS1_DECODER 0 +#define CONFIG_MSS2_DECODER 0 +#define CONFIG_MSVIDEO1_DECODER 0 +#define CONFIG_MSZH_DECODER 0 +#define CONFIG_MTS2_DECODER 0 +#define CONFIG_MVC1_DECODER 0 +#define CONFIG_MVC2_DECODER 0 +#define CONFIG_MXPEG_DECODER 0 +#define CONFIG_NUV_DECODER 0 +#define CONFIG_PAF_VIDEO_DECODER 0 +#define CONFIG_PAM_DECODER 0 +#define CONFIG_PBM_DECODER 0 +#define CONFIG_PCX_DECODER 0 +#define CONFIG_PGM_DECODER 0 +#define CONFIG_PGMYUV_DECODER 0 +#define CONFIG_PICTOR_DECODER 0 +#define CONFIG_PIXLET_DECODER 0 +#define CONFIG_PNG_DECODER 0 +#define CONFIG_PPM_DECODER 0 +#define CONFIG_PRORES_DECODER 0 +#define CONFIG_PRORES_LGPL_DECODER 0 +#define CONFIG_PSD_DECODER 0 +#define CONFIG_PTX_DECODER 0 +#define CONFIG_QDRAW_DECODER 0 +#define CONFIG_QPEG_DECODER 0 +#define CONFIG_QTRLE_DECODER 0 +#define CONFIG_R10K_DECODER 0 +#define CONFIG_R210_DECODER 0 +#define CONFIG_RAWVIDEO_DECODER 0 +#define 
CONFIG_RL2_DECODER 0 +#define CONFIG_ROQ_DECODER 0 +#define CONFIG_RPZA_DECODER 0 +#define CONFIG_RSCC_DECODER 0 +#define CONFIG_RV10_DECODER 0 +#define CONFIG_RV20_DECODER 0 +#define CONFIG_RV30_DECODER 0 +#define CONFIG_RV40_DECODER 0 +#define CONFIG_S302M_DECODER 0 +#define CONFIG_SANM_DECODER 0 +#define CONFIG_SCPR_DECODER 0 +#define CONFIG_SCREENPRESSO_DECODER 0 +#define CONFIG_SDX2_DPCM_DECODER 0 +#define CONFIG_SGI_DECODER 0 +#define CONFIG_SGIRLE_DECODER 0 +#define CONFIG_SHEERVIDEO_DECODER 0 +#define CONFIG_SMACKER_DECODER 0 +#define CONFIG_SMC_DECODER 0 +#define CONFIG_SMVJPEG_DECODER 0 +#define CONFIG_SNOW_DECODER 0 +#define CONFIG_SP5X_DECODER 0 +#define CONFIG_SPEEDHQ_DECODER 0 +#define CONFIG_SUNRAST_DECODER 0 +#define CONFIG_SVQ1_DECODER 0 +#define CONFIG_SVQ3_DECODER 0 +#define CONFIG_TARGA_DECODER 0 +#define CONFIG_TARGA_Y216_DECODER 0 +#define CONFIG_TDSC_DECODER 0 +#define CONFIG_THEORA_DECODER 0 +#define CONFIG_THP_DECODER 0 +#define CONFIG_TIERTEXSEQVIDEO_DECODER 0 +#define CONFIG_TIFF_DECODER 0 +#define CONFIG_TMV_DECODER 0 +#define CONFIG_TRUEMOTION1_DECODER 0 +#define CONFIG_TRUEMOTION2_DECODER 0 +#define CONFIG_TRUEMOTION2RT_DECODER 0 +#define CONFIG_TSCC_DECODER 0 +#define CONFIG_TSCC2_DECODER 0 +#define CONFIG_TXD_DECODER 0 +#define CONFIG_ULTI_DECODER 0 +#define CONFIG_UTVIDEO_DECODER 0 +#define CONFIG_V210_DECODER 0 +#define CONFIG_V210X_DECODER 0 +#define CONFIG_V308_DECODER 0 +#define CONFIG_V408_DECODER 0 +#define CONFIG_V410_DECODER 0 +#define CONFIG_VB_DECODER 0 +#define CONFIG_VBLE_DECODER 0 +#define CONFIG_VC1_DECODER 0 +#define CONFIG_VC1_CRYSTALHD_DECODER 0 +#define CONFIG_VC1_VDPAU_DECODER 0 +#define CONFIG_VC1IMAGE_DECODER 0 +#define CONFIG_VC1_MMAL_DECODER 0 +#define CONFIG_VC1_QSV_DECODER 0 +#define CONFIG_VCR1_DECODER 0 +#define CONFIG_VMDVIDEO_DECODER 0 +#define CONFIG_VMNC_DECODER 0 +#define CONFIG_VP3_DECODER 0 +#define CONFIG_VP5_DECODER 0 +#define CONFIG_VP6_DECODER 1 +#define CONFIG_VP6A_DECODER 0 +#define 
CONFIG_VP6F_DECODER 1 +#define CONFIG_VP7_DECODER 0 +#define CONFIG_VP8_DECODER 0 +#define CONFIG_VP9_DECODER 0 +#define CONFIG_VQA_DECODER 0 +#define CONFIG_WEBP_DECODER 0 +#define CONFIG_WMV1_DECODER 0 +#define CONFIG_WMV2_DECODER 0 +#define CONFIG_WMV3_DECODER 0 +#define CONFIG_WMV3_CRYSTALHD_DECODER 0 +#define CONFIG_WMV3_VDPAU_DECODER 0 +#define CONFIG_WMV3IMAGE_DECODER 0 +#define CONFIG_WNV1_DECODER 0 +#define CONFIG_XAN_WC3_DECODER 0 +#define CONFIG_XAN_WC4_DECODER 0 +#define CONFIG_XBM_DECODER 0 +#define CONFIG_XFACE_DECODER 0 +#define CONFIG_XL_DECODER 0 +#define CONFIG_XPM_DECODER 0 +#define CONFIG_XWD_DECODER 0 +#define CONFIG_Y41P_DECODER 0 +#define CONFIG_YLC_DECODER 0 +#define CONFIG_YOP_DECODER 0 +#define CONFIG_YUV4_DECODER 0 +#define CONFIG_ZERO12V_DECODER 0 +#define CONFIG_ZEROCODEC_DECODER 0 +#define CONFIG_ZLIB_DECODER 0 +#define CONFIG_ZMBV_DECODER 0 +#define CONFIG_AAC_DECODER 1 +#define CONFIG_AAC_FIXED_DECODER 0 +#define CONFIG_AAC_LATM_DECODER 1 +#define CONFIG_AC3_DECODER 0 +#define CONFIG_AC3_FIXED_DECODER 0 +#define CONFIG_ALAC_DECODER 0 +#define CONFIG_ALS_DECODER 0 +#define CONFIG_AMRNB_DECODER 0 +#define CONFIG_AMRWB_DECODER 0 +#define CONFIG_APE_DECODER 0 +#define CONFIG_ATRAC1_DECODER 0 +#define CONFIG_ATRAC3_DECODER 0 +#define CONFIG_ATRAC3AL_DECODER 0 +#define CONFIG_ATRAC3P_DECODER 0 +#define CONFIG_ATRAC3PAL_DECODER 0 +#define CONFIG_BINKAUDIO_DCT_DECODER 0 +#define CONFIG_BINKAUDIO_RDFT_DECODER 0 +#define CONFIG_BMV_AUDIO_DECODER 0 +#define CONFIG_COOK_DECODER 0 +#define CONFIG_DCA_DECODER 0 +#define CONFIG_DSD_LSBF_DECODER 0 +#define CONFIG_DSD_MSBF_DECODER 0 +#define CONFIG_DSD_LSBF_PLANAR_DECODER 0 +#define CONFIG_DSD_MSBF_PLANAR_DECODER 0 +#define CONFIG_DSICINAUDIO_DECODER 0 +#define CONFIG_DSS_SP_DECODER 0 +#define CONFIG_DST_DECODER 0 +#define CONFIG_EAC3_DECODER 0 +#define CONFIG_EVRC_DECODER 0 +#define CONFIG_FFWAVESYNTH_DECODER 0 +#define CONFIG_FLAC_DECODER 1 +#define CONFIG_G723_1_DECODER 0 +#define 
CONFIG_G729_DECODER 0 +#define CONFIG_GSM_DECODER 0 +#define CONFIG_GSM_MS_DECODER 0 +#define CONFIG_IAC_DECODER 0 +#define CONFIG_IMC_DECODER 0 +#define CONFIG_INTERPLAY_ACM_DECODER 0 +#define CONFIG_MACE3_DECODER 0 +#define CONFIG_MACE6_DECODER 0 +#define CONFIG_METASOUND_DECODER 0 +#define CONFIG_MLP_DECODER 0 +#define CONFIG_MP1_DECODER 0 +#define CONFIG_MP1FLOAT_DECODER 0 +#define CONFIG_MP2_DECODER 0 +#define CONFIG_MP2FLOAT_DECODER 0 +#define CONFIG_MP3_DECODER 1 +#define CONFIG_MP3FLOAT_DECODER 1 +#define CONFIG_MP3ADU_DECODER 1 +#define CONFIG_MP3ADUFLOAT_DECODER 1 +#define CONFIG_MP3ON4_DECODER 1 +#define CONFIG_MP3ON4FLOAT_DECODER 1 +#define CONFIG_MPC7_DECODER 0 +#define CONFIG_MPC8_DECODER 0 +#define CONFIG_NELLYMOSER_DECODER 0 +#define CONFIG_ON2AVC_DECODER 0 +#define CONFIG_OPUS_DECODER 0 +#define CONFIG_PAF_AUDIO_DECODER 0 +#define CONFIG_QCELP_DECODER 0 +#define CONFIG_QDM2_DECODER 0 +#define CONFIG_QDMC_DECODER 0 +#define CONFIG_RA_144_DECODER 0 +#define CONFIG_RA_288_DECODER 0 +#define CONFIG_RALF_DECODER 0 +#define CONFIG_SHORTEN_DECODER 0 +#define CONFIG_SIPR_DECODER 0 +#define CONFIG_SMACKAUD_DECODER 0 +#define CONFIG_SONIC_DECODER 0 +#define CONFIG_TAK_DECODER 0 +#define CONFIG_TRUEHD_DECODER 0 +#define CONFIG_TRUESPEECH_DECODER 0 +#define CONFIG_TTA_DECODER 0 +#define CONFIG_TWINVQ_DECODER 0 +#define CONFIG_VMDAUDIO_DECODER 0 +#define CONFIG_VORBIS_DECODER 0 +#define CONFIG_WAVPACK_DECODER 1 +#define CONFIG_WMALOSSLESS_DECODER 0 +#define CONFIG_WMAPRO_DECODER 0 +#define CONFIG_WMAV1_DECODER 0 +#define CONFIG_WMAV2_DECODER 0 +#define CONFIG_WMAVOICE_DECODER 0 +#define CONFIG_WS_SND1_DECODER 0 +#define CONFIG_XMA1_DECODER 0 +#define CONFIG_XMA2_DECODER 0 +#define CONFIG_PCM_ALAW_DECODER 0 +#define CONFIG_PCM_BLURAY_DECODER 0 +#define CONFIG_PCM_DVD_DECODER 0 +#define CONFIG_PCM_F16LE_DECODER 0 +#define CONFIG_PCM_F24LE_DECODER 0 +#define CONFIG_PCM_F32BE_DECODER 0 +#define CONFIG_PCM_F32LE_DECODER 0 +#define CONFIG_PCM_F64BE_DECODER 0 +#define 
CONFIG_PCM_F64LE_DECODER 0 +#define CONFIG_PCM_LXF_DECODER 0 +#define CONFIG_PCM_MULAW_DECODER 0 +#define CONFIG_PCM_S8_DECODER 0 +#define CONFIG_PCM_S8_PLANAR_DECODER 0 +#define CONFIG_PCM_S16BE_DECODER 0 +#define CONFIG_PCM_S16BE_PLANAR_DECODER 0 +#define CONFIG_PCM_S16LE_DECODER 1 +#define CONFIG_PCM_S16LE_PLANAR_DECODER 0 +#define CONFIG_PCM_S24BE_DECODER 0 +#define CONFIG_PCM_S24DAUD_DECODER 0 +#define CONFIG_PCM_S24LE_DECODER 0 +#define CONFIG_PCM_S24LE_PLANAR_DECODER 0 +#define CONFIG_PCM_S32BE_DECODER 0 +#define CONFIG_PCM_S32LE_DECODER 0 +#define CONFIG_PCM_S32LE_PLANAR_DECODER 0 +#define CONFIG_PCM_S64BE_DECODER 0 +#define CONFIG_PCM_S64LE_DECODER 0 +#define CONFIG_PCM_U8_DECODER 0 +#define CONFIG_PCM_U16BE_DECODER 0 +#define CONFIG_PCM_U16LE_DECODER 0 +#define CONFIG_PCM_U24BE_DECODER 0 +#define CONFIG_PCM_U24LE_DECODER 0 +#define CONFIG_PCM_U32BE_DECODER 0 +#define CONFIG_PCM_U32LE_DECODER 0 +#define CONFIG_PCM_ZORK_DECODER 0 +#define CONFIG_INTERPLAY_DPCM_DECODER 0 +#define CONFIG_ROQ_DPCM_DECODER 0 +#define CONFIG_SOL_DPCM_DECODER 0 +#define CONFIG_XAN_DPCM_DECODER 0 +#define CONFIG_ADPCM_4XM_DECODER 0 +#define CONFIG_ADPCM_ADX_DECODER 0 +#define CONFIG_ADPCM_AFC_DECODER 0 +#define CONFIG_ADPCM_AICA_DECODER 0 +#define CONFIG_ADPCM_CT_DECODER 0 +#define CONFIG_ADPCM_DTK_DECODER 0 +#define CONFIG_ADPCM_EA_DECODER 0 +#define CONFIG_ADPCM_EA_MAXIS_XA_DECODER 0 +#define CONFIG_ADPCM_EA_R1_DECODER 0 +#define CONFIG_ADPCM_EA_R2_DECODER 0 +#define CONFIG_ADPCM_EA_R3_DECODER 0 +#define CONFIG_ADPCM_EA_XAS_DECODER 0 +#define CONFIG_ADPCM_G722_DECODER 0 +#define CONFIG_ADPCM_G726_DECODER 0 +#define CONFIG_ADPCM_G726LE_DECODER 0 +#define CONFIG_ADPCM_IMA_AMV_DECODER 0 +#define CONFIG_ADPCM_IMA_APC_DECODER 0 +#define CONFIG_ADPCM_IMA_DAT4_DECODER 0 +#define CONFIG_ADPCM_IMA_DK3_DECODER 0 +#define CONFIG_ADPCM_IMA_DK4_DECODER 0 +#define CONFIG_ADPCM_IMA_EA_EACS_DECODER 0 +#define CONFIG_ADPCM_IMA_EA_SEAD_DECODER 0 +#define CONFIG_ADPCM_IMA_ISS_DECODER 0 +#define 
CONFIG_ADPCM_IMA_OKI_DECODER 0 +#define CONFIG_ADPCM_IMA_QT_DECODER 0 +#define CONFIG_ADPCM_IMA_RAD_DECODER 0 +#define CONFIG_ADPCM_IMA_SMJPEG_DECODER 0 +#define CONFIG_ADPCM_IMA_WAV_DECODER 0 +#define CONFIG_ADPCM_IMA_WS_DECODER 0 +#define CONFIG_ADPCM_MS_DECODER 0 +#define CONFIG_ADPCM_MTAF_DECODER 0 +#define CONFIG_ADPCM_PSX_DECODER 0 +#define CONFIG_ADPCM_SBPRO_2_DECODER 0 +#define CONFIG_ADPCM_SBPRO_3_DECODER 0 +#define CONFIG_ADPCM_SBPRO_4_DECODER 0 +#define CONFIG_ADPCM_SWF_DECODER 0 +#define CONFIG_ADPCM_THP_DECODER 0 +#define CONFIG_ADPCM_THP_LE_DECODER 0 +#define CONFIG_ADPCM_VIMA_DECODER 0 +#define CONFIG_ADPCM_XA_DECODER 0 +#define CONFIG_ADPCM_YAMAHA_DECODER 0 +#define CONFIG_SSA_DECODER 0 +#define CONFIG_ASS_DECODER 0 +#define CONFIG_CCAPTION_DECODER 0 +#define CONFIG_DVBSUB_DECODER 0 +#define CONFIG_DVDSUB_DECODER 0 +#define CONFIG_JACOSUB_DECODER 0 +#define CONFIG_MICRODVD_DECODER 0 +#define CONFIG_MOVTEXT_DECODER 0 +#define CONFIG_MPL2_DECODER 0 +#define CONFIG_PGSSUB_DECODER 0 +#define CONFIG_PJS_DECODER 0 +#define CONFIG_REALTEXT_DECODER 0 +#define CONFIG_SAMI_DECODER 0 +#define CONFIG_SRT_DECODER 0 +#define CONFIG_STL_DECODER 0 +#define CONFIG_SUBRIP_DECODER 0 +#define CONFIG_SUBVIEWER_DECODER 0 +#define CONFIG_SUBVIEWER1_DECODER 0 +#define CONFIG_TEXT_DECODER 0 +#define CONFIG_VPLAYER_DECODER 0 +#define CONFIG_WEBVTT_DECODER 0 +#define CONFIG_XSUB_DECODER 0 +#define CONFIG_AAC_AT_DECODER 0 +#define CONFIG_AC3_AT_DECODER 0 +#define CONFIG_ADPCM_IMA_QT_AT_DECODER 0 +#define CONFIG_ALAC_AT_DECODER 0 +#define CONFIG_AMR_NB_AT_DECODER 0 +#define CONFIG_EAC3_AT_DECODER 0 +#define CONFIG_GSM_MS_AT_DECODER 0 +#define CONFIG_ILBC_AT_DECODER 0 +#define CONFIG_MP1_AT_DECODER 0 +#define CONFIG_MP2_AT_DECODER 0 +#define CONFIG_MP3_AT_DECODER 0 +#define CONFIG_PCM_ALAW_AT_DECODER 0 +#define CONFIG_PCM_MULAW_AT_DECODER 0 +#define CONFIG_QDMC_AT_DECODER 0 +#define CONFIG_QDM2_AT_DECODER 0 +#define CONFIG_LIBCELT_DECODER 0 +#define CONFIG_LIBFDK_AAC_DECODER 0 
+#define CONFIG_LIBGSM_DECODER 0 +#define CONFIG_LIBGSM_MS_DECODER 0 +#define CONFIG_LIBILBC_DECODER 0 +#define CONFIG_LIBOPENCORE_AMRNB_DECODER 0 +#define CONFIG_LIBOPENCORE_AMRWB_DECODER 0 +#define CONFIG_LIBOPENJPEG_DECODER 0 +#define CONFIG_LIBOPUS_DECODER 0 +#define CONFIG_LIBSCHROEDINGER_DECODER 0 +#define CONFIG_LIBSPEEX_DECODER 0 +#define CONFIG_LIBVORBIS_DECODER 0 +#define CONFIG_LIBVPX_VP8_DECODER 0 +#define CONFIG_LIBVPX_VP9_DECODER 0 +#define CONFIG_LIBZVBI_TELETEXT_DECODER 0 +#define CONFIG_BINTEXT_DECODER 0 +#define CONFIG_XBIN_DECODER 0 +#define CONFIG_IDF_DECODER 0 +#define CONFIG_LIBOPENH264_DECODER 0 +#define CONFIG_H264_CUVID_DECODER 0 +#define CONFIG_HEVC_CUVID_DECODER 0 +#define CONFIG_HEVC_MEDIACODEC_DECODER 0 +#define CONFIG_MJPEG_CUVID_DECODER 0 +#define CONFIG_MPEG1_CUVID_DECODER 0 +#define CONFIG_MPEG2_CUVID_DECODER 0 +#define CONFIG_MPEG4_CUVID_DECODER 0 +#define CONFIG_MPEG4_MEDIACODEC_DECODER 0 +#define CONFIG_VC1_CUVID_DECODER 0 +#define CONFIG_VP8_CUVID_DECODER 0 +#define CONFIG_VP8_MEDIACODEC_DECODER 0 +#define CONFIG_VP8_QSV_DECODER 0 +#define CONFIG_VP9_CUVID_DECODER 0 +#define CONFIG_VP9_MEDIACODEC_DECODER 0 +#define CONFIG_AA_DEMUXER 0 +#define CONFIG_AAC_DEMUXER 1 +#define CONFIG_AC3_DEMUXER 0 +#define CONFIG_ACM_DEMUXER 0 +#define CONFIG_ACT_DEMUXER 0 +#define CONFIG_ADF_DEMUXER 0 +#define CONFIG_ADP_DEMUXER 0 +#define CONFIG_ADS_DEMUXER 0 +#define CONFIG_ADX_DEMUXER 0 +#define CONFIG_AEA_DEMUXER 0 +#define CONFIG_AFC_DEMUXER 0 +#define CONFIG_AIFF_DEMUXER 0 +#define CONFIG_AIX_DEMUXER 0 +#define CONFIG_AMR_DEMUXER 0 +#define CONFIG_ANM_DEMUXER 0 +#define CONFIG_APC_DEMUXER 0 +#define CONFIG_APE_DEMUXER 0 +#define CONFIG_APNG_DEMUXER 0 +#define CONFIG_AQTITLE_DEMUXER 0 +#define CONFIG_ASF_DEMUXER 0 +#define CONFIG_ASF_O_DEMUXER 0 +#define CONFIG_ASS_DEMUXER 0 +#define CONFIG_AST_DEMUXER 0 +#define CONFIG_AU_DEMUXER 0 +#define CONFIG_AVI_DEMUXER 0 +#define CONFIG_AVISYNTH_DEMUXER 0 +#define CONFIG_AVR_DEMUXER 0 +#define 
CONFIG_AVS_DEMUXER 0 +#define CONFIG_BETHSOFTVID_DEMUXER 0 +#define CONFIG_BFI_DEMUXER 0 +#define CONFIG_BINTEXT_DEMUXER 0 +#define CONFIG_BINK_DEMUXER 0 +#define CONFIG_BIT_DEMUXER 0 +#define CONFIG_BMV_DEMUXER 0 +#define CONFIG_BFSTM_DEMUXER 0 +#define CONFIG_BRSTM_DEMUXER 0 +#define CONFIG_BOA_DEMUXER 0 +#define CONFIG_C93_DEMUXER 0 +#define CONFIG_CAF_DEMUXER 0 +#define CONFIG_CAVSVIDEO_DEMUXER 0 +#define CONFIG_CDG_DEMUXER 0 +#define CONFIG_CDXL_DEMUXER 0 +#define CONFIG_CINE_DEMUXER 0 +#define CONFIG_CONCAT_DEMUXER 1 +#define CONFIG_DATA_DEMUXER 1 +#define CONFIG_DAUD_DEMUXER 0 +#define CONFIG_DCSTR_DEMUXER 0 +#define CONFIG_DFA_DEMUXER 0 +#define CONFIG_DIRAC_DEMUXER 0 +#define CONFIG_DNXHD_DEMUXER 0 +#define CONFIG_DSF_DEMUXER 0 +#define CONFIG_DSICIN_DEMUXER 0 +#define CONFIG_DSS_DEMUXER 0 +#define CONFIG_DTS_DEMUXER 0 +#define CONFIG_DTSHD_DEMUXER 0 +#define CONFIG_DV_DEMUXER 0 +#define CONFIG_DVBSUB_DEMUXER 0 +#define CONFIG_DVBTXT_DEMUXER 0 +#define CONFIG_DXA_DEMUXER 0 +#define CONFIG_EA_DEMUXER 0 +#define CONFIG_EA_CDATA_DEMUXER 0 +#define CONFIG_EAC3_DEMUXER 0 +#define CONFIG_EPAF_DEMUXER 0 +#define CONFIG_FFM_DEMUXER 0 +#define CONFIG_FFMETADATA_DEMUXER 0 +#define CONFIG_FILMSTRIP_DEMUXER 0 +#define CONFIG_FLAC_DEMUXER 1 +#define CONFIG_FLIC_DEMUXER 0 +#define CONFIG_FLV_DEMUXER 1 +#define CONFIG_LIVE_FLV_DEMUXER 1 +#define CONFIG_FOURXM_DEMUXER 0 +#define CONFIG_FRM_DEMUXER 0 +#define CONFIG_FSB_DEMUXER 0 +#define CONFIG_G722_DEMUXER 0 +#define CONFIG_G723_1_DEMUXER 0 +#define CONFIG_G729_DEMUXER 0 +#define CONFIG_GENH_DEMUXER 0 +#define CONFIG_GIF_DEMUXER 0 +#define CONFIG_GSM_DEMUXER 0 +#define CONFIG_GXF_DEMUXER 0 +#define CONFIG_H261_DEMUXER 0 +#define CONFIG_H263_DEMUXER 0 +#define CONFIG_H264_DEMUXER 0 +#define CONFIG_HEVC_DEMUXER 1 +#define CONFIG_HLS_DEMUXER 1 +#define CONFIG_HNM_DEMUXER 0 +#define CONFIG_ICO_DEMUXER 0 +#define CONFIG_IDCIN_DEMUXER 0 +#define CONFIG_IDF_DEMUXER 0 +#define CONFIG_IFF_DEMUXER 0 +#define CONFIG_ILBC_DEMUXER 0 
+#define CONFIG_IMAGE2_DEMUXER 0 +#define CONFIG_IMAGE2PIPE_DEMUXER 0 +#define CONFIG_IMAGE2_ALIAS_PIX_DEMUXER 0 +#define CONFIG_IMAGE2_BRENDER_PIX_DEMUXER 0 +#define CONFIG_INGENIENT_DEMUXER 0 +#define CONFIG_IPMOVIE_DEMUXER 0 +#define CONFIG_IRCAM_DEMUXER 0 +#define CONFIG_ISS_DEMUXER 0 +#define CONFIG_IV8_DEMUXER 0 +#define CONFIG_IVF_DEMUXER 0 +#define CONFIG_IVR_DEMUXER 0 +#define CONFIG_JACOSUB_DEMUXER 0 +#define CONFIG_JV_DEMUXER 0 +#define CONFIG_LMLM4_DEMUXER 0 +#define CONFIG_LOAS_DEMUXER 0 +#define CONFIG_LRC_DEMUXER 0 +#define CONFIG_LVF_DEMUXER 0 +#define CONFIG_LXF_DEMUXER 0 +#define CONFIG_M4V_DEMUXER 0 +#define CONFIG_MATROSKA_DEMUXER 0 +#define CONFIG_MGSTS_DEMUXER 0 +#define CONFIG_MICRODVD_DEMUXER 0 +#define CONFIG_MJPEG_DEMUXER 0 +#define CONFIG_MJPEG_2000_DEMUXER 0 +#define CONFIG_MLP_DEMUXER 0 +#define CONFIG_MLV_DEMUXER 0 +#define CONFIG_MM_DEMUXER 0 +#define CONFIG_MMF_DEMUXER 0 +#define CONFIG_MOV_DEMUXER 1 +#define CONFIG_MP3_DEMUXER 1 +#define CONFIG_MPC_DEMUXER 0 +#define CONFIG_MPC8_DEMUXER 0 +#define CONFIG_MPEGPS_DEMUXER 1 +#define CONFIG_MPEGTS_DEMUXER 1 +#define CONFIG_MPEGTSRAW_DEMUXER 0 +#define CONFIG_MPEGVIDEO_DEMUXER 1 +#define CONFIG_MPJPEG_DEMUXER 0 +#define CONFIG_MPL2_DEMUXER 0 +#define CONFIG_MPSUB_DEMUXER 0 +#define CONFIG_MSF_DEMUXER 0 +#define CONFIG_MSNWC_TCP_DEMUXER 0 +#define CONFIG_MTAF_DEMUXER 0 +#define CONFIG_MTV_DEMUXER 0 +#define CONFIG_MUSX_DEMUXER 0 +#define CONFIG_MV_DEMUXER 0 +#define CONFIG_MVI_DEMUXER 0 +#define CONFIG_MXF_DEMUXER 0 +#define CONFIG_MXG_DEMUXER 0 +#define CONFIG_NC_DEMUXER 0 +#define CONFIG_NISTSPHERE_DEMUXER 0 +#define CONFIG_NSV_DEMUXER 0 +#define CONFIG_NUT_DEMUXER 0 +#define CONFIG_NUV_DEMUXER 0 +#define CONFIG_OGG_DEMUXER 0 +#define CONFIG_OMA_DEMUXER 0 +#define CONFIG_PAF_DEMUXER 0 +#define CONFIG_PCM_ALAW_DEMUXER 0 +#define CONFIG_PCM_MULAW_DEMUXER 0 +#define CONFIG_PCM_F64BE_DEMUXER 0 +#define CONFIG_PCM_F64LE_DEMUXER 0 +#define CONFIG_PCM_F32BE_DEMUXER 0 +#define 
CONFIG_PCM_F32LE_DEMUXER 0 +#define CONFIG_PCM_S32BE_DEMUXER 0 +#define CONFIG_PCM_S32LE_DEMUXER 0 +#define CONFIG_PCM_S24BE_DEMUXER 0 +#define CONFIG_PCM_S24LE_DEMUXER 0 +#define CONFIG_PCM_S16BE_DEMUXER 0 +#define CONFIG_PCM_S16LE_DEMUXER 0 +#define CONFIG_PCM_S8_DEMUXER 0 +#define CONFIG_PCM_U32BE_DEMUXER 0 +#define CONFIG_PCM_U32LE_DEMUXER 0 +#define CONFIG_PCM_U24BE_DEMUXER 0 +#define CONFIG_PCM_U24LE_DEMUXER 0 +#define CONFIG_PCM_U16BE_DEMUXER 0 +#define CONFIG_PCM_U16LE_DEMUXER 0 +#define CONFIG_PCM_U8_DEMUXER 0 +#define CONFIG_PJS_DEMUXER 0 +#define CONFIG_PMP_DEMUXER 0 +#define CONFIG_PVA_DEMUXER 0 +#define CONFIG_PVF_DEMUXER 0 +#define CONFIG_QCP_DEMUXER 0 +#define CONFIG_R3D_DEMUXER 0 +#define CONFIG_RAWVIDEO_DEMUXER 0 +#define CONFIG_REALTEXT_DEMUXER 0 +#define CONFIG_REDSPARK_DEMUXER 0 +#define CONFIG_RL2_DEMUXER 0 +#define CONFIG_RM_DEMUXER 0 +#define CONFIG_ROQ_DEMUXER 0 +#define CONFIG_RPL_DEMUXER 0 +#define CONFIG_RSD_DEMUXER 0 +#define CONFIG_RSO_DEMUXER 0 +#define CONFIG_RTP_DEMUXER 0 +#define CONFIG_RTSP_DEMUXER 0 +#define CONFIG_SAMI_DEMUXER 0 +#define CONFIG_SAP_DEMUXER 0 +#define CONFIG_SBG_DEMUXER 0 +#define CONFIG_SCC_DEMUXER 0 +#define CONFIG_SDP_DEMUXER 0 +#define CONFIG_SDR2_DEMUXER 0 +#define CONFIG_SDS_DEMUXER 0 +#define CONFIG_SDX_DEMUXER 0 +#define CONFIG_SEGAFILM_DEMUXER 0 +#define CONFIG_SHORTEN_DEMUXER 0 +#define CONFIG_SIFF_DEMUXER 0 +#define CONFIG_SLN_DEMUXER 0 +#define CONFIG_SMACKER_DEMUXER 0 +#define CONFIG_SMJPEG_DEMUXER 0 +#define CONFIG_SMUSH_DEMUXER 0 +#define CONFIG_SOL_DEMUXER 0 +#define CONFIG_SOX_DEMUXER 0 +#define CONFIG_SPDIF_DEMUXER 0 +#define CONFIG_SRT_DEMUXER 0 +#define CONFIG_STR_DEMUXER 0 +#define CONFIG_STL_DEMUXER 0 +#define CONFIG_SUBVIEWER1_DEMUXER 0 +#define CONFIG_SUBVIEWER_DEMUXER 0 +#define CONFIG_SUP_DEMUXER 0 +#define CONFIG_SVAG_DEMUXER 0 +#define CONFIG_SWF_DEMUXER 0 +#define CONFIG_TAK_DEMUXER 0 +#define CONFIG_TEDCAPTIONS_DEMUXER 0 +#define CONFIG_THP_DEMUXER 0 +#define CONFIG_THREEDOSTR_DEMUXER 
0 +#define CONFIG_TIERTEXSEQ_DEMUXER 0 +#define CONFIG_TMV_DEMUXER 0 +#define CONFIG_TRUEHD_DEMUXER 0 +#define CONFIG_TTA_DEMUXER 0 +#define CONFIG_TXD_DEMUXER 0 +#define CONFIG_TTY_DEMUXER 0 +#define CONFIG_V210_DEMUXER 0 +#define CONFIG_V210X_DEMUXER 0 +#define CONFIG_VAG_DEMUXER 0 +#define CONFIG_VC1_DEMUXER 0 +#define CONFIG_VC1T_DEMUXER 0 +#define CONFIG_VIVO_DEMUXER 0 +#define CONFIG_VMD_DEMUXER 0 +#define CONFIG_VOBSUB_DEMUXER 0 +#define CONFIG_VOC_DEMUXER 0 +#define CONFIG_VPK_DEMUXER 0 +#define CONFIG_VPLAYER_DEMUXER 0 +#define CONFIG_VQF_DEMUXER 0 +#define CONFIG_W64_DEMUXER 0 +#define CONFIG_WAV_DEMUXER 1 +#define CONFIG_WC3_DEMUXER 0 +#define CONFIG_WEBM_DASH_MANIFEST_DEMUXER 0 +#define CONFIG_WEBVTT_DEMUXER 0 +#define CONFIG_WSAUD_DEMUXER 0 +#define CONFIG_WSD_DEMUXER 0 +#define CONFIG_WSVQA_DEMUXER 0 +#define CONFIG_WTV_DEMUXER 0 +#define CONFIG_WVE_DEMUXER 0 +#define CONFIG_WV_DEMUXER 0 +#define CONFIG_XA_DEMUXER 0 +#define CONFIG_XBIN_DEMUXER 0 +#define CONFIG_XMV_DEMUXER 0 +#define CONFIG_XVAG_DEMUXER 0 +#define CONFIG_XWMA_DEMUXER 0 +#define CONFIG_YOP_DEMUXER 0 +#define CONFIG_YUV4MPEGPIPE_DEMUXER 0 +#define CONFIG_IMAGE_BMP_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_DDS_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_DPX_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_EXR_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_J2K_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_JPEG_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_JPEGLS_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PAM_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PBM_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PCX_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PGMYUV_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PGM_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PICTOR_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PNG_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PPM_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_PSD_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_QDRAW_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_SGI_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_SUNRAST_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_TIFF_PIPE_DEMUXER 0 +#define 
CONFIG_IMAGE_WEBP_PIPE_DEMUXER 0 +#define CONFIG_IMAGE_XPM_PIPE_DEMUXER 0 +#define CONFIG_LIBGME_DEMUXER 0 +#define CONFIG_LIBMODPLUG_DEMUXER 0 +#define CONFIG_LIBNUT_DEMUXER 0 +#define CONFIG_LIBOPENMPT_DEMUXER 0 +#define CONFIG_A64MULTI_ENCODER 0 +#define CONFIG_A64MULTI5_ENCODER 0 +#define CONFIG_ALIAS_PIX_ENCODER 0 +#define CONFIG_AMV_ENCODER 0 +#define CONFIG_APNG_ENCODER 0 +#define CONFIG_ASV1_ENCODER 0 +#define CONFIG_ASV2_ENCODER 0 +#define CONFIG_AVRP_ENCODER 0 +#define CONFIG_AVUI_ENCODER 0 +#define CONFIG_AYUV_ENCODER 0 +#define CONFIG_BMP_ENCODER 0 +#define CONFIG_CINEPAK_ENCODER 0 +#define CONFIG_CLJR_ENCODER 0 +#define CONFIG_COMFORTNOISE_ENCODER 0 +#define CONFIG_DNXHD_ENCODER 0 +#define CONFIG_DPX_ENCODER 0 +#define CONFIG_DVVIDEO_ENCODER 0 +#define CONFIG_FFV1_ENCODER 0 +#define CONFIG_FFVHUFF_ENCODER 0 +#define CONFIG_FLASHSV_ENCODER 0 +#define CONFIG_FLASHSV2_ENCODER 0 +#define CONFIG_FLV_ENCODER 0 +#define CONFIG_GIF_ENCODER 0 +#define CONFIG_H261_ENCODER 0 +#define CONFIG_H263_ENCODER 0 +#define CONFIG_H263P_ENCODER 0 +#define CONFIG_HAP_ENCODER 0 +#define CONFIG_HUFFYUV_ENCODER 0 +#define CONFIG_JPEG2000_ENCODER 0 +#define CONFIG_JPEGLS_ENCODER 0 +#define CONFIG_LJPEG_ENCODER 0 +#define CONFIG_MJPEG_ENCODER 0 +#define CONFIG_MPEG1VIDEO_ENCODER 0 +#define CONFIG_MPEG2VIDEO_ENCODER 0 +#define CONFIG_MPEG4_ENCODER 0 +#define CONFIG_MSMPEG4V2_ENCODER 0 +#define CONFIG_MSMPEG4V3_ENCODER 0 +#define CONFIG_MSVIDEO1_ENCODER 0 +#define CONFIG_PAM_ENCODER 0 +#define CONFIG_PBM_ENCODER 0 +#define CONFIG_PCX_ENCODER 0 +#define CONFIG_PGM_ENCODER 0 +#define CONFIG_PGMYUV_ENCODER 0 +#define CONFIG_PNG_ENCODER 1 +#define CONFIG_PPM_ENCODER 0 +#define CONFIG_PRORES_ENCODER 0 +#define CONFIG_PRORES_AW_ENCODER 0 +#define CONFIG_PRORES_KS_ENCODER 0 +#define CONFIG_QTRLE_ENCODER 0 +#define CONFIG_R10K_ENCODER 0 +#define CONFIG_R210_ENCODER 0 +#define CONFIG_RAWVIDEO_ENCODER 0 +#define CONFIG_ROQ_ENCODER 0 +#define CONFIG_RV10_ENCODER 0 +#define 
CONFIG_RV20_ENCODER 0 +#define CONFIG_S302M_ENCODER 0 +#define CONFIG_SGI_ENCODER 0 +#define CONFIG_SNOW_ENCODER 0 +#define CONFIG_SUNRAST_ENCODER 0 +#define CONFIG_SVQ1_ENCODER 0 +#define CONFIG_TARGA_ENCODER 0 +#define CONFIG_TIFF_ENCODER 0 +#define CONFIG_UTVIDEO_ENCODER 0 +#define CONFIG_V210_ENCODER 0 +#define CONFIG_V308_ENCODER 0 +#define CONFIG_V408_ENCODER 0 +#define CONFIG_V410_ENCODER 0 +#define CONFIG_VC2_ENCODER 0 +#define CONFIG_WRAPPED_AVFRAME_ENCODER 0 +#define CONFIG_WMV1_ENCODER 0 +#define CONFIG_WMV2_ENCODER 0 +#define CONFIG_XBM_ENCODER 0 +#define CONFIG_XFACE_ENCODER 0 +#define CONFIG_XWD_ENCODER 0 +#define CONFIG_Y41P_ENCODER 0 +#define CONFIG_YUV4_ENCODER 0 +#define CONFIG_ZLIB_ENCODER 0 +#define CONFIG_ZMBV_ENCODER 0 +#define CONFIG_AAC_ENCODER 0 +#define CONFIG_AC3_ENCODER 0 +#define CONFIG_AC3_FIXED_ENCODER 0 +#define CONFIG_ALAC_ENCODER 0 +#define CONFIG_DCA_ENCODER 0 +#define CONFIG_EAC3_ENCODER 0 +#define CONFIG_FLAC_ENCODER 0 +#define CONFIG_G723_1_ENCODER 0 +#define CONFIG_MLP_ENCODER 0 +#define CONFIG_MP2_ENCODER 0 +#define CONFIG_MP2FIXED_ENCODER 0 +#define CONFIG_NELLYMOSER_ENCODER 0 +#define CONFIG_OPUS_ENCODER 0 +#define CONFIG_RA_144_ENCODER 0 +#define CONFIG_SONIC_ENCODER 0 +#define CONFIG_SONIC_LS_ENCODER 0 +#define CONFIG_TRUEHD_ENCODER 0 +#define CONFIG_TTA_ENCODER 0 +#define CONFIG_VORBIS_ENCODER 0 +#define CONFIG_WAVPACK_ENCODER 0 +#define CONFIG_WMAV1_ENCODER 0 +#define CONFIG_WMAV2_ENCODER 0 +#define CONFIG_PCM_ALAW_ENCODER 0 +#define CONFIG_PCM_F32BE_ENCODER 0 +#define CONFIG_PCM_F32LE_ENCODER 0 +#define CONFIG_PCM_F64BE_ENCODER 0 +#define CONFIG_PCM_F64LE_ENCODER 0 +#define CONFIG_PCM_MULAW_ENCODER 0 +#define CONFIG_PCM_S8_ENCODER 0 +#define CONFIG_PCM_S8_PLANAR_ENCODER 0 +#define CONFIG_PCM_S16BE_ENCODER 0 +#define CONFIG_PCM_S16BE_PLANAR_ENCODER 0 +#define CONFIG_PCM_S16LE_ENCODER 0 +#define CONFIG_PCM_S16LE_PLANAR_ENCODER 0 +#define CONFIG_PCM_S24BE_ENCODER 0 +#define CONFIG_PCM_S24DAUD_ENCODER 0 +#define 
CONFIG_PCM_S24LE_ENCODER 0 +#define CONFIG_PCM_S24LE_PLANAR_ENCODER 0 +#define CONFIG_PCM_S32BE_ENCODER 0 +#define CONFIG_PCM_S32LE_ENCODER 0 +#define CONFIG_PCM_S32LE_PLANAR_ENCODER 0 +#define CONFIG_PCM_S64BE_ENCODER 0 +#define CONFIG_PCM_S64LE_ENCODER 0 +#define CONFIG_PCM_U8_ENCODER 0 +#define CONFIG_PCM_U16BE_ENCODER 0 +#define CONFIG_PCM_U16LE_ENCODER 0 +#define CONFIG_PCM_U24BE_ENCODER 0 +#define CONFIG_PCM_U24LE_ENCODER 0 +#define CONFIG_PCM_U32BE_ENCODER 0 +#define CONFIG_PCM_U32LE_ENCODER 0 +#define CONFIG_ROQ_DPCM_ENCODER 0 +#define CONFIG_ADPCM_ADX_ENCODER 0 +#define CONFIG_ADPCM_G722_ENCODER 0 +#define CONFIG_ADPCM_G726_ENCODER 0 +#define CONFIG_ADPCM_IMA_QT_ENCODER 0 +#define CONFIG_ADPCM_IMA_WAV_ENCODER 0 +#define CONFIG_ADPCM_MS_ENCODER 0 +#define CONFIG_ADPCM_SWF_ENCODER 0 +#define CONFIG_ADPCM_YAMAHA_ENCODER 0 +#define CONFIG_SSA_ENCODER 0 +#define CONFIG_ASS_ENCODER 0 +#define CONFIG_DVBSUB_ENCODER 0 +#define CONFIG_DVDSUB_ENCODER 0 +#define CONFIG_MOVTEXT_ENCODER 0 +#define CONFIG_SRT_ENCODER 0 +#define CONFIG_SUBRIP_ENCODER 0 +#define CONFIG_TEXT_ENCODER 0 +#define CONFIG_WEBVTT_ENCODER 0 +#define CONFIG_XSUB_ENCODER 0 +#define CONFIG_AAC_AT_ENCODER 0 +#define CONFIG_ALAC_AT_ENCODER 0 +#define CONFIG_ILBC_AT_ENCODER 0 +#define CONFIG_PCM_ALAW_AT_ENCODER 0 +#define CONFIG_PCM_MULAW_AT_ENCODER 0 +#define CONFIG_LIBFDK_AAC_ENCODER 0 +#define CONFIG_LIBGSM_ENCODER 0 +#define CONFIG_LIBGSM_MS_ENCODER 0 +#define CONFIG_LIBILBC_ENCODER 0 +#define CONFIG_LIBMP3LAME_ENCODER 0 +#define CONFIG_LIBOPENCORE_AMRNB_ENCODER 0 +#define CONFIG_LIBOPENJPEG_ENCODER 0 +#define CONFIG_LIBOPUS_ENCODER 0 +#define CONFIG_LIBSCHROEDINGER_ENCODER 0 +#define CONFIG_LIBSHINE_ENCODER 0 +#define CONFIG_LIBSPEEX_ENCODER 0 +#define CONFIG_LIBTHEORA_ENCODER 0 +#define CONFIG_LIBTWOLAME_ENCODER 0 +#define CONFIG_LIBVO_AMRWBENC_ENCODER 0 +#define CONFIG_LIBVORBIS_ENCODER 0 +#define CONFIG_LIBVPX_VP8_ENCODER 0 +#define CONFIG_LIBVPX_VP9_ENCODER 0 +#define CONFIG_LIBWAVPACK_ENCODER 
0 +#define CONFIG_LIBWEBP_ANIM_ENCODER 0 +#define CONFIG_LIBWEBP_ENCODER 0 +#define CONFIG_LIBX262_ENCODER 0 +#define CONFIG_LIBX264_ENCODER 0 +#define CONFIG_LIBX264RGB_ENCODER 0 +#define CONFIG_LIBX265_ENCODER 0 +#define CONFIG_LIBXAVS_ENCODER 0 +#define CONFIG_LIBXVID_ENCODER 0 +#define CONFIG_LIBOPENH264_ENCODER 0 +#define CONFIG_H264_NVENC_ENCODER 0 +#define CONFIG_H264_OMX_ENCODER 0 +#define CONFIG_H264_QSV_ENCODER 0 +#define CONFIG_H264_VAAPI_ENCODER 0 +#define CONFIG_H264_VIDEOTOOLBOX_ENCODER 0 +#define CONFIG_NVENC_ENCODER 0 +#define CONFIG_NVENC_H264_ENCODER 0 +#define CONFIG_NVENC_HEVC_ENCODER 0 +#define CONFIG_HEVC_NVENC_ENCODER 0 +#define CONFIG_HEVC_QSV_ENCODER 0 +#define CONFIG_HEVC_VAAPI_ENCODER 0 +#define CONFIG_LIBKVAZAAR_ENCODER 0 +#define CONFIG_MJPEG_VAAPI_ENCODER 0 +#define CONFIG_MPEG2_QSV_ENCODER 0 +#define CONFIG_MPEG2_VAAPI_ENCODER 0 +#define CONFIG_VP8_VAAPI_ENCODER 0 +#define CONFIG_ABENCH_FILTER 0 +#define CONFIG_ACOMPRESSOR_FILTER 0 +#define CONFIG_ACROSSFADE_FILTER 0 +#define CONFIG_ACRUSHER_FILTER 0 +#define CONFIG_ADELAY_FILTER 0 +#define CONFIG_AECHO_FILTER 0 +#define CONFIG_AEMPHASIS_FILTER 0 +#define CONFIG_AEVAL_FILTER 0 +#define CONFIG_AFADE_FILTER 0 +#define CONFIG_AFFTFILT_FILTER 0 +#define CONFIG_AFORMAT_FILTER 0 +#define CONFIG_AGATE_FILTER 0 +#define CONFIG_AINTERLEAVE_FILTER 0 +#define CONFIG_ALIMITER_FILTER 0 +#define CONFIG_ALLPASS_FILTER 0 +#define CONFIG_ALOOP_FILTER 0 +#define CONFIG_AMERGE_FILTER 0 +#define CONFIG_AMETADATA_FILTER 0 +#define CONFIG_AMIX_FILTER 0 +#define CONFIG_ANEQUALIZER_FILTER 0 +#define CONFIG_ANULL_FILTER 0 +#define CONFIG_APAD_FILTER 0 +#define CONFIG_APERMS_FILTER 0 +#define CONFIG_APHASER_FILTER 0 +#define CONFIG_APULSATOR_FILTER 0 +#define CONFIG_AREALTIME_FILTER 0 +#define CONFIG_ARESAMPLE_FILTER 0 +#define CONFIG_AREVERSE_FILTER 0 +#define CONFIG_ASELECT_FILTER 0 +#define CONFIG_ASENDCMD_FILTER 0 +#define CONFIG_ASETNSAMPLES_FILTER 0 +#define CONFIG_ASETPTS_FILTER 0 +#define 
CONFIG_ASETRATE_FILTER 0 +#define CONFIG_ASETTB_FILTER 0 +#define CONFIG_ASHOWINFO_FILTER 0 +#define CONFIG_ASIDEDATA_FILTER 0 +#define CONFIG_ASPLIT_FILTER 0 +#define CONFIG_ASTATS_FILTER 0 +#define CONFIG_ASTREAMSELECT_FILTER 0 +#define CONFIG_ATEMPO_FILTER 0 +#define CONFIG_ATRIM_FILTER 0 +#define CONFIG_AZMQ_FILTER 0 +#define CONFIG_BANDPASS_FILTER 0 +#define CONFIG_BANDREJECT_FILTER 0 +#define CONFIG_BASS_FILTER 0 +#define CONFIG_BIQUAD_FILTER 0 +#define CONFIG_BS2B_FILTER 0 +#define CONFIG_CHANNELMAP_FILTER 0 +#define CONFIG_CHANNELSPLIT_FILTER 0 +#define CONFIG_CHORUS_FILTER 0 +#define CONFIG_COMPAND_FILTER 0 +#define CONFIG_COMPENSATIONDELAY_FILTER 0 +#define CONFIG_CRYSTALIZER_FILTER 0 +#define CONFIG_DCSHIFT_FILTER 0 +#define CONFIG_DYNAUDNORM_FILTER 0 +#define CONFIG_EARWAX_FILTER 0 +#define CONFIG_EBUR128_FILTER 0 +#define CONFIG_EQUALIZER_FILTER 0 +#define CONFIG_EXTRASTEREO_FILTER 0 +#define CONFIG_FIREQUALIZER_FILTER 0 +#define CONFIG_FLANGER_FILTER 0 +#define CONFIG_HDCD_FILTER 0 +#define CONFIG_HIGHPASS_FILTER 0 +#define CONFIG_JOIN_FILTER 0 +#define CONFIG_LADSPA_FILTER 0 +#define CONFIG_LOUDNORM_FILTER 0 +#define CONFIG_LOWPASS_FILTER 0 +#define CONFIG_PAN_FILTER 0 +#define CONFIG_REPLAYGAIN_FILTER 0 +#define CONFIG_RESAMPLE_FILTER 0 +#define CONFIG_RUBBERBAND_FILTER 0 +#define CONFIG_SIDECHAINCOMPRESS_FILTER 0 +#define CONFIG_SIDECHAINGATE_FILTER 0 +#define CONFIG_SILENCEDETECT_FILTER 0 +#define CONFIG_SILENCEREMOVE_FILTER 0 +#define CONFIG_SOFALIZER_FILTER 0 +#define CONFIG_STEREOTOOLS_FILTER 0 +#define CONFIG_STEREOWIDEN_FILTER 0 +#define CONFIG_TREBLE_FILTER 0 +#define CONFIG_TREMOLO_FILTER 0 +#define CONFIG_VIBRATO_FILTER 0 +#define CONFIG_VOLUME_FILTER 0 +#define CONFIG_VOLUMEDETECT_FILTER 0 +#define CONFIG_AEVALSRC_FILTER 0 +#define CONFIG_ANOISESRC_FILTER 0 +#define CONFIG_ANULLSRC_FILTER 0 +#define CONFIG_FLITE_FILTER 0 +#define CONFIG_SINE_FILTER 0 +#define CONFIG_ANULLSINK_FILTER 0 +#define CONFIG_ALPHAEXTRACT_FILTER 0 +#define 
CONFIG_ALPHAMERGE_FILTER 0 +#define CONFIG_ASS_FILTER 0 +#define CONFIG_ATADENOISE_FILTER 0 +#define CONFIG_AVGBLUR_FILTER 0 +#define CONFIG_BBOX_FILTER 0 +#define CONFIG_BENCH_FILTER 0 +#define CONFIG_BITPLANENOISE_FILTER 0 +#define CONFIG_BLACKDETECT_FILTER 0 +#define CONFIG_BLACKFRAME_FILTER 0 +#define CONFIG_BLEND_FILTER 0 +#define CONFIG_BOXBLUR_FILTER 0 +#define CONFIG_BWDIF_FILTER 0 +#define CONFIG_CHROMAKEY_FILTER 0 +#define CONFIG_CIESCOPE_FILTER 0 +#define CONFIG_CODECVIEW_FILTER 0 +#define CONFIG_COLORBALANCE_FILTER 0 +#define CONFIG_COLORCHANNELMIXER_FILTER 0 +#define CONFIG_COLORKEY_FILTER 0 +#define CONFIG_COLORLEVELS_FILTER 0 +#define CONFIG_COLORMATRIX_FILTER 0 +#define CONFIG_COLORSPACE_FILTER 0 +#define CONFIG_CONVOLUTION_FILTER 0 +#define CONFIG_COPY_FILTER 0 +#define CONFIG_COREIMAGE_FILTER 0 +#define CONFIG_COVER_RECT_FILTER 0 +#define CONFIG_CROP_FILTER 0 +#define CONFIG_CROPDETECT_FILTER 0 +#define CONFIG_CURVES_FILTER 0 +#define CONFIG_DATASCOPE_FILTER 0 +#define CONFIG_DCTDNOIZ_FILTER 0 +#define CONFIG_DEBAND_FILTER 0 +#define CONFIG_DECIMATE_FILTER 0 +#define CONFIG_DEFLATE_FILTER 0 +#define CONFIG_DEINTERLACE_QSV_FILTER 0 +#define CONFIG_DEINTERLACE_VAAPI_FILTER 0 +#define CONFIG_DEJUDDER_FILTER 0 +#define CONFIG_DELOGO_FILTER 0 +#define CONFIG_DESHAKE_FILTER 0 +#define CONFIG_DETELECINE_FILTER 0 +#define CONFIG_DILATION_FILTER 0 +#define CONFIG_DISPLACE_FILTER 0 +#define CONFIG_DRAWBOX_FILTER 0 +#define CONFIG_DRAWGRAPH_FILTER 0 +#define CONFIG_DRAWGRID_FILTER 0 +#define CONFIG_DRAWTEXT_FILTER 0 +#define CONFIG_EDGEDETECT_FILTER 0 +#define CONFIG_ELBG_FILTER 0 +#define CONFIG_EQ_FILTER 0 +#define CONFIG_EROSION_FILTER 0 +#define CONFIG_EXTRACTPLANES_FILTER 0 +#define CONFIG_FADE_FILTER 0 +#define CONFIG_FFTFILT_FILTER 0 +#define CONFIG_FIELD_FILTER 0 +#define CONFIG_FIELDHINT_FILTER 0 +#define CONFIG_FIELDMATCH_FILTER 0 +#define CONFIG_FIELDORDER_FILTER 0 +#define CONFIG_FIND_RECT_FILTER 0 +#define CONFIG_FORMAT_FILTER 0 +#define 
CONFIG_FPS_FILTER 0 +#define CONFIG_FRAMEPACK_FILTER 0 +#define CONFIG_FRAMERATE_FILTER 0 +#define CONFIG_FRAMESTEP_FILTER 0 +#define CONFIG_FREI0R_FILTER 0 +#define CONFIG_FSPP_FILTER 0 +#define CONFIG_GBLUR_FILTER 0 +#define CONFIG_GEQ_FILTER 0 +#define CONFIG_GRADFUN_FILTER 0 +#define CONFIG_HALDCLUT_FILTER 0 +#define CONFIG_HFLIP_FILTER 0 +#define CONFIG_HISTEQ_FILTER 0 +#define CONFIG_HISTOGRAM_FILTER 0 +#define CONFIG_HQDN3D_FILTER 0 +#define CONFIG_HQX_FILTER 0 +#define CONFIG_HSTACK_FILTER 0 +#define CONFIG_HUE_FILTER 0 +#define CONFIG_HWDOWNLOAD_FILTER 0 +#define CONFIG_HWMAP_FILTER 0 +#define CONFIG_HWUPLOAD_FILTER 0 +#define CONFIG_HWUPLOAD_CUDA_FILTER 0 +#define CONFIG_HYSTERESIS_FILTER 0 +#define CONFIG_IDET_FILTER 0 +#define CONFIG_IL_FILTER 0 +#define CONFIG_INFLATE_FILTER 0 +#define CONFIG_INTERLACE_FILTER 0 +#define CONFIG_INTERLEAVE_FILTER 0 +#define CONFIG_KERNDEINT_FILTER 0 +#define CONFIG_LENSCORRECTION_FILTER 0 +#define CONFIG_LOOP_FILTER 0 +#define CONFIG_LUT_FILTER 0 +#define CONFIG_LUT2_FILTER 0 +#define CONFIG_LUT3D_FILTER 0 +#define CONFIG_LUTRGB_FILTER 0 +#define CONFIG_LUTYUV_FILTER 0 +#define CONFIG_MASKEDCLAMP_FILTER 0 +#define CONFIG_MASKEDMERGE_FILTER 0 +#define CONFIG_MCDEINT_FILTER 0 +#define CONFIG_MERGEPLANES_FILTER 0 +#define CONFIG_MESTIMATE_FILTER 0 +#define CONFIG_METADATA_FILTER 0 +#define CONFIG_MIDEQUALIZER_FILTER 0 +#define CONFIG_MINTERPOLATE_FILTER 0 +#define CONFIG_MPDECIMATE_FILTER 0 +#define CONFIG_NEGATE_FILTER 0 +#define CONFIG_NLMEANS_FILTER 0 +#define CONFIG_NNEDI_FILTER 0 +#define CONFIG_NOFORMAT_FILTER 0 +#define CONFIG_NOISE_FILTER 0 +#define CONFIG_NULL_FILTER 0 +#define CONFIG_OCR_FILTER 0 +#define CONFIG_OCV_FILTER 0 +#define CONFIG_OVERLAY_FILTER 0 +#define CONFIG_OWDENOISE_FILTER 0 +#define CONFIG_PAD_FILTER 0 +#define CONFIG_PALETTEGEN_FILTER 0 +#define CONFIG_PALETTEUSE_FILTER 0 +#define CONFIG_PERMS_FILTER 0 +#define CONFIG_PERSPECTIVE_FILTER 0 +#define CONFIG_PHASE_FILTER 0 +#define 
CONFIG_PIXDESCTEST_FILTER 0 +#define CONFIG_PP_FILTER 0 +#define CONFIG_PP7_FILTER 0 +#define CONFIG_PREMULTIPLY_FILTER 0 +#define CONFIG_PREWITT_FILTER 0 +#define CONFIG_PSNR_FILTER 0 +#define CONFIG_PULLUP_FILTER 0 +#define CONFIG_QP_FILTER 0 +#define CONFIG_RANDOM_FILTER 0 +#define CONFIG_READEIA608_FILTER 0 +#define CONFIG_READVITC_FILTER 0 +#define CONFIG_REALTIME_FILTER 0 +#define CONFIG_REMAP_FILTER 0 +#define CONFIG_REMOVEGRAIN_FILTER 0 +#define CONFIG_REMOVELOGO_FILTER 0 +#define CONFIG_REPEATFIELDS_FILTER 0 +#define CONFIG_REVERSE_FILTER 0 +#define CONFIG_ROTATE_FILTER 0 +#define CONFIG_SAB_FILTER 0 +#define CONFIG_SCALE_FILTER 0 +#define CONFIG_SCALE_NPP_FILTER 0 +#define CONFIG_SCALE_QSV_FILTER 0 +#define CONFIG_SCALE_VAAPI_FILTER 0 +#define CONFIG_SCALE2REF_FILTER 0 +#define CONFIG_SELECT_FILTER 0 +#define CONFIG_SELECTIVECOLOR_FILTER 0 +#define CONFIG_SENDCMD_FILTER 0 +#define CONFIG_SEPARATEFIELDS_FILTER 0 +#define CONFIG_SETDAR_FILTER 0 +#define CONFIG_SETFIELD_FILTER 0 +#define CONFIG_SETPTS_FILTER 0 +#define CONFIG_SETSAR_FILTER 0 +#define CONFIG_SETTB_FILTER 0 +#define CONFIG_SHOWINFO_FILTER 0 +#define CONFIG_SHOWPALETTE_FILTER 0 +#define CONFIG_SHUFFLEFRAMES_FILTER 0 +#define CONFIG_SHUFFLEPLANES_FILTER 0 +#define CONFIG_SIDEDATA_FILTER 0 +#define CONFIG_SIGNALSTATS_FILTER 0 +#define CONFIG_SIGNATURE_FILTER 0 +#define CONFIG_SMARTBLUR_FILTER 0 +#define CONFIG_SOBEL_FILTER 0 +#define CONFIG_SPLIT_FILTER 0 +#define CONFIG_SPP_FILTER 0 +#define CONFIG_SSIM_FILTER 0 +#define CONFIG_STEREO3D_FILTER 0 +#define CONFIG_STREAMSELECT_FILTER 0 +#define CONFIG_SUBTITLES_FILTER 0 +#define CONFIG_SUPER2XSAI_FILTER 0 +#define CONFIG_SWAPRECT_FILTER 0 +#define CONFIG_SWAPUV_FILTER 0 +#define CONFIG_TBLEND_FILTER 0 +#define CONFIG_TELECINE_FILTER 0 +#define CONFIG_THRESHOLD_FILTER 0 +#define CONFIG_THUMBNAIL_FILTER 0 +#define CONFIG_TILE_FILTER 0 +#define CONFIG_TINTERLACE_FILTER 0 +#define CONFIG_TRANSPOSE_FILTER 0 +#define CONFIG_TRIM_FILTER 0 +#define 
CONFIG_UNSHARP_FILTER 0 +#define CONFIG_USPP_FILTER 0 +#define CONFIG_VAGUEDENOISER_FILTER 0 +#define CONFIG_VECTORSCOPE_FILTER 0 +#define CONFIG_VFLIP_FILTER 0 +#define CONFIG_VIDSTABDETECT_FILTER 0 +#define CONFIG_VIDSTABTRANSFORM_FILTER 0 +#define CONFIG_VIGNETTE_FILTER 0 +#define CONFIG_VSTACK_FILTER 0 +#define CONFIG_W3FDIF_FILTER 0 +#define CONFIG_WAVEFORM_FILTER 0 +#define CONFIG_WEAVE_FILTER 0 +#define CONFIG_XBR_FILTER 0 +#define CONFIG_YADIF_FILTER 0 +#define CONFIG_ZMQ_FILTER 0 +#define CONFIG_ZOOMPAN_FILTER 0 +#define CONFIG_ZSCALE_FILTER 0 +#define CONFIG_ALLRGB_FILTER 0 +#define CONFIG_ALLYUV_FILTER 0 +#define CONFIG_CELLAUTO_FILTER 0 +#define CONFIG_COLOR_FILTER 0 +#define CONFIG_COREIMAGESRC_FILTER 0 +#define CONFIG_FREI0R_SRC_FILTER 0 +#define CONFIG_HALDCLUTSRC_FILTER 0 +#define CONFIG_LIFE_FILTER 0 +#define CONFIG_MANDELBROT_FILTER 0 +#define CONFIG_MPTESTSRC_FILTER 0 +#define CONFIG_NULLSRC_FILTER 0 +#define CONFIG_RGBTESTSRC_FILTER 0 +#define CONFIG_SMPTEBARS_FILTER 0 +#define CONFIG_SMPTEHDBARS_FILTER 0 +#define CONFIG_TESTSRC_FILTER 0 +#define CONFIG_TESTSRC2_FILTER 0 +#define CONFIG_YUVTESTSRC_FILTER 0 +#define CONFIG_NULLSINK_FILTER 0 +#define CONFIG_ABITSCOPE_FILTER 0 +#define CONFIG_ADRAWGRAPH_FILTER 0 +#define CONFIG_AHISTOGRAM_FILTER 0 +#define CONFIG_APHASEMETER_FILTER 0 +#define CONFIG_AVECTORSCOPE_FILTER 0 +#define CONFIG_CONCAT_FILTER 0 +#define CONFIG_SHOWCQT_FILTER 0 +#define CONFIG_SHOWFREQS_FILTER 0 +#define CONFIG_SHOWSPECTRUM_FILTER 0 +#define CONFIG_SHOWSPECTRUMPIC_FILTER 0 +#define CONFIG_SHOWVOLUME_FILTER 0 +#define CONFIG_SHOWWAVES_FILTER 0 +#define CONFIG_SHOWWAVESPIC_FILTER 0 +#define CONFIG_SPECTRUMSYNTH_FILTER 0 +#define CONFIG_AMOVIE_FILTER 0 +#define CONFIG_MOVIE_FILTER 0 +#define CONFIG_H263_VAAPI_HWACCEL 0 +#define CONFIG_H263_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_H264_CUVID_HWACCEL 0 +#define CONFIG_H264_D3D11VA_HWACCEL 0 +#define CONFIG_H264_DXVA2_HWACCEL 0 +#define CONFIG_H264_MEDIACODEC_HWACCEL 0 +#define 
CONFIG_H264_MMAL_HWACCEL 0 +#define CONFIG_H264_QSV_HWACCEL 0 +#define CONFIG_H264_VAAPI_HWACCEL 0 +#define CONFIG_H264_VDA_HWACCEL 0 +#define CONFIG_H264_VDA_OLD_HWACCEL 0 +#define CONFIG_H264_VDPAU_HWACCEL 0 +#define CONFIG_H264_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_HEVC_CUVID_HWACCEL 0 +#define CONFIG_HEVC_D3D11VA_HWACCEL 0 +#define CONFIG_HEVC_DXVA2_HWACCEL 0 +#define CONFIG_HEVC_MEDIACODEC_HWACCEL 0 +#define CONFIG_HEVC_QSV_HWACCEL 0 +#define CONFIG_HEVC_VAAPI_HWACCEL 0 +#define CONFIG_HEVC_VDPAU_HWACCEL 0 +#define CONFIG_MJPEG_CUVID_HWACCEL 0 +#define CONFIG_MPEG1_CUVID_HWACCEL 0 +#define CONFIG_MPEG1_XVMC_HWACCEL 0 +#define CONFIG_MPEG1_VDPAU_HWACCEL 0 +#define CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_MPEG2_CUVID_HWACCEL 0 +#define CONFIG_MPEG2_XVMC_HWACCEL 0 +#define CONFIG_MPEG2_D3D11VA_HWACCEL 0 +#define CONFIG_MPEG2_DXVA2_HWACCEL 0 +#define CONFIG_MPEG2_MMAL_HWACCEL 0 +#define CONFIG_MPEG2_QSV_HWACCEL 0 +#define CONFIG_MPEG2_VAAPI_HWACCEL 0 +#define CONFIG_MPEG2_VDPAU_HWACCEL 0 +#define CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_MPEG4_CUVID_HWACCEL 0 +#define CONFIG_MPEG4_MEDIACODEC_HWACCEL 0 +#define CONFIG_MPEG4_MMAL_HWACCEL 0 +#define CONFIG_MPEG4_VAAPI_HWACCEL 0 +#define CONFIG_MPEG4_VDPAU_HWACCEL 0 +#define CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL 0 +#define CONFIG_VC1_CUVID_HWACCEL 0 +#define CONFIG_VC1_D3D11VA_HWACCEL 0 +#define CONFIG_VC1_DXVA2_HWACCEL 0 +#define CONFIG_VC1_VAAPI_HWACCEL 0 +#define CONFIG_VC1_VDPAU_HWACCEL 0 +#define CONFIG_VC1_MMAL_HWACCEL 0 +#define CONFIG_VC1_QSV_HWACCEL 0 +#define CONFIG_VP8_CUVID_HWACCEL 0 +#define CONFIG_VP8_MEDIACODEC_HWACCEL 0 +#define CONFIG_VP8_QSV_HWACCEL 0 +#define CONFIG_VP9_CUVID_HWACCEL 0 +#define CONFIG_VP9_D3D11VA_HWACCEL 0 +#define CONFIG_VP9_DXVA2_HWACCEL 0 +#define CONFIG_VP9_MEDIACODEC_HWACCEL 0 +#define CONFIG_VP9_VAAPI_HWACCEL 0 +#define CONFIG_WMV3_D3D11VA_HWACCEL 0 +#define CONFIG_WMV3_DXVA2_HWACCEL 0 +#define CONFIG_WMV3_VAAPI_HWACCEL 0 +#define CONFIG_WMV3_VDPAU_HWACCEL 0 
+#define CONFIG_ALSA_INDEV 0 +#define CONFIG_AVFOUNDATION_INDEV 0 +#define CONFIG_BKTR_INDEV 0 +#define CONFIG_DECKLINK_INDEV 0 +#define CONFIG_DSHOW_INDEV 0 +#define CONFIG_DV1394_INDEV 0 +#define CONFIG_FBDEV_INDEV 0 +#define CONFIG_GDIGRAB_INDEV 0 +#define CONFIG_IEC61883_INDEV 0 +#define CONFIG_JACK_INDEV 0 +#define CONFIG_LAVFI_INDEV 0 +#define CONFIG_OPENAL_INDEV 0 +#define CONFIG_OSS_INDEV 0 +#define CONFIG_PULSE_INDEV 0 +#define CONFIG_QTKIT_INDEV 0 +#define CONFIG_SNDIO_INDEV 0 +#define CONFIG_V4L2_INDEV 0 +#define CONFIG_VFWCAP_INDEV 0 +#define CONFIG_XCBGRAB_INDEV 0 +#define CONFIG_LIBCDIO_INDEV 0 +#define CONFIG_LIBDC1394_INDEV 0 +#define CONFIG_A64_MUXER 0 +#define CONFIG_AC3_MUXER 0 +#define CONFIG_ADTS_MUXER 0 +#define CONFIG_ADX_MUXER 0 +#define CONFIG_AIFF_MUXER 0 +#define CONFIG_AMR_MUXER 0 +#define CONFIG_APNG_MUXER 0 +#define CONFIG_ASF_MUXER 0 +#define CONFIG_ASS_MUXER 0 +#define CONFIG_AST_MUXER 0 +#define CONFIG_ASF_STREAM_MUXER 0 +#define CONFIG_AU_MUXER 0 +#define CONFIG_AVI_MUXER 0 +#define CONFIG_AVM2_MUXER 0 +#define CONFIG_BIT_MUXER 0 +#define CONFIG_CAF_MUXER 0 +#define CONFIG_CAVSVIDEO_MUXER 0 +#define CONFIG_CRC_MUXER 0 +#define CONFIG_DASH_MUXER 0 +#define CONFIG_DATA_MUXER 0 +#define CONFIG_DAUD_MUXER 0 +#define CONFIG_DIRAC_MUXER 0 +#define CONFIG_DNXHD_MUXER 0 +#define CONFIG_DTS_MUXER 0 +#define CONFIG_DV_MUXER 0 +#define CONFIG_EAC3_MUXER 0 +#define CONFIG_F4V_MUXER 0 +#define CONFIG_FFM_MUXER 0 +#define CONFIG_FFMETADATA_MUXER 0 +#define CONFIG_FIFO_MUXER 0 +#define CONFIG_FILMSTRIP_MUXER 0 +#define CONFIG_FLAC_MUXER 0 +#define CONFIG_FLV_MUXER 0 +#define CONFIG_FRAMECRC_MUXER 0 +#define CONFIG_FRAMEHASH_MUXER 0 +#define CONFIG_FRAMEMD5_MUXER 0 +#define CONFIG_G722_MUXER 0 +#define CONFIG_G723_1_MUXER 0 +#define CONFIG_GIF_MUXER 0 +#define CONFIG_GSM_MUXER 0 +#define CONFIG_GXF_MUXER 0 +#define CONFIG_H261_MUXER 0 +#define CONFIG_H263_MUXER 0 +#define CONFIG_H264_MUXER 0 +#define CONFIG_HASH_MUXER 0 +#define CONFIG_HDS_MUXER 0 
+#define CONFIG_HEVC_MUXER 0 +#define CONFIG_HLS_MUXER 0 +#define CONFIG_ICO_MUXER 0 +#define CONFIG_ILBC_MUXER 0 +#define CONFIG_IMAGE2_MUXER 0 +#define CONFIG_IMAGE2PIPE_MUXER 0 +#define CONFIG_IPOD_MUXER 0 +#define CONFIG_IRCAM_MUXER 0 +#define CONFIG_ISMV_MUXER 0 +#define CONFIG_IVF_MUXER 0 +#define CONFIG_JACOSUB_MUXER 0 +#define CONFIG_LATM_MUXER 0 +#define CONFIG_LRC_MUXER 0 +#define CONFIG_M4V_MUXER 0 +#define CONFIG_MD5_MUXER 0 +#define CONFIG_MATROSKA_MUXER 0 +#define CONFIG_MATROSKA_AUDIO_MUXER 0 +#define CONFIG_MICRODVD_MUXER 0 +#define CONFIG_MJPEG_MUXER 0 +#define CONFIG_MLP_MUXER 0 +#define CONFIG_MMF_MUXER 0 +#define CONFIG_MOV_MUXER 1 +#define CONFIG_MP2_MUXER 0 +#define CONFIG_MP3_MUXER 0 +#define CONFIG_MP4_MUXER 1 +#define CONFIG_MPEG1SYSTEM_MUXER 0 +#define CONFIG_MPEG1VCD_MUXER 0 +#define CONFIG_MPEG1VIDEO_MUXER 0 +#define CONFIG_MPEG2DVD_MUXER 0 +#define CONFIG_MPEG2SVCD_MUXER 0 +#define CONFIG_MPEG2VIDEO_MUXER 0 +#define CONFIG_MPEG2VOB_MUXER 0 +#define CONFIG_MPEGTS_MUXER 0 +#define CONFIG_MPJPEG_MUXER 0 +#define CONFIG_MXF_MUXER 0 +#define CONFIG_MXF_D10_MUXER 0 +#define CONFIG_MXF_OPATOM_MUXER 0 +#define CONFIG_NULL_MUXER 0 +#define CONFIG_NUT_MUXER 0 +#define CONFIG_OGA_MUXER 0 +#define CONFIG_OGG_MUXER 0 +#define CONFIG_OGV_MUXER 0 +#define CONFIG_OMA_MUXER 0 +#define CONFIG_OPUS_MUXER 0 +#define CONFIG_PCM_ALAW_MUXER 0 +#define CONFIG_PCM_MULAW_MUXER 0 +#define CONFIG_PCM_F64BE_MUXER 0 +#define CONFIG_PCM_F64LE_MUXER 0 +#define CONFIG_PCM_F32BE_MUXER 0 +#define CONFIG_PCM_F32LE_MUXER 0 +#define CONFIG_PCM_S32BE_MUXER 0 +#define CONFIG_PCM_S32LE_MUXER 0 +#define CONFIG_PCM_S24BE_MUXER 0 +#define CONFIG_PCM_S24LE_MUXER 0 +#define CONFIG_PCM_S16BE_MUXER 0 +#define CONFIG_PCM_S16LE_MUXER 0 +#define CONFIG_PCM_S8_MUXER 0 +#define CONFIG_PCM_U32BE_MUXER 0 +#define CONFIG_PCM_U32LE_MUXER 0 +#define CONFIG_PCM_U24BE_MUXER 0 +#define CONFIG_PCM_U24LE_MUXER 0 +#define CONFIG_PCM_U16BE_MUXER 0 +#define CONFIG_PCM_U16LE_MUXER 0 +#define 
CONFIG_PCM_U8_MUXER 0 +#define CONFIG_PSP_MUXER 0 +#define CONFIG_RAWVIDEO_MUXER 0 +#define CONFIG_RM_MUXER 0 +#define CONFIG_ROQ_MUXER 0 +#define CONFIG_RSO_MUXER 0 +#define CONFIG_RTP_MUXER 0 +#define CONFIG_RTP_MPEGTS_MUXER 0 +#define CONFIG_RTSP_MUXER 0 +#define CONFIG_SAP_MUXER 0 +#define CONFIG_SCC_MUXER 0 +#define CONFIG_SEGMENT_MUXER 0 +#define CONFIG_STREAM_SEGMENT_MUXER 0 +#define CONFIG_SINGLEJPEG_MUXER 0 +#define CONFIG_SMJPEG_MUXER 0 +#define CONFIG_SMOOTHSTREAMING_MUXER 0 +#define CONFIG_SOX_MUXER 0 +#define CONFIG_SPX_MUXER 0 +#define CONFIG_SPDIF_MUXER 0 +#define CONFIG_SRT_MUXER 0 +#define CONFIG_SWF_MUXER 0 +#define CONFIG_TEE_MUXER 0 +#define CONFIG_TG2_MUXER 0 +#define CONFIG_TGP_MUXER 0 +#define CONFIG_MKVTIMESTAMP_V2_MUXER 0 +#define CONFIG_TRUEHD_MUXER 0 +#define CONFIG_TTA_MUXER 0 +#define CONFIG_UNCODEDFRAMECRC_MUXER 0 +#define CONFIG_VC1_MUXER 0 +#define CONFIG_VC1T_MUXER 0 +#define CONFIG_VOC_MUXER 0 +#define CONFIG_W64_MUXER 0 +#define CONFIG_WAV_MUXER 0 +#define CONFIG_WEBM_MUXER 0 +#define CONFIG_WEBM_DASH_MANIFEST_MUXER 0 +#define CONFIG_WEBM_CHUNK_MUXER 0 +#define CONFIG_WEBP_MUXER 0 +#define CONFIG_WEBVTT_MUXER 0 +#define CONFIG_WTV_MUXER 0 +#define CONFIG_WV_MUXER 0 +#define CONFIG_YUV4MPEGPIPE_MUXER 0 +#define CONFIG_CHROMAPRINT_MUXER 0 +#define CONFIG_LIBNUT_MUXER 0 +#define CONFIG_ALSA_OUTDEV 0 +#define CONFIG_CACA_OUTDEV 0 +#define CONFIG_DECKLINK_OUTDEV 0 +#define CONFIG_FBDEV_OUTDEV 0 +#define CONFIG_OPENGL_OUTDEV 0 +#define CONFIG_OSS_OUTDEV 0 +#define CONFIG_PULSE_OUTDEV 0 +#define CONFIG_SDL2_OUTDEV 0 +#define CONFIG_SNDIO_OUTDEV 0 +#define CONFIG_V4L2_OUTDEV 0 +#define CONFIG_XV_OUTDEV 0 +#define CONFIG_AAC_PARSER 1 +#define CONFIG_AAC_LATM_PARSER 1 +#define CONFIG_AC3_PARSER 0 +#define CONFIG_ADX_PARSER 0 +#define CONFIG_BMP_PARSER 0 +#define CONFIG_CAVSVIDEO_PARSER 0 +#define CONFIG_COOK_PARSER 0 +#define CONFIG_DCA_PARSER 0 +#define CONFIG_DIRAC_PARSER 0 +#define CONFIG_DNXHD_PARSER 0 +#define CONFIG_DPX_PARSER 0 
+#define CONFIG_DVAUDIO_PARSER 0 +#define CONFIG_DVBSUB_PARSER 0 +#define CONFIG_DVDSUB_PARSER 0 +#define CONFIG_DVD_NAV_PARSER 0 +#define CONFIG_FLAC_PARSER 1 +#define CONFIG_G729_PARSER 0 +#define CONFIG_GSM_PARSER 0 +#define CONFIG_H261_PARSER 0 +#define CONFIG_H263_PARSER 1 +#define CONFIG_H264_PARSER 1 +#define CONFIG_HEVC_PARSER 1 +#define CONFIG_MJPEG_PARSER 0 +#define CONFIG_MLP_PARSER 0 +#define CONFIG_MPEG4VIDEO_PARSER 1 +#define CONFIG_MPEGAUDIO_PARSER 1 +#define CONFIG_MPEGVIDEO_PARSER 0 +#define CONFIG_OPUS_PARSER 0 +#define CONFIG_PNG_PARSER 0 +#define CONFIG_PNM_PARSER 0 +#define CONFIG_RV30_PARSER 0 +#define CONFIG_RV40_PARSER 0 +#define CONFIG_SIPR_PARSER 0 +#define CONFIG_TAK_PARSER 0 +#define CONFIG_VC1_PARSER 0 +#define CONFIG_VORBIS_PARSER 0 +#define CONFIG_VP3_PARSER 0 +#define CONFIG_VP8_PARSER 0 +#define CONFIG_VP9_PARSER 0 +#define CONFIG_XMA_PARSER 0 +#define CONFIG_ASYNC_PROTOCOL 1 +#define CONFIG_BLURAY_PROTOCOL 0 +#define CONFIG_CACHE_PROTOCOL 1 +#define CONFIG_CONCAT_PROTOCOL 0 +#define CONFIG_CRYPTO_PROTOCOL 1 +#define CONFIG_DATA_PROTOCOL 1 +#define CONFIG_FFRTMPCRYPT_PROTOCOL 0 +#define CONFIG_FFRTMPHTTP_PROTOCOL 1 +#define CONFIG_FILE_PROTOCOL 1 +#define CONFIG_FTP_PROTOCOL 1 +#define CONFIG_GOPHER_PROTOCOL 0 +#define CONFIG_HLS_PROTOCOL 1 +#define CONFIG_HTTP_PROTOCOL 1 +#define CONFIG_HTTPPROXY_PROTOCOL 1 +#define CONFIG_HTTPS_PROTOCOL 1 +#define CONFIG_ICECAST_PROTOCOL 0 +#define CONFIG_IJKHTTPHOOK_PROTOCOL 1 +#define CONFIG_IJKHLSCACHE_PROTOCOL 1 +#define CONFIG_IJKLONGURL_PROTOCOL 1 +#define CONFIG_IJKMEDIADATASOURCE_PROTOCOL 1 +#define CONFIG_IJKSEGMENT_PROTOCOL 1 +#define CONFIG_IJKTCPHOOK_PROTOCOL 1 +#define CONFIG_IJKIO_PROTOCOL 1 +#define CONFIG_MMSH_PROTOCOL 0 +#define CONFIG_MMST_PROTOCOL 0 +#define CONFIG_MD5_PROTOCOL 0 +#define CONFIG_PIPE_PROTOCOL 1 +#define CONFIG_PROMPEG_PROTOCOL 1 +#define CONFIG_RTMP_PROTOCOL 1 +#define CONFIG_RTMPE_PROTOCOL 0 +#define CONFIG_RTMPS_PROTOCOL 0 +#define CONFIG_RTMPT_PROTOCOL 1 
+#define CONFIG_RTMPTE_PROTOCOL 0 +#define CONFIG_RTMPTS_PROTOCOL 0 +#define CONFIG_RTP_PROTOCOL 0 +#define CONFIG_SCTP_PROTOCOL 0 +#define CONFIG_SRTP_PROTOCOL 0 +#define CONFIG_SUBFILE_PROTOCOL 0 +#define CONFIG_TEE_PROTOCOL 1 +#define CONFIG_TCP_PROTOCOL 1 +#define CONFIG_TLS_GNUTLS_PROTOCOL 0 +#define CONFIG_TLS_SCHANNEL_PROTOCOL 0 +#define CONFIG_TLS_SECURETRANSPORT_PROTOCOL 0 +#define CONFIG_TLS_OPENSSL_PROTOCOL 1 +#define CONFIG_UDP_PROTOCOL 1 +#define CONFIG_UDPLITE_PROTOCOL 1 +#define CONFIG_UNIX_PROTOCOL 0 +#define CONFIG_LIBRTMP_PROTOCOL 0 +#define CONFIG_LIBRTMPE_PROTOCOL 0 +#define CONFIG_LIBRTMPS_PROTOCOL 0 +#define CONFIG_LIBRTMPT_PROTOCOL 0 +#define CONFIG_LIBRTMPTE_PROTOCOL 0 +#define CONFIG_LIBSSH_PROTOCOL 0 +#define CONFIG_LIBSMBCLIENT_PROTOCOL 0 +#endif /* FFMPEG_CONFIG_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libswresample/swresample.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libswresample/swresample.h new file mode 100644 index 0000000..a8db5c2 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libswresample/swresample.h @@ -0,0 +1,583 @@ +/* + * Copyright (C) 2011-2013 Michael Niedermayer (michaelni@gmx.at) + * + * This file is part of libswresample + * + * libswresample is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * libswresample is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with libswresample; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWRESAMPLE_SWRESAMPLE_H +#define SWRESAMPLE_SWRESAMPLE_H + +/** + * @file + * @ingroup lswr + * libswresample public header + */ + +/** + * @defgroup lswr libswresample + * @{ + * + * Audio resampling, sample format conversion and mixing library. + * + * Interaction with lswr is done through SwrContext, which is + * allocated with swr_alloc() or swr_alloc_set_opts(). It is opaque, so all parameters + * must be set with the @ref avoptions API. + * + * The first thing you will need to do in order to use lswr is to allocate + * SwrContext. This can be done with swr_alloc() or swr_alloc_set_opts(). If you + * are using the former, you must set options through the @ref avoptions API. + * The latter function provides the same feature, but it allows you to set some + * common options in the same statement. + * + * For example the following code will setup conversion from planar float sample + * format to interleaved signed 16-bit integer, downsampling from 48kHz to + * 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing + * matrix). This is using the swr_alloc() function. 
+ * @code + * SwrContext *swr = swr_alloc(); + * av_opt_set_channel_layout(swr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0); + * av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); + * av_opt_set_int(swr, "in_sample_rate", 48000, 0); + * av_opt_set_int(swr, "out_sample_rate", 44100, 0); + * av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0); + * av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); + * @endcode + * + * The same job can be done using swr_alloc_set_opts() as well: + * @code + * SwrContext *swr = swr_alloc_set_opts(NULL, // we're allocating a new context + * AV_CH_LAYOUT_STEREO, // out_ch_layout + * AV_SAMPLE_FMT_S16, // out_sample_fmt + * 44100, // out_sample_rate + * AV_CH_LAYOUT_5POINT1, // in_ch_layout + * AV_SAMPLE_FMT_FLTP, // in_sample_fmt + * 48000, // in_sample_rate + * 0, // log_offset + * NULL); // log_ctx + * @endcode + * + * Once all values have been set, it must be initialized with swr_init(). If + * you need to change the conversion parameters, you can change the parameters + * using @ref AVOptions, as described above in the first example; or by using + * swr_alloc_set_opts(), but with the first argument the allocated context. + * You must then call swr_init() again. + * + * The conversion itself is done by repeatedly calling swr_convert(). + * Note that the samples may get buffered in swr if you provide insufficient + * output space or if sample rate conversion is done, which requires "future" + * samples. Samples that do not require future input can be retrieved at any + * time by using swr_convert() (in_count can be set to 0). + * At the end of conversion the resampling buffer can be flushed by calling + * swr_convert() with NULL in and 0 in_count. + * + * The samples used in the conversion process can be managed with the libavutil + * @ref lavu_sampmanip "samples manipulation" API, including av_samples_alloc() + * function used in the following example. 
+ * + * The delay between input and output, can at any time be found by using + * swr_get_delay(). + * + * The following code demonstrates the conversion loop assuming the parameters + * from above and caller-defined functions get_input() and handle_output(): + * @code + * uint8_t **input; + * int in_samples; + * + * while (get_input(&input, &in_samples)) { + * uint8_t *output; + * int out_samples = av_rescale_rnd(swr_get_delay(swr, 48000) + + * in_samples, 44100, 48000, AV_ROUND_UP); + * av_samples_alloc(&output, NULL, 2, out_samples, + * AV_SAMPLE_FMT_S16, 0); + * out_samples = swr_convert(swr, &output, out_samples, + * input, in_samples); + * handle_output(output, out_samples); + * av_freep(&output); + * } + * @endcode + * + * When the conversion is finished, the conversion + * context and everything associated with it must be freed with swr_free(). + * A swr_close() function is also available, but it exists mainly for + * compatibility with libavresample, and is not required to be called. + * + * There will be no memory leak if the data is not completely flushed before + * swr_free(). + */ + +#include +#include "libavutil/channel_layout.h" +#include "libavutil/frame.h" +#include "libavutil/samplefmt.h" + +#include "libswresample/version.h" + +#if LIBSWRESAMPLE_VERSION_MAJOR < 1 +#define SWR_CH_MAX 32 ///< Maximum number of channels +#endif + +/** + * @name Option constants + * These constants are used for the @ref avoptions interface for lswr. + * @{ + * + */ + +#define SWR_FLAG_RESAMPLE 1 ///< Force resampling even if equal sample rate +//TODO use int resample ? +//long term TODO can we enable this dynamically? 
+ +/** Dithering algorithms */ +enum SwrDitherType { + SWR_DITHER_NONE = 0, + SWR_DITHER_RECTANGULAR, + SWR_DITHER_TRIANGULAR, + SWR_DITHER_TRIANGULAR_HIGHPASS, + + SWR_DITHER_NS = 64, ///< not part of API/ABI + SWR_DITHER_NS_LIPSHITZ, + SWR_DITHER_NS_F_WEIGHTED, + SWR_DITHER_NS_MODIFIED_E_WEIGHTED, + SWR_DITHER_NS_IMPROVED_E_WEIGHTED, + SWR_DITHER_NS_SHIBATA, + SWR_DITHER_NS_LOW_SHIBATA, + SWR_DITHER_NS_HIGH_SHIBATA, + SWR_DITHER_NB, ///< not part of API/ABI +}; + +/** Resampling Engines */ +enum SwrEngine { + SWR_ENGINE_SWR, /**< SW Resampler */ + SWR_ENGINE_SOXR, /**< SoX Resampler */ + SWR_ENGINE_NB, ///< not part of API/ABI +}; + +/** Resampling Filter Types */ +enum SwrFilterType { + SWR_FILTER_TYPE_CUBIC, /**< Cubic */ + SWR_FILTER_TYPE_BLACKMAN_NUTTALL, /**< Blackman Nuttall windowed sinc */ + SWR_FILTER_TYPE_KAISER, /**< Kaiser windowed sinc */ +}; + +/** + * @} + */ + +/** + * The libswresample context. Unlike libavcodec and libavformat, this structure + * is opaque. This means that if you would like to set options, you must use + * the @ref avoptions API and cannot directly set values to members of the + * structure. + */ +typedef struct SwrContext SwrContext; + +/** + * Get the AVClass for SwrContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + * @return the AVClass of SwrContext + */ +const AVClass *swr_get_class(void); + +/** + * @name SwrContext constructor functions + * @{ + */ + +/** + * Allocate SwrContext. + * + * If you use this function you will need to set the parameters (manually or + * with swr_alloc_set_opts()) before calling swr_init(). + * + * @see swr_alloc_set_opts(), swr_init(), swr_free() + * @return NULL on error, allocated context otherwise + */ +struct SwrContext *swr_alloc(void); + +/** + * Initialize context after user parameters have been set. + * @note The context must be configured using the AVOption API. 
+ * + * @see av_opt_set_int() + * @see av_opt_set_dict() + * + * @param[in,out] s Swr context to initialize + * @return AVERROR error code in case of failure. + */ +int swr_init(struct SwrContext *s); + +/** + * Check whether an swr context has been initialized or not. + * + * @param[in] s Swr context to check + * @see swr_init() + * @return positive if it has been initialized, 0 if not initialized + */ +int swr_is_initialized(struct SwrContext *s); + +/** + * Allocate SwrContext if needed and set/reset common parameters. + * + * This function does not require s to be allocated with swr_alloc(). On the + * other hand, swr_alloc() can use swr_alloc_set_opts() to set the parameters + * on the allocated context. + * + * @param s existing Swr context if available, or NULL if not + * @param out_ch_layout output channel layout (AV_CH_LAYOUT_*) + * @param out_sample_fmt output sample format (AV_SAMPLE_FMT_*). + * @param out_sample_rate output sample rate (frequency in Hz) + * @param in_ch_layout input channel layout (AV_CH_LAYOUT_*) + * @param in_sample_fmt input sample format (AV_SAMPLE_FMT_*). + * @param in_sample_rate input sample rate (frequency in Hz) + * @param log_offset logging level offset + * @param log_ctx parent logging context, can be NULL + * + * @see swr_init(), swr_free() + * @return NULL on error, allocated context otherwise + */ +struct SwrContext *swr_alloc_set_opts(struct SwrContext *s, + int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, + int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, + int log_offset, void *log_ctx); + +/** + * @} + * + * @name SwrContext destructor functions + * @{ + */ + +/** + * Free the given SwrContext and set the pointer to NULL. + * + * @param[in] s a pointer to a pointer to Swr context + */ +void swr_free(struct SwrContext **s); + +/** + * Closes the context so that swr_is_initialized() returns 0. 
+ * + * The context can be brought back to life by running swr_init(), + * swr_init() can also be used without swr_close(). + * This function is mainly provided for simplifying the usecase + * where one tries to support libavresample and libswresample. + * + * @param[in,out] s Swr context to be closed + */ +void swr_close(struct SwrContext *s); + +/** + * @} + * + * @name Core conversion functions + * @{ + */ + +/** Convert audio. + * + * in and in_count can be set to 0 to flush the last few samples out at the + * end. + * + * If more input is provided than output space, then the input will be buffered. + * You can avoid this buffering by using swr_get_out_samples() to retrieve an + * upper bound on the required number of output samples for the given number of + * input samples. Conversion will run directly without copying whenever possible. + * + * @param s allocated Swr context, with parameters set + * @param out output buffers, only the first one need be set in case of packed audio + * @param out_count amount of space available for output in samples per channel + * @param in input buffers, only the first one need to be set in case of packed audio + * @param in_count number of input samples available in one channel + * + * @return number of samples output per channel, negative value on error + */ +int swr_convert(struct SwrContext *s, uint8_t **out, int out_count, + const uint8_t **in , int in_count); + +/** + * Convert the next timestamp from input to output + * timestamps are in 1/(in_sample_rate * out_sample_rate) units. + * + * @note There are 2 slightly differently behaving modes. + * @li When automatic timestamp compensation is not used, (min_compensation >= FLT_MAX) + * in this case timestamps will be passed through with delays compensated + * @li When automatic timestamp compensation is used, (min_compensation < FLT_MAX) + * in this case the output timestamps will match output sample numbers. + * See ffmpeg-resampler(1) for the two modes of compensation. 
+ * + * @param s[in] initialized Swr context + * @param pts[in] timestamp for the next input sample, INT64_MIN if unknown + * @see swr_set_compensation(), swr_drop_output(), and swr_inject_silence() are + * function used internally for timestamp compensation. + * @return the output timestamp for the next output sample + */ +int64_t swr_next_pts(struct SwrContext *s, int64_t pts); + +/** + * @} + * + * @name Low-level option setting functions + * These functons provide a means to set low-level options that is not possible + * with the AVOption API. + * @{ + */ + +/** + * Activate resampling compensation ("soft" compensation). This function is + * internally called when needed in swr_next_pts(). + * + * @param[in,out] s allocated Swr context. If it is not initialized, + * or SWR_FLAG_RESAMPLE is not set, swr_init() is + * called with the flag set. + * @param[in] sample_delta delta in PTS per sample + * @param[in] compensation_distance number of samples to compensate for + * @return >= 0 on success, AVERROR error codes if: + * @li @c s is NULL, + * @li @c compensation_distance is less than 0, + * @li @c compensation_distance is 0 but sample_delta is not, + * @li compensation unsupported by resampler, or + * @li swr_init() fails when called. + */ +int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance); + +/** + * Set a customized input channel mapping. + * + * @param[in,out] s allocated Swr context, not yet initialized + * @param[in] channel_map customized input channel mapping (array of channel + * indexes, -1 for a muted channel) + * @return >= 0 on success, or AVERROR error code in case of failure. + */ +int swr_set_channel_mapping(struct SwrContext *s, const int *channel_map); + +/** + * Generate a channel mixing matrix. + * + * This function is the one used internally by libswresample for building the + * default mixing matrix. It is made public just as a utility function for + * building custom matrices. 
+ * + * @param in_layout input channel layout + * @param out_layout output channel layout + * @param center_mix_level mix level for the center channel + * @param surround_mix_level mix level for the surround channel(s) + * @param lfe_mix_level mix level for the low-frequency effects channel + * @param rematrix_maxval if 1.0, coefficients will be normalized to prevent + * overflow. if INT_MAX, coefficients will not be + * normalized. + * @param[out] matrix mixing coefficients; matrix[i + stride * o] is + * the weight of input channel i in output channel o. + * @param stride distance between adjacent input channels in the + * matrix array + * @param matrix_encoding matrixed stereo downmix mode (e.g. dplii) + * @param log_ctx parent logging context, can be NULL + * @return 0 on success, negative AVERROR code on failure + */ +int swr_build_matrix(uint64_t in_layout, uint64_t out_layout, + double center_mix_level, double surround_mix_level, + double lfe_mix_level, double rematrix_maxval, + double rematrix_volume, double *matrix, + int stride, enum AVMatrixEncoding matrix_encoding, + void *log_ctx); + +/** + * Set a customized remix matrix. + * + * @param s allocated Swr context, not yet initialized + * @param matrix remix coefficients; matrix[i + stride * o] is + * the weight of input channel i in output channel o + * @param stride offset between lines of the matrix + * @return >= 0 on success, or AVERROR error code in case of failure. + */ +int swr_set_matrix(struct SwrContext *s, const double *matrix, int stride); + +/** + * @} + * + * @name Sample handling functions + * @{ + */ + +/** + * Drops the specified number of output samples. + * + * This function, along with swr_inject_silence(), is called by swr_next_pts() + * if needed for "hard" compensation. 
+ * + * @param s allocated Swr context + * @param count number of samples to be dropped + * + * @return >= 0 on success, or a negative AVERROR code on failure + */ +int swr_drop_output(struct SwrContext *s, int count); + +/** + * Injects the specified number of silence samples. + * + * This function, along with swr_drop_output(), is called by swr_next_pts() + * if needed for "hard" compensation. + * + * @param s allocated Swr context + * @param count number of samples to be dropped + * + * @return >= 0 on success, or a negative AVERROR code on failure + */ +int swr_inject_silence(struct SwrContext *s, int count); + +/** + * Gets the delay the next input sample will experience relative to the next output sample. + * + * Swresample can buffer data if more input has been provided than available + * output space, also converting between sample rates needs a delay. + * This function returns the sum of all such delays. + * The exact delay is not necessarily an integer value in either input or + * output sample rate. Especially when downsampling by a large value, the + * output sample rate may be a poor choice to represent the delay, similarly + * for upsampling and the input sample rate. + * + * @param s swr context + * @param base timebase in which the returned delay will be: + * @li if it's set to 1 the returned delay is in seconds + * @li if it's set to 1000 the returned delay is in milliseconds + * @li if it's set to the input sample rate then the returned + * delay is in input samples + * @li if it's set to the output sample rate then the returned + * delay is in output samples + * @li if it's the least common multiple of in_sample_rate and + * out_sample_rate then an exact rounding-free delay will be + * returned + * @returns the delay in 1 / @c base units. 
+ */ +int64_t swr_get_delay(struct SwrContext *s, int64_t base); + +/** + * Find an upper bound on the number of samples that the next swr_convert + * call will output, if called with in_samples of input samples. This + * depends on the internal state, and anything changing the internal state + * (like further swr_convert() calls) will may change the number of samples + * swr_get_out_samples() returns for the same number of input samples. + * + * @param in_samples number of input samples. + * @note any call to swr_inject_silence(), swr_convert(), swr_next_pts() + * or swr_set_compensation() invalidates this limit + * @note it is recommended to pass the correct available buffer size + * to all functions like swr_convert() even if swr_get_out_samples() + * indicates that less would be used. + * @returns an upper bound on the number of samples that the next swr_convert + * will output or a negative value to indicate an error + */ +int swr_get_out_samples(struct SwrContext *s, int in_samples); + +/** + * @} + * + * @name Configuration accessors + * @{ + */ + +/** + * Return the @ref LIBSWRESAMPLE_VERSION_INT constant. + * + * This is useful to check if the build-time libswresample has the same version + * as the run-time one. + * + * @returns the unsigned int-typed version + */ +unsigned swresample_version(void); + +/** + * Return the swr build-time configuration. + * + * @returns the build-time @c ./configure flags + */ +const char *swresample_configuration(void); + +/** + * Return the swr license. + * + * @returns the license of libswresample, determined at build-time + */ +const char *swresample_license(void); + +/** + * @} + * + * @name AVFrame based API + * @{ + */ + +/** + * Convert the samples in the input AVFrame and write them to the output AVFrame. + * + * Input and output AVFrames must have channel_layout, sample_rate and format set. 
+ * + * If the output AVFrame does not have the data pointers allocated the nb_samples + * field will be set using av_frame_get_buffer() + * is called to allocate the frame. + * + * The output AVFrame can be NULL or have fewer allocated samples than required. + * In this case, any remaining samples not written to the output will be added + * to an internal FIFO buffer, to be returned at the next call to this function + * or to swr_convert(). + * + * If converting sample rate, there may be data remaining in the internal + * resampling delay buffer. swr_get_delay() tells the number of + * remaining samples. To get this data as output, call this function or + * swr_convert() with NULL input. + * + * If the SwrContext configuration does not match the output and + * input AVFrame settings the conversion does not take place and depending on + * which AVFrame is not matching AVERROR_OUTPUT_CHANGED, AVERROR_INPUT_CHANGED + * or the result of a bitwise-OR of them is returned. + * + * @see swr_delay() + * @see swr_convert() + * @see swr_get_delay() + * + * @param swr audio resample context + * @param output output AVFrame + * @param input input AVFrame + * @return 0 on success, AVERROR on failure or nonmatching + * configuration. + */ +int swr_convert_frame(SwrContext *swr, + AVFrame *output, const AVFrame *input); + +/** + * Configure or reconfigure the SwrContext using the information + * provided by the AVFrames. + * + * The original resampling context is reset even on failure. + * The function calls swr_close() internally if the context is open. + * + * @see swr_close(); + * + * @param swr audio resample context + * @param output output AVFrame + * @param input input AVFrame + * @return 0 on success, AVERROR on failure. 
+ */ +int swr_config_frame(SwrContext *swr, const AVFrame *out, const AVFrame *in); + +/** + * @} + * @} + */ + +#endif /* SWRESAMPLE_SWRESAMPLE_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libswresample/version.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libswresample/version.h new file mode 100644 index 0000000..fb76f56 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libswresample/version.h @@ -0,0 +1,45 @@ +/* + * Version macros. + * + * This file is part of libswresample + * + * libswresample is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * libswresample is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with libswresample; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWRESAMPLE_VERSION_H +#define SWRESAMPLE_VERSION_H + +/** + * @file + * Libswresample version macros + */ + +#include "libavutil/avutil.h" + +#define LIBSWRESAMPLE_VERSION_MAJOR 2 +#define LIBSWRESAMPLE_VERSION_MINOR 7 +#define LIBSWRESAMPLE_VERSION_MICRO 100 + +#define LIBSWRESAMPLE_VERSION_INT AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \ + LIBSWRESAMPLE_VERSION_MINOR, \ + LIBSWRESAMPLE_VERSION_MICRO) +#define LIBSWRESAMPLE_VERSION AV_VERSION(LIBSWRESAMPLE_VERSION_MAJOR, \ + LIBSWRESAMPLE_VERSION_MINOR, \ + LIBSWRESAMPLE_VERSION_MICRO) +#define LIBSWRESAMPLE_BUILD LIBSWRESAMPLE_VERSION_INT + +#define LIBSWRESAMPLE_IDENT "SwR" AV_STRINGIFY(LIBSWRESAMPLE_VERSION) + +#endif /* SWRESAMPLE_VERSION_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libswscale/swscale.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libswscale/swscale.h new file mode 100644 index 0000000..7713f51 --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libswscale/swscale.h @@ -0,0 +1,336 @@ +/* + * Copyright (C) 2001-2011 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWSCALE_SWSCALE_H +#define SWSCALE_SWSCALE_H + +/** + * @file + * @ingroup libsws + * external API header + */ + +#include + +#include "libavutil/avutil.h" +#include "libavutil/log.h" +#include "libavutil/pixfmt.h" +#include "version.h" + +/** + * @defgroup libsws libswscale + * Color conversion and scaling library. + * + * @{ + * + * Return the LIBSWSCALE_VERSION_INT constant. + */ +unsigned swscale_version(void); + +/** + * Return the libswscale build-time configuration. + */ +const char *swscale_configuration(void); + +/** + * Return the libswscale license. + */ +const char *swscale_license(void); + +/* values for the flags, the stuff on the command line is different */ +#define SWS_FAST_BILINEAR 1 +#define SWS_BILINEAR 2 +#define SWS_BICUBIC 4 +#define SWS_X 8 +#define SWS_POINT 0x10 +#define SWS_AREA 0x20 +#define SWS_BICUBLIN 0x40 +#define SWS_GAUSS 0x80 +#define SWS_SINC 0x100 +#define SWS_LANCZOS 0x200 +#define SWS_SPLINE 0x400 + +#define SWS_SRC_V_CHR_DROP_MASK 0x30000 +#define SWS_SRC_V_CHR_DROP_SHIFT 16 + +#define SWS_PARAM_DEFAULT 123456 + +#define SWS_PRINT_INFO 0x1000 + +//the following 3 flags are not completely implemented +//internal chrominance subsampling info +#define SWS_FULL_CHR_H_INT 0x2000 +//input subsampling info +#define SWS_FULL_CHR_H_INP 0x4000 +#define SWS_DIRECT_BGR 0x8000 +#define SWS_ACCURATE_RND 0x40000 +#define SWS_BITEXACT 0x80000 +#define SWS_ERROR_DIFFUSION 0x800000 + +#define SWS_MAX_REDUCE_CUTOFF 0.002 + +#define SWS_CS_ITU709 1 +#define SWS_CS_FCC 4 +#define SWS_CS_ITU601 5 +#define SWS_CS_ITU624 5 +#define SWS_CS_SMPTE170M 5 +#define SWS_CS_SMPTE240M 7 +#define SWS_CS_DEFAULT 5 +#define SWS_CS_BT2020 9 + +/** + * Return a pointer to yuv<->rgb coefficients for the given colorspace + * suitable 
for sws_setColorspaceDetails(). + * + * @param colorspace One of the SWS_CS_* macros. If invalid, + * SWS_CS_DEFAULT is used. + */ +const int *sws_getCoefficients(int colorspace); + +// when used for filters they must have an odd number of elements +// coeffs cannot be shared between vectors +typedef struct SwsVector { + double *coeff; ///< pointer to the list of coefficients + int length; ///< number of coefficients in the vector +} SwsVector; + +// vectors can be shared +typedef struct SwsFilter { + SwsVector *lumH; + SwsVector *lumV; + SwsVector *chrH; + SwsVector *chrV; +} SwsFilter; + +struct SwsContext; + +/** + * Return a positive value if pix_fmt is a supported input format, 0 + * otherwise. + */ +int sws_isSupportedInput(enum AVPixelFormat pix_fmt); + +/** + * Return a positive value if pix_fmt is a supported output format, 0 + * otherwise. + */ +int sws_isSupportedOutput(enum AVPixelFormat pix_fmt); + +/** + * @param[in] pix_fmt the pixel format + * @return a positive value if an endianness conversion for pix_fmt is + * supported, 0 otherwise. + */ +int sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt); + +/** + * Allocate an empty SwsContext. This must be filled and passed to + * sws_init_context(). For filling see AVOptions, options.c and + * sws_setColorspaceDetails(). + */ +struct SwsContext *sws_alloc_context(void); + +/** + * Initialize the swscaler context sws_context. + * + * @return zero or positive value on success, a negative value on + * error + */ +av_warn_unused_result +int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter); + +/** + * Free the swscaler context swsContext. + * If swsContext is NULL, then does nothing. + */ +void sws_freeContext(struct SwsContext *swsContext); + +/** + * Allocate and return an SwsContext. You need it to perform + * scaling/conversion operations using sws_scale(). 
+ * + * @param srcW the width of the source image + * @param srcH the height of the source image + * @param srcFormat the source image format + * @param dstW the width of the destination image + * @param dstH the height of the destination image + * @param dstFormat the destination image format + * @param flags specify which algorithm and options to use for rescaling + * @param param extra parameters to tune the used scaler + * For SWS_BICUBIC param[0] and [1] tune the shape of the basis + * function, param[0] tunes f(1) and param[1] f´(1) + * For SWS_GAUSS param[0] tunes the exponent and thus cutoff + * frequency + * For SWS_LANCZOS param[0] tunes the width of the window function + * @return a pointer to an allocated context, or NULL in case of error + * @note this function is to be removed after a saner alternative is + * written + */ +struct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat, + int dstW, int dstH, enum AVPixelFormat dstFormat, + int flags, SwsFilter *srcFilter, + SwsFilter *dstFilter, const double *param); + +/** + * Scale the image slice in srcSlice and put the resulting scaled + * slice in the image in dst. A slice is a sequence of consecutive + * rows in an image. + * + * Slices have to be provided in sequential order, either in + * top-bottom or bottom-top order. If slices are provided in + * non-sequential order the behavior of the function is undefined. 
+ * + * @param c the scaling context previously created with + * sws_getContext() + * @param srcSlice the array containing the pointers to the planes of + * the source slice + * @param srcStride the array containing the strides for each plane of + * the source image + * @param srcSliceY the position in the source image of the slice to + * process, that is the number (counted starting from + * zero) in the image of the first row of the slice + * @param srcSliceH the height of the source slice, that is the number + * of rows in the slice + * @param dst the array containing the pointers to the planes of + * the destination image + * @param dstStride the array containing the strides for each plane of + * the destination image + * @return the height of the output slice + */ +int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], + const int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *const dst[], const int dstStride[]); + +/** + * @param dstRange flag indicating the while-black range of the output (1=jpeg / 0=mpeg) + * @param srcRange flag indicating the while-black range of the input (1=jpeg / 0=mpeg) + * @param table the yuv2rgb coefficients describing the output yuv space, normally ff_yuv2rgb_coeffs[x] + * @param inv_table the yuv2rgb coefficients describing the input yuv space, normally ff_yuv2rgb_coeffs[x] + * @param brightness 16.16 fixed point brightness correction + * @param contrast 16.16 fixed point contrast correction + * @param saturation 16.16 fixed point saturation correction + * @return -1 if not supported + */ +int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4], + int srcRange, const int table[4], int dstRange, + int brightness, int contrast, int saturation); + +/** + * @return -1 if not supported + */ +int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, + int *srcRange, int **table, int *dstRange, + int *brightness, int *contrast, int *saturation); + +/** + * Allocate and return an 
uninitialized vector with length coefficients. + */ +SwsVector *sws_allocVec(int length); + +/** + * Return a normalized Gaussian curve used to filter stuff + * quality = 3 is high quality, lower is lower quality. + */ +SwsVector *sws_getGaussianVec(double variance, double quality); + +/** + * Scale all the coefficients of a by the scalar value. + */ +void sws_scaleVec(SwsVector *a, double scalar); + +/** + * Scale all the coefficients of a so that their sum equals height. + */ +void sws_normalizeVec(SwsVector *a, double height); + +#if FF_API_SWS_VECTOR +attribute_deprecated SwsVector *sws_getConstVec(double c, int length); +attribute_deprecated SwsVector *sws_getIdentityVec(void); +attribute_deprecated void sws_convVec(SwsVector *a, SwsVector *b); +attribute_deprecated void sws_addVec(SwsVector *a, SwsVector *b); +attribute_deprecated void sws_subVec(SwsVector *a, SwsVector *b); +attribute_deprecated void sws_shiftVec(SwsVector *a, int shift); +attribute_deprecated SwsVector *sws_cloneVec(SwsVector *a); +attribute_deprecated void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level); +#endif + +void sws_freeVec(SwsVector *a); + +SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur, + float lumaSharpen, float chromaSharpen, + float chromaHShift, float chromaVShift, + int verbose); +void sws_freeFilter(SwsFilter *filter); + +/** + * Check if context can be reused, otherwise reallocate a new one. + * + * If context is NULL, just calls sws_getContext() to get a new + * context. Otherwise, checks if the parameters are the ones already + * saved in context. If that is the case, returns the current + * context. Otherwise, frees context and gets a new context with + * the new parameters. + * + * Be warned that srcFilter and dstFilter are not checked, they + * are assumed to remain the same. 
+ */ +struct SwsContext *sws_getCachedContext(struct SwsContext *context, + int srcW, int srcH, enum AVPixelFormat srcFormat, + int dstW, int dstH, enum AVPixelFormat dstFormat, + int flags, SwsFilter *srcFilter, + SwsFilter *dstFilter, const double *param); + +/** + * Convert an 8-bit paletted frame into a frame with a color depth of 32 bits. + * + * The output frame will have the same packed format as the palette. + * + * @param src source frame buffer + * @param dst destination frame buffer + * @param num_pixels number of pixels to convert + * @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src + */ +void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette); + +/** + * Convert an 8-bit paletted frame into a frame with a color depth of 24 bits. + * + * With the palette format "ABCD", the destination frame ends up with the format "ABC". + * + * @param src source frame buffer + * @param dst destination frame buffer + * @param num_pixels number of pixels to convert + * @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src + */ +void sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette); + +/** + * Get the AVClass for swsContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *sws_get_class(void); + +/** + * @} + */ + +#endif /* SWSCALE_SWSCALE_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libswscale/version.h b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libswscale/version.h new file mode 100644 index 0000000..c1090ca --- /dev/null +++ b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Headers/libswscale/version.h @@ -0,0 +1,53 @@ +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWSCALE_VERSION_H +#define SWSCALE_VERSION_H + +/** + * @file + * swscale version macros + */ + +#include "libavutil/version.h" + +#define LIBSWSCALE_VERSION_MAJOR 4 +#define LIBSWSCALE_VERSION_MINOR 6 +#define LIBSWSCALE_VERSION_MICRO 100 + +#define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \ + LIBSWSCALE_VERSION_MINOR, \ + LIBSWSCALE_VERSION_MICRO) +#define LIBSWSCALE_VERSION AV_VERSION(LIBSWSCALE_VERSION_MAJOR, \ + LIBSWSCALE_VERSION_MINOR, \ + LIBSWSCALE_VERSION_MICRO) +#define LIBSWSCALE_BUILD LIBSWSCALE_VERSION_INT + +#define LIBSWSCALE_IDENT "SwS" AV_STRINGIFY(LIBSWSCALE_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. 
+ */ + +#ifndef FF_API_SWS_VECTOR +#define FF_API_SWS_VECTOR (LIBSWSCALE_VERSION_MAJOR < 6) +#endif + +#endif /* SWSCALE_VERSION_H */ diff --git a/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Info.plist b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Info.plist new file mode 100644 index 0000000..5c31bd2 Binary files /dev/null and b/src/ios/FFmpeg.xcframework/ios-arm64_armv7/FFmpeg.framework/Info.plist differ diff --git a/src/ios/Listener.h b/src/ios/Listener.h new file mode 100644 index 0000000..66c1b2b --- /dev/null +++ b/src/ios/Listener.h @@ -0,0 +1,16 @@ +// +// Listener.h +// shuto-cne +// +// Created by 范大德 on 2022/3/17. +// + +#ifndef Listener_h +#define Listener_h + +#endif /* Listener_h */ +@interface Listener +{} +- (instancetype)init; +- (void)on: (NSDictionary*)extra; +@end diff --git a/src/ios/SoundTouch.xcframework/Info.plist b/src/ios/SoundTouch.xcframework/Info.plist new file mode 100644 index 0000000..bb9e175 --- /dev/null +++ b/src/ios/SoundTouch.xcframework/Info.plist @@ -0,0 +1,26 @@ + + + + + AvailableLibraries + + + LibraryIdentifier + ios-arm64_armv7 + LibraryPath + SoundTouch.framework + SupportedArchitectures + + arm64 + armv7 + + SupportedPlatform + ios + + + CFBundlePackageType + XFWK + XCFrameworkFormatVersion + 1.0 + + diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/AAFilter.h b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/AAFilter.h new file mode 100644 index 0000000..81d836b --- /dev/null +++ b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/AAFilter.h @@ -0,0 +1,93 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// Sampled sound tempo changer/time stretch algorithm. Changes the sound tempo +/// while maintaining the original pitch by using a time domain WSOLA-like method +/// with several performance-increasing tweaks. 
+/// +/// Anti-alias filter is used to prevent folding of high frequencies when +/// transposing the sample rate with interpolation. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef AAFilter_H +#define AAFilter_H + +#include "STTypes.h" +#include "FIFOSampleBuffer.h" + +namespace soundtouch +{ + +class AAFilter +{ +protected: + class FIRFilter *pFIR; + + /// Low-pass filter cut-off frequency, negative = invalid + double cutoffFreq; + + /// num of filter taps + uint length; + + /// Calculate the FIR coefficients realizing the given cutoff-frequency + void calculateCoeffs(); +public: + AAFilter(uint length); + + ~AAFilter(); + + /// Sets new anti-alias filter cut-off edge frequency, scaled to sampling + /// frequency (nyquist frequency = 0.5). The filter will cut off the + /// frequencies than that. 
+ void setCutoffFreq(double newCutoffFreq); + + /// Sets number of FIR filter taps, i.e. ~filter complexity + void setLength(uint newLength); + + uint getLength() const; + + /// Applies the filter to the given sequence of samples. + /// Note : The amount of outputted samples is by value of 'filter length' + /// smaller than the amount of input samples. + uint evaluate(SAMPLETYPE *dest, + const SAMPLETYPE *src, + uint numSamples, + uint numChannels) const; + + /// Applies the filter to the given src & dest pipes, so that processed amount of + /// samples get removed from src, and produced amount added to dest + /// Note : The amount of outputted samples is by value of 'filter length' + /// smaller than the amount of input samples. + uint evaluate(FIFOSampleBuffer &dest, + FIFOSampleBuffer &src) const; + +}; + +} + +#endif diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/BPMDetect.h b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/BPMDetect.h new file mode 100644 index 0000000..f025fbd --- /dev/null +++ b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/BPMDetect.h @@ -0,0 +1,205 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// Beats-per-minute (BPM) detection routine. +/// +/// The beat detection algorithm works as follows: +/// - Use function 'inputSamples' to input a chunks of samples to the class for +/// analysis. It's a good idea to enter a large sound file or stream in smallish +/// chunks of around few kilosamples in order not to extinguish too much RAM memory. +/// - Input sound data is decimated to approx 500 Hz to reduce calculation burden, +/// which is basically ok as low (bass) frequencies mostly determine the beat rate. +/// Simple averaging is used for anti-alias filtering because the resulting signal +/// quality isn't of that high importance. +/// - Decimated sound data is enveloped, i.e. 
the amplitude shape is detected by +/// taking absolute value that's smoothed by sliding average. Signal levels that +/// are below a couple of times the general RMS amplitude level are cut away to +/// leave only notable peaks there. +/// - Repeating sound patterns (e.g. beats) are detected by calculating short-term +/// autocorrelation function of the enveloped signal. +/// - After whole sound data file has been analyzed as above, the bpm level is +/// detected by function 'getBpm' that finds the highest peak of the autocorrelation +/// function, calculates it's precise location and converts this reading to bpm's. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef _BPMDetect_H_ +#define _BPMDetect_H_ + +#include +#include "STTypes.h" +#include "FIFOSampleBuffer.h" + +namespace soundtouch +{ + + /// Minimum allowed BPM rate. 
Used to restrict accepted result above a reasonable limit. + #define MIN_BPM 45 + + /// Maximum allowed BPM rate range. Used for calculating algorithm parametrs + #define MAX_BPM_RANGE 200 + + /// Maximum allowed BPM rate range. Used to restrict accepted result below a reasonable limit. + #define MAX_BPM_VALID 190 + +//////////////////////////////////////////////////////////////////////////////// + + typedef struct + { + float pos; + float strength; + } BEAT; + + + class IIR2_filter + { + double coeffs[5]; + double prev[5]; + + public: + IIR2_filter(const double *lpf_coeffs); + float update(float x); + }; + + + /// Class for calculating BPM rate for audio data. + class BPMDetect + { + protected: + /// Auto-correlation accumulator bins. + float *xcorr; + + /// Sample average counter. + int decimateCount; + + /// Sample average accumulator for FIFO-like decimation. + soundtouch::LONG_SAMPLETYPE decimateSum; + + /// Decimate sound by this coefficient to reach approx. 500 Hz. + int decimateBy; + + /// Auto-correlation window length + int windowLen; + + /// Number of channels (1 = mono, 2 = stereo) + int channels; + + /// sample rate + int sampleRate; + + /// Beginning of auto-correlation window: Autocorrelation isn't being updated for + /// the first these many correlation bins. + int windowStart; + + /// window functions for data preconditioning + float *hamw; + float *hamw2; + + // beat detection variables + int pos; + int peakPos; + int beatcorr_ringbuffpos; + int init_scaler; + float peakVal; + float *beatcorr_ringbuff; + + /// FIFO-buffer for decimated processing samples. + soundtouch::FIFOSampleBuffer *buffer; + + /// Collection of detected beat positions + //BeatCollection beats; + std::vector beats; + + // 2nd order low-pass-filter + IIR2_filter beat_lpf; + + /// Updates auto-correlation function for given number of decimated samples that + /// are read from the internal 'buffer' pipe (samples aren't removed from the pipe + /// though). 
+ void updateXCorr(int process_samples /// How many samples are processed. + ); + + /// Decimates samples to approx. 500 Hz. + /// + /// \return Number of output samples. + int decimate(soundtouch::SAMPLETYPE *dest, ///< Destination buffer + const soundtouch::SAMPLETYPE *src, ///< Source sample buffer + int numsamples ///< Number of source samples. + ); + + /// Calculates amplitude envelope for the buffer of samples. + /// Result is output to 'samples'. + void calcEnvelope(soundtouch::SAMPLETYPE *samples, ///< Pointer to input/output data buffer + int numsamples ///< Number of samples in buffer + ); + + /// remove constant bias from xcorr data + void removeBias(); + + // Detect individual beat positions + void updateBeatPos(int process_samples); + + + public: + /// Constructor. + BPMDetect(int numChannels, ///< Number of channels in sample data. + int sampleRate ///< Sample rate in Hz. + ); + + /// Destructor. + virtual ~BPMDetect(); + + /// Inputs a block of samples for analyzing: Envelopes the samples and then + /// updates the autocorrelation estimation. When whole song data has been input + /// in smaller blocks using this function, read the resulting bpm with 'getBpm' + /// function. + /// + /// Notice that data in 'samples' array can be disrupted in processing. + void inputSamples(const soundtouch::SAMPLETYPE *samples, ///< Pointer to input/working data buffer + int numSamples ///< Number of samples in buffer + ); + + /// Analyzes the results and returns the BPM rate. Use this function to read result + /// after whole song data has been input to the class by consecutive calls of + /// 'inputSamples' function. + /// + /// \return Beats-per-minute rate, or zero if detection failed. + float getBpm(); + + /// Get beat position arrays. Note: The array includes also really low beat detection values + /// in absence of clear strong beats. Consumer may wish to filter low values away. 
+ /// - "pos" receive array of beat positions + /// - "values" receive array of beat detection strengths + /// - max_num indicates max.size of "pos" and "values" array. + /// + /// You can query a suitable array sized by calling this with NULL in "pos" & "values". + /// + /// \return number of beats in the arrays. + int getBeats(float *pos, float *strength, int max_num); + }; +} +#endif // _BPMDetect_H_ diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/FIFOSampleBuffer.h b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/FIFOSampleBuffer.h new file mode 100644 index 0000000..537a7b8 --- /dev/null +++ b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/FIFOSampleBuffer.h @@ -0,0 +1,180 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// A buffer class for temporarily storaging sound samples, operates as a +/// first-in-first-out pipe. +/// +/// Samples are added to the end of the sample buffer with the 'putSamples' +/// function, and are received from the beginning of the buffer by calling +/// the 'receiveSamples' function. The class automatically removes the +/// output samples from the buffer as well as grows the storage size +/// whenever necessary. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef FIFOSampleBuffer_H +#define FIFOSampleBuffer_H + +#include "FIFOSamplePipe.h" + +namespace soundtouch +{ + +/// Sample buffer working in FIFO (first-in-first-out) principle. The class takes +/// care of storage size adjustment and data moving during input/output operations. +/// +/// Notice that in case of stereo audio, one sample is considered to consist of +/// both channel data. +class FIFOSampleBuffer : public FIFOSamplePipe +{ +private: + /// Sample buffer. + SAMPLETYPE *buffer; + + // Raw unaligned buffer memory. 'buffer' is made aligned by pointing it to first + // 16-byte aligned location of this buffer + SAMPLETYPE *bufferUnaligned; + + /// Sample buffer size in bytes + uint sizeInBytes; + + /// How many samples are currently in buffer. + uint samplesInBuffer; + + /// Channels, 1=mono, 2=stereo. + uint channels; + + /// Current position pointer to the buffer. This pointer is increased when samples are + /// removed from the pipe so that it's necessary to actually rewind buffer (move data) + /// only new data when is put to the pipe. + uint bufferPos; + + /// Rewind the buffer by moving data from position pointed by 'bufferPos' to real + /// beginning of the buffer. + void rewind(); + + /// Ensures that the buffer has capacity for at least this many samples. + void ensureCapacity(uint capacityRequirement); + + /// Returns current capacity. 
+ uint getCapacity() const; + +public: + + /// Constructor + FIFOSampleBuffer(int numChannels = 2 ///< Number of channels, 1=mono, 2=stereo. + ///< Default is stereo. + ); + + /// destructor + ~FIFOSampleBuffer(); + + /// Returns a pointer to the beginning of the output samples. + /// This function is provided for accessing the output samples directly. + /// Please be careful for not to corrupt the book-keeping! + /// + /// When using this function to output samples, also remember to 'remove' the + /// output samples from the buffer by calling the + /// 'receiveSamples(numSamples)' function + virtual SAMPLETYPE *ptrBegin(); + + /// Returns a pointer to the end of the used part of the sample buffer (i.e. + /// where the new samples are to be inserted). This function may be used for + /// inserting new samples into the sample buffer directly. Please be careful + /// not corrupt the book-keeping! + /// + /// When using this function as means for inserting new samples, also remember + /// to increase the sample count afterwards, by calling the + /// 'putSamples(numSamples)' function. + SAMPLETYPE *ptrEnd( + uint slackCapacity ///< How much free capacity (in samples) there _at least_ + ///< should be so that the caller can successfully insert the + ///< desired samples to the buffer. If necessary, the function + ///< grows the buffer size to comply with this requirement. + ); + + /// Adds 'numSamples' pcs of samples from the 'samples' memory position to + /// the sample buffer. + virtual void putSamples(const SAMPLETYPE *samples, ///< Pointer to samples. + uint numSamples ///< Number of samples to insert. + ); + + /// Adjusts the book-keeping to increase number of samples in the buffer without + /// copying any actual samples. + /// + /// This function is used to update the number of samples in the sample buffer + /// when accessing the buffer directly with 'ptrEnd' function. Please be + /// careful though! 
+ virtual void putSamples(uint numSamples ///< Number of samples been inserted. + ); + + /// Output samples from beginning of the sample buffer. Copies requested samples to + /// output buffer and removes them from the sample buffer. If there are less than + /// 'numsample' samples in the buffer, returns all that available. + /// + /// \return Number of samples returned. + virtual uint receiveSamples(SAMPLETYPE *output, ///< Buffer where to copy output samples. + uint maxSamples ///< How many samples to receive at max. + ); + + /// Adjusts book-keeping so that given number of samples are removed from beginning of the + /// sample buffer without copying them anywhere. + /// + /// Used to reduce the number of samples in the buffer when accessing the sample buffer directly + /// with 'ptrBegin' function. + virtual uint receiveSamples(uint maxSamples ///< Remove this many samples from the beginning of pipe. + ); + + /// Returns number of samples currently available. + virtual uint numSamples() const; + + /// Sets number of channels, 1 = mono, 2 = stereo. + void setChannels(int numChannels); + + /// Get number of channels + int getChannels() + { + return channels; + } + + /// Returns nonzero if there aren't any samples available for outputting. + virtual int isEmpty() const; + + /// Clears all the samples. + virtual void clear(); + + /// allow trimming (downwards) amount of samples in pipeline. 
+ /// Returns adjusted amount of samples + uint adjustAmountOfSamples(uint numSamples); + + /// Add silence to end of buffer + void addSilent(uint nSamples); +}; + +} + +#endif diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/FIFOSamplePipe.h b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/FIFOSamplePipe.h new file mode 100644 index 0000000..3def42d --- /dev/null +++ b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/FIFOSamplePipe.h @@ -0,0 +1,230 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// 'FIFOSamplePipe' : An abstract base class for classes that manipulate sound +/// samples by operating like a first-in-first-out pipe: New samples are fed +/// into one end of the pipe with the 'putSamples' function, and the processed +/// samples are received from the other end with the 'receiveSamples' function. +/// +/// 'FIFOProcessor' : A base class for classes the do signal processing with +/// the samples while operating like a first-in-first-out pipe. When samples +/// are input with the 'putSamples' function, the class processes them +/// and moves the processed samples to the given 'output' pipe object, which +/// may be either another processing stage, or a fifo sample buffer object. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef FIFOSamplePipe_H +#define FIFOSamplePipe_H + +#include +#include +#include "STTypes.h" + +namespace soundtouch +{ + +/// Abstract base class for FIFO (first-in-first-out) sample processing classes. +class FIFOSamplePipe +{ +protected: + + bool verifyNumberOfChannels(int nChannels) const + { + if ((nChannels > 0) && (nChannels <= SOUNDTOUCH_MAX_CHANNELS)) + { + return true; + } + ST_THROW_RT_ERROR("Error: Illegal number of channels"); + return false; + } + +public: + // virtual default destructor + virtual ~FIFOSamplePipe() {} + + + /// Returns a pointer to the beginning of the output samples. + /// This function is provided for accessing the output samples directly. + /// Please be careful for not to corrupt the book-keeping! + /// + /// When using this function to output samples, also remember to 'remove' the + /// output samples from the buffer by calling the + /// 'receiveSamples(numSamples)' function + virtual SAMPLETYPE *ptrBegin() = 0; + + /// Adds 'numSamples' pcs of samples from the 'samples' memory position to + /// the sample buffer. + virtual void putSamples(const SAMPLETYPE *samples, ///< Pointer to samples. + uint numSamples ///< Number of samples to insert. + ) = 0; + + + // Moves samples from the 'other' pipe instance to this instance. + void moveSamples(FIFOSamplePipe &other ///< Other pipe instance where from the receive the data. 
+ ) + { + int oNumSamples = other.numSamples(); + + putSamples(other.ptrBegin(), oNumSamples); + other.receiveSamples(oNumSamples); + }; + + /// Output samples from beginning of the sample buffer. Copies requested samples to + /// output buffer and removes them from the sample buffer. If there are less than + /// 'numsample' samples in the buffer, returns all that available. + /// + /// \return Number of samples returned. + virtual uint receiveSamples(SAMPLETYPE *output, ///< Buffer where to copy output samples. + uint maxSamples ///< How many samples to receive at max. + ) = 0; + + /// Adjusts book-keeping so that given number of samples are removed from beginning of the + /// sample buffer without copying them anywhere. + /// + /// Used to reduce the number of samples in the buffer when accessing the sample buffer directly + /// with 'ptrBegin' function. + virtual uint receiveSamples(uint maxSamples ///< Remove this many samples from the beginning of pipe. + ) = 0; + + /// Returns number of samples currently available. + virtual uint numSamples() const = 0; + + // Returns nonzero if there aren't any samples available for outputting. + virtual int isEmpty() const = 0; + + /// Clears all the samples. + virtual void clear() = 0; + + /// allow trimming (downwards) amount of samples in pipeline. + /// Returns adjusted amount of samples + virtual uint adjustAmountOfSamples(uint numSamples) = 0; + +}; + + +/// Base-class for sound processing routines working in FIFO principle. With this base +/// class it's easy to implement sound processing stages that can be chained together, +/// so that samples that are fed into beginning of the pipe automatically go through +/// all the processing stages. +/// +/// When samples are input to this class, they're first processed and then put to +/// the FIFO pipe that's defined as output of this class. This output pipe can be +/// either other processing stage or a FIFO sample buffer. 
+class FIFOProcessor :public FIFOSamplePipe +{ +protected: + /// Internal pipe where processed samples are put. + FIFOSamplePipe *output; + + /// Sets output pipe. + void setOutPipe(FIFOSamplePipe *pOutput) + { + assert(output == NULL); + assert(pOutput != NULL); + output = pOutput; + } + + /// Constructor. Doesn't define output pipe; it has to be set be + /// 'setOutPipe' function. + FIFOProcessor() + { + output = NULL; + } + + /// Constructor. Configures output pipe. + FIFOProcessor(FIFOSamplePipe *pOutput ///< Output pipe. + ) + { + output = pOutput; + } + + /// Destructor. + virtual ~FIFOProcessor() + { + } + + /// Returns a pointer to the beginning of the output samples. + /// This function is provided for accessing the output samples directly. + /// Please be careful for not to corrupt the book-keeping! + /// + /// When using this function to output samples, also remember to 'remove' the + /// output samples from the buffer by calling the + /// 'receiveSamples(numSamples)' function + virtual SAMPLETYPE *ptrBegin() + { + return output->ptrBegin(); + } + +public: + + /// Output samples from beginning of the sample buffer. Copies requested samples to + /// output buffer and removes them from the sample buffer. If there are less than + /// 'numsample' samples in the buffer, returns all that available. + /// + /// \return Number of samples returned. + virtual uint receiveSamples(SAMPLETYPE *outBuffer, ///< Buffer where to copy output samples. + uint maxSamples ///< How many samples to receive at max. + ) + { + return output->receiveSamples(outBuffer, maxSamples); + } + + /// Adjusts book-keeping so that given number of samples are removed from beginning of the + /// sample buffer without copying them anywhere. + /// + /// Used to reduce the number of samples in the buffer when accessing the sample buffer directly + /// with 'ptrBegin' function. + virtual uint receiveSamples(uint maxSamples ///< Remove this many samples from the beginning of pipe. 
+ ) + { + return output->receiveSamples(maxSamples); + } + + /// Returns number of samples currently available. + virtual uint numSamples() const + { + return output->numSamples(); + } + + /// Returns nonzero if there aren't any samples available for outputting. + virtual int isEmpty() const + { + return output->isEmpty(); + } + + /// allow trimming (downwards) amount of samples in pipeline. + /// Returns adjusted amount of samples + virtual uint adjustAmountOfSamples(uint numSamples) + { + return output->adjustAmountOfSamples(numSamples); + } +}; + +} + +#endif diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/FIRFilter.h b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/FIRFilter.h new file mode 100644 index 0000000..39c2cc7 --- /dev/null +++ b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/FIRFilter.h @@ -0,0 +1,140 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// General FIR digital filter routines with MMX optimization. +/// +/// Note : MMX optimized functions reside in a separate, platform-specific file, +/// e.g. 'mmx_win.cpp' or 'mmx_gcc.cpp' +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef FIRFilter_H +#define FIRFilter_H + +#include +#include "STTypes.h" + +namespace soundtouch +{ + +class FIRFilter +{ +protected: + // Number of FIR filter taps + uint length; + // Number of FIR filter taps divided by 8 + uint lengthDiv8; + + // Result divider factor in 2^k format + uint resultDivFactor; + + // Result divider value. + SAMPLETYPE resultDivider; + + // Memory for filter coefficients + SAMPLETYPE *filterCoeffs; + SAMPLETYPE *filterCoeffsStereo; + + virtual uint evaluateFilterStereo(SAMPLETYPE *dest, + const SAMPLETYPE *src, + uint numSamples) const; + virtual uint evaluateFilterMono(SAMPLETYPE *dest, + const SAMPLETYPE *src, + uint numSamples) const; + virtual uint evaluateFilterMulti(SAMPLETYPE *dest, const SAMPLETYPE *src, uint numSamples, uint numChannels); + +public: + FIRFilter(); + virtual ~FIRFilter(); + + /// Operator 'new' is overloaded so that it automatically creates a suitable instance + /// depending on if we've a MMX-capable CPU available or not. + static void * operator new(size_t s); + + static FIRFilter *newInstance(); + + /// Applies the filter to the given sequence of samples. + /// Note : The amount of outputted samples is by value of 'filter_length' + /// smaller than the amount of input samples. + /// + /// \return Number of samples copied to 'dest'. 
+ uint evaluate(SAMPLETYPE *dest, + const SAMPLETYPE *src, + uint numSamples, + uint numChannels); + + uint getLength() const; + + virtual void setCoefficients(const SAMPLETYPE *coeffs, + uint newLength, + uint uResultDivFactor); +}; + + +// Optional subclasses that implement CPU-specific optimizations: + +#ifdef SOUNDTOUCH_ALLOW_MMX + +/// Class that implements MMX optimized functions exclusive for 16bit integer samples type. + class FIRFilterMMX : public FIRFilter + { + protected: + short *filterCoeffsUnalign; + short *filterCoeffsAlign; + + virtual uint evaluateFilterStereo(short *dest, const short *src, uint numSamples) const; + public: + FIRFilterMMX(); + ~FIRFilterMMX(); + + virtual void setCoefficients(const short *coeffs, uint newLength, uint uResultDivFactor); + }; + +#endif // SOUNDTOUCH_ALLOW_MMX + + +#ifdef SOUNDTOUCH_ALLOW_SSE + /// Class that implements SSE optimized functions exclusive for floating point samples type. + class FIRFilterSSE : public FIRFilter + { + protected: + float *filterCoeffsUnalign; + float *filterCoeffsAlign; + + virtual uint evaluateFilterStereo(float *dest, const float *src, uint numSamples) const; + public: + FIRFilterSSE(); + ~FIRFilterSSE(); + + virtual void setCoefficients(const float *coeffs, uint newLength, uint uResultDivFactor); + }; + +#endif // SOUNDTOUCH_ALLOW_SSE + +} + +#endif // FIRFilter_H diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/InterpolateCubic.h b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/InterpolateCubic.h new file mode 100644 index 0000000..9749bcd --- /dev/null +++ b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/InterpolateCubic.h @@ -0,0 +1,68 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// Cubic interpolation routine. 
+/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef _InterpolateCubic_H_ +#define _InterpolateCubic_H_ + +#include "RateTransposer.h" +#include "STTypes.h" + +namespace soundtouch +{ + +class InterpolateCubic : public TransposerBase +{ +protected: + virtual void resetRegisters(); + virtual int transposeMono(SAMPLETYPE *dest, + const SAMPLETYPE *src, + int &srcSamples); + virtual int transposeStereo(SAMPLETYPE *dest, + const SAMPLETYPE *src, + int &srcSamples); + virtual int transposeMulti(SAMPLETYPE *dest, + const SAMPLETYPE *src, + int &srcSamples); + + double fract; + +public: + InterpolateCubic(); + + int getLatency() const + { + return 1; + } +}; + +} + +#endif diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/InterpolateLinear.h 
b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/InterpolateLinear.h new file mode 100644 index 0000000..ffc11bd --- /dev/null +++ b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/InterpolateLinear.h @@ -0,0 +1,98 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// Linear interpolation routine. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef _InterpolateLinear_H_ +#define _InterpolateLinear_H_ + +#include "RateTransposer.h" +#include "STTypes.h" + +namespace soundtouch +{ + +/// Linear transposer class that uses integer arithmetic +class InterpolateLinearInteger : public TransposerBase +{ +protected: + int iFract; + int iRate; + + virtual void resetRegisters(); + + virtual int transposeMono(SAMPLETYPE *dest, + const SAMPLETYPE *src, + int &srcSamples); + virtual int transposeStereo(SAMPLETYPE *dest, + const SAMPLETYPE *src, + int &srcSamples); + virtual int transposeMulti(SAMPLETYPE *dest, const SAMPLETYPE *src, int &srcSamples); +public: + InterpolateLinearInteger(); + + /// Sets new target rate. Normal rate = 1.0, smaller values represent slower + /// rate, larger faster rates. 
+ virtual void setRate(double newRate); + + int getLatency() const + { + return 0; + } +}; + + +/// Linear transposer class that uses floating point arithmetic +class InterpolateLinearFloat : public TransposerBase +{ +protected: + double fract; + + virtual void resetRegisters(); + + virtual int transposeMono(SAMPLETYPE *dest, + const SAMPLETYPE *src, + int &srcSamples); + virtual int transposeStereo(SAMPLETYPE *dest, + const SAMPLETYPE *src, + int &srcSamples); + virtual int transposeMulti(SAMPLETYPE *dest, const SAMPLETYPE *src, int &srcSamples); + +public: + InterpolateLinearFloat(); + + int getLatency() const + { + return 0; + } +}; + +} + +#endif diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/InterpolateShannon.h b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/InterpolateShannon.h new file mode 100644 index 0000000..794e755 --- /dev/null +++ b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/InterpolateShannon.h @@ -0,0 +1,73 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// Sample interpolation routine using 8-tap band-limited Shannon interpolation +/// with kaiser window. +/// +/// Notice. This algorithm is remarkably much heavier than linear or cubic +/// interpolation, and not remarkably better than cubic algorithm. 
Thus mostly +/// for experimental purposes +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef _InterpolateShannon_H_ +#define _InterpolateShannon_H_ + +#include "RateTransposer.h" +#include "STTypes.h" + +namespace soundtouch +{ + +class InterpolateShannon : public TransposerBase +{ +protected: + void resetRegisters(); + int transposeMono(SAMPLETYPE *dest, + const SAMPLETYPE *src, + int &srcSamples); + int transposeStereo(SAMPLETYPE *dest, + const SAMPLETYPE *src, + int &srcSamples); + int transposeMulti(SAMPLETYPE *dest, + const SAMPLETYPE *src, + int &srcSamples); + + double fract; + +public: + InterpolateShannon(); + + int getLatency() const + { + return 3; + } +}; + +} + +#endif diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/PeakFinder.h 
b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/PeakFinder.h new file mode 100644 index 0000000..bf46daa --- /dev/null +++ b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/PeakFinder.h @@ -0,0 +1,90 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// The routine detects highest value on an array of values and calculates the +/// precise peak location as a mass-center of the 'hump' around the peak value. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef _PeakFinder_H_ +#define _PeakFinder_H_ + +namespace soundtouch +{ + +class PeakFinder +{ +protected: + /// Min, max allowed peak positions within the data vector + int minPos, maxPos; + + /// Calculates the mass center between given vector items. + double calcMassCenter(const float *data, ///< Data vector. 
+ int firstPos, ///< Index of first vector item belonging to the peak. + int lastPos ///< Index of last vector item belonging to the peak. + ) const; + + /// Finds the data vector index where the monotoniously decreasing signal crosses the + /// given level. + int findCrossingLevel(const float *data, ///< Data vector. + float level, ///< Goal crossing level. + int peakpos, ///< Peak position index within the data vector. + int direction /// Direction where to proceed from the peak: 1 = right, -1 = left. + ) const; + + // Finds real 'top' of a peak hump from neighnourhood of the given 'peakpos'. + int findTop(const float *data, int peakpos) const; + + + /// Finds the 'ground' level, i.e. smallest level between two neighbouring peaks, to right- + /// or left-hand side of the given peak position. + int findGround(const float *data, /// Data vector. + int peakpos, /// Peak position index within the data vector. + int direction /// Direction where to proceed from the peak: 1 = right, -1 = left. + ) const; + + /// get exact center of peak near given position by calculating local mass of center + double getPeakCenter(const float *data, int peakpos) const; + +public: + /// Constructor. + PeakFinder(); + + /// Detect exact peak position of the data vector by finding the largest peak 'hump' + /// and calculating the mass-center location of the peak hump. + /// + /// \return The location of the largest base harmonic peak hump. + double detectPeak(const float *data, /// Data vector to be analyzed. The data vector has + /// to be at least 'maxPos' items long. + int minPos, ///< Min allowed peak location within the vector data. + int maxPos ///< Max allowed peak location within the vector data. 
+ ); +}; + +} + +#endif // _PeakFinder_H_ diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/RateTransposer.h b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/RateTransposer.h new file mode 100644 index 0000000..45e79cf --- /dev/null +++ b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/RateTransposer.h @@ -0,0 +1,164 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// Sample rate transposer. Changes sample rate by using linear interpolation +/// together with anti-alias filtering (first order interpolation with anti- +/// alias filtering should be quite adequate for this application). +/// +/// Use either of the derived classes of 'RateTransposerInteger' or +/// 'RateTransposerFloat' for corresponding integer/floating point tranposing +/// algorithm implementation. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef RateTransposer_H +#define RateTransposer_H + +#include +#include "AAFilter.h" +#include "FIFOSamplePipe.h" +#include "FIFOSampleBuffer.h" + +#include "STTypes.h" + +namespace soundtouch +{ + +/// Abstract base class for transposer implementations (linear, advanced vs integer, float etc) +class TransposerBase +{ +public: + enum ALGORITHM { + LINEAR = 0, + CUBIC, + SHANNON + }; + +protected: + virtual void resetRegisters() = 0; + + virtual int transposeMono(SAMPLETYPE *dest, + const SAMPLETYPE *src, + int &srcSamples) = 0; + virtual int transposeStereo(SAMPLETYPE *dest, + const SAMPLETYPE *src, + int &srcSamples) = 0; + virtual int transposeMulti(SAMPLETYPE *dest, + const SAMPLETYPE *src, + int &srcSamples) = 0; + + static ALGORITHM algorithm; + +public: + double rate; + int numChannels; + + TransposerBase(); + virtual ~TransposerBase(); + + virtual int transpose(FIFOSampleBuffer &dest, FIFOSampleBuffer &src); + virtual void setRate(double newRate); + virtual void setChannels(int channels); + virtual int getLatency() const = 0; + + // static factory function + static TransposerBase *newInstance(); + + // static function to set interpolation algorithm + static void setAlgorithm(ALGORITHM a); +}; + + +/// A common linear samplerate transposer class. 
+/// +class RateTransposer : public FIFOProcessor +{ +protected: + /// Anti-alias filter object + AAFilter *pAAFilter; + TransposerBase *pTransposer; + + /// Buffer for collecting samples to feed the anti-alias filter between + /// two batches + FIFOSampleBuffer inputBuffer; + + /// Buffer for keeping samples between transposing & anti-alias filter + FIFOSampleBuffer midBuffer; + + /// Output sample buffer + FIFOSampleBuffer outputBuffer; + + bool bUseAAFilter; + + + /// Transposes sample rate by applying anti-alias filter to prevent folding. + /// Returns amount of samples returned in the "dest" buffer. + /// The maximum amount of samples that can be returned at a time is set by + /// the 'set_returnBuffer_size' function. + void processSamples(const SAMPLETYPE *src, + uint numSamples); + +public: + RateTransposer(); + virtual ~RateTransposer(); + + /// Returns the output buffer object + FIFOSamplePipe *getOutput() { return &outputBuffer; }; + + /// Return anti-alias filter object + AAFilter *getAAFilter(); + + /// Enables/disables the anti-alias filter. Zero to disable, nonzero to enable + void enableAAFilter(bool newMode); + + /// Returns nonzero if anti-alias filter is enabled. + bool isAAFilterEnabled() const; + + /// Sets new target rate. Normal rate = 1.0, smaller values represent slower + /// rate, larger faster rates. + virtual void setRate(double newRate); + + /// Sets the number of channels, 1 = mono, 2 = stereo + void setChannels(int channels); + + /// Adds 'numSamples' pcs of samples from the 'samples' memory position into + /// the input of the object. + void putSamples(const SAMPLETYPE *samples, uint numSamples); + + /// Clears all the samples in the object + void clear(); + + /// Returns nonzero if there aren't any samples available for outputting. 
+ int isEmpty() const; + + /// Return approximate initial input-output latency + int getLatency() const; +}; + +} + +#endif diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/STTypes.h b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/STTypes.h new file mode 100644 index 0000000..7d87f75 --- /dev/null +++ b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/STTypes.h @@ -0,0 +1,190 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// Common type definitions for SoundTouch audio processing library. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef STTypes_H +#define STTypes_H + +typedef unsigned int uint; +typedef unsigned long ulong; + +// Patch for MinGW: on Win64 long is 32-bit +#ifdef _WIN64 + typedef unsigned long long ulongptr; +#else + typedef ulong ulongptr; +#endif + + +// Helper macro for aligning pointer up to next 16-byte boundary +#define SOUNDTOUCH_ALIGN_POINTER_16(x) ( ( (ulongptr)(x) + 15 ) & ~(ulongptr)15 ) + + +#if (defined(__GNUC__) && !defined(ANDROID)) + // In GCC, include soundtouch_config.h made by config scritps. + // Skip this in Android compilation that uses GCC but without configure scripts. + #include "soundtouch_config.h" +#endif + + +namespace soundtouch +{ + /// Max allowed number of channels + #define SOUNDTOUCH_MAX_CHANNELS 16 + + /// Activate these undef's to overrule the possible sampletype + /// setting inherited from some other header file: + //#undef SOUNDTOUCH_INTEGER_SAMPLES + //#undef SOUNDTOUCH_FLOAT_SAMPLES + + /// If following flag is defined, always uses multichannel processing + /// routines also for mono and stero sound. This is for routine testing + /// purposes; output should be same with either routines, yet disabling + /// the dedicated mono/stereo processing routines will result in slower + /// runtime performance so recommendation is to keep this off. 
+ // #define USE_MULTICH_ALWAYS + + #if (defined(__SOFTFP__) && defined(ANDROID)) + // For Android compilation: Force use of Integer samples in case that + // compilation uses soft-floating point emulation - soft-fp is way too slow + #undef SOUNDTOUCH_FLOAT_SAMPLES + #define SOUNDTOUCH_INTEGER_SAMPLES 1 + #endif + + #if !(SOUNDTOUCH_INTEGER_SAMPLES || SOUNDTOUCH_FLOAT_SAMPLES) + + /// Choose either 32bit floating point or 16bit integer sampletype + /// by choosing one of the following defines, unless this selection + /// has already been done in some other file. + //// + /// Notes: + /// - In Windows environment, choose the sample format with the + /// following defines. + /// - In GNU environment, the floating point samples are used by + /// default, but integer samples can be chosen by giving the + /// following switch to the configure script: + /// ./configure --enable-integer-samples + /// However, if you still prefer to select the sample format here + /// also in GNU environment, then please #undef the INTEGER_SAMPLE + /// and FLOAT_SAMPLE defines first as in comments above. + //#define SOUNDTOUCH_INTEGER_SAMPLES 1 //< 16bit integer samples + #define SOUNDTOUCH_FLOAT_SAMPLES 1 //< 32bit float samples + + #endif + + #if (_M_IX86 || __i386__ || __x86_64__ || _M_X64) + /// Define this to allow X86-specific assembler/intrinsic optimizations. + /// Notice that library contains also usual C++ versions of each of these + /// these routines, so if you're having difficulties getting the optimized + /// routines compiled for whatever reason, you may disable these optimizations + /// to make the library compile. 
+ + #define SOUNDTOUCH_ALLOW_X86_OPTIMIZATIONS 1 + + /// In GNU environment, allow the user to override this setting by + /// giving the following switch to the configure script: + /// ./configure --disable-x86-optimizations + /// ./configure --enable-x86-optimizations=no + #ifdef SOUNDTOUCH_DISABLE_X86_OPTIMIZATIONS + #undef SOUNDTOUCH_ALLOW_X86_OPTIMIZATIONS + #endif + #else + /// Always disable optimizations when not using a x86 systems. + #undef SOUNDTOUCH_ALLOW_X86_OPTIMIZATIONS + + #endif + + // If defined, allows the SIMD-optimized routines to take minor shortcuts + // for improved performance. Undefine to require faithfully similar SIMD + // calculations as in normal C implementation. + #define SOUNDTOUCH_ALLOW_NONEXACT_SIMD_OPTIMIZATION 1 + + + #ifdef SOUNDTOUCH_INTEGER_SAMPLES + // 16bit integer sample type + typedef short SAMPLETYPE; + // data type for sample accumulation: Use 32bit integer to prevent overflows + typedef long LONG_SAMPLETYPE; + + #ifdef SOUNDTOUCH_FLOAT_SAMPLES + // check that only one sample type is defined + #error "conflicting sample types defined" + #endif // SOUNDTOUCH_FLOAT_SAMPLES + + #ifdef SOUNDTOUCH_ALLOW_X86_OPTIMIZATIONS + // Allow MMX optimizations (not available in X64 mode) + #if (!_M_X64) + #define SOUNDTOUCH_ALLOW_MMX 1 + #endif + #endif + + #else + + // floating point samples + typedef float SAMPLETYPE; + // data type for sample accumulation: Use float also here to enable + // efficient autovectorization + typedef float LONG_SAMPLETYPE; + + #ifdef SOUNDTOUCH_ALLOW_X86_OPTIMIZATIONS + // Allow SSE optimizations + #define SOUNDTOUCH_ALLOW_SSE 1 + #endif + + #endif // SOUNDTOUCH_INTEGER_SAMPLES + + #if ((SOUNDTOUCH_ALLOW_SSE) || (__SSE__) || (SOUNDTOUCH_USE_NEON)) + #if SOUNDTOUCH_ALLOW_NONEXACT_SIMD_OPTIMIZATION + #define ST_SIMD_AVOID_UNALIGNED + #endif + #endif + +}; + +// define ST_NO_EXCEPTION_HANDLING switch to disable throwing std exceptions: +// #define ST_NO_EXCEPTION_HANDLING 1 +#ifdef ST_NO_EXCEPTION_HANDLING + 
// Exceptions disabled. Throw asserts instead if enabled. + #include + #define ST_THROW_RT_ERROR(x) {assert((const char *)x);} +#else + // use c++ standard exceptions + #include + #include + #define ST_THROW_RT_ERROR(x) {throw std::runtime_error(x);} +#endif + +// When this #define is active, eliminates a clicking sound when the "rate" or "pitch" +// parameter setting crosses from value <1 to >=1 or vice versa during processing. +// Default is off as such crossover is untypical case and involves a slight sound +// quality compromise. +//#define SOUNDTOUCH_PREVENT_CLICK_AT_RATE_CROSSOVER 1 + +#endif diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/SoundTouch.h b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/SoundTouch.h new file mode 100644 index 0000000..b68aba4 --- /dev/null +++ b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/SoundTouch.h @@ -0,0 +1,348 @@ +////////////////////////////////////////////////////////////////////////////// +/// +/// SoundTouch - main class for tempo/pitch/rate adjusting routines. +/// +/// Notes: +/// - Initialize the SoundTouch object instance by setting up the sound stream +/// parameters with functions 'setSampleRate' and 'setChannels', then set +/// desired tempo/pitch/rate settings with the corresponding functions. +/// +/// - The SoundTouch class behaves like a first-in-first-out pipeline: The +/// samples that are to be processed are fed into one of the pipe by calling +/// function 'putSamples', while the ready processed samples can be read +/// from the other end of the pipeline with function 'receiveSamples'. +/// +/// - The SoundTouch processing classes require certain sized 'batches' of +/// samples in order to process the sound. 
For this reason the classes buffer +/// incoming samples until there are enough of samples available for +/// processing, then they carry out the processing step and consequently +/// make the processed samples available for outputting. +/// +/// - For the above reason, the processing routines introduce a certain +/// 'latency' between the input and output, so that the samples input to +/// SoundTouch may not be immediately available in the output, and neither +/// the amount of outputtable samples may not immediately be in direct +/// relationship with the amount of previously input samples. +/// +/// - The tempo/pitch/rate control parameters can be altered during processing. +/// Please notice though that they aren't currently protected by semaphores, +/// so in multi-thread application external semaphore protection may be +/// required. +/// +/// - This class utilizes classes 'TDStretch' for tempo change (without modifying +/// pitch) and 'RateTransposer' for changing the playback rate (that is, both +/// tempo and pitch in the same ratio) of the sound. The third available control +/// 'pitch' (change pitch but maintain tempo) is produced by a combination of +/// combining the two other controls. +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef SoundTouch_H +#define SoundTouch_H + +#include "FIFOSamplePipe.h" +#include "STTypes.h" + +namespace soundtouch +{ + +/// Soundtouch library version string +#define SOUNDTOUCH_VERSION "2.2" + +/// SoundTouch library version id +#define SOUNDTOUCH_VERSION_ID (20200) + +// +// Available setting IDs for the 'setSetting' & 'get_setting' functions: + +/// Enable/disable anti-alias filter in pitch transposer (0 = disable) +#define SETTING_USE_AA_FILTER 0 + +/// Pitch transposer anti-alias filter length (8 .. 128 taps, default = 32) +#define SETTING_AA_FILTER_LENGTH 1 + +/// Enable/disable quick seeking algorithm in tempo changer routine +/// (enabling quick seeking lowers CPU utilization but causes a minor sound +/// quality compromising) +#define SETTING_USE_QUICKSEEK 2 + +/// Time-stretch algorithm single processing sequence length in milliseconds. This determines +/// to how long sequences the original sound is chopped in the time-stretch algorithm. +/// See "STTypes.h" or README for more information. +#define SETTING_SEQUENCE_MS 3 + +/// Time-stretch algorithm seeking window length in milliseconds for algorithm that finds the +/// best possible overlapping location. This determines from how wide window the algorithm +/// may look for an optimal joining location when mixing the sound sequences back together. +/// See "STTypes.h" or README for more information. 
+#define SETTING_SEEKWINDOW_MS 4 + +/// Time-stretch algorithm overlap length in milliseconds. When the chopped sound sequences +/// are mixed back together, to form a continuous sound stream, this parameter defines over +/// how long period the two consecutive sequences are let to overlap each other. +/// See "STTypes.h" or README for more information. +#define SETTING_OVERLAP_MS 5 + + +/// Call "getSetting" with this ID to query processing sequence size in samples. +/// This value gives approximate value of how many input samples you'll need to +/// feed into SoundTouch after initial buffering to get out a new batch of +/// output samples. +/// +/// This value does not include initial buffering at beginning of a new processing +/// stream, use SETTING_INITIAL_LATENCY to get the initial buffering size. +/// +/// Notices: +/// - This is read-only parameter, i.e. setSetting ignores this parameter +/// - This parameter value is not constant but change depending on +/// tempo/pitch/rate/samplerate settings. +#define SETTING_NOMINAL_INPUT_SEQUENCE 6 + + +/// Call "getSetting" with this ID to query nominal average processing output +/// size in samples. This value tells approcimate value how many output samples +/// SoundTouch outputs once it does DSP processing run for a batch of input samples. +/// +/// Notices: +/// - This is read-only parameter, i.e. setSetting ignores this parameter +/// - This parameter value is not constant but change depending on +/// tempo/pitch/rate/samplerate settings. +#define SETTING_NOMINAL_OUTPUT_SEQUENCE 7 + + +/// Call "getSetting" with this ID to query initial processing latency, i.e. +/// approx. how many samples you'll need to enter to SoundTouch pipeline before +/// you can expect to get first batch of ready output samples out. +/// +/// After the first output batch, you can then expect to get approx. 
+/// SETTING_NOMINAL_OUTPUT_SEQUENCE ready samples out for every +/// SETTING_NOMINAL_INPUT_SEQUENCE samples that you enter into SoundTouch. +/// +/// Example: +/// processing with parameter -tempo=5 +/// => initial latency = 5509 samples +/// input sequence = 4167 samples +/// output sequence = 3969 samples +/// +/// Accordingly, you can expect to feed in approx. 5509 samples at beginning of +/// the stream, and then you'll get out the first 3969 samples. After that, for +/// every approx. 4167 samples that you'll put in, you'll receive again approx. +/// 3969 samples out. +/// +/// This also means that average latency during stream processing is +/// INITIAL_LATENCY-OUTPUT_SEQUENCE/2, in the above example case 5509-3969/2 +/// = 3524 samples +/// +/// Notices: +/// - This is read-only parameter, i.e. setSetting ignores this parameter +/// - This parameter value is not constant but change depending on +/// tempo/pitch/rate/samplerate settings. +#define SETTING_INITIAL_LATENCY 8 + + +class SoundTouch : public FIFOProcessor +{ +private: + /// Rate transposer class instance + class RateTransposer *pRateTransposer; + + /// Time-stretch class instance + class TDStretch *pTDStretch; + + /// Virtual pitch parameter. Effective rate & tempo are calculated from these parameters. + double virtualRate; + + /// Virtual pitch parameter. Effective rate & tempo are calculated from these parameters. + double virtualTempo; + + /// Virtual pitch parameter. Effective rate & tempo are calculated from these parameters. + double virtualPitch; + + /// Flag: Has sample rate been set? + bool bSrateSet; + + /// Accumulator for how many samples in total will be expected as output vs. samples put in, + /// considering current processing settings. 
+ double samplesExpectedOut; + + /// Accumulator for how many samples in total have been read out from the processing so far + long samplesOutput; + + /// Calculates effective rate & tempo valuescfrom 'virtualRate', 'virtualTempo' and + /// 'virtualPitch' parameters. + void calcEffectiveRateAndTempo(); + +protected : + /// Number of channels + uint channels; + + /// Effective 'rate' value calculated from 'virtualRate', 'virtualTempo' and 'virtualPitch' + double rate; + + /// Effective 'tempo' value calculated from 'virtualRate', 'virtualTempo' and 'virtualPitch' + double tempo; + +public: + SoundTouch(); + virtual ~SoundTouch(); + + /// Get SoundTouch library version string + static const char *getVersionString(); + + /// Get SoundTouch library version Id + static uint getVersionId(); + + /// Sets new rate control value. Normal rate = 1.0, smaller values + /// represent slower rate, larger faster rates. + void setRate(double newRate); + + /// Sets new tempo control value. Normal tempo = 1.0, smaller values + /// represent slower tempo, larger faster tempo. + void setTempo(double newTempo); + + /// Sets new rate control value as a difference in percents compared + /// to the original rate (-50 .. +100 %) + void setRateChange(double newRate); + + /// Sets new tempo control value as a difference in percents compared + /// to the original tempo (-50 .. +100 %) + void setTempoChange(double newTempo); + + /// Sets new pitch control value. Original pitch = 1.0, smaller values + /// represent lower pitches, larger values higher pitch. + void setPitch(double newPitch); + + /// Sets pitch change in octaves compared to the original pitch + /// (-1.00 .. +1.00) + void setPitchOctaves(double newPitch); + + /// Sets pitch change in semi-tones compared to the original pitch + /// (-12 .. 
+12) + void setPitchSemiTones(int newPitch); + void setPitchSemiTones(double newPitch); + + /// Sets the number of channels, 1 = mono, 2 = stereo + void setChannels(uint numChannels); + + /// Sets sample rate. + void setSampleRate(uint srate); + + /// Get ratio between input and output audio durations, useful for calculating + /// processed output duration: if you'll process a stream of N samples, then + /// you can expect to get out N * getInputOutputSampleRatio() samples. + /// + /// This ratio will give accurate target duration ratio for a full audio track, + /// given that the the whole track is processed with same processing parameters. + /// + /// If this ratio is applied to calculate intermediate offsets inside a processing + /// stream, then this ratio is approximate and can deviate +- some tens of milliseconds + /// from ideal offset, yet by end of the audio stream the duration ratio will become + /// exact. + /// + /// Example: if processing with parameters "-tempo=15 -pitch=-3", the function + /// will return value 0.8695652... Now, if processing an audio stream whose duration + /// is exactly one million audio samples, then you can expect the processed + /// output duration be 0.869565 * 1000000 = 869565 samples. + double getInputOutputSampleRatio(); + + /// Flushes the last samples from the processing pipeline to the output. + /// Clears also the internal processing buffers. + // + /// Note: This function is meant for extracting the last samples of a sound + /// stream. This function may introduce additional blank samples in the end + /// of the sound stream, and thus it's not recommended to call this function + /// in the middle of a sound stream. + void flush(); + + /// Adds 'numSamples' pcs of samples from the 'samples' memory position into + /// the input of the object. Notice that sample rate _has_to_ be set before + /// calling this function, otherwise throws a runtime_error exception. 
+ virtual void putSamples( + const SAMPLETYPE *samples, ///< Pointer to sample buffer. + uint numSamples ///< Number of samples in buffer. Notice + ///< that in case of stereo-sound a single sample + ///< contains data for both channels. + ); + + /// Output samples from beginning of the sample buffer. Copies requested samples to + /// output buffer and removes them from the sample buffer. If there are less than + /// 'numsample' samples in the buffer, returns all that available. + /// + /// \return Number of samples returned. + virtual uint receiveSamples(SAMPLETYPE *output, ///< Buffer where to copy output samples. + uint maxSamples ///< How many samples to receive at max. + ); + + /// Adjusts book-keeping so that given number of samples are removed from beginning of the + /// sample buffer without copying them anywhere. + /// + /// Used to reduce the number of samples in the buffer when accessing the sample buffer directly + /// with 'ptrBegin' function. + virtual uint receiveSamples(uint maxSamples ///< Remove this many samples from the beginning of pipe. + ); + + /// Clears all the samples in the object's output and internal processing + /// buffers. + virtual void clear(); + + /// Changes a setting controlling the processing system behaviour. See the + /// 'SETTING_...' defines for available setting ID's. + /// + /// \return 'true' if the setting was successfully changed + bool setSetting(int settingId, ///< Setting ID number. see SETTING_... defines. + int value ///< New setting value. + ); + + /// Reads a setting controlling the processing system behaviour. See the + /// 'SETTING_...' defines for available setting ID's. + /// + /// \return the setting value. + int getSetting(int settingId ///< Setting ID number, see SETTING_... defines. + ) const; + + /// Returns number of samples currently unprocessed. 
+ virtual uint numUnprocessedSamples() const; + + /// Return number of channels + uint numChannels() const + { + return channels; + } + + /// Other handy functions that are implemented in the ancestor classes (see + /// classes 'FIFOProcessor' and 'FIFOSamplePipe') + /// + /// - receiveSamples() : Use this function to receive 'ready' processed samples from SoundTouch. + /// - numSamples() : Get number of 'ready' samples that can be received with + /// function 'receiveSamples()' + /// - isEmpty() : Returns nonzero if there aren't any 'ready' samples. + /// - clear() : Clears all samples from ready/processing buffers. +}; + +} +#endif diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/TDStretch.h b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/TDStretch.h new file mode 100644 index 0000000..3ef79c7 --- /dev/null +++ b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/TDStretch.h @@ -0,0 +1,279 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// Sampled sound tempo changer/time stretch algorithm. Changes the sound tempo +/// while maintaining the original pitch by using a time domain WSOLA-like method +/// with several performance-increasing tweaks. 
+/// +/// Note : MMX/SSE optimized functions reside in separate, platform-specific files +/// 'mmx_optimized.cpp' and 'sse_optimized.cpp' +/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef TDStretch_H +#define TDStretch_H + +#include +#include "STTypes.h" +#include "RateTransposer.h" +#include "FIFOSamplePipe.h" + +namespace soundtouch +{ + +/// Default values for sound processing parameters: +/// Notice that the default parameters are tuned for contemporary popular music +/// processing. For speech processing applications these parameters suit better: +/// #define DEFAULT_SEQUENCE_MS 40 +/// #define DEFAULT_SEEKWINDOW_MS 15 +/// #define DEFAULT_OVERLAP_MS 8 +/// + +/// Default length of a single processing sequence, in milliseconds. This determines to how +/// long sequences the original sound is chopped in the time-stretch algorithm. 
+/// +/// The larger this value is, the lesser sequences are used in processing. In principle +/// a bigger value sounds better when slowing down tempo, but worse when increasing tempo +/// and vice versa. +/// +/// Increasing this value reduces computational burden & vice versa. +//#define DEFAULT_SEQUENCE_MS 40 +#define DEFAULT_SEQUENCE_MS USE_AUTO_SEQUENCE_LEN + +/// Giving this value for the sequence length sets automatic parameter value +/// according to tempo setting (recommended) +#define USE_AUTO_SEQUENCE_LEN 0 + +/// Seeking window default length in milliseconds for algorithm that finds the best possible +/// overlapping location. This determines from how wide window the algorithm may look for an +/// optimal joining location when mixing the sound sequences back together. +/// +/// The bigger this window setting is, the higher the possibility to find a better mixing +/// position will become, but at the same time large values may cause a "drifting" artifact +/// because consequent sequences will be taken at more uneven intervals. +/// +/// If there's a disturbing artifact that sounds as if a constant frequency was drifting +/// around, try reducing this setting. +/// +/// Increasing this value increases computational burden & vice versa. +//#define DEFAULT_SEEKWINDOW_MS 15 +#define DEFAULT_SEEKWINDOW_MS USE_AUTO_SEEKWINDOW_LEN + +/// Giving this value for the seek window length sets automatic parameter value +/// according to tempo setting (recommended) +#define USE_AUTO_SEEKWINDOW_LEN 0 + +/// Overlap length in milliseconds. When the chopped sound sequences are mixed back together, +/// to form a continuous sound stream, this parameter defines over how long period the two +/// consecutive sequences are let to overlap each other. +/// +/// This shouldn't be that critical parameter. If you reduce the DEFAULT_SEQUENCE_MS setting +/// by a large amount, you might wish to try a smaller value on this. 
+/// +/// Increasing this value increases computational burden & vice versa. +#define DEFAULT_OVERLAP_MS 8 + + +/// Class that does the time-stretch (tempo change) effect for the processed +/// sound. +class TDStretch : public FIFOProcessor +{ +protected: + int channels; + int sampleReq; + + int overlapLength; + int seekLength; + int seekWindowLength; + int overlapDividerBitsNorm; + int overlapDividerBitsPure; + int slopingDivider; + int sampleRate; + int sequenceMs; + int seekWindowMs; + int overlapMs; + + unsigned long maxnorm; + float maxnormf; + + double tempo; + double nominalSkip; + double skipFract; + + bool bQuickSeek; + bool bAutoSeqSetting; + bool bAutoSeekSetting; + bool isBeginning; + + SAMPLETYPE *pMidBuffer; + SAMPLETYPE *pMidBufferUnaligned; + + FIFOSampleBuffer outputBuffer; + FIFOSampleBuffer inputBuffer; + + void acceptNewOverlapLength(int newOverlapLength); + + virtual void clearCrossCorrState(); + void calculateOverlapLength(int overlapMs); + + virtual double calcCrossCorr(const SAMPLETYPE *mixingPos, const SAMPLETYPE *compare, double &norm); + virtual double calcCrossCorrAccumulate(const SAMPLETYPE *mixingPos, const SAMPLETYPE *compare, double &norm); + + virtual int seekBestOverlapPositionFull(const SAMPLETYPE *refPos); + virtual int seekBestOverlapPositionQuick(const SAMPLETYPE *refPos); + virtual int seekBestOverlapPosition(const SAMPLETYPE *refPos); + + virtual void overlapStereo(SAMPLETYPE *output, const SAMPLETYPE *input) const; + virtual void overlapMono(SAMPLETYPE *output, const SAMPLETYPE *input) const; + virtual void overlapMulti(SAMPLETYPE *output, const SAMPLETYPE *input) const; + + void clearMidBuffer(); + void overlap(SAMPLETYPE *output, const SAMPLETYPE *input, uint ovlPos) const; + + void calcSeqParameters(); + void adaptNormalizer(); + + /// Changes the tempo of the given sound samples. + /// Returns amount of samples returned in the "output" buffer. 
+ /// The maximum amount of samples that can be returned at a time is set by + /// the 'set_returnBuffer_size' function. + void processSamples(); + +public: + TDStretch(); + virtual ~TDStretch(); + + /// Operator 'new' is overloaded so that it automatically creates a suitable instance + /// depending on if we've a MMX/SSE/etc-capable CPU available or not. + static void *operator new(size_t s); + + /// Use this function instead of "new" operator to create a new instance of this class. + /// This function automatically chooses a correct feature set depending on if the CPU + /// supports MMX/SSE/etc extensions. + static TDStretch *newInstance(); + + /// Returns the output buffer object + FIFOSamplePipe *getOutput() { return &outputBuffer; }; + + /// Returns the input buffer object + FIFOSamplePipe *getInput() { return &inputBuffer; }; + + /// Sets new target tempo. Normal tempo = 'SCALE', smaller values represent slower + /// tempo, larger faster tempo. + void setTempo(double newTempo); + + /// Returns nonzero if there aren't any samples available for outputting. + virtual void clear(); + + /// Clears the input buffer + void clearInput(); + + /// Sets the number of channels, 1 = mono, 2 = stereo + void setChannels(int numChannels); + + /// Enables/disables the quick position seeking algorithm. Zero to disable, + /// nonzero to enable + void enableQuickSeek(bool enable); + + /// Returns nonzero if the quick seeking algorithm is enabled. + bool isQuickSeekEnabled() const; + + /// Sets routine control parameters. These control are certain time constants + /// defining how the sound is stretched to the desired duration. 
+ // + /// 'sampleRate' = sample rate of the sound + /// 'sequenceMS' = one processing sequence length in milliseconds + /// 'seekwindowMS' = seeking window length for scanning the best overlapping + /// position + /// 'overlapMS' = overlapping length + void setParameters(int sampleRate, ///< Samplerate of sound being processed (Hz) + int sequenceMS = -1, ///< Single processing sequence length (ms) + int seekwindowMS = -1, ///< Offset seeking window length (ms) + int overlapMS = -1 ///< Sequence overlapping length (ms) + ); + + /// Get routine control parameters, see setParameters() function. + /// Any of the parameters to this function can be NULL, in such case corresponding parameter + /// value isn't returned. + void getParameters(int *pSampleRate, int *pSequenceMs, int *pSeekWindowMs, int *pOverlapMs) const; + + /// Adds 'numsamples' pcs of samples from the 'samples' memory position into + /// the input of the object. + virtual void putSamples( + const SAMPLETYPE *samples, ///< Input sample data + uint numSamples ///< Number of samples in 'samples' so that one sample + ///< contains both channels if stereo + ); + + /// return nominal input sample requirement for triggering a processing batch + int getInputSampleReq() const + { + return (int)(nominalSkip + 0.5); + } + + /// return nominal output sample amount when running a processing batch + int getOutputBatchSize() const + { + return seekWindowLength - overlapLength; + } + + /// return approximate initial input-output latency + int getLatency() const + { + return sampleReq; + } +}; + + +// Implementation-specific class declarations: + +#ifdef SOUNDTOUCH_ALLOW_MMX + /// Class that implements MMX optimized routines for 16bit integer samples type. 
+ class TDStretchMMX : public TDStretch + { + protected: + double calcCrossCorr(const short *mixingPos, const short *compare, double &norm); + double calcCrossCorrAccumulate(const short *mixingPos, const short *compare, double &norm); + virtual void overlapStereo(short *output, const short *input) const; + virtual void clearCrossCorrState(); + }; +#endif /// SOUNDTOUCH_ALLOW_MMX + + +#ifdef SOUNDTOUCH_ALLOW_SSE + /// Class that implements SSE optimized routines for floating point samples type. + class TDStretchSSE : public TDStretch + { + protected: + double calcCrossCorr(const float *mixingPos, const float *compare, double &norm); + double calcCrossCorrAccumulate(const float *mixingPos, const float *compare, double &norm); + }; + +#endif /// SOUNDTOUCH_ALLOW_SSE + +} +#endif /// TDStretch_H diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/cpu_detect.h b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/cpu_detect.h new file mode 100644 index 0000000..093b609 --- /dev/null +++ b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/cpu_detect.h @@ -0,0 +1,55 @@ +//////////////////////////////////////////////////////////////////////////////// +/// +/// A header file for detecting the Intel MMX instructions set extension. +/// +/// Please see 'mmx_win.cpp', 'mmx_cpp.cpp' and 'mmx_non_x86.cpp' for the +/// routine implementations for x86 Windows, x86 gnu version and non-x86 +/// platforms, respectively. 
+/// +/// Author : Copyright (c) Olli Parviainen +/// Author e-mail : oparviai 'at' iki.fi +/// SoundTouch WWW: http://www.surina.net/soundtouch +/// +//////////////////////////////////////////////////////////////////////////////// +// +// License : +// +// SoundTouch audio processing library +// Copyright (c) Olli Parviainen +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef _CPU_DETECT_H_ +#define _CPU_DETECT_H_ + +#include "STTypes.h" + +#define SUPPORT_MMX 0x0001 +#define SUPPORT_3DNOW 0x0002 +#define SUPPORT_ALTIVEC 0x0004 +#define SUPPORT_SSE 0x0008 +#define SUPPORT_SSE2 0x0010 + +/// Checks which instruction set extensions are supported by the CPU. +/// +/// \return A bitmask of supported extensions, see SUPPORT_... defines. +uint detectCPUextensions(void); + +/// Disables given set of instruction extensions. See SUPPORT_... defines. 
+void disableExtensions(uint wDisableMask); + +#endif // _CPU_DETECT_H_ diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/soundtouch_config.h b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/soundtouch_config.h new file mode 100644 index 0000000..4919465 --- /dev/null +++ b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Headers/soundtouch_config.h @@ -0,0 +1,94 @@ +/* include/soundtouch_config.h. Generated from soundtouch_config.h.in by configure. */ +/* include/soundtouch_config.h.in. Generated from configure.ac by autoheader. */ + +/* Never allow x86 optimizations in iOS simulator build */ +#define ALLOW_X86_OPTIMIZATIONS 0 + +/* Use Integer as Sample type */ +#define SOUNDTOUCH_INTEGER_SAMPLES 1 + +/* Use Float as Sample type */ +//#define FLOAT_SAMPLES 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the `m' library (-lm). */ +#define HAVE_LIBM 1 + +/* Define to 1 if your system has a GNU libc compatible `malloc' function, and + to 0 otherwise. */ +#define HAVE_MALLOC 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Use Integer as Sample type */ +/* #undef INTEGER_SAMPLES */ + +/* Define to the sub-directory in which libtool stores uninstalled libraries. 
+ */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "soundtouch" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "http://www.surina.net/soundtouch" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "SoundTouch" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "SoundTouch 1.4.0" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "soundtouch" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "1.4.0" + +/* Define as the return type of signal handlers (`int' or `void'). */ +#define RETSIGTYPE void + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Version number of package */ +#define VERSION "1.4.0" + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ + +/* Define to `__inline__' or `__inline' if that's what the C compiler + calls it, or to nothing if 'inline' is not supported under any name. */ +#ifndef __cplusplus +/* #undef inline */ +#endif + +/* Define to rpl_malloc if the replacement function should be used. 
*/ +/* #undef malloc */ diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Info.plist b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Info.plist new file mode 100644 index 0000000..b05da92 Binary files /dev/null and b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Info.plist differ diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Modules/module.modulemap b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Modules/module.modulemap new file mode 100644 index 0000000..50ad3a6 --- /dev/null +++ b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/Modules/module.modulemap @@ -0,0 +1,6 @@ +framework module SoundTouch { + umbrella header "SoundTouch.h" + + export * + module * { export * } +} diff --git a/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/SoundTouch b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/SoundTouch new file mode 100644 index 0000000..3523b0b Binary files /dev/null and b/src/ios/SoundTouch.xcframework/ios-arm64_armv7/SoundTouch.framework/SoundTouch differ diff --git a/src/ios/TXLiteAVSDK_ReplayKitExt.framework/Headers/TXLiteAVSDK_ReplayKitExt.h b/src/ios/TXLiteAVSDK_ReplayKitExt.framework/Headers/TXLiteAVSDK_ReplayKitExt.h new file mode 100644 index 0000000..b040a17 --- /dev/null +++ b/src/ios/TXLiteAVSDK_ReplayKitExt.framework/Headers/TXLiteAVSDK_ReplayKitExt.h @@ -0,0 +1,5 @@ +/* +* Module: TXReplayKitExt @ TXLiteAVSDK +*/ + +#import diff --git a/src/ios/TXLiteAVSDK_ReplayKitExt.framework/Headers/TXReplayKitExt.h b/src/ios/TXLiteAVSDK_ReplayKitExt.framework/Headers/TXReplayKitExt.h new file mode 100644 index 0000000..24da563 --- /dev/null +++ b/src/ios/TXLiteAVSDK_ReplayKitExt.framework/Headers/TXReplayKitExt.h @@ -0,0 +1,90 @@ +/* +* Module: TXReplayKitExt @ TXLiteAVSDK +* +* Function: 腾讯云 ReplayKit 录屏功能在Extension中的主要接口类 +* +* Version: 9.5.11234 +*/ + +/// @defgroup 
TXReplayKitExt_ios TXReplayKitExt +/// 腾讯云 ReplayKit 录屏功能在Extension中的主要接口类 +/// @{ + +#import +#import +#import + +NS_ASSUME_NONNULL_BEGIN + +typedef NS_ENUM(NSUInteger, TXReplayKitExtReason) { + /// 主进程请求结束 + TXReplayKitExtReasonRequestedByMain, + /// 链接断开,主进程退出 + TXReplayKitExtReasonDisconnected, + /// 版本号与主进程SDK不符 + TXReplayKitExtReasonVersionMismatch +}; + +@protocol TXReplayKitExtDelegate; + +/// 屏幕分享主入口类 +API_AVAILABLE(ios(11.0)) +@interface TXReplayKitExt : NSObject + +/// 获取单例 ++ (instancetype)sharedInstance; + +/// 初始化方法 +/// +/// 需要在 RPBroadcastSampleHandler 的实现类中的 broadcastStartedWithSetupInfo 方法中调用 +/// @param appGroup App group ID +/// @param delegate 回调对象 +- (void)setupWithAppGroup:(NSString *)appGroup delegate:(id)delegate; + +/// 录屏暂停方法 +/// +/// 通过系统控制中心停止录屏时,会回调 RPBroadcastSampleHandler.broadcastPaused,在 broadcastPaused 方法中调用 +- (void)broadcastPaused; + +/// 录屏恢复方法 +/// +/// 通过系统控制中心停止录屏时,会回调 RPBroadcastSampleHandler.broadcastResumed,在 broadcastResumed 方法中调用 +- (void)broadcastResumed; + +/// 录屏完成方法 +/// +/// 通过系统控制中心停止录屏时,会回调 RPBroadcastSampleHandler.broadcastFinished,在 broadcastFinished 方法中调用 +- (void)broadcastFinished; + +/// 媒体数据(音视频)发送方法 +/// +/// 需要在 RPBroadcastSampleHandler 的实现类中的 processSampleBuffer: 方法中调用 +/// +/// @param sampleBuffer 系统回调的视频或音频帧 +/// @param sampleBufferType 媒体输入类型 +/// @note +/// - sampleBufferType 当前支持 RPSampleBufferTypeVideo 和 RPSampleBufferTypeAudioApp 类型的数据帧处理。 +/// - RPSampleBufferTypeAudioMic 不支持,请在主 app 处理麦克风采集数据 +- (void)sendSampleBuffer:(CMSampleBufferRef)sampleBuffer withType:(RPSampleBufferType)sampleBufferType; + +/// 视频发送方法 +/// 已废弃,请使用 - (void)sendSampleBuffer:(CMSampleBufferRef)sampleBuffer withType:(RPSampleBufferType)sampleBufferType; 代替 +/// 需要在 RPBroadcastSampleHandler 的实现类中的 processSampleBuffer: 方法中调用 +/// +/// @param sampleBuffer 系统回调的视频帧 +- (void)sendVideoSampleBuffer:(CMSampleBufferRef)sampleBuffer __attribute__((deprecated("use sendSampleBuffer:withType instead"))); + +@end + +@protocol 
TXReplayKitExtDelegate + +/// 录屏完成回调 +/// +/// @param broadcast 发出回调的实例 +/// @param reason 结束原因代码, 参见 TXReplayKitExtReason +- (void)broadcastFinished:(TXReplayKitExt *)broadcast reason:(TXReplayKitExtReason)reason; + +@end + +NS_ASSUME_NONNULL_END +/// @} \ No newline at end of file diff --git a/src/ios/TXLiteAVSDK_ReplayKitExt.framework/Info.plist b/src/ios/TXLiteAVSDK_ReplayKitExt.framework/Info.plist new file mode 100644 index 0000000..7e1a7a2 Binary files /dev/null and b/src/ios/TXLiteAVSDK_ReplayKitExt.framework/Info.plist differ diff --git a/src/ios/TXLiteAVSDK_ReplayKitExt.framework/Modules/module.modulemap b/src/ios/TXLiteAVSDK_ReplayKitExt.framework/Modules/module.modulemap new file mode 100644 index 0000000..571dc84 --- /dev/null +++ b/src/ios/TXLiteAVSDK_ReplayKitExt.framework/Modules/module.modulemap @@ -0,0 +1,4 @@ +framework module TXLiteAVSDK_ReplayKitExt { + umbrella header "TXLiteAVSDK_ReplayKitExt.h" + export * +} diff --git a/src/ios/TXLiteAVSDK_ReplayKitExt.framework/TXLiteAVSDK_ReplayKitExt b/src/ios/TXLiteAVSDK_ReplayKitExt.framework/TXLiteAVSDK_ReplayKitExt new file mode 100644 index 0000000..799cb02 Binary files /dev/null and b/src/ios/TXLiteAVSDK_ReplayKitExt.framework/TXLiteAVSDK_ReplayKitExt differ diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/ITRTCAudioPacketListener.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/ITRTCAudioPacketListener.h new file mode 100644 index 0000000..f20fec9 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/ITRTCAudioPacketListener.h @@ -0,0 +1,30 @@ +/* +* Module: 网络音频包UDT自定义数据回调 +* +* Function: 给客户回调发送前、接收后的 UDT 自定义数据 +* +*/ +#ifndef LITEAV_ITRTCAUDIOPACKETLISTENER_H +#define LITEAV_ITRTCAUDIOPACKETLISTENER_H + +#include +#include "TXLiteAVBuffer.h" + +namespace liteav { + struct TRTCAudioPacket { + const char *userId; + liteav::TXLiteAVBuffer* extraData; + }; + + class ITRTCAudioPacketListener { + public: + virtual ~ITRTCAudioPacketListener() {} + /*网络层接收到音频数据包*/ + virtual bool 
onRecvAudioPacket(TRTCAudioPacket &data) { return false; } + /*网络层即将发送的音频数据包*/ + virtual bool onSendAudioPacket(TRTCAudioPacket &data) { return false; } + }; +} + + +#endif //LITEAV_ITRTCAUDIOPACKETLISTENER_H diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloud.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloud.h new file mode 100644 index 0000000..bfe7a0d --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloud.h @@ -0,0 +1,2175 @@ +/** + * Module: TRTCCloud @ TXLiteAVSDK + * Function: TRTC's main feature API + * Version: <:Version:> + */ +#import +#import +#import "TRTCCloudDelegate.h" +#import "TRTCCloudDef.h" +#import "TXBeautyManager.h" +#import "TXAudioEffectManager.h" +#import "TXDeviceManager.h" + +/// @defgroup TRTCCloud_ios TRTCCloud +/// Tencent Cloud TRTC Core Function Interface +/// @{ +@interface TRTCCloud : NSObject + ++ (instancetype)new __attribute__((unavailable("Use +sharedInstance instead"))); +- (instancetype)init __attribute__((unavailable("Use +sharedInstance instead"))); + +///////////////////////////////////////////////////////////////////////////////// +// +// Create Instance And Event Callback +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Create Instance And Event Callback +/// @{ + +/** + * 1.1 Create `TRTCCloud` instance (singleton mode) + */ ++ (instancetype)sharedInstance; + +/** + * 1.2 Terminate `TRTCCloud` instance (singleton mode) + */ ++ (void)destroySharedIntance; + +/** + * 1.3 Set TRTC event callback + * + * You can use {@link TRTCCloudDelegate} to get various event notifications from the SDK, such as error codes, warning codes, and audio/video status parameters. + */ +@property(nonatomic, weak) id delegate; + +/** + * 1.4 Set the queue that drives the `TRTCCloudDelegate` event callback + * + * If you do not specify a `delegateQueue`, the SDK will use `MainQueue` as the queue for driving {@link TRTCCloudDelegate} event callbacks by default. 
+ * In other words, if you do not set the `delegateQueue` attribute, all callback functions in {@link TRTCCloudDelegate} will be driven by `MainQueue`. + * @note If you specify a `delegateQueue`, please do not manipulate the UI in the {@link TRTCCloudDelegate} callback function; otherwise, thread safety issues will occur. + */ +@property(nonatomic, strong) dispatch_queue_t delegateQueue; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Room APIs +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Room APIs +/// @{ + +/** + * 2.1 Enter room + * + * All TRTC users need to enter a room before they can "publish" or "subscribe to" audio/video streams. "Publishing" refers to pushing their own streams to the cloud, and "subscribing to" refers to pulling the streams of other users in the room from + * the cloud. When calling this API, you need to specify your application scenario ({@link TRTCAppScene}) to get the best audio/video transfer experience. We provide the following four scenarios for your choice: + * - {@link TRTCAppSceneVideoCall}: + * Video call scenario. Use cases: [one-to-one video call], [video conferencing with up to 300 participants], [online medical diagnosis], [small class], [video interview], etc. + * In this scenario, each room supports up to 300 concurrent online users, and up to 50 of them can speak simultaneously. + * - {@link TRTCAppSceneAudioCall}: + * Audio call scenario. Use cases: [one-to-one audio call], [audio conferencing with up to 300 participants], [audio chat], [online Werewolf], etc. + * In this scenario, each room supports up to 300 concurrent online users, and up to 50 of them can speak simultaneously. + * - {@link TRTCAppSceneLIVE}: + * Live streaming scenario. 
Use cases: [low-latency video live streaming], [interactive classroom for up to 100,000 participants], [live video competition], [video dating room], [remote training], [large-scale conferencing], etc. + * In this scenario, each room supports up to 100,000 concurrent online users, but you should specify the user roles: anchor ({@link TRTCRoleAnchor }) or audience ({@link TRTCRoleAudience }). + * - {@link TRTCAppSceneVoiceChatRoom}: + * Audio chat room scenario. Use cases: [Clubhouse], [online karaoke room], [music live room], [FM radio], etc. + * In this scenario, each room supports up to 100,000 concurrent online users, but you should specify the user roles: anchor ({@link TRTCRoleAnchor }) or audience ({@link TRTCRoleAudience }). + * After calling this API, you will receive the `onEnterRoom(result)` callback from {@link TRTCCloudDelegate}: + * - If room entry succeeded, the `result` parameter will be a positive number (`result` > 0), indicating the time in milliseconds (ms) between function call and room entry. + * - If room entry failed, the `result` parameter will be a negative number (`result` < 0), indicating the [error code](https://cloud.tencent.com/document/product/647/32257) for room entry failure. + * @param param Room entry parameter, which is used to specify the user's identity, role, authentication credentials, and other information. For more information, please see {@link TRTCParams}. + * @param scene Application scenario, which is used to specify the use case. The same {@link TRTCAppScene} should be configured for all users in the same room. + * @note + * 1. If `scene` is specified as {@link TRTCAppSceneLIVE} or {@link TRTCAppSceneVoiceChatRoom}, you must use the `role` field in {@link TRTCParams} to specify the role of the current user in the room. + * 2. The same `scene` should be configured for all users in the same room. + * 3. 
Please try to ensure that {@link enterRoom} and {@link exitRoom} are used in pair; that is, please make sure that "the previous room is exited before the next room is entered"; otherwise, many issues may occur. + */ +- (void)enterRoom:(TRTCParams *)param appScene:(TRTCAppScene)scene; + +/** + * 2.2 Exit room + * + * Calling this API will allow the user to leave the current audio or video room and release the camera, mic, speaker, and other device resources. + * After resources are released, the SDK will use the `onExitRoom()` callback in {@link TRTCCloudDelegate} to notify you. + * If you need to call {@link enterRoom} again or switch to the SDK of another provider, we recommend you wait until you receive the `onExitRoom()` callback, so as to avoid the problem of the camera or mic being occupied. + */ +- (void)exitRoom; + +/** + * 2.3 Switch role + * + * This API is used to switch the user role between "anchor" and "audience". + * As video live rooms and audio chat rooms need to support an audience of up to 100,000 concurrent online users, the rule "only anchors can publish their audio/video streams" has been set. Therefore, when some users want to publish their streams (so + * that they can interact with anchors), they need to switch their role to "anchor" first. You can use the `role` field in {@link TRTCParams} during room entry to specify the user role in advance or use the `switchRole` API to switch roles after room + * entry. + * @param role Role, which is "anchor" by default: + * - {@link TRTCRoleAnchor}: anchor, who can publish their audio/video streams. Up to 50 anchors are allowed to publish streams at the same time in one room. + * - {@link TRTCRoleAudience}: audience, who cannot publish their audio/video streams, but can only watch streams of anchors in the room. If they want to publish their streams, they need to switch to the "anchor" role first through {@link + * switchRole}. One room supports an audience of up to 100,000 concurrent online users. 
+ * @note + * 1. This API is only applicable to two scenarios: live streaming ({@link TRTCAppSceneLIVE}) and audio chat room ({@link TRTCAppSceneVoiceChatRoom}). + * 2. If the `scene` you specify in {@link enterRoom} is {@link TRTCAppSceneVideoCall} or {@link TRTCAppSceneAudioCall}, please do not call this API. + */ +- (void)switchRole:(TRTCRoleType)role; + +/** + * 2.4 Switch room + * + * This API is used to quickly switch a user from one room to another. + * - If the user's role is "audience", calling this API is equivalent to `exitRoom` (current room) + `enterRoom` (new room). + * - If the user's role is "anchor", the API will retain the current audio/video publishing status while switching the room; therefore, during the room switch, camera preview and sound capturing will not be interrupted. + * This API is suitable for the online education scenario where the supervising teacher can perform fast room switch across multiple rooms. In this scenario, using `switchRoom` can get better smoothness and use less code than `exitRoom + enterRoom`. + * The API call result will be called back through `onSwitchRoom(errCode, errMsg)` in {@link TRTCCloudDelegate}. + * @param config Room parameter. For more information, please see {@link TRTCSwitchRoomConfig}. + * @note Due to the requirement for compatibility with legacy versions of the SDK, the `config` parameter contains both `roomId` and `strRoomId` parameters. You should pay special attention as detailed below when specifying these two parameters: + * 1. If you decide to use `strRoomId`, then set `roomId` to 0. If both are specified, `roomId` will be used. + * 2. All rooms need to use either `strRoomId` or `roomId` at the same time. They cannot be mixed; otherwise, there will be many unexpected bugs. 
+ */ +- (void)switchRoom:(TRTCSwitchRoomConfig *)config; + +/** + * 2.5 Request cross-room call + * + * By default, only users in the same room can make audio/video calls with each other, and the audio/video streams in different rooms are isolated from each other. + * However, you can publish the audio/video streams of an anchor in another room to the current room by calling this API. At the same time, this API will also publish the local audio/video streams to the target anchor's room. + * In other words, you can use this API to share the audio/video streams of two anchors in two different rooms, so that the audience in each room can watch the streams of these two anchors. This feature can be used to implement anchor competition. + * The result of requesting cross-room call will be returned through the `onConnectOtherRoom()` callback in {@link TRTCCloudDelegate}. + * For example, after anchor A in room "101" uses `connectOtherRoom()` to successfully call anchor B in room "102": + * - All users in room "101" will receive the `onRemoteUserEnterRoom(B)` and `onUserVideoAvailable(B,YES)` event callbacks of anchor B; that is, all users in room "101" can subscribe to the audio/video streams of anchor B. + * - All users in room "102" will receive the `onRemoteUserEnterRoom(A)` and `onUserVideoAvailable(A,YES)` event callbacks of anchor A; that is, all users in room "102" can subscribe to the audio/video streams of anchor A. + *
+ *                                   Room 101                          Room 102
+ *                             ---------------------               ---------------------
+ *  Before cross-room call:   | Anchor:     A       |             | Anchor:     B       |
+ *                            | Users :   U, V, W   |             | Users:   X, Y, Z    |
+ *                             ---------------------               ---------------------
+ *
+ *                                   Room 101                           Room 102
+ *                             ---------------------               ---------------------
+ *  After cross-room call:    | Anchors: A and B    |             | Anchors: B and A    |
+ *                            | Users  : U, V, W    |             | Users  : X, Y, Z    |
+ *                             ---------------------               ---------------------
+ * 
+ * For compatibility with subsequent extended fields for cross-room call, parameters in JSON format are used currently. + * Case 1: numeric room ID + * If anchor A in room "101" wants to co-anchor with anchor B in room "102", then anchor A needs to pass in {"roomId": 102, "userId": "userB"} when calling this API. + * Below is the sample code: + *
+ *   NSMutableDictionary *jsonDict = [[NSMutableDictionary alloc] init];
+ *   [jsonDict setObject:@(102) forKey:@"roomId"];
+ *   [jsonDict setObject:@"userB" forKey:@"userId"];
+ *   NSData* jsonData = [NSJSONSerialization dataWithJSONObject:jsonDict options:NSJSONWritingPrettyPrinted error:nil];
+ *   NSString* jsonString = [[NSString alloc] initWithData:jsonData encoding:NSUTF8StringEncoding];
+ *   [trtc connectOtherRoom:jsonString];
+ * 
+ * + * Case 2: string room ID + * If you use a string room ID, please be sure to replace the `roomId` in JSON with `strRoomId`, such as {"strRoomId": "102", "userId": "userB"} + * Below is the sample code: + *
+ *   NSMutableDictionary *jsonDict = [[NSMutableDictionary alloc] init];
+ *   [jsonDict setObject:@"102" forKey:@"strRoomId"];
+ *   [jsonDict setObject:@"userB" forKey:@"userId"];
+ *   NSData* jsonData = [NSJSONSerialization dataWithJSONObject:jsonDict options:NSJSONWritingPrettyPrinted error:nil];
+ *   NSString* jsonString = [[NSString alloc] initWithData:jsonData encoding:NSUTF8StringEncoding];
+ *   [trtc connectOtherRoom:jsonString];
+ * 
+ * + * @param param You need to pass in a string parameter in JSON format: `roomId` represents the room ID in numeric format, `strRoomId` represents the room ID in string format, and `userId` represents the user ID of the target anchor. + */ +- (void)connectOtherRoom:(NSString *)param; + +/** + * 2.6 Exit cross-room call + * + * The result will be returned through the `onDisconnectOtherRoom()` callback in {@link TRTCCloudDelegate}. + */ +- (void)disconnectOtherRoom; + +/** + * 2.7 Set subscription mode (which must be set before room entry for it to take effect) + * + * You can switch between the "automatic subscription" and "manual subscription" modes through this API: + * - Automatic subscription: this is the default mode, where the user will immediately receive the audio/video streams in the room after room entry, so that the audio will be automatically played back, and the video will be automatically decoded (you + * still need to bind the rendering control through the `startRemoteView` API). + * - Manual subscription: after room entry, the user needs to manually call the {@startRemoteView} API to start subscribing to and decoding the video stream and call the `{@muteRemoteAudio} (NO)` API to start playing back the audio stream. + * In most scenarios, users will subscribe to the audio/video streams of all anchors in the room after room entry. Therefore, TRTC adopts the automatic subscription mode by default in order to achieve the best "instant streaming experience". + * In your application scenario, if there are many audio/video streams being published at the same time in each room, and each user only wants to subscribe to 1–2 streams of them, we recommend you use the "manual subscription" mode to reduce the + * traffic costs. + * @param autoRecvAudio YES: automatic subscription to audio; NO: manual subscription to audio by calling `muteRemoteAudio(NO)`. 
Default value: YES + * @param autoRecvVideo YES: automatic subscription to video; NO: manual subscription to video by calling `startRemoteView`. Default value: YES + * @note + * 1. The configuration takes effect only if this API is called before room entry (enterRoom). + * 2. In the automatic subscription mode, if the user does not call {@startRemoteView} to subscribe to the video stream after room entry, the SDK will automatically stop subscribing to the video stream in order to reduce the traffic consumption. + */ +- (void)setDefaultStreamRecvMode:(BOOL)autoRecvAudio video:(BOOL)autoRecvVideo; + +/** + * 2.8 Create room subinstance (for concurrent multi-room listen/watch) + * + * `TRTCCloud` was originally designed to work in the singleton mode, which limited the ability to watch concurrently in multiple rooms. + * By calling this API, you can create multiple `TRTCCloud` instances, so that you can enter multiple different rooms at the same time to listen/watch audio/video streams. + * However, it should be noted that because there are still only one camera and one mic available, you can exist as an "anchor" in only one `TRTCCloud` instance at any time; that is, you can only publish your audio/video streams in one `TRTCCloud` + * instance at any time. This feature is mainly used in the "super small class" use case in the online education scenario to break the limit that "only up to 50 users can publish their audio/video streams simultaneously in one TRTC room". Below is + * the sample code:
 TRTCCloud *mainCloud = [TRTCCloud sharedInstance]; [mainCloud enterRoom:params1 appScene:TRTCAppSceneLIVE];
+ *     //...
+ *     //Switch the role from "anchor" to "audience" in your own room
+ *     [mainCloud switchRole:TRTCRoleAudience];
+ *     [mainCloud muteLocalVideo:YES];
+ *     [mainCloud muteLocalAudio:YES];
+ *     //...
+ *     //Use subcloud to enter another room and switch the role from "audience" to "anchor"
+ *     TRTCCloud *subCloud = [mainCloud createSubCloud];
+ *     [subCloud enterRoom:params2 appScene:TRTCAppSceneLIVE];
+ *     [subCloud switchRole:TRTCRoleAnchor];
+ *     [subCloud muteLocalVideo:NO];
+ *     [subCloud muteLocalAudio:NO];
+ *     //...
+ *     //Exit from new room and release it.
+ *     [subCloud exitRoom];
+ *     [mainCloud destroySubCloud:subCloud];
+ * 
+ * + * @note + * - The same user can enter multiple rooms with different `roomId` values by using the same `userId`. + * - Two devices cannot use the same `userId` to enter the same room with a specified `roomId`. + * - The same user can push a stream in only one `TRTCCloud` instance at any time. If streams are pushed simultaneously in different rooms, a status mess will be caused in the cloud, leading to various bugs. + * - The `TRTCCloud` instance created by the `createSubCloud` API cannot call APIs related to the local audio/video in the subinstance, except `switchRole`, `muteLocalVideo`, and `muteLocalAudio`. To use APIs such as the beauty filter, please use the + * original `TRTCCloud` instance object. + * @return `TRTCCloud` subinstance + */ +- (TRTCCloud *)createSubCloud; + +/** + * 2.9 Terminate room subinstance + * + * @param subCloud + */ +- (void)destroySubCloud:(TRTCCloud *)subCloud; + +///////////////////////////////////////////////////////////////////////////////// +// +// CDN APIs +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 3.1 Start publishing audio/video streams to Tencent Cloud CSS CDN + * + * This API sends a command to the TRTC server, requesting it to relay the current user's audio/video streams to CSS CDN. + * You can set the `StreamId` of the live stream through the `streamId` parameter, so as to specify the playback address of the user's audio/video streams on CSS CDN. + * For example, if you specify the current user's live stream ID as `user_stream_001` through this API, then the corresponding CDN playback address is: + * "http://yourdomain/live/user_stream_001.flv", where `yourdomain` is your playback domain name with an ICP filing. + * You can configure your playback domain name in the [CSS console](https://console.cloud.tencent.com/live). Tencent Cloud does not provide a default playback domain name. + *
+ * TRTCCloud *trtcCloud = [TRTCCloud sharedInstance];
+ * [trtcCloud enterRoom:params appScene:TRTCAppSceneLIVE];
+ * [trtcCloud startLocalPreview:frontCamera view:localView];
+ * [trtcCloud startLocalAudio];
+ * [trtcCloud startPublishing: @"user_stream_001" type:TRTCVideoStreamTypeBig];
+ * 
+ * + * You can also specify the `streamId` when setting the `TRTCParams` parameter of `enterRoom`, which is the recommended approach. + * @param streamId Custom stream ID. + * @param streamType Only `TRTCVideoStreamTypeBig` and `TRTCVideoStreamTypeSub` are supported. + * @note You need to enable the "Enable Relayed Push" option on the "Function Configuration" page in the [TRTC console](https://console.cloud.tencent.com/trtc/) in advance. + * - If you select "Specified stream for relayed push", you can use this API to push the corresponding audio/video stream to Tencent Cloud CDN and specify the entered stream ID. + * - If you select "Global auto-relayed push", you can use this API to adjust the default stream ID. + */ +- (void)startPublishing:(NSString *)streamId type:(TRTCVideoStreamType)streamType; + +/** + * 3.2 Stop publishing audio/video streams to Tencent Cloud CSS CDN + */ +- (void)stopPublishing; + +/** + * 3.3 Start publishing audio/video streams to non-Tencent Cloud CDN + * + * This API is similar to the `startPublishing` API. The difference is that `startPublishing` can only publish audio/video streams to Tencent Cloud CDN, while this API can relay streams to live streaming CDN services of other cloud providers. + * @param param CDN relaying parameter. For more information, please see {@link TRTCPublishCDNParam} + * @note + * - Using the `startPublishing` API to publish audio/video streams to Tencent Cloud CSS CDN does not incur additional fees. + * - Using the `startPublishCDNStream` API to publish audio/video streams to non-Tencent Cloud CDN incurs additional relaying bandwidth fees. 
+ */ +- (void)startPublishCDNStream:(TRTCPublishCDNParam *)param; + +/** + * 3.4 Stop publishing audio/video streams to non-Tencent Cloud CDN + */ +- (void)stopPublishCDNStream; + +/** + * 3.5 Set the layout and transcoding parameters of On-Cloud MixTranscoding + * + * In a live room, there may be multiple anchors publishing their audio/video streams at the same time, but for audience on CSS CDN, they only need to watch one video stream in HTTP-FLV or HLS format. + * When you call this API, the SDK will send a command to the TRTC mixtranscoding server to combine multiple audio/video streams in the room into one stream. + * You can use the {@link TRTCTranscodingConfig} parameter to set the layout of each channel of image. You can also set the encoding parameters of the mixed audio/video streams. + * For more information, please see [On-Cloud MixTranscoding](https://cloud.tencent.com/document/product/647/16827). + *
+ *     **Image 1** => decoding ====> \\
+ *                                    \\
+ *     **Image 2** => decoding => image mixing => encoding => **mixed image**
+ *                                    //
+ *     **Image 3** => decoding ====> //
+ *
+ *     **Audio 1** => decoding ====> \\
+ *                                    \\
+ *     **Audio 2** => decoding => audio mixing => encoding => **mixed audio**
+ *                                    //
+ *     **Audio 3** => decoding ====> //
+ * 
+ * @param config If `config` is not empty, On-Cloud MixTranscoding will be started; otherwise, it will be stopped. For more information, please see {@link TRTCTranscodingConfig}. + * @note Notes on On-Cloud MixTranscoding: + * - Mixed-stream transcoding is a chargeable function, calling the interface will incur cloud-based mixed-stream transcoding fees, see https://intl.cloud.tencent.com/document/product/647/38929. + * - If the user calling this API does not set `streamId` in the `config` parameter, TRTC will mix the multiple channels of images in the room into the audio/video streams corresponding to the current user, i.e., A + B => A. + * - If the user calling this API sets `streamId` in the `config` parameter, TRTC will mix the multiple channels of images in the room into the specified `streamId`, i.e., A + B => streamId. + * - Please note that if you are still in the room but do not need mixtranscoding anymore, be sure to call this API again and leave `config` empty to cancel it; otherwise, additional fees may be incurred. + * - Please rest assured that TRTC will automatically cancel the mixtranscoding status upon room exit. + */ +- (void)setMixTranscodingConfig:(TRTCTranscodingConfig *)config; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Video APIs +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Video APIs +/// @{ + +/** + * 4.1 Enable the preview image of local camera (mobile) + * + * If this API is called before `enterRoom`, the SDK will only enable the camera and wait until `enterRoom` is called before starting push. + * If it is called after `enterRoom`, the SDK will enable the camera and automatically start pushing the video stream. + * When the first camera video frame starts to be rendered, you will receive the `onCameraDidReady` callback in {@link TRTCCloudDelegate}. 
+ * @param frontCamera YES: front camera; NO: rear camera + * @param view Control that carries the video image + * @note If you want to preview the camera image and adjust the beauty filter parameters through `BeautyManager` before going live, you can: + * - Scheme 1. Call `startLocalPreview` before calling `enterRoom` + * - Scheme 2. Call `startLocalPreview` and `muteLocalVideo(YES)` after calling `enterRoom` + */ +#if TARGET_OS_IPHONE +- (void)startLocalPreview:(BOOL)frontCamera view:(TXView *)view; +#endif + +/** + * 4.2 Enable the preview image of local camera (desktop) + * + * Before this API is called, `setCurrentCameraDevice` can be called first to select whether to use the macOS device's built-in camera or an external camera. + * If this API is called before `enterRoom`, the SDK will only enable the camera and wait until `enterRoom` is called before starting push. + * If it is called after `enterRoom`, the SDK will enable the camera and automatically start pushing the video stream. + * When the first camera video frame starts to be rendered, you will receive the `onCameraDidReady` callback in {@link TRTCCloudDelegate}. + * @param view Control that carries the video image + * @note If you want to preview the camera image and adjust the beauty filter parameters through `BeautyManager` before going live, you can: + * - Scheme 1. Call `startLocalPreview` before calling `enterRoom` + * - Scheme 2. Call `startLocalPreview` and `muteLocalVideo(YES)` after calling `enterRoom` + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)startLocalPreview:(TXView *)view; +#endif + +/** + * 4.3 Update the preview image of local camera + */ +- (void)updateLocalView:(TXView *)view; + +/** + * 4.4 Stop camera preview + */ +- (void)stopLocalPreview; + +/** + * 4.5 Pause/Resume publishing local video stream + * + * This API can pause (or resume) publishing the local video image. After the pause, other users in the same room will not be able to see the local image. 
+ * This API is equivalent to the two APIs of `startLocalPreview/stopLocalPreview` when TRTCVideoStreamTypeBig is specified, but has higher performance and response speed. + * The `startLocalPreview/stopLocalPreview` APIs need to enable/disable the camera, which are hardware device-related operations, so they are very time-consuming. + * In contrast, `muteLocalVideo` only needs to pause or allow the data stream at the software level, so it is more efficient and more suitable for scenarios where frequent enabling/disabling are needed. + * After local video publishing is paused, other members in the same room will receive the `onUserVideoAvailable(userId, NO)` callback notification. + * After local video publishing is resumed, other members in the same room will receive the `onUserVideoAvailable(userId, YES)` callback notification. + * @param streamType Specify for which video stream to pause (or resume). Only {@link TRTCVideoStreamTypeBig} and {@link TRTCVideoStreamTypeSub} are supported + * @param mute YES: pause; NO: resume + */ +- (void)muteLocalVideo:(TRTCVideoStreamType)streamType mute:(BOOL)mute; + +/** + * 4.6 Set placeholder image during local video pause + * + * When you call `muteLocalVideo(YES)` to pause the local video image, you can set a placeholder image by calling this API. Then, other users in the room will see this image instead of a black screen. + * @param image Placeholder image. A null value means that no more video stream data will be sent after `muteLocalVideo`. The default value is null. + * @param fps Frame rate of the placeholder image. Minimum value: 5. Maximum value: 10. Default value: 5 + */ +- (void)setVideoMuteImage:(TXImage *)image fps:(NSInteger)fps; + +/** + * 4.7 Subscribe to remote user's video stream and bind video rendering control + * + * Calling this API allows the SDK to pull the video stream of the specified `userId` and render it to the rendering control specified by the `view` parameter. 
You can set the display mode of the video image through {@link setRemoteRenderParams}. + * - If you already know the `userId` of a user who has a video stream in the room, you can directly call `startRemoteView` to subscribe to the user's video image. + * - If you don't know which users in the room are publishing video streams, you can wait for the notification from {@link onUserVideoAvailable} after `enterRoom`. + * + * Calling this API only starts pulling the video stream, and the image needs to be loaded and buffered at this time. After the buffering is completed, you will receive a notification from {@link onFirstVideoFrame}. + * @param userId ID of the specified remote user + * @param streamType Video stream type of the `userId` specified for watching: + * - HD big image: {@link TRTCVideoStreamTypeBig} + * - Smooth small image: {@link TRTCVideoStreamTypeSmall} (the remote user should enable dual-channel encoding through {@link enableEncSmallVideoStream} for this parameter to take effect) + * - Substream image (usually used for screen sharing): {@link TRTCVideoStreamTypeSub} + * + * @param view Rendering control that carries the video image + * @note The following requires your attention: + * 1. The SDK supports watching the big image and substream image or small image and substream image of a `userId` at the same time, but does not support watching the big image and small image at the same time. + * 2. Only when the specified `userId` enables dual-channel encoding through {@link enableEncSmallVideoStream} can the user's small image be viewed. + * 3. If the small image of the specified `userId` does not exist, the SDK will switch to the big image of the user by default. + */ +- (void)startRemoteView:(NSString *)userId streamType:(TRTCVideoStreamType)streamType view:(TXView *)view; + +/** + * 4.8 Update remote user's video rendering control + * + * This API can be used to update the rendering control of the remote video image. 
It is often used in interactive scenarios where the display area needs to be switched. + * @param view Control that carries the video image + * @param streamType Type of the stream for which to set the preview window (only {@link TRTCVideoStreamTypeBig} and {@link TRTCVideoStreamTypeSub} are supported) + * @param userId ID of the specified remote user + */ +- (void)updateRemoteView:(TXView *)view streamType:(TRTCVideoStreamType)streamType forUser:(NSString *)userId; + +/** + * 4.9 Stop subscribing to remote user's video stream and release rendering control + * + * Calling this API will cause the SDK to stop receiving the user's video stream and release the decoding and rendering resources for the stream. + * @param userId ID of the specified remote user + * @param streamType Video stream type of the `userId` specified for watching: + * - HD big image: {@link TRTCVideoStreamTypeBig} + * - Smooth small image: {@link TRTCVideoStreamTypeSmall} + * - Substream image (usually used for screen sharing): {@link TRTCVideoStreamTypeSub} + */ +- (void)stopRemoteView:(NSString *)userId streamType:(TRTCVideoStreamType)streamType; + +/** + * 4.10 Stop subscribing to all remote users' video streams and release all rendering resources + * + * Calling this API will cause the SDK to stop receiving all remote video streams and release all decoding and rendering resources. + * @note If a substream image (screen sharing) is being displayed, it will also be stopped. + */ +- (void)stopAllRemoteView; + +/** + * 4.11 Pause/Resume subscribing to remote user's video stream + * + * This API only pauses/resumes receiving the specified user's video stream but does not release displaying resources; therefore, the video image will freeze at the last frame before it is called. + * @param userId ID of the specified remote user + * @param streamType Specify for which video stream to pause (or resume). 
Only {@link TRTCVideoStreamTypeBig} and {@link TRTCVideoStreamTypeSub} are supported + * @param mute Whether to pause receiving + * @note This API can be called before room entry (enterRoom), and the pause status will be reset after room exit (exitRoom). + */ +- (void)muteRemoteVideoStream:(NSString *)userId streamType:(TRTCVideoStreamType)streamType mute:(BOOL)mute; + +/** + * 4.12 Pause/Resume subscribing to all remote users' video streams + * + * This API only pauses/resumes receiving all users' video streams but does not release displaying resources; therefore, the video image will freeze at the last frame before it is called. + * @param mute Whether to pause receiving + * @note This API can be called before room entry (enterRoom), and the pause status will be reset after room exit (exitRoom). + */ +- (void)muteAllRemoteVideoStreams:(BOOL)mute; + +/** + * 4.13 Set the encoding parameters of video encoder + * + * This setting can determine the quality of image viewed by remote users, which is also the image quality of on-cloud recording files. + * @param param It is used to set relevant parameters for the video encoder. For more information, please see {@link TRTCVideoEncParam}. + */ +- (void)setVideoEncoderParam:(TRTCVideoEncParam *)param; + +/** + * 4.14 Set network quality control parameters + * + * This setting determines the quality control policy in a poor network environment, such as "image quality preferred" or "smoothness preferred". + * @param param It is used to set relevant parameters for network quality control. For details, please refer to {@link TRTCNetworkQosParam}. + */ +- (void)setNetworkQosParam:(TRTCNetworkQosParam *)param; + +/** + * 4.15 Set the rendering parameters of local video image + * + * The parameters that can be set include video image rotation angle, fill mode, and mirror mode. + * @param params Video image rendering parameters. For more information, please see {@link TRTCRenderParams}. 
+ */ +- (void)setLocalRenderParams:(TRTCRenderParams *)params; + +/** + * 4.16 Set the rendering mode of remote video image + * + * The parameters that can be set include video image rotation angle, fill mode, and mirror mode. + * @param userId ID of the specified remote user + * @param streamType It can be set to the primary stream image (TRTCVideoStreamTypeBig) or substream image (TRTCVideoStreamTypeSub). + * @param params Video image rendering parameters. For more information, please see {@link TRTCRenderParams}. + */ +- (void)setRemoteRenderParams:(NSString *)userId streamType:(TRTCVideoStreamType)streamType params:(TRTCRenderParams *)params; + +/** + * 4.17 Set the direction of image output by video encoder + * + * This setting does not affect the preview direction of the local video image, but affects the direction of the image viewed by other users in the room (and on-cloud recording files). + * When a phone or tablet is rotated upside down, as the capturing direction of the camera does not change, the video image viewed by other users in the room will become upside-down. + * In this case, you can call this API to rotate the image encoded by the SDK 180 degrees, so that other users in the room can view the image in the normal direction. + * If you want to achieve the aforementioned user-friendly interactive experience, we recommend you directly call {@link setGSensorMode} to implement smarter direction adaptation, with no need to call this API manually. + * @param rotation Currently, rotation angles of 0 and 180 degrees are supported. Default value: TRTCVideoRotation_0 (no rotation) + */ +- (void)setVideoEncoderRotation:(TRTCVideoRotation)rotation; + +/** + * 4.18 Set the mirror mode of image output by encoder + * + * This setting does not affect the mirror mode of the local video image, but affects the mirror mode of the image viewed by other users in the room (and on-cloud recording files). + * @param mirror Whether to enable remote mirror mode. 
YES: yes; NO: no. Default value: NO + */ +- (void)setVideoEncoderMirror:(BOOL)mirror; + +/** + * 4.19 Set the adaptation mode of G-sensor + * + * You can achieve the following user-friendly interactive experience through this API: + * When a phone or tablet is rotated upside down, as the capturing direction of the camera does not change, the video image viewed by other users in the room will become upside-down. + * In this case, you can call this API to let the SDK automatically adjust the rotation direction of the local video image and the image output by the encoder according to the direction of the device's gyroscope, so that remote viewers can see the + * image in the normal direction. + * @param mode G-sensor mode. For more information, please see {@link TRTCGSensorMode}. Default value: TRTCGSensorMode_UIAutoLayout + */ +- (void)setGSensorMode:(TRTCGSensorMode)mode; + +/** + * 4.20 Enable dual-channel encoding mode with big and small images + * + * In this mode, the current user's encoder will output two channels of video streams, i.e., **HD big image** and **Smooth small image**, at the same time (only one channel of audio stream will be output though). + * In this way, other users in the room can choose to subscribe to the **HD big image** or **Smooth small image** according to their own network conditions or screen size. + * @note Dual-channel encoding will consume more CPU resources and network bandwidth; therefore, this feature can be enabled on macOS, Windows, or high-spec tablets, but is not recommended for phones. + * @param enable Whether to enable small image encoding. 
Default value: NO + * @param smallVideoEncParam Video parameters of small image stream + * @return 0: success; -1: the current big image has been set to a lower quality, and it is not necessary to enable dual-channel encoding + */ +- (int)enableEncSmallVideoStream:(BOOL)enable withQuality:(TRTCVideoEncParam *)smallVideoEncParam; + +/** + * 4.21 Switch the big/small image of specified remote user + * + * After an anchor in a room enables dual-channel encoding, the video image that other users in the room subscribe to through {@link startRemoteView} will be **HD big image** by default. + * You can use this API to select whether the image subscribed to is the big image or small image. The API can take effect before or after {@link startRemoteView} is called. + * @note To implement this feature, the target user must have enabled the dual-channel encoding mode through {@link enableEncSmallVideoStream}; otherwise, this API will not work. + * @param userId ID of the specified remote user + * @param streamType Video stream type, i.e., big image or small image. Default value: big image + */ +- (void)setRemoteVideoStreamType:(NSString *)userId type:(TRTCVideoStreamType)streamType; + +/** + * 4.22 Screencapture video + * + * You can use this API to screencapture the local video image or the primary stream image and substream (screen sharing) image of a remote user. + * @param userId User ID. A null value indicates to screencapture the local video. 
+ * @param streamType Video stream type, which can be the primary stream image ({@link TRTCVideoStreamTypeBig}, generally for camera) or substream image ({@link TRTCVideoStreamTypeSub}, generally for screen sharing) + * @param sourceType Video image source, which can be the video stream image ({@link TRTCSnapshotSourceTypeStream}, generally in higher definition) or the video rendering image ({@link TRTCSnapshotSourceTypeView}) + * @note On Windows, only video image from the {@link TRTCSnapshotSourceTypeStream} source can be screencaptured currently. + */ +- (void)snapshotVideo:(NSString *)userId type:(TRTCVideoStreamType)streamType sourceType:(TRTCSnapshotSourceType)sourceType completionBlock:(void (^)(TXImage *image))completionBlock; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Audio APIs +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Audio APIs +/// @{ + +/** + * 5.1 Enable local audio capturing and publishing + * + * The SDK does not enable the mic by default. When a user wants to publish the local audio, the user needs to call this API to enable mic capturing and encode and publish the audio to the current room. + * After local audio capturing and publishing is enabled, other users in the room will receive the {@link onUserAudioAvailable}(userId, YES) notification. + * @param quality Sound quality + * - {@link TRTCAudioQualitySpeech} - Smooth: sample rate: 16 kHz; mono channel; audio bitrate: 16 Kbps. This is suitable for audio call scenarios, such as online meeting and audio call. + * - {@link TRTCAudioQualityDefault} - Default: sample rate: 48 kHz; mono channel; audio bitrate: 50 Kbps. This is the default sound quality of the SDK and recommended if there are no special requirements. + * - {@link TRTCAudioQualityMusic} - HD: sample rate: 48 kHz; dual channel + full band; audio bitrate: 128 Kbps. 
This is suitable for scenarios where Hi-Fi music transfer is required, such as online karaoke and music live streaming. + * @note This API will check the mic permission. If the current application does not have permission to use the mic, the SDK will automatically ask the user to grant the mic permission. + */ +- (void)startLocalAudio:(TRTCAudioQuality)quality; + +/** + * 5.2 Stop local audio capturing and publishing + * + * After local audio capturing and publishing is stopped, other users in the room will receive the {@link onUserAudioAvailable}(userId, NO) notification. + */ +- (void)stopLocalAudio; + +/** + * 5.3 Pause/Resume publishing local audio stream + * + * After local audio publishing is paused, other users in the room will receive the {@link onUserAudioAvailable}(userId, NO) notification. + * After local audio publishing is resumed, other users in the room will receive the {@link onUserAudioAvailable}(userId, YES) notification. + * Different from {@link stopLocalAudio}, `muteLocalAudio(YES)` does not release the mic permission; instead, it continues to send mute packets with extremely low bitrate. + * This is very suitable for scenarios that require on-cloud recording, as video file formats such as MP4 have a high requirement for audio continuity, while an MP4 recording file cannot be played back smoothly if {@link stopLocalAudio} is used. + * Therefore, `muteLocalAudio` instead of `stopLocalAudio` is recommended in scenarios where the requirement for recording file quality is high. + * @param mute YES: mute; NO: unmute + */ +- (void)muteLocalAudio:(BOOL)mute; + +/** + * 5.4 Pause/Resume playing back remote audio stream + * + * When you mute the remote audio of a specified user, the SDK will stop playing back the user's audio and pulling the user's audio data. 
+ * @param userId ID of the specified remote user + * @param mute YES: mute; NO: unmute + * @note This API works when called either before or after room entry (enterRoom), and the mute status will be reset to `NO` after room exit (exitRoom). + */ +- (void)muteRemoteAudio:(NSString *)userId mute:(BOOL)mute; + +/** + * 5.5 Pause/Resume playing back all remote users' audio streams + * + * When you mute the audio of all remote users, the SDK will stop playing back all their audio streams and pulling all their audio data. + * @param mute YES: mute; NO: unmute + * @note This API works when called either before or after room entry (enterRoom), and the mute status will be reset to `NO` after room exit (exitRoom). + */ +- (void)muteAllRemoteAudio:(BOOL)mute; + +/** + * 5.6 Set audio route + * + * Setting "audio route" is to determine whether the sound is played back from the speaker or receiver of a mobile device; therefore, this API is only applicable to mobile devices such as phones. + * Generally, a phone has two speakers: one is the receiver at the top, and the other is the stereo speaker at the bottom. + * If audio route is set to the receiver, the volume is relatively low, and the sound can be heard clearly only when the phone is put near the ear. This mode has a high level of privacy and is suitable for answering calls. + * If audio route is set to the speaker, the volume is relatively high, so there is no need to put the phone near the ear. Therefore, this mode can implement the "hands-free" feature. + * @param route Audio route, i.e., whether the audio is output by speaker or receiver. Default value: TRTCAudioModeSpeakerphone + */ +- (void)setAudioRoute:(TRTCAudioRoute)route; + +/** + * 5.7 Set the audio playback volume of remote user + * + * You can mute the audio of a remote user through `setRemoteAudioVolume(userId, 0)`. + * @param userId ID of the specified remote user + * @param volume Volume. 100 is the original volume. Value range: [0,150]. 
Default value: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. + */ +- (void)setRemoteAudioVolume:(NSString *)userId volume:(int)volume; + +/** + * 5.8 Set the capturing volume of local audio + * + * @param volume Volume. 100 is the original volume. Value range: [0,150]. Default value: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. + */ +- (void)setAudioCaptureVolume:(NSInteger)volume; + +/** + * 5.9 Get the capturing volume of local audio + */ +- (NSInteger)getAudioCaptureVolume; + +/** + * 5.10 Set the playback volume of remote audio + * + * This API controls the volume of the sound ultimately delivered by the SDK to the system for playback. It affects the volume of the recorded local audio file but not the volume of in-ear monitoring. + * @param volume Volume. 100 is the original volume. Value range: [0,150]. Default value: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. + */ +- (void)setAudioPlayoutVolume:(NSInteger)volume; + +/** + * 5.11 Get the playback volume of remote audio + */ +- (NSInteger)getAudioPlayoutVolume; + +/** + * 5.12 Enable volume reminder + * + * After this feature is enabled, the SDK will return the remote audio volume in the {@link onUserVoiceVolume} callback of {@link TRTCCloudDelegate}. + * @note To enable this feature, call this API before calling `startLocalAudio`. + * @param interval Set the interval in ms for triggering the `onUserVoiceVolume` callback. The minimum interval is 100 ms. If the value is smaller than or equal to 0, the callback will be disabled. We recommend you set this parameter to 300 ms. 
+ */ +- (void)enableAudioVolumeEvaluation:(NSUInteger)interval; + +/** + * 5.13 Start audio recording + * + * After you call this API, the SDK will selectively record local and remote audio streams (such as local audio, remote audio, background music, and sound effects) into a local file. + * This API works when called either before or after room entry. If a recording task has not been stopped through `stopAudioRecording` before room exit, it will be automatically stopped after room exit. + * @param param Recording parameter. For more information, please see {@link TRTCAudioRecordingParams} + * @return 0: success; -1: audio recording has been started; -2: failed to create file or directory; -3: the audio format of the specified file extension is not supported + */ +- (int)startAudioRecording:(TRTCAudioRecordingParams *)param; + +/** + * 5.14 Stop audio recording + * + * If a recording task has not been stopped through this API before room exit, it will be automatically stopped after room exit. + */ +- (void)stopAudioRecording; + +/** + * 5.15 Start local media recording + * + * This API records the audio/video content during live streaming into a local file. + * @param params Recording parameter. For more information, please see {@link TRTCLocalRecordingParams} + */ +- (void)startLocalRecording:(TRTCLocalRecordingParams *)params; + +/** + * 5.16 Stop local media recording + * + * If a recording task has not been stopped through this API before room exit, it will be automatically stopped after room exit. + */ +- (void)stopLocalRecording; + +/** + * 5.18 Set the parallel strategy of remote audio streams + * + * For room with many speakers. + * @param params Audio parallel parameter. 
For more information, please see {@link TRTCAudioParallelParams} + */ +- (void)setRemoteAudioParallelParams:(TRTCAudioParallelParams *)params; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Device management APIs +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Device management APIs +/// @{ + +/** + * 6.1 Get device management class (TXDeviceManager) + */ +- (TXDeviceManager *)getDeviceManager; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Beauty filter and watermark APIs +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Beauty filter and watermark APIs +/// @{ + +/** + * 7.1 Get beauty filter management class (TXBeautyManager) + * + * You can use the following features with beauty filter management: + * - Set beauty effects such as "skin smoothing", "brightening", and "rosy skin". + * - Set face adjustment effects such as "eye enlarging", "face slimming", "chin slimming", "chin lengthening/shortening", "face shortening", "nose narrowing", "eye brightening", "teeth whitening", "eye bag removal", "wrinkle removal", and "smile + * line removal". + * - Set face adjustment effects such as "hairline", "eye distance", "eye corners", "mouth shape", "nose wing", "nose position", "lip thickness", and "face shape". + * - Set makeup effects such as "eye shadow" and "blush". + * - Set animated effects such as animated sticker and facial pendant. + */ +- (TXBeautyManager *)getBeautyManager; + +/** + * 7.2 Add watermark + * + * The watermark position is determined by the `rect` parameter, which is a quadruple in the format of (x, y, width, height). + * - x: X coordinate of watermark, which is a floating-point number between 0 and 1. + * - y: Y coordinate of watermark, which is a floating-point number between 0 and 1. 
+ * - width: width of watermark, which is a floating-point number between 0 and 1. + * - height: it does not need to be set. The SDK will automatically calculate it according to the watermark image's aspect ratio. + * + * Sample parameter: + * If the encoding resolution of the current video is 540x960, and the `rect` parameter is set to (0.1, 0.1, 0.2, 0.0), + * then the coordinates of the top-left point of the watermark will be (540 * 0.1, 960 * 0.1), i.e., (54, 96), the watermark width will be 540 * 0.2 = 108 px, and the watermark height will be calculated automatically by the SDK based on the watermark + * image's aspect ratio. + * + * @param image Watermark image, **which must be a PNG image with transparent background** + * @param streamType Specify for which image to set the watermark. For more information, please see {@link TRTCVideoStreamType}. + * @param rect Unified coordinates of the watermark relative to the encoded resolution. Value range of `x`, `y`, `width`, and `height`: 0–1. + * @note If you want to set watermarks for both the primary image (generally for the camera) and the substream image (generally for screen sharing), you need to call this API twice with `streamType` set to different values. + */ +- (void)setWatermark:(TXImage *)image streamType:(TRTCVideoStreamType)streamType rect:(CGRect)rect; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Background music and sound effect APIs +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Background music and sound effect APIs +/// @{ + +/** + * 8.1 Get sound effect management class (TXAudioEffectManager) + * + * `TXAudioEffectManager` is a sound effect management API, through which you can implement the following features: + * - Background music: both online music and local music can be played back with various features such as speed adjustment, pitch adjustment, original voice, accompaniment, and loop. 
+ * - In-ear monitoring: the sound captured by the mic is played back in the headphones in real time, which is generally used for music live streaming. + * - Reverb effect: karaoke room, small room, big hall, deep, resonant, and other effects. + * - Voice changing effect: young girl, middle-aged man, heavy metal, and other effects. + * - Short sound effect: short sound effect files such as applause and laughter are supported (for files less than 10 seconds in length, please set the `isShortFile` parameter to `YES`). + */ +- (TXAudioEffectManager *)getAudioEffectManager; + +/** + * 8.2 Enable system audio capturing (for desktop systems only) + * + * This API captures audio data from the sound card of a macOS computer and mixes it into the current audio data stream of the SDK, so that other users in the room can also hear the sound played back on the current macOS system. + * In use cases such as video teaching or music live streaming, the teacher can use this feature to let the SDK capture the sound in the video played back by the teacher, so that students in the same room can also hear the sound in the video. + * @note + * 1. This feature needs to install a virtual audio device plugin on the user's macOS system. After the installation is completed, the SDK will capture sound from the installed virtual device. + * 2. The SDK will automatically download the appropriate plugin from the internet for installation, but the download may be slow. If you want to speed up this process, you can package the virtual audio plugin file into the `Resources` directory of + * your app bundle. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)startSystemAudioLoopback; +#endif + +/** + * 8.3 Stop system audio capturing (for desktop systems only) + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)stopSystemAudioLoopback; +#endif + +/** + * 8.4 Set the volume of system audio capturing + * + * @param volume Set volume. Value range: [0, 150]. 
Default value: 100 + */ +- (void)setSystemAudioLoopbackVolume:(uint32_t)volume; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Screen sharing APIs +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Screen sharing APIs +/// @{ + +/** + * 9.1 Start in-app screen sharing (for iOS 13.0 and above only) + * + * This API captures the real-time screen content of the current application and shares it with other users in the same room. It is applicable to iOS 13.0 and above. + * If you want to capture the screen content of the entire iOS system (instead of the current application), we recommend you use {@link startScreenCaptureByReplaykit}. + * Video encoding parameters recommended for screen sharing on iPhone ({@link TRTCVideoEncParam}): + * - Resolution (videoResolution): 1280x720 + * - Frame rate (videoFps): 10 fps + * - Bitrate (videoBitrate): 1600 Kbps + * - Resolution adaption (enableAdjustRes): NO + * + * @param streamType Channel used for screen sharing, which can be the primary stream ({@link TRTCVideoStreamTypeBig}) or substream ({@link TRTCVideoStreamTypeSub}). + * @param encParams Video encoding parameters for screen sharing. We recommend you use the above configuration. + * If you set `encParams` to `nil`, the SDK will use the video encoding parameters you set before calling the `startScreenCapture` API. + */ +#if TARGET_OS_IPHONE +- (void)startScreenCaptureInApp:(TRTCVideoStreamType)streamType encParam:(TRTCVideoEncParam *)encParams API_AVAILABLE(ios(13.0)); +#endif + +/** + * 9.1 Start system-level screen sharing (for iOS 11.0 and above only) + * + * This API supports capturing the screen of the entire iOS system, which can implement system-wide screen sharing similar to VooV Meeting. + * However, the integration steps are slightly more complicated than those of {@link startScreenCaptureInApp}. 
You need to implement a ReplayKit extension module for your application. + * For more information, please see [iOS](https://cloud.tencent.com/document/product/647/45750) + * Video encoding parameters recommended for screen sharing on iPhone ({@link TRTCVideoEncParam}): + * - Resolution (videoResolution): 1280x720 + * - Frame rate (videoFps): 10 fps + * - Bitrate (videoBitrate): 1600 Kbps + * - Resolution adaption (enableAdjustRes): NO + * + * @param streamType Channel used for screen sharing, which can be the primary stream ({@link TRTCVideoStreamTypeBig}) or substream ({@link TRTCVideoStreamTypeSub}). + * @param encParams Video encoding parameters for screen sharing. We recommend you use the above configuration. + * If you set `encParams` to `nil`, the SDK will use the video encoding parameters you set before calling the `startScreenCapture` API. + * @param appGroup Specify the `Application Group Identifier` shared by your application and the screen sharing process. You can specify this parameter as `nil`, but we recommend you set it as instructed in the documentation for higher reliability. + */ +#if TARGET_OS_IPHONE +- (void)startScreenCaptureByReplaykit:(TRTCVideoStreamType)streamType encParam:(TRTCVideoEncParam *)encParams appGroup:(NSString *)appGroup API_AVAILABLE(ios(11.0)); +#endif + +/** + * 9.1 Start desktop screen sharing (for desktop systems only) + * + * This API can capture the screen content of the entire macOS system or a specified application and share it with other users in the same room. + * @param view Parent control of the rendering control, which can be set to a null value, indicating not to display the preview of the shared screen. + * @param streamType Channel used for screen sharing, which can be the primary stream ({@link TRTCVideoStreamTypeBig}) or substream ({@link TRTCVideoStreamTypeSub}). 
+ * @param encParam Image encoding parameters used for screen sharing, which can be set to `nil`, indicating to let the SDK choose the optimal encoding parameters (such as resolution and bitrate). + * + * @note + * 1. A user can publish at most one primary stream ({@link TRTCVideoStreamTypeBig}) and one substream ({@link TRTCVideoStreamTypeSub}) at the same time. + * 2. By default, screen sharing uses the substream image. If you want to use the primary stream for screen sharing, you need to stop camera capturing (through {@link stopLocalPreview}) in advance to avoid conflicts. + * 3. Only one user can use the substream for screen sharing in the same room at any time; that is, only one user is allowed to enable the substream in the same room at any time. + * 4. When there is already a user in the room using the substream for screen sharing, calling this API will return the `onError(ERR_SERVER_CENTER_ANOTHER_USER_PUSH_SUB_VIDEO)` callback from {@link TRTCCloudDelegate}. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)startScreenCapture:(NSView *)view streamType:(TRTCVideoStreamType)streamType encParam:(TRTCVideoEncParam *)encParam; +#endif + +/** + * 9.2 Stop screen sharing + */ +- (int)stopScreenCapture API_AVAILABLE(ios(11.0)); + +/** + * 9.3 Pause screen sharing + */ +- (int)pauseScreenCapture API_AVAILABLE(ios(11.0)); + +/** + * 9.4 Resume screen sharing + */ +- (int)resumeScreenCapture API_AVAILABLE(ios(11.0)); + +/** + * 9.5 Enumerate shareable screens and windows (for macOS only) + * + * When you integrate the screen sharing feature of a desktop system, you generally need to display a UI for selecting the sharing target, so that users can use the UI to choose whether to share the entire screen or a certain window. + * Through this API, you can query the IDs, names, and thumbnails of sharable windows on the current system. We provide a default UI implementation in the demo for your reference. 
+ * @note The returned list contains the screen and the application windows. The screen is the first element in the list. If the user has multiple displays, then each display is a sharing target. + * @param thumbnailSize Specify the thumbnail size of the window to be obtained. The thumbnail can be drawn on the window selection UI. + * @param iconSize Specify the icon size of the window to be obtained. + * @return List of windows (including the screen) + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (NSArray *)getScreenCaptureSourcesWithThumbnailSize:(CGSize)thumbnailSize iconSize:(CGSize)iconSize; +#endif + +/** + * 9.6 Select the screen or window to share (for macOS only) + * + * After you get the sharable screen and windows through `getScreenCaptureSources`, you can call this API to select the target screen or window you want to share. + * During the screen sharing process, you can also call this API at any time to switch the sharing target. + * @param screenSource Specify sharing source + * @param rect Specify the area to be captured (set this parameter to `CGRectZero`: when the sharing target is a window, the entire window will be shared, and when the sharing target is the desktop, the entire desktop will be shared) + * @param capturesCursor Whether to capture mouse cursor + * @param highlight Whether to highlight the window being shared + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)selectScreenCaptureTarget:(TRTCScreenCaptureSourceInfo *)screenSource rect:(CGRect)rect capturesCursor:(BOOL)capturesCursor highlight:(BOOL)highlight; +#endif + +/** + * 9.7 Set the video encoding parameters of screen sharing (i.e., substream) (for desktop and mobile systems) + * + * This API can set the image quality of screen sharing (i.e., the substream) viewed by remote users, which is also the image quality of screen sharing in on-cloud recording files. 
+ * Please note the differences between the following two APIs: + * - {@link setVideoEncoderParam} is used to set the video encoding parameters of the primary stream image ({@link TRTCVideoStreamTypeBig}, generally for camera). + * - {@link setSubStreamEncoderParam} is used to set the video encoding parameters of the substream image ({@link TRTCVideoStreamTypeSub}, generally for screen sharing). + * + * @param param Substream encoding parameters. For more information, please see {@link TRTCVideoEncParam}. + * @note Even if you use the primary stream to transfer screen sharing data (set `type=TRTCVideoStreamTypeBig` when calling `startScreenCapture`), you still need to call the {@link setSubStreamEncoderParam} API instead of the {@link + * setVideoEncoderParam} API to set the screen sharing encoding parameters. + */ +- (void)setSubStreamEncoderParam:(TRTCVideoEncParam *)param; + +/** + * 9.8 Set the audio mixing volume of screen sharing (for desktop systems only) + * + * The greater the value, the larger the ratio of the screen sharing volume to the mic volume. We recommend you not set a high value for this parameter as a high volume will cover the mic sound. + * @param volume Set audio mixing volume. Value range: 0–100 + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)setSubStreamMixVolume:(NSInteger)volume; +#endif + +/** + * 9.9 Add specified windows to the exclusion list of screen sharing (for desktop systems only) + * + * The excluded windows will not be shared. This feature is generally used to add a certain application's window to the exclusion list to avoid privacy issues. + * You can set the filtered windows before starting screen sharing or dynamically add the filtered windows during screen sharing. + * @param windowID Window not to be shared + * @note + * 1. 
This API takes effect only if the `type` in {@link TRTCScreenCaptureSourceInfo} is specified as {@link TRTCScreenCaptureSourceTypeScreen}; that is, the feature of excluding specified windows works only when the entire screen is shared. + * 2. The windows added to the exclusion list through this API will be automatically cleared by the SDK after room exit. + * 3. On macOS, please pass in the window ID (CGWindowID), which can be obtained through the `sourceId` member in {@link TRTCScreenCaptureSourceInfo}. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)addExcludedShareWindow:(NSInteger)windowID; +#endif + +/** + * 9.10 Remove specified windows from the exclusion list of screen sharing (for desktop systems only) + * + * @param windowID Window to be removed from the exclusion list + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)removeExcludedShareWindow:(NSInteger)windowID; +#endif + +/** + * 9.11 Remove all windows from the exclusion list of screen sharing (for desktop systems only) + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)removeAllExcludedShareWindows; +#endif + +/** + * 9.12 Add specified windows to the inclusion list of screen sharing (for desktop systems only) + * + * This API takes effect only if the `type` in {@link TRTCScreenCaptureSourceInfo} is specified as {@link TRTCScreenCaptureSourceTypeWindow}; that is, the feature of additionally including specified windows works only when a window is shared. + * You can call it before or after {@link startScreenCapture}. + * @param windowID Window to be shared (which is a window handle `HWND` on Windows) + * @note The windows added to the inclusion list by this method will be automatically cleared by the SDK after room exit. 
+ */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)addIncludedShareWindow:(NSInteger)windowID; +#endif + +/** + * 9.13 Remove specified windows from the inclusion list of screen sharing (for desktop systems only) + * + * This API takes effect only if the `type` in {@link TRTCScreenCaptureSourceInfo} is specified as {@link TRTCScreenCaptureSourceTypeWindow}. + * That is, the feature of additionally including specified windows works only when a window is shared. + * @param windowID Window to be shared (window ID on macOS or HWND on Windows) + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)removeIncludedShareWindow:(NSInteger)windowID; +#endif + +/** + * 9.14 Remove all windows from the inclusion list of screen sharing (for desktop systems only) + * + * This API takes effect only if the `type` in {@link TRTCScreenCaptureSourceInfo} is specified as {@link TRTCScreenCaptureSourceTypeWindow}. + * That is, the feature of additionally including specified windows works only when a window is shared. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)removeAllIncludedShareWindows; +#endif + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Custom capturing and rendering APIs +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Custom capturing and rendering APIs +/// @{ + +/** + * 10.1 Enable/Disable custom video capturing mode + * + * After this mode is enabled, the SDK will not run the original video capturing process (i.e., stopping camera data capturing and beauty filter operations) and will retain only the video encoding and sending capabilities. + * You need to use {@link sendCustomVideoData} to continuously insert the captured video image into the SDK. + * @param streamType Specify video stream type ({@link TRTCVideoStreamTypeBig}: HD big image; {@link TRTCVideoStreamTypeSub}: substream image). + * @param enable Whether to enable. 
Default value: NO + */ +- (void)enableCustomVideoCapture:(TRTCVideoStreamType)streamType enable:(BOOL)enable; + +/** + * 10.2 Deliver captured video frames to SDK + * + * You can use this API to deliver video frames you capture to the SDK, and the SDK will encode and transfer them through its own network module. + * We recommend you enter the following information for the {@link TRTCVideoFrame} parameter (other fields can be left empty): + * - pixelFormat: {@link TRTCVideoPixelFormat_NV12} is recommended. + * - bufferType: {@link TRTCVideoBufferType_PixelBuffer} is recommended. + * - pixelBuffer: common video data format on iOS/macOS. + * - data: raw video data format, which is used if `bufferType` is `NSData`. + * - timestamp (ms): Set it to the timestamp when video frames are captured, which you can obtain by calling {@link generateCustomPTS} after getting a video frame. + * - width: video image length, which needs to be set if `bufferType` is `NSData`. + * - height: video image width, which needs to be set if `bufferType` is `NSData`. + * + * For more information, please see [Custom Capturing and Rendering](https://cloud.tencent.com/document/product/647/34066). + * @param streamType Specify video stream type ({@link TRTCVideoStreamTypeBig}: HD big image; {@link TRTCVideoStreamTypeSub}: substream image). + * @param frame Video data, which can be in PixelBuffer NV12, BGRA, or I420 format. + * @note + * 1. We recommend you call the {@link generateCustomPTS} API to get the `timestamp` value of a video frame immediately after capturing it, so as to achieve the best audio/video sync effect. + * 2. The video frame rate eventually encoded by the SDK is not determined by the frequency at which you call this API, but by the FPS you set in {@link setVideoEncoderParam}. + * 3. Please try to keep the calling interval of this API even; otherwise, problems will occur, such as unstable output frame rate of the encoder or out-of-sync audio/video. 
+ */ +- (void)sendCustomVideoData:(TRTCVideoStreamType)streamType frame:(TRTCVideoFrame *)frame; + +/** + * 10.3 Enable custom audio capturing mode + * + * After this mode is enabled, the SDK will not run the original audio capturing process (i.e., stopping mic data capturing) and will retain only the audio encoding and sending capabilities. + * You need to use {@link sendCustomAudioData} to continuously insert the captured audio data into the SDK. + * @param enable Whether to enable. Default value: NO + * @note As acoustic echo cancellation (AEC) requires strict control over the audio capturing and playback time, after custom audio capturing is enabled, AEC may fail. + */ +- (void)enableCustomAudioCapture:(BOOL)enable; + +/** + * 10.4 Deliver captured audio data to SDK + * + * We recommend you enter the following information for the {@link TRTCAudioFrame} parameter (other fields can be left empty): + * - audioFormat: audio data format, which can only be `TRTCAudioFrameFormatPCM`. + * - data: audio frame buffer. Audio frame data must be in PCM format, and it supports a frame length of 5–100 ms (20 ms is recommended). Length calculation method: **for example, if the sample rate is 48000, then the frame length for mono channel + * will be `48000 * 0.02s * 1 * 16 bit = 15360 bit = 1920 bytes`.** + * - sampleRate: sample rate. Valid values: 16000, 24000, 32000, 44100, 48000. + * - channel: number of channels (if stereo is used, data is interwoven). Valid values: 1: mono channel; 2: dual channel. + * - timestamp (ms): Set it to the timestamp when audio frames are captured, which you can obtain by calling {@link generateCustomPTS} after getting an audio frame. + * + * For more information, please see [Custom Capturing and Rendering](https://cloud.tencent.com/document/product/647/34066). + * @param frame Audio data + * @note Please call this API accurately at intervals of the frame length; otherwise, sound lag may occur due to uneven data delivery intervals. 
+ */ +- (void)sendCustomAudioData:(TRTCAudioFrame *)frame; + +/** + * 10.5 Enable/Disable custom audio track + * + * After this feature is enabled, you can mix a custom audio track into the SDK through this API. With two boolean parameters, you can control whether to play back this track remotely or locally. + * @param enablePublish Whether the mixed audio track should be played back remotely. Default value: NO + * @param enablePlayout Whether the mixed audio track should be played back locally. Default value: NO + * @note If you specify both `enablePublish` and `enablePlayout` as `NO`, the custom audio track will be completely closed. + */ +- (void)enableMixExternalAudioFrame:(BOOL)enablePublish playout:(BOOL)enablePlayout; + +/** + * 10.6 Mix custom audio track into SDK + * + * Before you use this API to mix custom PCM audio into the SDK, you need to first enable custom audio tracks through {@link enableMixExternalAudioFrame}. + * You are expected to feed audio data into the SDK at an even pace, but we understand that it can be challenging to call an API at absolutely regular intervals. + * Given this, we have provided a buffer pool in the SDK, which can cache the audio data you pass in to reduce the fluctuations in intervals between API calls. + * The value returned by this API indicates the size (ms) of the buffer pool. For example, if `50` is returned, it indicates that the buffer pool has 50 ms of audio data. As long as you call this API again within 50 ms, the SDK can make sure that + * continuous audio data is mixed. If the value returned is `100` or greater, you can wait after an audio frame is played to call the API again. If the value returned is smaller than `100`, then there isn’t enough data in the buffer pool, and you + * should feed more audio data into the SDK until the data in the buffer pool is above the safety level. Fill the fields in {@link TRTCAudioFrame} as follows (other fields are not required). + * - `data`: audio frame buffer. 
Audio frames must be in PCM format. Each frame can be 5-100 ms (20 ms is recommended) in duration. Assume that the sample rate is 48000, and sound channels mono-channel. Then the **frame size would be 48000 x 0.02s x + * 1 x 16 bit = 15360 bit = 1920 bytes**. + * - `sampleRate`: sample rate. Valid values: 16000, 24000, 32000, 44100, 48000 + * - `channel`: number of sound channels (if dual-channel is used, data is interleaved). Valid values: `1` (mono-channel); `2` (dual channel) + * - `timestamp`: timestamp (ms). Set it to the timestamp when audio frames are captured, which you can obtain by calling {@link generateCustomPTS} after getting an audio frame. + * + * @param frame Audio data + * @return If the value returned is `0` or greater, the value represents the current size of the buffer pool; if the value returned is smaller than `0`, it means that an error occurred. `-1` indicates that you didn’t call {@link + * enableMixExternalAudioFrame} to enable custom audio tracks. + */ +- (int)mixExternalAudioFrame:(TRTCAudioFrame *)frame; + +/** + * 10.7 Set the publish volume and playback volume of mixed custom audio track + * + * @param publishVolume set the publish volume,from 0 to 100, -1 means no change + * @param playoutVolume set the play volume,from 0 to 100, -1 means no change + */ +- (void)setMixExternalAudioVolume:(NSInteger)publishVolume playoutVolume:(NSInteger)playoutVolume; + +/** + * 10.8 Generate custom capturing timestamp + * + * This API is only suitable for the custom capturing mode and is used to solve the problem of out-of-sync audio/video caused by the inconsistency between the capturing time and delivery time of audio/video frames. + * When you call APIs such as {@link sendCustomVideoData} or {@link sendCustomAudioData} for custom video or audio capturing, please use this API as instructed below: + * 1. First, when a video or audio frame is captured, call this API to get the corresponding PTS timestamp. + * 2. 
Then, send the video or audio frame to the preprocessing module you use (such as a third-party beauty filter or sound effect component). + * 3. When you actually call {@link sendCustomVideoData} or {@link sendCustomAudioData} for delivery, assign the PTS timestamp recorded when the frame was captured to the `timestamp` field in {@link TRTCVideoFrame} or {@link TRTCAudioFrame}. + * + * @return Timestamp in ms + */ ++ (uint64_t)generateCustomPTS; + +/** + * 10.9 Set video data callback for third-party beauty filters + * + * After this callback is set, the SDK will call back the captured video frames through the `delegate` you set and use them for further processing by a third-party beauty filter component. Then, the SDK will encode and send the processed video + * frames. + * @param delegate Custom preprocessing callback. For more information, please see {@link TRTCVideoFrameDelegate} + * @param pixelFormat Specify the format of the pixel called back. Currently, only {@link TRTCVideoPixelFormat_Texture_2D} is supported + * @param bufferType Specify the format of the data called back. Currently, only {@link TRTCVideoBufferType_Texture} is supported + * @return 0: success; values smaller than 0: error + */ +- (int)setLocalVideoProcessDelegete:(id)delegate pixelFormat:(TRTCVideoPixelFormat)pixelFormat bufferType:(TRTCVideoBufferType)bufferType; + +/** + * 10.10 Set the callback of custom rendering for local video + * + * After this callback is set, the SDK will skip its own rendering process and call back the captured data. Therefore, you need to complete image rendering on your own. + * - `pixelFormat` specifies the format of the called back data, such as NV12, I420, and 32BGRA. + * - `bufferType` specifies the buffer type. `PixelBuffer` has the highest efficiency, while `NSData` makes the SDK perform a memory conversion internally, which will result in extra performance loss. 
+ * + * For more information, please see [Custom Capturing and Rendering](https://cloud.tencent.com/document/product/647/34066). + * @param delegate Callback for custom rendering + * @param pixelFormat Specify the format of the pixel called back + * @param bufferType PixelBuffer: this can be directly converted to `UIImage` by using `imageWithCVImageBuffer`; NSData: this is memory-mapped video data. + * @return 0: success; values smaller than 0: error + */ +- (int)setLocalVideoRenderDelegate:(id)delegate pixelFormat:(TRTCVideoPixelFormat)pixelFormat bufferType:(TRTCVideoBufferType)bufferType; + +/** + * 10.11 Set the callback of custom rendering for remote video + * + * After this callback is set, the SDK will skip its own rendering process and call back the captured data. Therefore, you need to complete image rendering on your own. + * - `pixelFormat` specifies the format of the called back data, such as NV12, I420, and 32BGRA. + * - `bufferType` specifies the buffer type. `PixelBuffer` has the highest efficiency, while `NSData` makes the SDK perform a memory conversion internally, which will result in extra performance loss. + * + * For more information, please see [Custom Capturing and Rendering](https://cloud.tencent.com/document/product/647/34066). + * @note Before this API is called, `startRemoteView(nil)` needs to be called to get the video stream of the remote user (`view` can be set to `nil` for this end); otherwise, there will be no data called back. + * @param userId ID of the specified remote user + * @param delegate Callback for custom rendering + * @param pixelFormat Specify the format of the pixel called back + * @param bufferType PixelBuffer: this can be directly converted to `UIImage` by using `imageWithCVImageBuffer`; NSData: this is memory-mapped video data. 
+ * @return 0: success; values smaller than 0: error + */ +- (int)setRemoteVideoRenderDelegate:(NSString *)userId delegate:(id)delegate pixelFormat:(TRTCVideoPixelFormat)pixelFormat bufferType:(TRTCVideoBufferType)bufferType; + +/** + * 10.12 Set custom audio data callback + * + * After this callback is set, the SDK will internally call back the audio data (in PCM format), including: + * - {@link onCapturedRawAudioFrame}: callback of the original audio data captured by the local mic + * - {@link onLocalProcessedAudioFrame}: callback of the audio data captured by the local mic and preprocessed by the audio module + * - {@link onRemoteUserAudioFrame}: audio data from each remote user before audio mixing + * - {@link onMixedPlayAudioFrame}: callback of the audio data that will be played back by the system after audio streams are mixed + * + * @note Setting the callback to null indicates to stop the custom audio callback, while setting it to a non-null value indicates to start the custom audio callback. + */ +- (void)setAudioFrameDelegate:(id)delegate; + +/** + * 10.13 Set the callback format of original audio frames captured by local mic + * + * This API is used to set the `AudioFrame` format called back by {@link onCapturedRawAudioFrame}: + * - sampleRate: sample rate. Valid values: 16000, 32000, 44100, 48000 + * - channel: number of channels (if stereo is used, data is interwoven). Valid values: 1: mono channel; 2: dual channel + * - samplesPerCall: number of sample points, which defines the frame length of the callback data. The frame length must be an integer multiple of 10 ms. 
+ * + * If you want to calculate the callback frame length in milliseconds, the formula for converting the number of milliseconds into the number of sample points is as follows: number of sample points = number of milliseconds * sample rate / 1000 + * For example, if you want to call back the data of 20 ms frame length with 48000 sample rate, then the number of sample points should be entered as 960 = 20 * 48000 / 1000 + * Note that the frame length of the final callback is in bytes, and the calculation formula for converting the number of sample points into the number of bytes is as follows: number of bytes = number of sample points * number of channels * 2 (bit + * width) For example, if the parameters are 48000 sample rate, dual channel, 20 ms frame length, and 960 sample points, then the number of bytes is 3840 = 960 * 2 * 2 + * @param format Audio data callback format + * @return 0: success; values smaller than 0: error + */ +- (int)setCapturedRawAudioFrameDelegateFormat:(TRTCAudioFrameDelegateFormat *)format; + +/** + * 10.14 Set the callback format of preprocessed local audio frames + * + * This API is used to set the `AudioFrame` format called back by {@link onLocalProcessedAudioFrame}: + * - sampleRate: sample rate. Valid values: 16000, 32000, 44100, 48000 + * - channel: number of channels (if stereo is used, data is interwoven). Valid values: 1: mono channel; 2: dual channel + * - samplesPerCall: number of sample points, which defines the frame length of the callback data. The frame length must be an integer multiple of 10 ms. 
+ * + * If you want to calculate the callback frame length in milliseconds, the formula for converting the number of milliseconds into the number of sample points is as follows: number of sample points = number of milliseconds * sample rate / 1000 + * For example, if you want to call back the data of 20 ms frame length with 48000 sample rate, then the number of sample points should be entered as 960 = 20 * 48000 / 1000 + * Note that the frame length of the final callback is in bytes, and the calculation formula for converting the number of sample points into the number of bytes is as follows: number of bytes = number of sample points * number of channels * 2 (bit + * width) For example, if the parameters are 48000 sample rate, dual channel, 20 ms frame length, and 960 sample points, then the number of bytes is 3840 = 960 * 2 * 2 + * @param format Audio data callback format + * @return 0: success; values smaller than 0: error + */ +- (int)setLocalProcessedAudioFrameDelegateFormat:(TRTCAudioFrameDelegateFormat *)format; + +/** + * 10.15 Set the callback format of audio frames to be played back by system + * + * This API is used to set the `AudioFrame` format called back by {@link onMixedPlayAudioFrame}: + * - sampleRate: sample rate. Valid values: 16000, 32000, 44100, 48000 + * - channel: number of channels (if stereo is used, data is interwoven). Valid values: 1: mono channel; 2: dual channel + * - samplesPerCall: number of sample points, which defines the frame length of the callback data. The frame length must be an integer multiple of 10 ms. 
+ * + * If you want to calculate the callback frame length in milliseconds, the formula for converting the number of milliseconds into the number of sample points is as follows: number of sample points = number of milliseconds * sample rate / 1000 + * For example, if you want to call back the data of 20 ms frame length with 48000 sample rate, then the number of sample points should be entered as 960 = 20 * 48000 / 1000 + * Note that the frame length of the final callback is in bytes, and the calculation formula for converting the number of sample points into the number of bytes is as follows: number of bytes = number of sample points * number of channels * 2 (bit + * width) For example, if the parameters are 48000 sample rate, dual channel, 20 ms frame length, and 960 sample points, then the number of bytes is 3840 = 960 * 2 * 2 + * @param format Audio data callback format + * @return 0: success; values smaller than 0: error + */ +- (int)setMixedPlayAudioFrameDelegateFormat:(TRTCAudioFrameDelegateFormat *)format; + +/** + * 10.16 Enabling custom audio playback + * + * You can use this API to enable custom audio playback if you want to connect to an external audio device or control the audio playback logic by yourself. + * After you enable custom audio playback, the SDK will stop using its audio API to play back audio. You need to call {@link getCustomAudioRenderingFrame} to get audio frames and play them by yourself. + * @param enable Whether to enable custom audio playback. It’s disabled by default. + * @note The parameter must be set before room entry to take effect. + */ +- (void)enableCustomAudioRendering:(BOOL)enable; + +/** + * 10.17 Getting playable audio data + * + * Before calling this API, you need to first enable custom audio playback using {@link enableCustomAudioRendering}. + * Fill the fields in {@link TRTCAudioFrame} as follows (other fields are not required): + * - `sampleRate`: sample rate (required). 
Valid values: 16000, 24000, 32000, 44100, 48000 + * - `channel`: number of sound channels (required). `1`: mono-channel; `2`: dual-channel; if dual-channel is used, data is interleaved. + * - `data`: the buffer used to get audio data. You need to allocate memory for the buffer based on the duration of an audio frame. + * The PCM data obtained can have a frame duration of 10 ms or 20 ms. 20 ms is recommended. + * Assume that the sample rate is 48000, and sound channels mono-channel. The buffer size for a 20 ms audio frame would be 48000 x 0.02s x 1 x 16 bit = 15360 bit = 1920 bytes. + * + * @param audioFrame Audio frames + * @note + * 1. You must set `sampleRate` and `channel` in `audioFrame`, and allocate memory for one frame of audio in advance. + * 2. The SDK will fill the data automatically based on `sampleRate` and `channel`. + * 3. We recommend that you use the system’s audio playback thread to drive the calling of this API, so that it is called each time the playback of an audio frame is complete. + * + */ +- (void)getCustomAudioRenderingFrame:(TRTCAudioFrame *)audioFrame; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Custom message sending APIs +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Custom message sending APIs +/// @{ + +/** + * 11.1 Use UDP channel to send custom message to all users in room + * + * This API allows you to use TRTC's UDP channel to broadcast custom data to other users in the current room for signaling transfer. + * The UDP channel in TRTC was originally designed to transfer audio/video data. This API works by disguising the signaling data you want to send as audio/video data packets and sending them together with the audio/video data to be sent. + * Other users in the room can receive the message through the `onRecvCustomCmdMsg` callback in {@link TRTCCloudDelegate}. + * @param cmdID Message ID. 
Value range: 1–10 + * @param data Message to be sent. The maximum length of one single message is 1 KB. + * @param reliable Whether reliable sending is enabled. Reliable sending can achieve a higher success rate but with a longer reception delay than unreliable sending. + * @param ordered Whether orderly sending is enabled, i.e., whether the data packets should be received in the same order in which they are sent; if so, a certain delay will be caused. + * @return YES: sent the message successfully; NO: failed to send the message. + * @note + * 1. Up to 30 messages can be sent per second to all users in the room (this is not supported for web and mini program currently). + * 2. A packet can contain up to 1 KB of data; if the threshold is exceeded, the packet is very likely to be discarded by the intermediate router or server. + * 3. A client can send up to 8 KB of data in total per second. + * 4. `reliable` and `ordered` must be set to the same value (`YES` or `NO`) and cannot be set to different values currently. + * 5. We strongly recommend you set different `cmdID` values for messages of different types. This can reduce message delay when orderly sending is required. + */ +- (BOOL)sendCustomCmdMsg:(NSInteger)cmdID data:(NSData *)data reliable:(BOOL)reliable ordered:(BOOL)ordered; + +/** + * 11.2 Use SEI channel to send custom message to all users in room + * + * This API allows you to use TRTC's SEI channel to broadcast custom data to other users in the current room for signaling transfer. + * The header of a video frame has a header data block called SEI. This API works by embedding the custom signaling data you want to send in the SEI block and sending it together with the video frame. + * Therefore, the SEI channel has a better compatibility than {@link sendCustomCmdMsg} as the signaling data can be transferred to the CSS CDN along with the video frame. 
+ * However, because the data block of the video frame header cannot be too large, we recommend you limit the size of the signaling data to only a few bytes when using this API. + * The most common use is to embed the custom timestamp into video frames through this API so as to implement a perfect alignment between the message and video image (such as between the teaching material and video signal in the education scenario). + * Other users in the room can receive the message through the `onRecvSEIMsg` callback in {@link TRTCCloudDelegate}. + * @param data Data to be sent, which can be up to 1 KB (1,000 bytes) + * @param repeatCount Data sending count + * @return YES: the message is allowed and will be sent with subsequent video frames; NO: the message is not allowed to be sent + * @note This API has the following restrictions: + * 1. The data will not be instantly sent after this API is called; instead, it will be inserted into the next video frame after the API call. + * 2. Up to 30 messages can be sent per second to all users in the room (this limit is shared with `sendCustomCmdMsg`). + * 3. Each packet can be up to 1 KB (this limit is shared with `sendCustomCmdMsg`). If a large amount of data is sent, the video bitrate will increase, which may reduce the video quality or even cause lagging. + * 4. Each client can send up to 8 KB of data in total per second (this limit is shared with `sendCustomCmdMsg`). + * 5. If multiple times of sending is required (i.e., `repeatCount` > 1), the data will be inserted into subsequent `repeatCount` video frames in a row for sending, which will increase the video bitrate. + * 6. If `repeatCount` is greater than 1, the data will be sent for multiple times, and the same message may be received multiple times in the `onRecvSEIMsg` callback; therefore, deduplication is required. 
+ */ +- (BOOL)sendSEIMsg:(NSData *)data repeatCount:(int)repeatCount; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Network test APIs +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Network test APIs +/// @{ + +/** + * 12.1 Start network speed test (used before room entry) + * + * @param params speed test options + * @return interface call result, <0: failure + * @note + * 1. The speed measurement process will incur a small amount of basic service fees, See [Purchase Guide > Base Services](https://intl.cloud.tencent.com/document/product/647/34610?lang=en&pg=#basic-services). + * 2. Please perform the Network speed test before room entry, because if performed after room entry, the test will affect the normal audio/video transfer, and its result will be inaccurate due to interference in the room. + * 3. Only one network speed test task is allowed to run at the same time. + */ +- (int)startSpeedTest:(TRTCSpeedTestParams *)params; + +/** + * 12.2 Stop network speed test + */ +- (void)stopSpeedTest; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Debugging APIs +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Debugging APIs +/// @{ + +/** + * 13.1 Get SDK version information + */ ++ (NSString *)getSDKVersion; + +/** + * 13.2 Set log output level + * + * @param level For more information, please see {@link TRTCLogLevel}. 
Default value: {@link TRTCLogLevelNone} + */ ++ (void)setLogLevel:(TRTCLogLevel)level; + +/** + * 13.3 Enable/Disable console log printing + * + * @param enabled Specify whether to enable it, which is disabled by default + */ ++ (void)setConsoleEnabled:(BOOL)enabled; + +/** + * 13.4 Enable/Disable local log compression + * + * If compression is enabled, the log size will significantly reduce, but logs can be read only after being decompressed by the Python script provided by Tencent Cloud. + * If compression is disabled, logs will be stored in plaintext and can be read directly in Notepad, but will take up more storage capacity. + * @param enabled Specify whether to enable it, which is enabled by default + */ ++ (void)setLogCompressEnabled:(BOOL)enabled; + +/** + * 13.5 Set local log storage path + * + * You can use this API to change the default storage path of the SDK's local logs, which is as follows: + * - Windows: C:/Users/[username]/AppData/Roaming/liteav/log, i.e., under `%appdata%/liteav/log`. + * - iOS or macOS: under `sandbox Documents/log`. + * - Android: under `/app directory/files/log/liteav/`. + * @note Please be sure to call this API before all other APIs and make sure that the directory you specify exists and your application has read/write permissions of the directory. + * @param path Log storage path + */ ++ (void)setLogDirPath:(NSString *)path; + +/** + * 13.6 Set log callback + */ ++ (void)setLogDelegate:(id)logDelegate; + +/** + * 13.7 Display dashboard + * + * "Dashboard" is a semi-transparent floating layer for debugging information on top of the video rendering control. It is used to display audio/video information and event information to facilitate integration and debugging. + * @param showType 0: does not display; 1: displays lite edition (only with audio/video information); 2: displays full edition (with audio/video information and event information). 
+ */ +- (void)showDebugView:(NSInteger)showType; + +/** + * 13.8 Set dashboard margin + * + * This API is used to adjust the position of the dashboard in the video rendering control. It must be called before `showDebugView` for it to take effect. + * @param userId User ID + * @param margin Inner margin of the dashboard. It should be noted that this is based on the percentage of `parentView`. Value range: 0–1 + */ +- (void)setDebugViewMargin:(NSString *)userId margin:(TXEdgeInsets)margin; + +/** + * 13.9 Call experimental APIs + */ +- (void)callExperimentalAPI:(NSString *)jsonStr; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Disused APIs (the corresponding new APIs are recommended) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Disused APIs (the corresponding new APIs are recommended) +/// @{ + +/** + * Set mic volume + * + * @deprecated This API is not recommended after v6.9. Please use {@link setAudioCaptureVolume} instead. + */ +- (void)setMicVolumeOnMixing:(NSInteger)volume __attribute__((deprecated("use setAudioCaptureVolume instead"))); + +/** + * Set the strength of beauty, brightening, and rosy skin filters + * + * @deprecated This API is not recommended after v6.9. Please use {@link getBeautyManager} instead. + */ +- (void)setBeautyStyle:(TRTCBeautyStyle)beautyStyle beautyLevel:(NSInteger)beautyLevel whitenessLevel:(NSInteger)whitenessLevel ruddinessLevel:(NSInteger)ruddinessLevel __attribute__((deprecated("use getBeautyManager instead"))); + +/** + * Set the strength of eye enlarging filter + * + * @deprecated This API is not recommended after v6.9. Please use {@link getBeautyManager} instead. + */ +#if TARGET_OS_IPHONE +- (void)setEyeScaleLevel:(float)eyeScaleLevel __attribute__((deprecated("use getBeautyManager instead"))); +#endif + +/** + * Set the strength of face slimming filter + * + * @deprecated This API is not recommended after v6.9. 
Please use {@link getBeautyManager} instead. + */ +#if TARGET_OS_IPHONE +- (void)setFaceScaleLevel:(float)faceScaleLevel __attribute__((deprecated("use getBeautyManager instead"))); +#endif + +/** + * Set the strength of chin slimming filter + * + * @deprecated This API is not recommended after v6.9. Please use {@link getBeautyManager} instead. + */ +#if TARGET_OS_IPHONE +- (void)setFaceVLevel:(float)faceVLevel __attribute__((deprecated("use getBeautyManager instead"))); +#endif + +/** + * Set the strength of chin lengthening/shortening filter + * + * @deprecated This API is not recommended after v6.9. Please use {@link getBeautyManager} instead. + */ +#if TARGET_OS_IPHONE +- (void)setChinLevel:(float)chinLevel __attribute__((deprecated("use getBeautyManager instead"))); +#endif + +/** + * Set the strength of face shortening filter + * + * @deprecated This API is not recommended after v6.9. Please use {@link getBeautyManager} instead. + */ +#if TARGET_OS_IPHONE +- (void)setFaceShortLevel:(float)faceShortlevel __attribute__((deprecated("use getBeautyManager instead"))); +#endif + +/** + * Set the strength of nose slimming filter + * + * @deprecated This API is not recommended after v6.9. Please use {@link getBeautyManager} instead. + */ +#if TARGET_OS_IPHONE +- (void)setNoseSlimLevel:(float)noseSlimLevel __attribute__((deprecated("use getBeautyManager instead"))); +#endif + +/** + * Set animated sticker + * + * @deprecated This API is not recommended after v6.9. Please use {@link getBeautyManager} instead. + */ +#if TARGET_OS_IPHONE +- (void)selectMotionTmpl:(NSString *)tmplPath __attribute__((deprecated("use getBeautyManager instead"))); +#endif + +/** + * Mute animated sticker + * + * @deprecated This API is not recommended after v6.9. Please use {@link getBeautyManager} instead. 
+ */ +#if TARGET_OS_IPHONE +- (void)setMotionMute:(BOOL)motionMute __attribute__((deprecated("use getBeautyManager instead"))); +#endif + +/** + * Start screen sharing + * + * @deprecated This API is not recommended after v7.2. Please use `startScreenCapture:streamType:encParam:` instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)startScreenCapture:(NSView *)view __attribute__((deprecated("use startScreenCapture:streamType:encParam: instead"))); +#endif + +/** + * Set color filter + * + * @deprecated This API is not recommended after v7.2. Please use {@link getBeautyManager} instead. + */ +- (void)setFilter:(TXImage *)image __attribute__((deprecated("use getBeautyManager instead"))); + +/** + * Set the strength of color filter + * + * @deprecated This API is not recommended after v7.2. Please use {@link getBeautyManager} instead. + */ +- (void)setFilterConcentration:(float)concentration __attribute__((deprecated("use getBeautyManager instead"))); + +/** + * Set green screen video + * + * @deprecated This API is not recommended after v7.2. Please use {@link getBeautyManager} instead. + */ +- (void)setGreenScreenFile:(NSURL *)file __attribute__((deprecated("use getBeautyManager instead"))); + +/** + * Start background music + * + * @deprecated This API is not recommended after v7.3. Please use {@link getAudioEffectManager} instead. + */ +- (void)playBGM:(NSString *)path + withBeginNotify:(void (^)(NSInteger errCode))beginNotify + withProgressNotify:(void (^)(NSInteger progressMS, NSInteger durationMS))progressNotify + andCompleteNotify:(void (^)(NSInteger errCode))completeNotify __attribute__((deprecated("use getAudioEffectManager instead"))); + +/** + * Stop background music + * + * @deprecated This API is not recommended after v7.3. Please use {@link getAudioEffectManager} instead. 
+ */ +- (void)stopBGM __attribute__((deprecated("use getAudioEffectManager instead"))); + +/** + * Pause background music + * + * @deprecated This API is not recommended after v7.3. Please use {@link getAudioEffectManager} instead. + */ +- (void)pauseBGM __attribute__((deprecated("use getAudioEffectManager instead"))); + +/** + * Resume background music + * + * @deprecated This API is not recommended after v7.3. Please use {@link getAudioEffectManager} instead. + */ +- (void)resumeBGM __attribute__((deprecated("use getAudioEffectManager instead"))); + +/** + * Get the total length of background music in ms + * + * @deprecated This API is not recommended after v7.3. Please use {@link TXAudioEffectManager#getMusicDurationInMS} instead. + */ +- (NSInteger)getBGMDuration:(NSString *)path __attribute__((deprecated("use TXAudioEffectManager#getMusicDurationInMS instead"))); + +/** + * Set background music playback progress + * + * @deprecated This API is not recommended after v7.3. Please use {@link TXAudioEffectManager#seekMusicToPosInMS} instead. + */ +- (int)setBGMPosition:(NSInteger)pos __attribute__((deprecated("use TXAudioEffectManager#seekMusicToPosInMS instead"))); + +/** + * Set background music volume + * + * @deprecated This API is not recommended after v7.3. Please use {@link TXAudioEffectManager#setMusicVolume} instead. + */ +- (void)setBGMVolume:(NSInteger)volume __attribute__((deprecated("use TXAudioEffectManager#setMusicVolume instead"))); + +/** + * Set the local playback volume of background music + * + * @deprecated This API is not recommended after v7.3. Please use {@link TXAudioEffectManager#setMusicPlayoutVolume} instead. + */ +- (void)setBGMPlayoutVolume:(NSInteger)volume __attribute__((deprecated("use TXAudioEffectManager#setMusicPlayoutVolume instead"))); + +/** + * Set the remote playback volume of background music + * + * @deprecated This API is not recommended after v7.3. Please use {@link TXAudioEffectManager#setBGMPublishVolume} instead. 
+ */ +- (void)setBGMPublishVolume:(NSInteger)volume __attribute__((deprecated("use TXAudioEffectManager#setBGMPublishVolume instead"))); + +/** + * Set reverb effect + * + * @deprecated This API is not recommended after v7.3. Please use {@link TXAudioEffectManager#setVoiceReverbType} instead. + */ +- (void)setReverbType:(TRTCReverbType)reverbType __attribute__((deprecated("use TXAudioEffectManager#setVoiceReverbType instead"))); + +/** + * Set voice changing type + * + * @deprecated This API is not recommended after v7.3. Please use {@link TXAudioEffectManager#setVoiceChangerType} instead. + */ +- (void)setVoiceChangerType:(TRTCVoiceChangerType)voiceChangerType __attribute__((deprecated("use TXAudioEffectManager#setVoiceChangerType instead"))); + +/** + * Play sound effect + * + * @deprecated This API is not recommended after v7.3. Please use {@link TXAudioEffectManager#startPlayMusic} instead. + */ +- (void)playAudioEffect:(TRTCAudioEffectParam *)effect __attribute__((deprecated("use TXAudioEffectManager#startPlayMusic instead"))); + +/** + * Set sound effect volume + * + * @deprecated This API is not recommended after v7.3. Please use {@link TXAudioEffectManager#setMusicPublishVolume} and {@link TXAudioEffectManager#setMusicPlayoutVolume} instead. + */ +- (void)setAudioEffectVolume:(int)effectId volume:(int)volume __attribute__((deprecated("use setMusicPublishVolume/setMusicPlayoutVolume instead"))); + +/** + * Stop sound effect + * + * @deprecated This API is not recommended after v7.3. Please use {@link TXAudioEffectManager#stopPlayMusic} instead. + */ +- (void)stopAudioEffect:(int)effectId __attribute__((deprecated("use TXAudioEffectManager#stopPlayMusic instead"))); + +/** + * Stop all sound effects + * + * @deprecated This API is not recommended after v7.3. Please use {@link TXAudioEffectManager#stopPlayMusic} instead. 
+ */ +- (void)stopAllAudioEffects __attribute__((deprecated("use TXAudioEffectManager#stopPlayMusic instead"))); + +/** + * Set the volume of all sound effects + * + * @deprecated This API is not recommended after v7.3. Please use {@link TXAudioEffectManager#setMusicPublishVolume} and {@link TXAudioEffectManager#setMusicPlayoutVolume} instead. + */ +- (void)setAllAudioEffectsVolume:(int)volume __attribute__((deprecated("use setMusicPublishVolume/setMusicPlayoutVolume instead"))); + +/** + * Pause sound effect + * + * @deprecated This API is not recommended after v7.3. Please use {@link TXAudioEffectManager#pauseAudioEffect} instead. + */ +- (void)pauseAudioEffect:(int)effectId __attribute__((deprecated("use TXAudioEffectManager#pauseAudioEffect instead"))); + +/** + * Pause sound effect + * + * @deprecated This API is not recommended after v7.3. Please use {@link TXAudioEffectManager#resumePlayMusic} instead. + */ +- (void)resumeAudioEffect:(int)effectId __attribute__((deprecated("use TXAudioEffectManager#resumePlayMusic instead"))); + +/** + * Enable or disable in-ear monitoring + * + * @deprecated This API is not recommended after v7.3. Please use {@link TXAudioEffectManager#setVoiceEarMonitor} instead. + */ +#if TARGET_OS_IPHONE +- (void)enableAudioEarMonitoring:(BOOL)enable __attribute__((deprecated("use TXAudioEffectManager#setVoiceEarMonitor instead"))); +#endif + +/** + * Start displaying remote video image + * + * @deprecated This API is not recommended after v8.0. Please use {@link startRemoteView:streamType:view} instead. + */ +- (void)startRemoteView:(NSString *)userId view:(TXView *)view __attribute__((deprecated("use startRemoteView:streamType:view: instead"))); + +/** + * Stop displaying remote video image and pulling the video data stream of remote user + * + * @deprecated This API is not recommended after v8.0. Please use {@link stopRemoteView:streamType:} instead. 
+ */ +- (void)stopRemoteView:(NSString *)userId __attribute__((deprecated("use stopRemoteView:streamType: instead"))); + +/** + * Set the rendering mode of remote image + * + * @deprecated This API is not recommended after v8.0. Please use {@link setRemoteRenderParams:streamType:params:} instead. + */ +- (void)setRemoteViewFillMode:(NSString *)userId mode:(TRTCVideoFillMode)mode __attribute__((deprecated("use setRemoteRenderParams:streamType:params: instead"))); + +/** + * Set the clockwise rotation angle of remote image + * + * @deprecated This API is not recommended after v8.0. Please use {@link setRemoteRenderParams:streamType:params:} instead. + */ +- (void)setRemoteViewRotation:(NSString *)userId rotation:(TRTCVideoRotation)rotation __attribute__((deprecated("use setRemoteRenderParams:streamType:params: instead"))); + +/** + * Set the rendering mode of local image + * + * @deprecated This API is not recommended after v8.0. Please use {@link setLocalRenderParams} instead. + */ +- (void)setLocalViewFillMode:(TRTCVideoFillMode)mode __attribute__((deprecated("use setLocalRenderParams instead"))); + +/** + * Set the clockwise rotation angle of local image + * + * @deprecated This API is not recommended after v8.0. Please use {@link setLocalRenderParams} instead. + */ +- (void)setLocalViewRotation:(TRTCVideoRotation)rotation __attribute__((deprecated("use setLocalRenderParams instead"))); + +/** + * Set the mirror mode of local camera's preview image + * + * @deprecated This API is not recommended after v8.0. Please use {@link setLocalRenderParams} instead. + */ +#if TARGET_OS_IPHONE +- (void)setLocalViewMirror:(TRTCLocalVideoMirrorType)mirror __attribute__((deprecated("use setLocalRenderParams: instead"))); +#elif TARGET_OS_MAC +- (void)setLocalViewMirror:(BOOL)mirror __attribute__((deprecated("use setLocalRenderParams: instead"))); +#endif + +/** + * Start displaying the substream image of remote user + * + * @deprecated This API is not recommended after v8.0. 
Please use {@link startRemoteView:streamType:view} instead. + */ +- (void)startRemoteSubStreamView:(NSString *)userId view:(TXView *)view __attribute__((deprecated("use startRemoteView:type:view: instead"))); + +/** + * Stop displaying the substream image of remote user + * + * @deprecated This API is not recommended after v8.0. Please use {@link stopRemoteView:streamType:} instead. + */ +- (void)stopRemoteSubStreamView:(NSString *)userId __attribute__((deprecated("use stopRemoteView:streamType: instead"))); + +/** + * Set the fill mode of substream image + * + * @deprecated This API is not recommended after v8.0. Please use {@link setRemoteRenderParams:streamType:params:} instead. + */ +- (void)setRemoteSubStreamViewFillMode:(NSString *)userId mode:(TRTCVideoFillMode)mode __attribute__((deprecated("use setRemoteRenderParams:streamType:params: instead"))); + +/** + * Set the clockwise rotation angle of substream image + * + * @deprecated This API is not recommended after v8.0. Please use {@link setRemoteRenderParams:streamType:params:} instead. + */ +- (void)setRemoteSubStreamViewRotation:(NSString *)userId rotation:(TRTCVideoRotation)rotation __attribute__((deprecated("use setRemoteRenderParams:streamType:params: instead"))); + +/** + * Specify whether to view the big or small image + * + * @deprecated This API is not recommended after v8.0. Please use {@link startRemoteView:streamType:view:} instead. + */ +- (void)setPriorRemoteVideoStreamType:(TRTCVideoStreamType)streamType __attribute__((deprecated("use startRemoteView:streamType:view: instead"))); + +/** + * Set sound quality + * + * @deprecated This API is not recommended after v8.0. Please use {@link startLocalAudio:}(quality) instead. + */ +- (void)setAudioQuality:(TRTCAudioQuality)quality __attribute__((deprecated("use startLocalAudio(quality) instead"))); + +/** + * Set sound quality + * + * @deprecated This API is not recommended after v8.0. Please use {@link startLocalAudio:}(quality) instead. 
+ */ +- (void)startLocalAudio __attribute__((deprecated("use startLocalAudio(quality) instead"))); + +/** + * Switch camera + * + * @deprecated This API is not recommended after v8.0. Please use the `switchCamera` API in {@link TXDeviceManager} instead. + */ +#if TARGET_OS_IPHONE +- (void)switchCamera __attribute__((deprecated("use TXDeviceManager#switchCamera instead"))); +#endif + +/** + * Query whether the current camera supports zoom + * + * @deprecated This API is not recommended after v8.0. Please use the `isCameraZoomSupported` API in {@link TXDeviceManager} instead. + */ +#if TARGET_OS_IPHONE +- (BOOL)isCameraZoomSupported __attribute__((deprecated("use TXDeviceManager#isCameraZoomSupported instead"))); +#endif + +/** + * Set camera zoom ratio (focal length) + * + * @deprecated This API is not recommended after v8.0. Please use the `setCameraZoomRatio` API in {@link TXDeviceManager} instead. + */ +#if TARGET_OS_IPHONE +- (void)setZoom:(CGFloat)distance __attribute__((deprecated("use TXDeviceManager#setCameraZoomRatio instead"))); +#endif + +/** + * Query whether the device supports flash + * + * @deprecated This API is not recommended after v8.0. Please use the `isCameraTorchSupported` API in {@link TXDeviceManager} instead. + */ +#if TARGET_OS_IPHONE +- (BOOL)isCameraTorchSupported __attribute__((deprecated("use TXDeviceManager#isCameraTorchSupported instead"))); +#endif + +/** + * Enable/Disable flash + * + * @deprecated This API is not recommended after v8.0. Please use the `enableCameraTorch` API in {@link TXDeviceManager} instead. + */ +#if TARGET_OS_IPHONE +- (BOOL)enbaleTorch:(BOOL)enable __attribute__((deprecated("use TXDeviceManager#enableCameraTorch instead"))); +#endif + +/** + * Query whether the camera supports setting focus + * + * @deprecated This API is not recommended after v8.0. 
+ */ +#if TARGET_OS_IPHONE +- (BOOL)isCameraFocusPositionInPreviewSupported __attribute__((deprecated)); +#endif + +/** + * Set the focal position of camera + * + * @deprecated This API is not recommended after v8.0. Please use the `setCameraFocusPosition` API in {@link TXDeviceManager} instead. + */ +#if TARGET_OS_IPHONE +- (void)setFocusPosition:(CGPoint)touchPoint __attribute__((deprecated("use TXDeviceManager#setCameraFocusPosition instead"))); +#endif + +/** + * Query whether the device supports the automatic recognition of face position + * + * @deprecated This API is not recommended after v8.0. Please use the `isAutoFocusEnabled` API in {@link TXDeviceManager} instead. + */ +#if TARGET_OS_IPHONE +- (BOOL)isCameraAutoFocusFaceModeSupported __attribute__((deprecated("use TXDeviceManager#isAutoFocusEnabled instead"))); +#endif + +/** + * Enable/Disable face auto focus + * + * @deprecated This API is not recommended after v8.0. Please use the `enableCameraAutoFocus` API in {@link TXDeviceManager} instead. + */ +#if TARGET_OS_IPHONE +- (void)enableAutoFaceFoucs:(BOOL)enable __attribute__((deprecated("use TXDeviceManager#enableCameraAutoFocus instead"))); +#endif + +/** + * Start camera test + * + * @deprecated This API is not recommended after v8.0. Please use the `startCameraDeviceTest` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)startCameraDeviceTestInView:(NSView *)view __attribute__((deprecated("use TXDeviceManager#startCameraDeviceTest instead"))); +#endif + +/** + * Stop camera test + * + * @deprecated This API is not recommended after v8.0. Please use the `stopCameraDeviceTest` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)stopCameraDeviceTest __attribute__((deprecated("use TXDeviceManager#stopCameraDeviceTest instead"))); +#endif + +/** + * Start mic test + * + * @deprecated This API is not recommended after v8.0. 
Please use the `startMicDeviceTest` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)startMicDeviceTest:(NSInteger)interval testEcho:(void (^)(NSInteger volume))testEcho __attribute__((deprecated("use TXDeviceManager#startMicDeviceTest instead"))); +#endif + +/** + * Stop mic test + * + * @deprecated This API is not recommended after v8.0. Please use the `stopMicDeviceTest` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)stopMicDeviceTest __attribute__((deprecated("use TXDeviceManager#stopMicDeviceTest instead"))); +#endif + +/** + * Start speaker test + * + * @deprecated This API is not recommended after v8.0. Please use the `startSpeakerDeviceTest` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)startSpeakerDeviceTest:(NSString *)audioFilePath onVolumeChanged:(void (^)(NSInteger volume, BOOL isLastFrame))volumeBlock __attribute__((deprecated("use TXDeviceManager#startSpeakerDeviceTest instead"))); +#endif + +/** + * Stop speaker test + * + * @deprecated This API is not recommended after v8.0. Please use the `stopSpeakerDeviceTest` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)stopSpeakerDeviceTest __attribute__((deprecated("use TXDeviceManager#stopSpeakerDeviceTest instead"))); +#endif + +/** + * Get the list of mics + * + * @deprecated This API is not recommended after v8.0. Please use the `getDevicesList` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (NSArray *)getMicDevicesList __attribute__((deprecated("use TXDeviceManager#getDevicesList instead"))); +#endif + +/** + * Get the current mic device + * + * @deprecated This API is not recommended after v8.0. Please use the `getCurrentDevice` API in {@link TXDeviceManager} instead. 
+ */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (TRTCMediaDeviceInfo *)getCurrentMicDevice __attribute__((deprecated("use TXDeviceManager#getCurrentDevice instead"))); +#endif + +/** + * Select the currently used mic + * + * @deprecated This API is not recommended after v8.0. Please use the `setCurrentDevice` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (int)setCurrentMicDevice:(NSString *)deviceId __attribute__((deprecated("use TXDeviceManager#setCurrentDevice instead"))); +#endif + +/** + * Get the current mic volume + * + * @deprecated This API is not recommended after v8.0. Please use the `getCurrentDeviceVolume` API in {@link TXDeviceManager} instead. + */ +#if TARGET_OS_MAC +- (float)getCurrentMicDeviceVolume __attribute__((deprecated("use TXDeviceManager#getCurrentDeviceVolume instead"))); +#endif + +/** + * Set the current mic volume + * + * @deprecated This API is not recommended after v8.0. Please use the `setCurrentDeviceVolume` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)setCurrentMicDeviceVolume:(NSInteger)volume __attribute__((deprecated("use TXDeviceManager#setCurrentDeviceVolume instead"))); +#endif + +/** + * Get the mute status of the current system mic + * + * @deprecated This API is not recommended after v8.0. Please use the `getCurrentDeviceMute` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (BOOL)getCurrentMicDeviceMute __attribute__((deprecated("use TXDeviceManager#getCurrentDeviceMute instead"))); +#endif + +/** + * Set the mute status of the current system mic + * + * @deprecated This API is not recommended after v8.0. Please use the `setCurrentDeviceMute` API in {@link TXDeviceManager} instead. 
+ */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)setCurrentMicDeviceMute:(BOOL)mute __attribute__((deprecated("use TXDeviceManager#setCurrentDeviceMute instead"))); +#endif + +/** + * Get the list of speakers + * + * @deprecated This API is not recommended after v8.0. Please use the `getDevicesList` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (NSArray *)getSpeakerDevicesList __attribute__((deprecated("use TXDeviceManager#getDevicesList instead"))); +#endif + +/** + * Get the currently used speaker + * + * @deprecated This API is not recommended after v8.0. Please use the `getCurrentDevice` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (TRTCMediaDeviceInfo *)getCurrentSpeakerDevice __attribute__((deprecated("use TXDeviceManager#getCurrentDevice instead"))); +#endif + +/** + * Set the speaker to use + * + * @deprecated This API is not recommended after v8.0. Please use the `setCurrentDevice` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (int)setCurrentSpeakerDevice:(NSString *)deviceId __attribute__((deprecated("use TXDeviceManager#setCurrentDevice instead"))); +#endif + +/** + * Get the current speaker volume + * + * @deprecated This API is not recommended after v8.0. Please use the `getCurrentDeviceVolume` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (float)getCurrentSpeakerDeviceVolume __attribute__((deprecated("use TXDeviceManager#getCurrentDeviceVolume instead"))); +#endif + +/** + * Set the current speaker volume + * + * @deprecated This API is not recommended after v8.0. Please use the `setCurrentDeviceVolume` API in {@link TXDeviceManager} instead. 
+ */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (int)setCurrentSpeakerDeviceVolume:(NSInteger)volume __attribute__((deprecated("use TXDeviceManager#setCurrentDeviceVolume instead"))); +#endif + +/** + * Get the mute status of the current system speaker + * + * @deprecated This API is not recommended after v8.0. Please use the `getCurrentDeviceMute` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (BOOL)getCurrentSpeakerDeviceMute __attribute__((deprecated("use TXDeviceManager#getCurrentDeviceMute instead"))); +#endif + +/** + * Set whether to mute the current system speaker + * + * @deprecated This API is not recommended after v8.0. Please use the `setCurrentDeviceMute` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)setCurrentSpeakerDeviceMute:(BOOL)mute __attribute__((deprecated("use TXDeviceManager#setCurrentDeviceMute instead"))); +#endif + +/** + * Get the list of cameras + * + * @deprecated This API is not recommended after v8.0. Please use the `getDevicesList` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (NSArray *)getCameraDevicesList __attribute__((deprecated("use TXDeviceManager#getDevicesList instead"))); +#endif + +/** + * Get the currently used camera + * + * @deprecated This API is not recommended after v8.0. Please use the `getCurrentDevice` API in {@link TXDeviceManager} instead. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (TRTCMediaDeviceInfo *)getCurrentCameraDevice __attribute__((deprecated("use TXDeviceManager#getCurrentDevice instead"))); +#endif + +/** + * Set the camera to be used currently + * + * @deprecated This API is not recommended after v8.0. Please use the `setCurrentDevice` API in {@link TXDeviceManager} instead. 
+ */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (int)setCurrentCameraDevice:(NSString *)deviceId __attribute__((deprecated("use TXDeviceManager#setCurrentDevice instead"))); +#endif + +/** + * Setting the system volume type (for mobile OS) + * + * @deprecated This API is not recommended after v8.0. Please use the `startLocalAudio(quality)` instead, which param `quality` is used to decide audio quality. + */ +- (void)setSystemVolumeType:(TRTCSystemVolumeType)type __attribute__((deprecated("use startLocalAudio:quality instead"))); + +/** + * Screencapture video + * + * @deprecated This API is not recommended after v8.2. Please use `snapshotVideo:type:sourceType:completionBlock` instead. + */ +- (void)snapshotVideo:(NSString *)userId type:(TRTCVideoStreamType)streamType completionBlock:(void (^)(TXImage *image))completionBlock __attribute__((deprecated("use snapshotVideo:type:sourceType:completionBlock instead"))); + +/** + * Enable custom video capturing mode + * + * @deprecated This API is not recommended after v8.5. Please use `enableCustomVideoCapture(streamType,enable)` instead. + */ +- (void)enableCustomVideoCapture:(BOOL)enable __attribute__((deprecated("use enableCustomVideoCapture:enable instead"))); + +/** + * Deliver captured video data to SDK + * + * @deprecated This API is not recommended after v8.5. Please use `sendCustomVideoData(streamType, TRTCVideoFrame)` instead. + */ +- (void)sendCustomVideoData:(TRTCVideoFrame *)frame __attribute__((deprecated("use sendCustomVideoData:frame instead"))); + +/** + * Start in-app screen sharing (for iOS 13.0 and above only) + * + * @deprecated This API is not recommended after v8.6. Please use `startScreenCaptureInApp:encParam:` instead. + */ +- (void)startScreenCaptureInApp:(TRTCVideoEncParam *)encParams __attribute__((deprecated("use startScreenCaptureInApp:encParam: instead"))); + +/** + * Start system-level screen sharing (for iOS 11.0 and above only) + * + * @deprecated This API is not recommended after v8.6. 
Please use `startScreenCaptureByReplaykit:encParam:appGroup:` instead. + */ +- (void)startScreenCaptureByReplaykit:(TRTCVideoEncParam *)encParams appGroup:(NSString *)appGroup __attribute__((deprecated("use startScreenCaptureByReplaykit:encParam:appGroup: instead"))); + +/** + * Pause/Resume publishing local video stream + * + * @deprecated This API is not recommended after v8.9. Please use `muteLocalVideo(streamType, mute)` instead. + */ +- (void)muteLocalVideo:(BOOL)mute __attribute__((deprecated("use muteLocalVideo:streamType:mute: instead"))); + +/** + * Pause/Resume subscribing to remote user's video stream + * + * @deprecated This API is not recommended after v8.9. Please use `muteRemoteVideoStream(userId, streamType, mute)` instead. + */ +- (void)muteRemoteVideoStream:(NSString *)userId mute:(BOOL)mute __attribute__((deprecated("use muteRemoteVideoStream:userid,streamType:mute: instead"))); + +/** + * Start network speed test (used before room entry) + * + * @deprecated This API is not recommended after v9.2. Please use `startSpeedTest(params)` instead. 
+ */ +- (void)startSpeedTest:(uint32_t)sdkAppId + userId:(NSString *)userId + userSig:(NSString *)userSig + completion:(void (^)(TRTCSpeedTestResult *result, NSInteger completedCount, NSInteger totalCount))completion __attribute__((deprecated("use startSpeedTest: instead"))); + +///@} +@end +///@} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloudDef.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloudDef.h new file mode 100644 index 0000000..dad27a3 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloudDef.h @@ -0,0 +1,1486 @@ +/** + * Module: TRTC key class definition + * Description: definitions of enumerated and constant values such as resolution and quality level + */ +/// @defgroup TRTCCloudDef_ios TRTCCloudDef +/// Tencent Cloud TRTC Key Type Definition +/// @{ +#import +#import "TXDeviceManager.h" + +///////////////////////////////////////////////////////////////////////////////// +// +// Rendering control +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * [VIEW] Rendering control that renders the video image + * + * There are many APIs in TRTC that need to manipulate the video image, for which you should specify the video rendering control. + * - On iOS, you can directly use `UIView` as the video rendering control, and the SDK will draw the video image on the `UIView` you provide. + * - On macOS, you can directly use `NSView` as the video rendering control, and the SDK will draw the video image on the `NSView` you provide. + * Below is the sample code: + *
+ * UIView *videoView = [[UIView alloc] initWithFrame:CGRectMake(0, 0, 360, 640)];
+ * [self.view addSubview:videoView];
+ * [trtcCloud startLocalPreview:YES view:_localView];
+ * 
+ */ +#if TARGET_OS_IPHONE || TARGET_OS_SIMULATOR +#import +typedef UIView TXView; +typedef UIImage TXImage; +typedef UIEdgeInsets TXEdgeInsets; +#elif TARGET_OS_MAC +#import +typedef NSView TXView; +typedef NSImage TXImage; +typedef NSEdgeInsets TXEdgeInsets; +#endif + +///////////////////////////////////////////////////////////////////////////////// +// +// Definitions of video enumerated values +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 1.1 Video resolution + * + * Here, only the landscape resolution (e.g., 640x360) is defined. If the portrait resolution (e.g., 360x640) needs to be used, `Portrait` must be selected for `TRTCVideoResolutionMode`. + */ +typedef NS_ENUM(NSInteger, TRTCVideoResolution) { + + /// Aspect ratio: 1:1; resolution: 120x120; recommended bitrate (VideoCall): 80 Kbps; recommended bitrate (LIVE): 120 Kbps. + TRTCVideoResolution_120_120 = 1, + + /// Aspect ratio: 1:1; resolution: 160x160; recommended bitrate (VideoCall): 100 Kbps; recommended bitrate (LIVE): 150 Kbps. + TRTCVideoResolution_160_160 = 3, + + /// Aspect ratio: 1:1; resolution: 270x270; recommended bitrate (VideoCall): 200 Kbps; recommended bitrate (LIVE): 300 Kbps. + TRTCVideoResolution_270_270 = 5, + + /// Aspect ratio: 1:1; resolution: 480x480; recommended bitrate (VideoCall): 350 Kbps; recommended bitrate (LIVE): 500 Kbps. + TRTCVideoResolution_480_480 = 7, + + /// Aspect ratio: 4:3; resolution: 160x120; recommended bitrate (VideoCall): 100 Kbps; recommended bitrate (LIVE): 150 Kbps. + TRTCVideoResolution_160_120 = 50, + + /// Aspect ratio: 4:3; resolution: 240x180; recommended bitrate (VideoCall): 150 Kbps; recommended bitrate (LIVE): 250 Kbps. + TRTCVideoResolution_240_180 = 52, + + /// Aspect ratio: 4:3; resolution: 280x210; recommended bitrate (VideoCall): 200 Kbps; recommended bitrate (LIVE): 300 Kbps. 
+ TRTCVideoResolution_280_210 = 54, + + /// Aspect ratio: 4:3; resolution: 320x240; recommended bitrate (VideoCall): 250 Kbps; recommended bitrate (LIVE): 375 Kbps. + TRTCVideoResolution_320_240 = 56, + + /// Aspect ratio: 4:3; resolution: 400x300; recommended bitrate (VideoCall): 300 Kbps; recommended bitrate (LIVE): 450 Kbps. + TRTCVideoResolution_400_300 = 58, + + /// Aspect ratio: 4:3; resolution: 480x360; recommended bitrate (VideoCall): 400 Kbps; recommended bitrate (LIVE): 600 Kbps. + TRTCVideoResolution_480_360 = 60, + + /// Aspect ratio: 4:3; resolution: 640x480; recommended bitrate (VideoCall): 600 Kbps; recommended bitrate (LIVE): 900 Kbps. + TRTCVideoResolution_640_480 = 62, + + /// Aspect ratio: 4:3; resolution: 960x720; recommended bitrate (VideoCall): 1000 Kbps; recommended bitrate (LIVE): 1500 Kbps. + TRTCVideoResolution_960_720 = 64, + + /// Aspect ratio: 16:9; resolution: 160x90; recommended bitrate (VideoCall): 150 Kbps; recommended bitrate (LIVE): 250 Kbps. + TRTCVideoResolution_160_90 = 100, + + /// Aspect ratio: 16:9; resolution: 256x144; recommended bitrate (VideoCall): 200 Kbps; recommended bitrate (LIVE): 300 Kbps. + TRTCVideoResolution_256_144 = 102, + + /// Aspect ratio: 16:9; resolution: 320x180; recommended bitrate (VideoCall): 250 Kbps; recommended bitrate (LIVE): 400 Kbps. + TRTCVideoResolution_320_180 = 104, + + /// Aspect ratio: 16:9; resolution: 480x270; recommended bitrate (VideoCall): 350 Kbps; recommended bitrate (LIVE): 550 Kbps. + TRTCVideoResolution_480_270 = 106, + + /// Aspect ratio: 16:9; resolution: 640x360; recommended bitrate (VideoCall): 500 Kbps; recommended bitrate (LIVE): 900 Kbps. + TRTCVideoResolution_640_360 = 108, + + /// Aspect ratio: 16:9; resolution: 960x540; recommended bitrate (VideoCall): 850 Kbps; recommended bitrate (LIVE): 1300 Kbps. + TRTCVideoResolution_960_540 = 110, + + /// Aspect ratio: 16:9; resolution: 1280x720; recommended bitrate (VideoCall): 1200 Kbps; recommended bitrate (LIVE): 1800 Kbps. 
+ TRTCVideoResolution_1280_720 = 112, + + /// Aspect ratio: 16:9; resolution: 1920x1080; recommended bitrate (VideoCall): 2000 Kbps; recommended bitrate (LIVE): 3000 Kbps. + TRTCVideoResolution_1920_1080 = 114, + +}; + +/** + * 1.2 Video aspect ratio mode + * + * Only the landscape resolution (e.g., 640x360) is defined in `TRTCVideoResolution`. If the portrait resolution (e.g., 360x640) needs to be used, `Portrait` must be selected for `TRTCVideoResolutionMode`. + */ +typedef NS_ENUM(NSInteger, TRTCVideoResolutionMode) { + + /// Landscape resolution, such as TRTCVideoResolution_640_360 + TRTCVideoResolutionModeLandscape = 640x360. + TRTCVideoResolutionModeLandscape = 0, + + /// Portrait resolution, such as TRTCVideoResolution_640_360 + TRTCVideoResolutionModePortrait = 360x640. + TRTCVideoResolutionModePortrait = 1, + +}; + +/** + * 1.3 Video stream type + * + * TRTC provides three different video streams, including: + * - HD big image: it is generally used to transfer video data from the camera. + * - Smooth small image: it has the same content as the big image, but with lower resolution and bitrate and thus lower definition. + * - Substream image: it is generally used for screen sharing. Only one user in the room is allowed to publish the substream video image at any time, while other users must wait for this user to close the substream before they can publish their own + * substream. + * @note The SDK does not support enabling the smooth small image alone, which must be enabled together with the big image. It will automatically set the resolution and bitrate of the small image. + */ +typedef NS_ENUM(NSInteger, TRTCVideoStreamType) { + + /// HD big image: it is generally used to transfer video data from the camera. + TRTCVideoStreamTypeBig = 0, + + /// Smooth small image: it has the same content as the big image, but with lower resolution and bitrate and thus lower definition. 
+ TRTCVideoStreamTypeSmall = 1, + + /// Substream image: it is generally used for screen sharing. Only one user in the room is allowed to publish the substream video image at any time, while other users must wait for this user to close the substream before they can publish their + /// own substream. + TRTCVideoStreamTypeSub = 2, + +}; + +/** + * 1.4 Video image fill mode + * + * If the aspect ratio of the video display area is not equal to that of the video image, you need to specify the fill mode: + */ +typedef NS_ENUM(NSInteger, TRTCVideoFillMode) { + + /// Fill mode: the video image will be centered and scaled to fill the entire display area, where parts that exceed the area will be cropped. The displayed image may be incomplete in this mode. + TRTCVideoFillMode_Fill = 0, + + /// Fit mode: the video image will be scaled based on its long side to fit the display area, where the short side will be filled with black bars. The displayed image is complete in this mode, but there may be black bars. + TRTCVideoFillMode_Fit = 1, + +}; + +/** + * 1.5 Video image rotation direction + * + * TRTC provides rotation angle setting APIs for local and remote images. The following rotation angles are all clockwise. + */ +typedef NS_ENUM(NSInteger, TRTCVideoRotation) { + + /// No rotation + TRTCVideoRotation_0 = 0, + + /// Clockwise rotation by 90 degrees + TRTCVideoRotation_90 = 1, + + /// Clockwise rotation by 180 degrees + TRTCVideoRotation_180 = 2, + + /// Clockwise rotation by 270 degrees + TRTCVideoRotation_270 = 3, + +}; + +/** + * 1.6 Beauty (skin smoothing) filter algorithm + * + * TRTC has multiple built-in skin smoothing algorithms. You can select the one most suitable for your product. + */ +typedef NS_ENUM(NSInteger, TRTCBeautyStyle) { + + /// Smooth style, which uses a more radical algorithm for more obvious effect and is suitable for show live streaming. 
+ TRTCBeautyStyleSmooth = 0, + + /// Natural style, which retains more facial details for more natural effect and is suitable for most live streaming use cases. + TRTCBeautyStyleNature = 1, + + /// Pitu style, which is provided by YouTu Lab. Its skin smoothing effect is between the smooth style and the natural style, that is, it retains more skin details than the smooth style and has a higher skin smoothing degree than the natural + /// style. + TRTCBeautyStylePitu = 2, + +}; + +/** + * 1.7 Video pixel format + * + * TRTC provides custom video capturing and rendering features. + * - For the custom capturing feature, you can use the following enumerated values to describe the pixel format of the video you capture. + * - For the custom rendering feature, you can specify the pixel format of the video you expect the SDK to call back. + */ +typedef NS_ENUM(NSInteger, TRTCVideoPixelFormat) { + + /// Undefined format + TRTCVideoPixelFormat_Unknown = 0, + + /// YUV420P (I420) format + TRTCVideoPixelFormat_I420 = 1, + + /// OpenGL 2D texture format + TRTCVideoPixelFormat_Texture_2D = 7, + + /// BGRA32 format + TRTCVideoPixelFormat_32BGRA = 6, + + /// YUV420SP (NV12) format + TRTCVideoPixelFormat_NV12 = 5, + +}; + +/** + * 1.8 Video data transfer method + * + * For custom capturing and rendering features, you need to use the following enumerated values to specify the method of transferring video data: + * - Method 1. This method uses memory buffer to transfer video data. It is efficient on iOS but inefficient on Android. It is the only method supported on Windows currently. + * - Method 2. This method uses texture to transfer video data. It is efficient on both iOS and Android but is not supported on Windows. To use this method, you should have a general familiarity with OpenGL programming. + */ +typedef NS_ENUM(NSInteger, TRTCVideoBufferType) { + + /// Undefined transfer method + TRTCVideoBufferType_Unknown = 0, + + /// Use memory buffer to transfer video data. 
iOS: `PixelBuffer`; Android: `Direct Buffer` for JNI layer; Windows: memory data block. + TRTCVideoBufferType_PixelBuffer = 1, + + /// Use memory buffer to transfer video data. iOS: more compact memory block in `NSData` type after additional processing; Android: `byte[]` for Java layer. + /// This transfer method has a lower efficiency than other methods. + TRTCVideoBufferType_NSData = 2, + + /// Use texture to transfer video data + TRTCVideoBufferType_Texture = 3, + +}; + +/** + * 1.9 Video mirror type + * + * Video mirroring refers to the left-to-right flipping of the video image, especially for the local camera preview image. After mirroring is enabled, it can bring anchors a familiar "look into the mirror" experience. + */ +typedef NS_ENUM(NSUInteger, TRTCVideoMirrorType) { + + /// Auto mode: mirror the front camera's image but not the rear camera's image (for mobile devices only). + TRTCVideoMirrorTypeAuto = 0, + + /// Mirror the images of both the front and rear cameras. + TRTCVideoMirrorTypeEnable = 1, + + /// Disable mirroring for both the front and rear cameras. + TRTCVideoMirrorTypeDisable = 2, + +}; + +/// Old version of TRTCVideoMirrorType, reserved for compatibility with older interface. +typedef NS_ENUM(NSUInteger, TRTCLocalVideoMirrorType) { + TRTCLocalVideoMirrorType_Auto = TRTCVideoMirrorTypeAuto, + TRTCLocalVideoMirrorType_Enable = TRTCVideoMirrorTypeEnable, + TRTCLocalVideoMirrorType_Disable = TRTCVideoMirrorTypeDisable, +} __attribute__((deprecated("use TRTCVideoMirrorType instead"))); + +/** + * 1.10 Data source of local video screenshot + * + * The SDK can take screenshots from the following two data sources and save them as local files: + * - Video stream: the SDK screencaptures the native video content from the video stream. The screenshots are not controlled by the display of the rendering control. 
+ * - Rendering layer: the SDK screencaptures the displayed video content from the rendering control, which can achieve the effect of WYSIWYG, but if the display area is too small, the screenshots will also be very small. + */ +typedef NS_ENUM(NSUInteger, TRTCSnapshotSourceType) { + + /// The SDK screencaptures the native video content from the video stream. The screenshots are not controlled by the display of the rendering control. + TRTCSnapshotSourceTypeStream = 0, + + /// The SDK screencaptures the displayed video content from the rendering control, which can achieve the effect of WYSIWYG, but if the display area is too small, the screenshots will also be very small. + TRTCSnapshotSourceTypeView = 1, + +}; + +///////////////////////////////////////////////////////////////////////////////// +// +// Definitions of network enumerated values +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 2.1 Use cases + * + * TRTC features targeted optimizations for common audio/video application scenarios to meet the differentiated requirements in various verticals. The main scenarios can be divided into the following two categories: + * - Live streaming scenario (LIVE): including `LIVE` (audio + video) and `VoiceChatRoom` (pure audio). + * In the live streaming scenario, users are divided into two roles: "anchor" and "audience". A single room can sustain up to 100,000 concurrent online users. This is suitable for live streaming to a large audience. + * - Real-Time scenario (RTC): including `VideoCall` (audio + video) and `AudioCall` (pure audio). + * In the real-time scenario, there is no role difference between users, but a single room can sustain only up to 300 concurrent online users. This is suitable for small-scale real-time communication. + */ +typedef NS_ENUM(NSInteger, TRTCAppScene) { + + /// In the video call scenario, 720p and 1080p HD image quality is supported. 
A single room can sustain up to 300 concurrent online users, and up to 50 of them can speak simultaneously. + /// Use cases: [one-to-one video call], [video conferencing with up to 300 participants], [online medical diagnosis], [small class], [video interview], etc. + TRTCAppSceneVideoCall = 0, + + /// In the interactive video live streaming scenario, mic can be turned on/off smoothly without waiting for switchover, and the anchor latency is as low as less than 300 ms. Live streaming to hundreds of thousands of concurrent users in the + /// audience role is supported with the playback latency down to 1,000 ms. + /// Use cases: [low-latency interactive live streaming], [big class], [anchor competition], [video dating room], [online interactive classroom], [remote training], [large-scale conferencing], etc. + ///@note In this scenario, you must use the `role` field in `TRTCParams` to specify the role of the current user. + TRTCAppSceneLIVE = 1, + + /// Audio call scenario, where the `SPEECH` sound quality is used by default. A single room can sustain up to 300 concurrent online users, and up to 50 of them can speak simultaneously. + /// Use cases: [one-to-one audio call], [audio conferencing with up to 300 participants], [audio chat], [online Werewolf], etc. + TRTCAppSceneAudioCall = 2, + + /// In the interactive audio live streaming scenario, mic can be turned on/off smoothly without waiting for switchover, and the anchor latency is as low as less than 300 ms. Live streaming to hundreds of thousands of concurrent users in the + /// audience role is supported with the playback latency down to 1,000 ms. + /// Use cases: [audio club], [online karaoke room], [music live room], [FM radio], etc. + ///@note In this scenario, you must use the `role` field in `TRTCParams` to specify the role of the current user. 
+ TRTCAppSceneVoiceChatRoom = 3, + +}; + +/** + * 2.2 Role + * + * Role is applicable only to live streaming scenarios (`TRTCAppSceneLIVE` and `TRTCAppSceneVoiceChatRoom`). Users are divided into two roles: + * - Anchor, who can publish their audio/video streams. There is a limit on the number of anchors. Up to 50 anchors are allowed to publish streams at the same time in one room. + * - Audience, who can only listen to or watch audio/video streams of anchors in the room. If they want to publish their streams, they need to switch to the "anchor" role first through {@link switchRole}. One room can sustain up to 100,000 concurrent + * online users in the audience role. + */ +typedef NS_ENUM(NSInteger, TRTCRoleType) { + + /// An anchor can publish their audio/video streams. There is a limit on the number of anchors. Up to 50 anchors are allowed to publish streams at the same time in one room. + TRTCRoleAnchor = 20, + + /// Audience can only listen to or watch audio/video streams of anchors in the room. If they want to publish their streams, they need to switch to the "anchor" role first through {@link switchRole}. One room can sustain up to 100,000 concurrent + /// online users in the audience role. + TRTCRoleAudience = 21, + +}; + +/** + * 2.3 QoS control mode (disused) + */ +typedef NS_ENUM(NSInteger, TRTCQosControlMode) { + + /// Client-based control, which is for internal debugging of SDK and shall not be used by users. + TRTCQosControlModeClient = 0, + + /// On-cloud control, which is the default and recommended mode. + TRTCQosControlModeServer = 1, + +}; + +/** + * 2.4 Image quality preference + * + * TRTC has two control modes in weak network environments: "ensuring clarity" and "ensuring smoothness". Both modes will give priority to the transfer of audio data. 
+ */ +typedef NS_ENUM(NSInteger, TRTCVideoQosPreference) { + + /// Ensuring smoothness: in this mode, when the current network is unable to transfer a clear and smooth video image, the smoothness of the image will be given priority, but there will be blurs. + TRTCVideoQosPreferenceSmooth = 1, + + /// Ensuring clarity (default value): in this mode, when the current network is unable to transfer a clear and smooth video image, the clarity of the image will be given priority, but there will be lags. + TRTCVideoQosPreferenceClear = 2, + +}; + +/** + * 2.5 Network quality + * + * TRTC evaluates the current network quality once every two seconds. The evaluation results are divided into six levels: `Excellent` indicates the best, and `Down` indicates the worst. + */ +typedef NS_ENUM(NSInteger, TRTCQuality) { + + /// Undefined + TRTCQuality_Unknown = 0, + + /// The current network is excellent + TRTCQuality_Excellent = 1, + + /// The current network is good + TRTCQuality_Good = 2, + + /// The current network is fair + TRTCQuality_Poor = 3, + + /// The current network is bad + TRTCQuality_Bad = 4, + + /// The current network is very bad + TRTCQuality_Vbad = 5, + + /// The current network cannot meet the minimum requirements of TRTC + TRTCQuality_Down = 6, + +}; + +/** + * 2.6 Audio/Video playback status + * + * This enumerated type is used in the video status changed API {@link onRemoteVideoStatusUpdated} to specify the current video status. + */ +typedef NS_ENUM(NSUInteger, TRTCAVStatusType) { + + /// Stopped + TRTCAVStatusStopped = 0, + + /// Playing + TRTCAVStatusPlaying = 1, + + /// Loading + TRTCAVStatusLoading = 2, + +}; + +/** + * 2.7 Reasons for playback status changes + * + * This enumerated type is used in the video status changed API {@link onRemoteVideoStatusUpdated} to specify the reason for the current video status change. 
+ */ +typedef NS_ENUM(NSUInteger, TRTCAVStatusChangeReason) { + + /// Default value + TRTCAVStatusChangeReasonInternal = 0, + + /// The stream enters the "Loading" state due to network congestion + TRTCAVStatusChangeReasonBufferingBegin = 1, + + /// The stream enters the "Playing" state after network recovery + TRTCAVStatusChangeReasonBufferingEnd = 2, + + /// As a start-related API was directly called locally, the stream enters the "Playing" state + TRTCAVStatusChangeReasonLocalStarted = 3, + + /// As a stop-related API was directly called locally, the stream enters the "Stopped" state + TRTCAVStatusChangeReasonLocalStopped = 4, + + /// As the remote user started (or resumed) publishing the video stream, the stream enters the "Loading" or "Playing" state + TRTCAVStatusChangeReasonRemoteStarted = 5, + + /// As the remote user stopped (or paused) publishing the video stream, the stream enters the "Stopped" state + TRTCAVStatusChangeReasonRemoteStopped = 6, + +}; + +///////////////////////////////////////////////////////////////////////////////// +// +// Definitions of audio enumerated values +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 3.1 Audio sample rate + * + * The audio sample rate is used to measure the audio fidelity. A higher sample rate indicates higher fidelity. If there is music in the use case, `TRTCAudioSampleRate48000` is recommended. + */ +typedef NS_ENUM(NSInteger, TRTCAudioSampleRate) { + + /// 16 kHz sample rate + TRTCAudioSampleRate16000 = 16000, + + /// 32 kHz sample rate + TRTCAudioSampleRate32000 = 32000, + + /// 44.1 kHz sample rate + TRTCAudioSampleRate44100 = 44100, + + /// 48 kHz sample rate + TRTCAudioSampleRate48000 = 48000, + +}; + +/** + * 3.2 Sound quality + * + * TRTC provides three well-tuned modes to meet the differentiated requirements for sound quality in various verticals: + * - Speech mode (Speech): it is suitable for application scenarios that focus on human communication. 
In this mode, the audio transfer is more resistant, and TRTC uses various voice processing technologies to ensure the optimal smoothness even in + * weak network environments. + * - Music mode (Music): it is suitable for scenarios with demanding requirements for music. In this mode, the amount of transferred audio data is very large, and TRTC uses various technologies to ensure that the high-fidelity details of music + * signals can be restored in each frequency band. + * - Default mode (Default): it is between `Speech` and `Music`. In this mode, the reproduction of music is better than that in `Speech` mode, and the amount of transferred data is much lower than that in `Music` mode; therefore, this mode has good + * adaptability to various scenarios. + */ +typedef NS_ENUM(NSInteger, TRTCAudioQuality) { + + /// Speech mode: sample rate: 16 kHz; mono channel; bitrate: 16 Kbps. This mode has the best resistance among all modes and is suitable for audio call scenarios, such as online meeting and audio call. + TRTCAudioQualitySpeech = 1, + + /// Default mode: sample rate: 48 kHz; mono channel; bitrate: 50 Kbps. This mode is between the speech mode and the music mode as the default mode in the SDK and is recommended. + TRTCAudioQualityDefault = 2, + + /// Music mode: sample rate: 48 kHz; full-band stereo; bitrate: 128 Kbps. This mode is suitable for scenarios where Hi-Fi music transfer is required, such as online karaoke and music live streaming. + TRTCAudioQualityMusic = 3, + +}; + +/** + * 3.3 Audio route (i.e., audio playback mode) + * + * "Audio route" determines whether the sound is played back from the speaker or receiver of a mobile device; therefore, this API is applicable only to mobile devices such as phones. + * Generally, a phone has two speakers: one is the receiver at the top, and the other is the stereo speaker at the bottom. 
+ * - If the audio route is set to the receiver, the volume is relatively low, and the sound can be heard clearly only when the phone is put near the ear. This mode has a high level of privacy and is suitable for answering calls. + * - If the audio route is set to the speaker, the volume is relatively high, so there is no need to put the phone near the ear. Therefore, this mode can implement the "hands-free" feature. + */ +typedef NS_ENUM(NSInteger, TRTCAudioRoute) { + + /// Speakerphone: the speaker at the bottom is used for playback (hands-free). With relatively high volume, it is used to play music out loud. + TRTCAudioModeSpeakerphone = 0, + + /// Earpiece: the receiver at the top is used for playback. With relatively low volume, it is suitable for call scenarios that require privacy. + TRTCAudioModeEarpiece = 1, + +}; + +/** + * 3.4 Audio reverb mode + * + * This enumerated value is used to set the audio reverb mode in the live streaming scenario and is often used in show live streaming. + */ +typedef NS_ENUM(NSInteger, TRTCReverbType) { + + /// Disable reverb + TRTCReverbType_0 = 0, + + /// KTV + TRTCReverbType_1 = 1, + + /// Small room + TRTCReverbType_2 = 2, + + /// Hall + TRTCReverbType_3 = 3, + + /// Deep + TRTCReverbType_4 = 4, + + /// Resonant + TRTCReverbType_5 = 5, + + /// Metallic + TRTCReverbType_6 = 6, + + /// Husky + TRTCReverbType_7 = 7, + +}; + +/** + * 3.5 Voice changing type + * + * This enumerated value is used to set the voice changing mode in the live streaming scenario and is often used in show live streaming. 
+ */ +typedef NS_ENUM(NSInteger, TRTCVoiceChangerType) { + + /// Disable voice changing + TRTCVoiceChangerType_0 = 0, + + /// Child + TRTCVoiceChangerType_1 = 1, + + /// Girl + TRTCVoiceChangerType_2 = 2, + + /// Middle-aged man + TRTCVoiceChangerType_3 = 3, + + /// Heavy metal + TRTCVoiceChangerType_4 = 4, + + /// Nasal + TRTCVoiceChangerType_5 = 5, + + /// Punk + TRTCVoiceChangerType_6 = 6, + + /// Trapped beast + TRTCVoiceChangerType_7 = 7, + + /// Otaku + TRTCVoiceChangerType_8 = 8, + + /// Electronic + TRTCVoiceChangerType_9 = 9, + + /// Robot + TRTCVoiceChangerType_10 = 10, + + /// Ethereal + TRTCVoiceChangerType_11 = 11, + +}; + +/** + * 3.6 System volume type (only for mobile devices) + * + * Smartphones usually have two types of system volume: call volume and media volume. + * - Call volume is designed for call scenarios. It comes with acoustic echo cancellation (AEC) and supports audio capturing by Bluetooth earphones, but its sound quality is average. + * If you cannot turn the volume down to 0 (i.e., mute the phone) using the volume buttons, then your phone is using call volume. + * - Media volume is designed for media scenarios such as music playback. AEC does not work when media volume is used, and Bluetooth earphones cannot be used for audio capturing. However, media volume delivers better music listening experience. + * If you are able to mute your phone using the volume buttons, then your phone is using media volume. + * The SDK offers three system volume control modes: auto, call volume, and media volume. + */ +typedef NS_ENUM(NSInteger, TRTCSystemVolumeType) { + + /// Auto: + /// In the auto mode, call volume is used for anchors, and media volume for audience. This mode is suitable for live streaming scenarios. + /// If the scenario you select during `enterRoom` is `TRTCAppSceneLIVE` or `TRTCAppSceneVoiceChatRoom`, the SDK will automatically use this mode. 
+ TRTCSystemVolumeTypeAuto = 0, + + /// Media volume: + /// In this mode, media volume is used in all scenarios. It is rarely used, mainly suitable for music scenarios with demanding requirements on audio quality. + /// Use this mode if most of your users use peripheral devices such as audio cards. Otherwise, it is not recommended. + TRTCSystemVolumeTypeMedia = 1, + + /// Call volume: + /// In this mode, the audio module does not change its work mode when users switch between anchors and audience, enabling seamless mic on/off. This mode is suitable for scenarios where users need to switch frequently between anchors and audience. + /// If the scenario you select during `enterRoom` is `TRTCAppSceneVideoCall` or `TRTCAppSceneAudioCall`, the SDK will automatically use this mode. + TRTCSystemVolumeTypeVOIP = 2, + +}; + +///////////////////////////////////////////////////////////////////////////////// +// +// Definitions of other enumerated values +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 4.1 Log level + * + * Different log levels indicate different levels of details and number of logs. We recommend you set the log level to `TRTCLogLevelInfo` generally. 
+ */ +typedef NS_ENUM(NSInteger, TRTCLogLevel) { + + /// Output logs at all levels + TRTCLogLevelVerbose = 0, + + /// Output logs at the DEBUG, INFO, WARNING, ERROR, and FATAL levels + TRTCLogLevelDebug = 1, + + /// Output logs at the INFO, WARNING, ERROR, and FATAL levels + TRTCLogLevelInfo = 2, + + /// Output logs at the WARNING, ERROR, and FATAL levels + TRTCLogLevelWarn = 3, + + /// Output logs at the ERROR and FATAL levels + TRTCLogLevelError = 4, + + /// Output logs at the FATAL level + TRTCLogLevelFatal = 5, + + /// Do not output any SDK logs + TRTCLogLevelNone = 6, + +}; + +/** + * 4.2 G-sensor switch (for mobile devices only) + */ +typedef NS_ENUM(NSInteger, TRTCGSensorMode) { + + /// Do not adapt to G-sensor orientation + /// This mode is the default value for desktop platforms. In this mode, the video image published by the current user is not affected by the change of the G-sensor orientation. + TRTCGSensorMode_Disable = 0, + + /// Adapt to G-sensor orientation + /// This mode is the default value on mobile platforms. In this mode, the video image published by the current user is adjusted according to the G-sensor orientation, while the orientation of the local preview image remains unchanged. + /// One of the adaptation modes currently supported by the SDK is as follows: when the phone or tablet is upside down, in order to ensure that the screen orientation seen by the remote user is normal, the SDK will automatically rotate the + /// published video image by 180 degrees. + /// If the UI layer of your application has enabled G-sensor adaption, we recommend you use the `UIFixLayout` mode. + TRTCGSensorMode_UIAutoLayout = 1, + + /// Adapt to G-sensor orientation + /// In this mode, the video image published by the current user is adjusted according to the G-sensor orientation, and the local preview image will also be rotated accordingly. 
+ /// One of the features currently supported is as follows: when the phone or tablet is upside down, in order to ensure that the screen orientation seen by the remote user is normal, the SDK will automatically rotate the published video image by + /// 180 degrees. + /// If the UI layer of your application doesn't support G-sensor adaption, but you want the video image in the SDK to adapt to the G-sensor orientation, we recommend you use the `UIFixLayout` mode. + TRTCGSensorMode_UIFixLayout = 2, + +}; + +/** + * 4.3 Screen sharing target type (for desktops only) + */ +typedef NS_ENUM(NSInteger, TRTCScreenCaptureSourceType) { + + /// Undefined + TRTCScreenCaptureSourceTypeUnknown = -1, + + /// The screen sharing target is the window of an application + TRTCScreenCaptureSourceTypeWindow = 0, + + /// The screen sharing target is the entire screen + TRTCScreenCaptureSourceTypeScreen = 1, + +}; + +/** + * 4.4 Layout mode of On-Cloud MixTranscoding + * + * TRTC's On-Cloud MixTranscoding service can mix multiple audio/video streams in the room into one stream. Therefore, you need to specify the layout scheme of the video images. The following layout modes are provided: + */ +typedef NS_ENUM(NSInteger, TRTCTranscodingConfigMode) { + + /// Undefined + TRTCTranscodingConfigMode_Unknown = 0, + + /// Manual layout mode + /// In this mode, you need to specify the precise position of each video image. This mode has the highest degree of freedom, but its ease of use is the worst: + ///- You need to enter all the parameters in `TRTCTranscodingConfig`, including the position coordinates of each video image (TRTCMixUser). + ///- You need to listen on the `onUserVideoAvailable()` and `onUserAudioAvailable()` event callbacks in `TRTCCloudDelegate` and constantly adjust the `mixUsers` parameter according to the audio/video status of each user with mic on in the current + /// room. 
+ TRTCTranscodingConfigMode_Manual = 1, + + /// Pure audio mode + /// This mode is suitable for pure audio scenarios such as audio call (AudioCall) and audio chat room (VoiceChatRoom). + ///- You only need to set it once through the `setMixTranscodingConfig()` API after room entry, and then the SDK will automatically mix the audio of all mic-on users in the room into the current user's live stream. + ///- You don't need to set the `mixUsers` parameter in `TRTCTranscodingConfig`; instead, you only need to set the `audioSampleRate`, `audioBitrate` and `audioChannels` parameters. + TRTCTranscodingConfigMode_Template_PureAudio = 2, + + /// Preset layout mode + /// This is the most popular layout mode, because it allows you to set the position of each video image in advance through placeholders, and then the SDK automatically adjusts it dynamically according to the number of video images in the room. + /// In this mode, you still need to set the `mixUsers` parameter, but you can set `userId` as a "placeholder". Placeholder values include: + /// - "$PLACE_HOLDER_REMOTE$": image of remote user. Multiple images can be set. + /// - "$PLACE_HOLDER_LOCAL_MAIN$": local camera image. Only one image can be set. + /// - "$PLACE_HOLDER_LOCAL_SUB$": local screen sharing image. Only one image can be set. + /// In this mode, you don't need to listen on the `onUserVideoAvailable()` and `onUserAudioAvailable()` callbacks in `TRTCCloudDelegate` to make real-time adjustments. + /// Instead, you only need to call `setMixTranscodingConfig()` once after successful room entry. Then, the SDK will automatically populate the placeholders you set with real `userId` values. + TRTCTranscodingConfigMode_Template_PresetLayout = 3, + + /// Screen sharing mode + /// This mode is suitable for screen sharing-based use cases such as online education and supported only by the SDKs for Windows and macOS. 
+ /// In this mode, the SDK will first build a canvas according to the target resolution you set (through the `videoWidth` and `videoHeight` parameters). + ///- Before the teacher enables screen sharing, the SDK will scale up the teacher's camera image and draw it onto the canvas. + ///- After the teacher enables screen sharing, the SDK will draw the video image shared on the screen onto the same canvas. + /// The purpose of this layout mode is to ensure consistency in the output resolution of the mixtranscoding module and avoid problems with blurred screen during course replay and webpage playback (web players don't support adjustable resolution). + /// Meanwhile, the audio of mic-on students will be mixed into the teacher's audio/video stream by default. + ///< br> + /// Video content is primarily the shared screen in teaching mode, and it is a waste of bandwidth to transfer camera image and screen image at the same time. + /// Therefore, the recommended practice is to directly draw the camera image onto the current screen through the `setLocalVideoRenderCallback` API. + /// In this mode, you don't need to set the `mixUsers` parameter in `TRTCTranscodingConfig`, and the SDK will not mix students' images so as not to interfere with the screen sharing effect. + ///< br> + /// You can set width x height in `TRTCTranscodingConfig` to 0 px x 0 px, and the SDK will automatically calculate a suitable resolution based on the aspect ratio of the user's current screen. + ///- If the teacher's current screen width is less than or equal to 1920 px, the SDK will use the actual resolution of the teacher's current screen. + ///- If the teacher's current screen width is greater than 1920 px, the SDK will select one of the three resolutions of 1920x1080 (16:9), 1920x1200 (16:10), and 1920x1440 (4:3) according to the current screen aspect ratio. 
+ TRTCTranscodingConfigMode_Template_ScreenSharing = 4, + +}; + +/** + * 4.5 Media recording type + * + * This enumerated type is used in the local media recording API {@link startLocalRecording} to specify whether to record audio/video files or pure audio files. + */ +typedef NS_ENUM(NSUInteger, TRTCRecordType) { + + /// Record audio only + TRTCRecordTypeAudio = 0, + + /// Record video only + TRTCRecordTypeVideo = 1, + + /// Record both audio and video + TRTCRecordTypeBoth = 2, + +}; + +/** + * 4.6 Stream mix input type + */ +typedef NS_ENUM(NSUInteger, TRTCMixInputType) { + + /// Unspecified. The SDK will determine the stream mix input type according to the value of the `pureAudio` parameter + TRTCMixInputTypeUndefined = 0, + + /// Mix both audio and video + TRTCMixInputTypeAudioVideo = 1, + + /// Mix video only + TRTCMixInputTypePureVideo = 2, + + /// Mix audio only + TRTCMixInputTypePureAudio = 3, + +}; + +/** + * 4.7 Device type (for desktop platforms only) + * + * This enumerated value is used to define three types of audio/video devices, namely, camera, mic, and speaker, so that the same device management API can control the three different types of devices. + * Starting from v8.0, TRTC redefines `TXMediaDeviceType` in `TXDeviceManager` to replace `TRTCMediaDeviceType` on legacy versions. + * Only the definition of `TRTCMediaDeviceType` is retained here for compatibility with customer code on legacy versions. 
+ */ +#if TARGET_OS_MAC && !TARGET_OS_IPHONE +typedef NS_ENUM(NSInteger, TRTCMediaDeviceType) { + TRTCMediaDeviceTypeUnknown = -1, ///< undefined device type + TRTCMediaDeviceTypeAudioInput = 0, ///< microphone + TRTCMediaDeviceTypeAudioOutput = 1, ///< speaker + TRTCMediaDeviceTypeVideoCamera = 2, ///< camera + TRTCMediaDeviceTypeVideoWindow = 3, ///< windows(for screen share) + TRTCMediaDeviceTypeVideoScreen = 4, ///< screen (for screen share) +} __attribute__((deprecated("use TXDeviceManager#TXMediaDeviceType instead"))); + +typedef TXMediaDeviceInfo TRTCMediaDeviceInfo __attribute__((deprecated("use TXDeviceManager#TXMediaDeviceInfo instead"))); +#endif + +/** + * 4.11 Audio recording content type + * + * This enumerated type is used in the audio recording API {@link startAudioRecording} to specify the content of the recorded audio. + */ +typedef NS_ENUM(NSUInteger, TRTCAudioRecordingContent) { + + /// Record both local and remote audio + TRTCAudioRecordingContentAll = 0, + + /// Record local audio only + TRTCAudioRecordingContentLocal = 1, + + /// Record remote audio only + TRTCAudioRecordingContentRemote = 2, + +}; + +///////////////////////////////////////////////////////////////////////////////// +// +// Definitions of core TRTC classes +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 5.1 Room entry parameters + * + * As the room entry parameters in the TRTC SDK, these parameters must be correctly set so that the user can successfully enter the audio/video room specified by `roomId` or `strRoomId`. + * For historical reasons, TRTC supports two types of room IDs: `roomId` and `strRoomId`. + * Note: do not mix `roomId` and `strRoomId`, because they are not interchangeable. For example, the number `123` and the string `123` are two completely different rooms in TRTC. + */ +@interface TRTCParams : NSObject + +///**Field description:** application ID, which is required. 
Tencent Cloud generates bills based on `sdkAppId`. +///**Recommended value:** the ID can be obtained on the account information page in the [TRTC console](https://console.cloud.tencent.com/rav/) after the corresponding application is created. +@property(nonatomic, assign) UInt32 sdkAppId; + +///**Field description:** user ID, which is required. It is the `userId` of the local user in UTF-8 encoding and acts as the username. +///**Recommended value:** if the ID of a user in your account system is "mike", `userId` can be set to "mike". +@property(nonatomic, copy, nonnull) NSString *userId; + +///**Field description:** user signature, which is required. It is the authentication signature corresponding to the current `userId` and acts as the login password for Tencent Cloud services. +///**Recommended value:** for the calculation method, please see [UserSig](https://cloud.tencent.com/document/product/647/17275). +@property(nonatomic, copy, nonnull) NSString *userSig; + +///**Field description:** numeric room ID. Users (userId) in the same room can see one another and make audio/video calls. +///**Recommended value:** value range: 1–4294967294. +///**Note:** `roomId` and `strRoomId` are mutually exclusive. If you decide to use `strRoomId`, then `roomId` should be entered as 0. If both are entered, `roomId` will be used. +///**Note:** do not mix `roomId` and `strRoomId`, because they are not interchangeable. For example, the number `123` and the string `123` are two completely different rooms in TRTC. +@property(nonatomic, assign) UInt32 roomId; + +///**Field description:** string-type room ID. Users (userId) in the same room can see one another and make audio/video calls. +///**Note:** `roomId` and `strRoomId` are mutually exclusive. If you decide to use `strRoomId`, then `roomId` should be entered as 0. If both are entered, `roomId` will be used. +///**Note:** do not mix `roomId` and `strRoomId`, because they are not interchangeable. 
For example, the number `123` and the string `123` are two completely different rooms in TRTC. +///**Recommended value:** the length limit is 64 bytes. The following 89 characters are supported: +/// - Uppercase and lowercase letters (a–z and A–Z) +/// - Digits (0–9) +/// - Space, "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", and ",". +@property(nonatomic, copy, nonnull) NSString *strRoomId; + +///**Field description:** role in the live streaming scenario, which is applicable only to the live streaming scenario ({@link TRTCAppSceneLIVE} or {@link TRTCAppSceneVoiceChatRoom}) but doesn't take effect in the call scenario. +///**Recommended value:** default value: anchor ({@link TRTCRoleAnchor}). +@property(nonatomic, assign) TRTCRoleType role; + +///**Field description:** specified `streamId` in Tencent Cloud CSS, which is optional. After setting this field, you can play back the user's audio/video stream on Tencent Cloud CSS CDN through a standard pull scheme (FLV or HLS). +///**Recommended value:** this parameter can contain up to 64 bytes and can be left empty. We recommend you use `sdkappid_roomid_userid_main` as the `streamid`, which is easier to identify and will not cause conflicts in your multiple applications. +///**Note:** to use Tencent Cloud CSS CDN, you need to enable the auto-relayed live streaming feature on the "Function Configuration" page in the [console](https://console.cloud.tencent.com/trtc/) first. +/// For more information, please see [CDN Relayed Live Streaming](https://cloud.tencent.com/document/product/647/16826). +@property(nonatomic, copy, nullable) NSString *streamId; + +///**Field description:** on-cloud recording field, which is optional and used to specify whether to record the user's audio/video stream in the cloud. +/// For more information, please see [On-Cloud Recording and Playback](https://cloud.tencent.com/document/product/647/16823). 
+///**Recommended value:** it can contain up to 64 bytes. Letters (a–z and A–Z), digits (0–9), underscores, and hyphens are allowed. +///<p> +/// Scheme 1. Manual recording +/// 1. Enable on-cloud recording in "Application Management" > "On-cloud Recording Configuration" in the [console](https://console.cloud.tencent.com/trtc). +/// 2. Set "Recording Mode" to "Manual Recording". +/// 3. After manual recording is set, in a TRTC room, only users with the `userDefineRecordId` parameter set will have video recording files in the cloud, while users without this parameter set will not. +/// 4. The recording file will be named in the format of "userDefineRecordId_start time_end time" in the cloud. +///<p> +/// Scheme 2. Auto-recording +/// 1. You need to enable on-cloud recording in "Application Management" > "On-cloud Recording Configuration" in the [console](https://console.cloud.tencent.com/trtc). +/// 2. Set "Recording Mode" to "Auto-recording". +/// 3. After auto-recording is set, any user who upstreams audio/video in a TRTC room will have a video recording file in the cloud. +/// 4. The file will be named in the format of "userDefineRecordId_start time_end time". If `userDefineRecordId` is not specified, the file will be named in the format of "streamId_start time_end time". +///<br> +@property(nonatomic, copy, nullable) NSString *userDefineRecordId; + +///**Field description:** permission credential used for permission control, which is optional. If you want only users with the specified `userId` values to enter a room, you need to use `privateMapKey` to restrict the permission. +///**Recommended value:** we recommend you use this parameter only if you have high security requirements. For more information, please see [Enabling Advanced Permission Control](https://cloud.tencent.com/document/product/647/32240). +@property(nonatomic, copy, nullable) NSString *privateMapKey; + +///**Field description:** business data, which is optional. 
This field is needed only by some advanced features. +///**Recommended value:** do not set this field on your own. +@property(nonatomic, copy, nullable) NSString *bussInfo; +@end + +/** + * 5.2 Video encoding parameters + * + * These settings determine the quality of image viewed by remote users as well as the image quality of recorded video files in the cloud. + */ +@interface TRTCVideoEncParam : NSObject + +///**Field description:** video resolution +///**Recommended value** +/// - For mobile video call, we recommend you select a resolution of 360x640 or below and select `Portrait` (portrait resolution) for `resMode`. +/// - For mobile live streaming, we recommend you select a resolution of 540x960 and select `Portrait` (portrait resolution) for `resMode`. +/// - For desktop platforms (Windows and macOS), we recommend you select a resolution of 640x360 or above and select `Landscape` (landscape resolution) for `resMode`. +///**Note:** to use a portrait resolution, please specify `resMode` as `Portrait`; for example, when used together with `Portrait`, 640x360 represents 360x640. +@property(nonatomic, assign) TRTCVideoResolution videoResolution; + +///**Field description:** resolution mode (landscape/portrait) +///**Recommended value:** for mobile platforms (iOS and Android), `Portrait` is recommended; for desktop platforms (Windows and macOS), `Landscape` is recommended. +///**Note:** to use a portrait resolution, please specify `resMode` as `Portrait`; for example, when used together with `Portrait`, 640x360 represents 360x640. +@property(nonatomic, assign) TRTCVideoResolutionMode resMode; + +///**Field description:** video capturing frame rate +///**Recommended value:** 15 or 20 fps. If the frame rate is lower than 5 fps, there will be obvious lagging; if lower than 10 fps but higher than 5 fps, there will be slight lagging; if higher than 20 fps, the bandwidth will be wasted (the frame +/// rate of movies is generally 24 fps). 
+///**Note:** the front cameras on certain Android phones do not support a capturing frame rate higher than 15 fps. For some Android phones that focus on beautification features, the capturing frame rate of the front cameras may be lower than 10 +/// fps. +@property(nonatomic, assign) int videoFps; + +///**Field description:** target video bitrate. The SDK encodes streams at the target video bitrate and will actively reduce the bitrate only in weak network environments. +///**Recommended value:** please see the optimal bitrate for each specification in `TRTCVideoResolution`. You can also slightly increase the optimal bitrate. +/// For example, `TRTCVideoResolution_1280_720` corresponds to the target bitrate of 1,200 Kbps. You can also set the bitrate to 1,500 Kbps for higher definition. +///**Note:** you can set the `videoBitrate` and `minVideoBitrate` parameters at the same time to restrict the SDK's adjustment range of the video bitrate: +/// - If you want to "ensure clarity while allowing lag in weak network environments", you can set `minVideoBitrate` to 60% of `videoBitrate`. +/// - If you want to "ensure smoothness while allowing blur in weak network environments", you can set `minVideoBitrate` to a low value, for example, 100 Kbps. +/// - If you set `videoBitrate` and `minVideoBitrate` to the same value, it is equivalent to disabling the adaptive adjustment capability of the SDK for the video bitrate. +@property(nonatomic, assign) int videoBitrate; + +///**Field description:** minimum video bitrate. The SDK will reduce the bitrate to as low as the value specified by `minVideoBitrate` to ensure the smoothness only if the network conditions are poor. +///**Note:** default value: 0, indicating that a reasonable value of the lowest bitrate will be automatically calculated by the SDK according to the resolution you specify. 
+///**Recommended value:** you can set the `videoBitrate` and `minVideoBitrate` parameters at the same time to restrict the SDK's adjustment range of the video bitrate: +/// - If you want to "ensure clarity while allowing lag in weak network environments", you can set `minVideoBitrate` to 60% of `videoBitrate`. +/// - If you want to "ensure smoothness while allowing blur in weak network environments", you can set `minVideoBitrate` to a low value, for example, 100 Kbps. +/// - If you set `videoBitrate` and `minVideoBitrate` to the same value, it is equivalent to disabling the adaptive adjustment capability of the SDK for the video bitrate. +@property(nonatomic, assign) int minVideoBitrate; + +///**Field description:** whether to allow dynamic resolution adjustment. Once enabled, this field will affect on-cloud recording. +///**Recommended value:** this feature is suitable for scenarios that don't require on-cloud recording. After it is enabled, the SDK will intelligently select a suitable resolution according to the current network conditions to avoid the inefficient +/// encoding mode of "large resolution + small bitrate". +///**Note:** default value: NO. If you need on-cloud recording, please do not enable this feature, because if the video resolution changes, the MP4 file recorded in the cloud cannot be played back normally by common players. +@property(nonatomic, assign) BOOL enableAdjustRes; +@end + +/** + * 5.3 Network QoS control parameter set + * + * Network QoS control parameter. The settings determine the QoS control policy of the SDK in weak network conditions (e.g., whether to "ensure clarity" or "ensure smoothness"). 
+ */ +@interface TRTCNetworkQosParam : NSObject + +///**Field description:** whether to ensure smoothness or clarity +///**Recommended value:** ensuring clarity +///**Note:** this parameter mainly affects the audio/video performance of TRTC in weak network environments: +/// - Ensuring smoothness: in this mode, when the current network is unable to transfer a clear and smooth video image, the smoothness of the image will be given priority, but there will be blurs. +/// - Ensuring clarity (default value): in this mode, when the current network is unable to transfer a clear and smooth video image, the clarity of the image will be given priority, but there will be lags. +@property(nonatomic, assign) TRTCVideoQosPreference preference; + +///**Field description:** QoS control mode (disused) +///**Recommended value:** on-cloud control +///**Note:** please set the on-cloud control mode (TRTCQosControlModeServer). +@property(nonatomic, assign) TRTCQosControlMode controlMode; +@end + +/** + * 5.4 Rendering parameters of video image + * + * You can use these parameters to control the video image rotation angle, fill mode, and mirror mode. + */ +@interface TRTCRenderParams : NSObject + +///**Field description:** clockwise image rotation angle +///**Recommended value:** rotation angles of 90, 180, and 270 degrees are supported. Default value: {@link TRTCVideoRotation_0} +@property(nonatomic) TRTCVideoRotation rotation; + +///**Field description:** image fill mode +///**Recommended value:** fill (the image may be stretched or cropped) or fit (there may be black bars in unmatched areas). Default value: {@link TRTCVideoFillMode_Fill} +@property(nonatomic) TRTCVideoFillMode fillMode; + +///**Field description:** image mirror mode +///**Recommended value:** default value: {@link TRTCVideoMirrorType_Auto} +@property(nonatomic) TRTCVideoMirrorType mirrorType; + +@end + +/** + * 5.5 Network quality + * + * This indicates the quality of the network. 
You can use it to display the network quality of each user on the UI. + */ +@interface TRTCQualityInfo : NSObject + +/// User ID +@property(nonatomic, copy, nullable) NSString *userId; + +/// Network quality +@property(nonatomic, assign) TRTCQuality quality; +@end + +/** + * 5.6 Volume + * + * This indicates the audio volume value. You can use it to display the volume of each user in the UI. + */ +@interface TRTCVolumeInfo : NSObject + +///`userId` of the speaker. An empty value indicates the local user. +@property(nonatomic, copy, nullable) NSString *userId; + +/// Volume of the speaker. Value range: 0–100. +@property(assign, nonatomic) NSUInteger volume; + +@end + +/** + * 5.7 Network speed testing parameters + * + * You can test the network speed through the {@link startSpeedTest:} interface before the user enters the room (this API cannot be called during a call). + */ +@interface TRTCSpeedTestParams : NSObject + +/// Application identification, please refer to the relevant instructions in {@link TRTCParams}. +@property(nonatomic) uint32_t sdkAppId; + +/// User identification, please refer to the relevant instructions in {@link TRTCParams}. +@property(nonatomic, copy, nonnull) NSString *userId; + +/// User signature, please refer to the relevant instructions in {@link TRTCParams}. +@property(nonatomic, copy, nonnull) NSString *userSig; + +/// Expected upstream bandwidth (kbps, value range: 10 to 5000, no uplink bandwidth test when it is 0). +@property(nonatomic) NSInteger expectedUpBandwidth; + +/// Expected downstream bandwidth (kbps, value range: 10 to 5000, no downlink bandwidth test when it is 0). +@property(nonatomic) NSInteger expectedDownBandwidth; +@end + +/** + * 5.8 Network speed test result + * + * The {@link startSpeedTest:} API can be used to test the network speed before a user enters a room (this API cannot be called during a call). + */ +@interface TRTCSpeedTestResult : NSObject + +/// Whether the network speed test is successful. 
+@property(nonatomic) BOOL success; + +/// Error message for network speed test. +@property(nonatomic, copy, nonnull) NSString *errMsg; + +/// Server IP address. +@property(nonatomic, copy, nonnull) NSString *ip; + +/// Network quality, which is tested and calculated based on the internal evaluation algorithm. For more information, please see {@link TRTCQuality} +@property(nonatomic) TRTCQuality quality; + +/// Upstream packet loss rate between 0 and 1.0. For example, 0.3 indicates that 3 data packets may be lost in every 10 packets sent to the server. +@property(nonatomic) float upLostRate; + +/// Downstream packet loss rate between 0 and 1.0. For example, 0.2 indicates that 2 data packets may be lost in every 10 packets received from the server. +@property(nonatomic) float downLostRate; + +/// Delay in milliseconds, which is the round-trip time between the current device and TRTC server. The smaller the value, the better. The normal value range is 10–100 ms. +@property(nonatomic) uint32_t rtt; + +/// Upstream bandwidth (in kbps, -1: invalid value). +@property(nonatomic) NSInteger availableUpBandwidth; + +/// Downstream bandwidth (in kbps, -1: invalid value). +@property(nonatomic) NSInteger availableDownBandwidth; +@end + +/** + * 5.10 Video frame information + * + * `TRTCVideoFrame` is used to describe the raw data of a frame of the video image, which is the image data before frame encoding or after frame decoding. + */ +@interface TRTCVideoFrame : NSObject + +///**Field description:** video pixel format +@property(nonatomic, assign) TRTCVideoPixelFormat pixelFormat; + +///**Field description:** video data structure type +@property(nonatomic, assign) TRTCVideoBufferType bufferType; + +///**Field description:** video data when `bufferType` is {@link TRTCVideoBufferType_PixelBuffer}, which carries the `PixelBuffer` unique to iOS. 
+@property(nonatomic, assign, nullable) CVPixelBufferRef pixelBuffer; + +///**Field description:** video data when `bufferType` is {@link TRTCVideoBufferType_NSData}, which carries the memory data blocks in `NSData` type. +@property(nonatomic, retain, nullable) NSData *data; + +///**Field description:** video texture ID, i.e., video data when `bufferType` is {@link TRTCVideoBufferType_Texture}, which carries the texture data used for OpenGL rendering. +@property(nonatomic, assign) GLuint textureId; + +///**Field description:** video width +///**Recommended value:** please enter the width of the video data passed in. +@property(nonatomic, assign) uint32_t width; + +///**Field description:** video height +///**Recommended value:** please enter the height of the video data passed in. +@property(nonatomic, assign) uint32_t height; + +///**Field description:** video frame timestamp in milliseconds +///**Recommended value:** this parameter can be set to 0 for custom video capturing. In this case, the SDK will automatically set the `timestamp` field. However, please "evenly" set the calling interval of `sendCustomVideoData`. +@property(nonatomic, assign) uint64_t timestamp; + +///**Field description:** clockwise rotation angle of video pixels +@property(nonatomic, assign) TRTCVideoRotation rotation; + +@end + +/** + * 5.11 Audio frame data + */ +@interface TRTCAudioFrame : NSObject + +///**Field description:** audio data +@property(nonatomic, retain, nonnull) NSData *data; + +///**Field description:** sample rate +@property(nonatomic, assign) TRTCAudioSampleRate sampleRate; + +///**Field description:** number of sound channels +@property(nonatomic, assign) int channels; + +///**Field description:** timestamp in ms +@property(nonatomic, assign) uint64_t timestamp; + +///**Field description:** extra data in the audio frame; messages that remote users add to their audio frames through `onLocalProcessedAudioFrame` will be called back through this field. 
+@property(nonatomic, retain, nullable) NSData *extraData; +@end + +/** + * 5.12 Description information of each video image in On-Cloud MixTranscoding + * + * `TRTCMixUser` is used to specify the location, size, layer, and stream type of each video image in On-Cloud MixTranscoding. + */ +@interface TRTCMixUser : NSObject + +///**Field description:** user ID +@property(nonatomic, copy, nonnull) NSString *userId; + +///**Field description:** ID of the room where this audio/video stream is located (an empty value indicates the local room ID) +@property(nonatomic, copy, nullable) NSString *roomID; + +///**Field description:** specify the coordinate area of this video image in px +@property(nonatomic, assign) CGRect rect; + +///**Field description:** specify the level of this video image (value range: 1–15; the value must be unique) +@property(nonatomic, assign) int zOrder; + +///**Field description:** specify whether this video image is the primary stream image ({@link TRTCVideoStreamTypeBig}) or substream image ({@link TRTCVideoStreamTypeSub}). +@property(nonatomic) TRTCVideoStreamType streamType; + +///**Field description:** specify whether this stream mixes audio only +///**Recommended value:** default value: NO +///**Note:** this field has been disused. We recommend you use the new field `inputType` introduced in v8.5. +@property(nonatomic, assign) BOOL pureAudio; + +///**Field description:** specify the mixed content of this stream (audio only, video only, or audio and video). This field is an upgrade to the `pureAudio` field. +///**Recommended value:** default value: TRTCMixInputTypeUndefined, which represents the value of `pureAudio`. +/// - If you are using TRTC for the first time and have not set the `pureAudio` field before, you can set this field according to your actual needs. We recommend you not set `pureAudio` again. 
+/// - If you have already used the `pureAudio` field on a legacy version and want to keep its setting, you can set `inputType` to `TRTCMixInputTypeUndefined`. +@property(nonatomic, assign) TRTCMixInputType inputType; + +@end + +/** + * 5.13 Layout and transcoding parameters of On-Cloud MixTranscoding + * + * These parameters are used to specify the layout position information of each video image and the encoding parameters of mixtranscoding during On-Cloud MixTranscoding. + */ +@interface TRTCTranscodingConfig : NSObject + +///**Field description:** layout mode +///**Recommended value:** please choose a value according to your business needs. The preset mode has better applicability. +@property(nonatomic, assign) TRTCTranscodingConfigMode mode; + +///**Field description:** `appId` of Tencent Cloud CSS +///**Recommended value:** please click **Application Management** > **Application Information** in the [TRTC console](https://console.cloud.tencent.com/trtc) and get the `appId` in **Relayed Live Streaming Info**. +@property(nonatomic) int appId; + +///**Field description:** `bizId` of Tencent Cloud CSS +///**Recommended value:** please click **Application Management** > **Application Information** in the [TRTC console](https://console.cloud.tencent.com/trtc) and get the `bizId` in **Relayed Live Streaming Info**. +@property(nonatomic) int bizId; + +///**Field description:** specify the target resolution (width) of On-Cloud MixTranscoding +///**Recommended value:** 360 px. If you only mix audio streams, please set both `width` and `height` to 0; otherwise, there will be a black background in the live stream after mixtranscoding. +@property(nonatomic, assign) int videoWidth; + +///**Field description:** specify the target resolution (height) of On-Cloud MixTranscoding +///**Recommended value:** 640 px. If you only mix audio streams, please set both `width` and `height` to 0; otherwise, there will be a black background in the live stream after mixtranscoding. 
+@property(nonatomic, assign) int videoHeight; + +///**Field description:** specify the target video bitrate (Kbps) of On-Cloud MixTranscoding +///**Recommended value:** if you enter 0, TRTC will estimate a reasonable bitrate value based on `videoWidth` and `videoHeight`. You can also refer to the recommended bitrate value in the video resolution enumeration definition (in the comment +/// section). +@property(nonatomic, assign) int videoBitrate; + +///**Field description:** specify the target video frame rate (fps) of On-Cloud MixTranscoding +///**Recommended value:** default value: 15 fps. Value range: (0,30]. +@property(nonatomic, assign) int videoFramerate; + +///**Field description:** specify the target video keyframe interval (GOP) of On-Cloud MixTranscoding +///**Recommended value:** default value: 2 (in seconds). Value range: [1,8]. +@property(nonatomic, assign) int videoGOP; + +///**Field description:** specify the background color of the mixed video image. +///**Recommended value:** default value: 0x000000, which means black and is in the format of hex number; for example: "0x61B9F1" represents the RGB color (97,158,241). +@property(nonatomic, assign) int backgroundColor; + +///**Field description:** specify the background image of the mixed video image. +///**Recommended value:** default value: null, indicating not to set the background image. +///**Note:** you need to upload the background image by clicking **Add image** in "Application Management" > "Function Configuration" > "Material Management" in the [console](https://console.cloud.tencent.com/trtc) in advance. +/// After the upload is successful, you can get the corresponding "image ID". Then, you need to convert it into a string and set it as `backgroundImage`. 
+/// For example, if the "image ID" is 63, you can set `backgroundImage = @"63"`; +@property(nonatomic, copy, nullable) NSString *backgroundImage; + +///**Field description:** specify the target audio sample rate of On-Cloud MixTranscoding +///**Recommended value:** default value: 48000 Hz. Valid values: 12000 Hz, 16000 Hz, 22050 Hz, 24000 Hz, 32000 Hz, 44100 Hz, 48000 Hz. +@property(nonatomic, assign) int audioSampleRate; + +///**Field description:** specify the target audio bitrate of On-Cloud MixTranscoding +///**Recommended value:** default value: 64 Kbps. Value range: [32,192]. +@property(nonatomic, assign) int audioBitrate; + +///**Field description:** specify the number of sound channels of On-Cloud MixTranscoding +///**Recommended value:** default value: 1, which means mono channel. Valid values: 1: mono channel; 2: dual channel. +@property(nonatomic, assign) int audioChannels; + +///**Field description:** specify the audio encoding type of On-Cloud MixTranscoding +///**Recommended value:** default value: 0, which means LC-AAC. Valid values: 0: LC-AAC; 1: HE-AAC; 2: HE-AACv2. +///**Note:** HE-AAC and HE-AACv2 only support the [48000, 44100, 32000, 24000, 16000] sample rates. +///**Note:** HE-AACv2 only supports dual channel. +///**Note:** HE-AAC and HE-AACv2 take effect only if the output `streamId` is specified. +@property(nonatomic, assign) int audioCodec; + +///**Field description:** specify the position, size, layer, and stream type of each video image in On-Cloud MixTranscoding +///**Recommended value:** this field is an array in `TRTCMixUser` type, where each element represents the information of a video image. +@property(nonatomic, copy, nonnull) NSArray *mixUsers; + +///**Field description:** ID of the live stream output to CDN +///**Recommended value:** default value: null, that is, the audio/video streams in the room will be mixed into the audio/video stream of the caller of this API. 
+/// - If you don't set this parameter, the SDK will execute the default logic, that is, it will mix the multiple audio/video streams in the room into the audio/video stream of the caller of this API, i.e., A + B => A. +/// - If you set this parameter, the SDK will mix the audio/video streams in the room into the live stream you specify, i.e., A + B => C (C is the `streamId` you specify). +@property(nonatomic, copy, nullable) NSString *streamId; + +@end + +/** + * 5.14 Push parameters required to be set when publishing audio/video streams to non-Tencent Cloud CDN + * + * TRTC's backend service supports publishing audio/video streams to third-party live CDN service providers through the standard RTMP protocol. + * If you use the Tencent Cloud CSS CDN service, you don't need to care about this parameter; instead, just use the {@link startPublish} API. + */ +@interface TRTCPublishCDNParam : NSObject + +///**Field description:** `appId` of Tencent Cloud CSS +///**Recommended value:** please click **Application Management** > **Application Information** in the [TRTC console](https://console.cloud.tencent.com/trtc) and get the `appId` in **Relayed Live Streaming Info**. +@property(nonatomic) int appId; + +///**Field description:** `bizId` of Tencent Cloud CSS +///**Recommended value:** please click **Application Management** > **Application Information** in the [TRTC console](https://console.cloud.tencent.com/trtc) and get the `bizId` in **Relayed Live Streaming Info**. +@property(nonatomic) int bizId; + +///**Field description:** specify the push address (in RTMP format) of this audio/video stream at the third-party live streaming service provider +///**Recommended value:** the push URL rules vary greatly by service provider. Please enter a valid push URL according to the requirements of the target service provider. TRTC's backend server will push audio/video streams in the standard format to +/// the third-party service provider according to the URL you enter. 
+///**Note:** the push URL must be in RTMP format and meet the specifications of your target live streaming service provider; otherwise, the target service provider will reject the push requests from TRTC's backend service. +@property(nonatomic, strong, nonnull) NSString *url; + +///**Field description:** specify the stream ID of this audio/video stream at the third-party live streaming service provider +///**Recommended value:** default value: null, that is, the audio/video streams in the room will be pushed to the target service provider of the caller of this API. +@property(nonatomic, strong, nonnull) NSString *streamId; + +@end + +/** + * 5.15 Local audio file recording parameters + * + * This parameter is used to specify the recording parameters in the audio recording API {@link startAudioRecording}. + */ +@interface TRTCAudioRecordingParams : NSObject + +///**Field description:** storage path of the audio recording file, which is required. +///**Note:** this path must be accurate to the file name and extension. The extension determines the format of the audio recording file. Currently, supported formats include PCM, WAV, and AAC. +/// For example, if you specify the path as `mypath/record/audio.aac`, it means that you want the SDK to generate an audio recording file in AAC format. +/// Please specify a valid path with read/write permissions; otherwise, the audio recording file cannot be generated. +@property(nonatomic, strong, nonnull) NSString *filePath; + +///**Field description:** Audio recording content type. +///**Note:** Record all local and remote audio by default. +@property(nonatomic, assign) TRTCAudioRecordingContent recordingContent; + +@end + +/** + * 5.16 Local media file recording parameters + * + * This parameter is used to specify the recording parameters in the local media file recording API {@link startLocalRecording}. + * The `startLocalRecording` API is an enhanced version of the `startAudioRecording` API. 
The former can record video files, while the latter can only record audio files. + */ +@interface TRTCLocalRecordingParams : NSObject + +///**Field description:** address of the recording file, which is required. Please ensure that the path is valid with read/write permissions; otherwise, the recording file cannot be generated. +///**Note:** this path must be accurate to the file name and extension. The extension determines the format of the recording file. Currently, only the MP4 format is supported. +/// For example, if you specify the path as `mypath/record/test.mp4`, it means that you want the SDK to generate a local video file in MP4 format. +/// Please specify a valid path with read/write permissions; otherwise, the recording file cannot be generated. +@property(nonatomic, copy, nonnull) NSString *filePath; + +///**Field description:** media recording type, which is `TRTCRecordTypeBoth` by default, indicating to record both audio and video. +@property(nonatomic, assign) TRTCRecordType recordType; + +///**Field description:** `interval` is the update frequency of the recording information in milliseconds. Value range: 1000–10000. Default value: -1, indicating not to call back +@property(nonatomic, assign) int interval; + +@end + +/** + * 5.17 Sound effect parameter (disused) + * + * "Sound effects" in TRTC refer to some short audio files (usually only a few seconds), such as "applause" and "laughter". + * This parameter is used to specify the path and number of playback times of a sound effect file (short audio file) in the sound effect playback API {@link TRTCCloud#playAudioEffect} on legacy versions. + * After v7.3, the sound effect API has been replaced by a new {@link TXAudioEffectManager#startPlayMusic} API. + * When you specify the {@link TXAudioMusicParam} parameter of `startPlayMusic`, if `isShortFile` is set to `YES`, the file is a "sound effect" file. 
+ */ +@interface TRTCAudioEffectParam : NSObject + ++ (_Nonnull instancetype)new __attribute__((unavailable("Use -initWith:(int)effectId path:(NSString *)path instead"))); +- (_Nonnull instancetype)init __attribute__((unavailable("Use -initWith:(int)effectId path:(NSString *)path instead"))); + +///**Field description:** sound effect ID +///**Note:** the SDK supports playing multiple sound effects. IDs are used to distinguish different sound effects and control their start, end, volume, etc. +@property(nonatomic, assign) int effectId; + +///**Field description:** sound effect file path. Supported file formats include AAC, MP3, and M4A. +@property(nonatomic, copy, nonnull) NSString *path; + +///**Field description:** number of times the sound effect is looped +///**Valid values:** 0 or any positive integer. 0 (default) indicates that the sound effect is played once, 1 twice, and so on. +@property(nonatomic, assign) int loopCount; + +///**Field description:** whether the sound effect is upstreamed +///**Recommended value:** YES: when the sound effect is played back locally, it will be upstreamed to the cloud and can be heard by remote users. NO: the sound effect will not be upstreamed to the cloud and can only be heard locally. Default value: +/// NO +@property(nonatomic, assign) BOOL publish; + +///**Field description:** sound effect volume +///**Recommended value:** value range: 0–100. Default value: 100 +@property(nonatomic, assign) int volume; + +- (_Nonnull instancetype)initWith:(int)effectId path:(NSString *_Nonnull)path; +@end + +/** + * 5.18 Room switch parameter + * + * This parameter is used for the room switch API {@link switchRoom}, which can quickly switch a user from one room to another. + */ +@interface TRTCSwitchRoomConfig : NSObject + +///**Field description:** numeric room ID, which is optional. Users in the same room can see one another and make audio/video calls. +///**Recommended value:** value range: 1–4294967294. 
+///**Note:** either `roomId` or `strRoomId` must be entered. If both are entered, `roomId` will be used. +@property(nonatomic, assign) UInt32 roomId; + +///**Field description:** string-type room ID, which is optional. Users in the same room can see one another and make audio/video calls. +///**Note:** either `roomId` or `strRoomId` must be entered. If both are entered, `roomId` will be used. +@property(nonatomic, copy, nullable) NSString *strRoomId; + +///**Field description:** user signature, which is optional. It is the authentication signature corresponding to the current `userId` and acts as the login password. +/// If you don't specify the newly calculated `userSig` during room switch, the SDK will continue to use the `userSig` you specified during room entry (enterRoom). +/// This requires you to ensure that the old `userSig` is still within the validity period allowed by the signature at the moment of room switch; otherwise, room switch will fail. +///**Recommended value:** for the calculation method, please see [UserSig](https://cloud.tencent.com/document/product/647/17275). +@property(nonatomic, copy, nullable) NSString *userSig; + +///**Field description:** permission credential used for permission control, which is optional. If you want only users with the specified `userId` values to enter a room, you need to use `privateMapKey` to restrict the permission. +///**Recommended value:** we recommend you use this parameter only if you have high security requirements. For more information, please see [Enabling Advanced Permission Control](https://cloud.tencent.com/document/product/647/32240). +@property(nonatomic, copy, nullable) NSString *privateMapKey; + +@end + +/** + * 5.19 Format parameter of custom audio callback + * + * This parameter is used to set the relevant format (including sample rate and number of channels) of the audio data called back by the SDK in the APIs related to custom audio callback. 
+ */ +@interface TRTCAudioFrameDelegateFormat : NSObject + +///**Field description:** sample rate +///**Recommended value:** default value: 48000 Hz. Valid values: 16000, 32000, 44100, 48000. +@property(nonatomic, assign) TRTCAudioSampleRate sampleRate; + +///**Field description:** number of sound channels +///**Recommended value:** default value: 1, which means mono channel. Valid values: 1: mono channel; 2: dual channel. +@property(nonatomic, assign) int channels; + +///**Field description:** number of sample points +///**Recommended value:** the value must be an integer multiple of sampleRate/100. +@property(nonatomic, assign) int samplesPerCall; + +@end + +/** + * 5.21 Screen sharing target information (for desktop systems only) + * + * When users share the screen, they can choose to share the entire desktop or only the window of a certain program. + * `TRTCScreenCaptureSourceInfo` is used to describe the information of the target to be shared, including ID, name, and thumbnail. The fields in this structure are read-only. + */ +#if TARGET_OS_MAC && !TARGET_OS_IPHONE +@interface TRTCScreenCaptureSourceInfo : NSObject + +///**Field description:** capturing source type (i.e., whether to share the entire screen or a certain window) +@property(assign, nonatomic) TRTCScreenCaptureSourceType type; + +///**Field description:** capturing source ID. For a window, this field indicates a window ID; for a screen, this field indicates a display ID. 
+@property(copy, nonatomic, nullable) NSString *sourceId; + +///**Field description:** capturing source name (encoded in UTF-8) +@property(copy, nonatomic, nullable) NSString *sourceName; + +///**Field description:** extended window information +@property(nonatomic, strong, nullable) NSDictionary *extInfo; + +///**Field description:** thumbnail of the shared window +@property(nonatomic, readonly, nullable) NSImage *thumbnail; + +///**Field description:** icon of the shared window +@property(nonatomic, readonly, nullable) NSImage *icon; + +@end +#endif + +/** + * 5.24 Parameter of the parallel strategy of remote audio streams + * + * This parameter is used to set the parallel strategy of remote audio streams. + */ +@interface TRTCAudioParallelParams : NSObject + +///**Field description:** Max number of remote audio streams. Default value: 0 +/// if maxCount > 0 and the number of people in the room is more than `maxCount`, SDK will select `maxCount` of remote audio streams in real time, which can reduce performance cost greatly. +/// if maxCount = 0, SDK won't limit the number of remote audio streams, which may cause performance cost when there are many speakers in one room. +@property(assign, nonatomic) UInt32 maxCount; + +///**Field description:** Users included that must be able to play. +///**Note:** A list of user IDs. These users must be able to play and do not participate in smart selection. +/// The number of `includeUsers` must be less than `maxCount`. Otherwise, the setting of the parallel strategy of remote audio streams is invalid. +///`includeUsers` is valid when `maxCount` > 0. When `includeUsers` takes effect, the max number of remote audio streams is (`maxCount` - the number of valid users in `includeUsers`). 
+@property(nonatomic, strong, nullable) NSArray *includeUsers; + +@end + +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloudDelegate.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloudDelegate.h new file mode 100644 index 0000000..4014f3f --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TRTCCloudDelegate.h @@ -0,0 +1,901 @@ +/** + * Module: TRTCCloudDelegate @ TXLiteAVSDK + * Function: event callback APIs for TRTC’s video call feature + */ +/// @defgroup TRTCCloudDelegate_ios TRTCCloudDelegate +/// Tencent Cloud TRTC Event Notification Interface +/// @{ +#import +#import "TRTCCloudDef.h" +#import "TXLiteAVCode.h" + +NS_ASSUME_NONNULL_BEGIN + +@class TRTCCloud; +@class TRTCStatistics; + +@protocol TRTCCloudDelegate +@optional + +///////////////////////////////////////////////////////////////////////////////// +// +// Error and warning events +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Error and warning events +/// @{ + +/** + * 1.1 Error event callback + * + * Error event, which indicates that the SDK threw an irrecoverable error such as room entry failure or failure to start device + * For more information, see [Error Codes](https://intl.cloud.tencent.com/document/product/647/35124). + * + * @param errCode Error code + * @param errMsg Error message + * @param extInfo Extended field. Certain error codes may carry extra information for troubleshooting. + */ +- (void)onError:(TXLiteAVError)errCode errMsg:(nullable NSString *)errMsg extInfo:(nullable NSDictionary *)extInfo; + +/** + * 1.2 Warning event callback + * + * Warning event, which indicates that the SDK threw an error requiring attention, such as video lag or high CPU usage + * For more information, see [Error Codes](https://intl.cloud.tencent.com/document/product/647/35124). + * + * @param warningCode Warning code + * @param warningMsg Warning message + * @param extInfo Extended field. 
Certain warning codes may carry extra information for troubleshooting. + */ +- (void)onWarning:(TXLiteAVWarning)warningCode warningMsg:(nullable NSString *)warningMsg extInfo:(nullable NSDictionary *)extInfo; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Room event callback +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Room event callback +/// @{ + +/** + * 2.1 Whether room entry is successful + * + * After calling the `enterRoom()` API in `TRTCCloud` to enter a room, you will receive the `onEnterRoom(result)` callback from `TRTCCloudDelegate`. + * - If room entry succeeded, `result` will be a positive number (`result` > 0), indicating the time in milliseconds (ms) the room entry takes. + * - If room entry failed, `result` will be a negative number (result < 0), indicating the error code for the failure. + * For more information on the error codes for room entry failure, see [Error Codes](https://intl.cloud.tencent.com/document/product/647/35124). + * + * @note + * 1. In TRTC versions below 6.6, the `onEnterRoom(result)` callback is returned only if room entry succeeds, and the `onError()` callback is returned if room entry fails. + * 2. In TRTC 6.6 and above, the `onEnterRoom(result)` callback is returned regardless of whether room entry succeeds or fails, and the `onError()` callback is also returned if room entry fails. + * + * @param result If `result` is greater than 0, it indicates the time (in ms) the room entry takes; if `result` is less than 0, it represents the error code for room entry. + */ +- (void)onEnterRoom:(NSInteger)result; + +/** + * 2.2 Room exit + * + * Calling the `exitRoom()` API in `TRTCCloud` will trigger the execution of room exit-related logic, such as releasing resources of audio/video devices and codecs. + * After all resources occupied by the SDK are released, the SDK will return the `onExitRoom()` callback. 
+ * + * If you need to call `enterRoom()` again or switch to another audio/video SDK, please wait until you receive the `onExitRoom()` callback. + * Otherwise, you may encounter problems such as the camera or mic being occupied. + * + * @param reason Reason for room exit. `0`: the user called `exitRoom` to exit the room; `1`: the user was removed from the room by the server; `2`: the room was dismissed. + */ +- (void)onExitRoom:(NSInteger)reason; + +/** + * 2.3 Role switching + * + * You can call the `switchRole()` API in `TRTCCloud` to switch between the anchor and audience roles. This is accompanied by a line switching process. + * After the switching, the SDK will return the `onSwitchRole()` event callback. + * + * @param errCode Error code. `ERR_NULL` indicates a successful switch. For more information, please see [Error Codes](https://intl.cloud.tencent.com/document/product/647/35124). + * @param errMsg Error message + */ +- (void)onSwitchRole:(TXLiteAVError)errCode errMsg:(nullable NSString *)errMsg; + +/** + * 2.4 Result of room switching + * + * You can call the `switchRoom()` API in `TRTCCloud` to switch from one room to another. + * After the switching, the SDK will return the `onSwitchRoom()` event callback. + * + * @param errCode Error code. `ERR_NULL` indicates a successful switch. For more information, please see [Error Codes](https://intl.cloud.tencent.com/document/product/647/35124). + * @param errMsg Error message + */ +- (void)onSwitchRoom:(TXLiteAVError)errCode errMsg:(nullable NSString *)errMsg; + +/** + * 2.5 Result of requesting cross-room call + * + * You can call the `connectOtherRoom()` API in `TRTCCloud` to establish a video call with the anchor of another room. This is the “anchor competition” feature. + * The caller will receive the `onConnectOtherRoom()` callback, which can be used to determine whether the cross-room call is successful. 
+ * If it is successful, all users in either room will receive the `onUserVideoAvailable()` callback from the anchor of the other room. + * + * @param userId The user ID of the anchor (in another room) to be called + * @param errCode Error code. `ERR_NULL` indicates that cross-room connection is established successfully. For more information, please see [Error Codes](https://intl.cloud.tencent.com/document/product/647/35124). + * @param errMsg Error message + */ +- (void)onConnectOtherRoom:(NSString *)userId errCode:(TXLiteAVError)errCode errMsg:(nullable NSString *)errMsg; + +/** + * 2.6 Result of ending cross-room call + */ +- (void)onDisconnectOtherRoom:(TXLiteAVError)errCode errMsg:(nullable NSString *)errMsg; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// User event callback +// +///////////////////////////////////////////////////////////////////////////////// +/// @name User event callback +/// @{ + +/** + * 3.1 A user entered the room + * + * Due to performance concerns, this callback works differently in different scenarios (i.e., `AppScene`, which you can specify by setting the second parameter when calling `enterRoom`). + * - Live streaming scenarios (`TRTCAppSceneLIVE` or `TRTCAppSceneVoiceChatRoom`): in live streaming scenarios, a user is either in the role of an anchor or audience. The callback is returned only when an anchor enters the room. + * - Call scenarios (`TRTCAppSceneVideoCall` or `TRTCAppSceneAudioCall`): in call scenarios, the concept of roles does not apply (all users can be considered as anchors), and the callback is returned when any user enters the room. + * + * @note + * 1. The `onRemoteUserEnterRoom` callback indicates that a user entered the room, but it does not necessarily mean that the user enabled audio or video. + * 2. If you want to know whether a user enabled video, we recommend you use the `onUserVideoAvailable()` callback. 
+ * @param userId User ID of the remote user + */ +- (void)onRemoteUserEnterRoom:(NSString *)userId; + +/** + * 3.2 A user exited the room + * + * As with `onRemoteUserEnterRoom`, this callback works differently in different scenarios (i.e., `AppScene`, which you can specify by setting the second parameter when calling `enterRoom`). + * - Live streaming scenarios (`TRTCAppSceneLIVE` or `TRTCAppSceneVoiceChatRoom`): the callback is triggered only when an anchor exits the room. + * - Call scenarios (`TRTCAppSceneVideoCall` or `TRTCAppSceneAudioCall`): in call scenarios, the concept of roles does not apply, and the callback is returned when any user exits the room. + * + * @param userId User ID of the remote user + * @param reason Reason for room exit. `0`: the user exited the room voluntarily; `1`: the user exited the room due to timeout; `2`: the user was removed from the room. + */ +- (void)onRemoteUserLeaveRoom:(NSString *)userId reason:(NSInteger)reason; + +/** + * 3.3 A remote user published/unpublished primary stream video + * + * The primary stream is usually used for camera images. If you receive the `onUserVideoAvailable(userId, YES)` callback, it indicates that the user has available primary stream video. + * You can then call {@link startRemoteView} to subscribe to the remote user’s video. If the subscription is successful, you will receive the `onFirstVideoFrame(userid)` callback, which indicates that the first video frame of the user is rendered. + * + * If you receive the `onUserVideoAvailable(userId, NO)` callback, it indicates that the video of the remote user is disabled, which may be because the user called {@link muteLocalVideo} or {@link stopLocalPreview}. + * + * @param userId User ID of the remote user + * @param available Whether the user published (or unpublished) primary stream video. 
`YES`: published; `NO`: unpublished + */ +- (void)onUserVideoAvailable:(NSString *)userId available:(BOOL)available; + +/** + * 3.4 A remote user published/unpublished substream video + * + * The substream is usually used for screen sharing images. If you receive the `onUserSubStreamAvailable(userId, YES)` callback, it indicates that the user has available substream video. + * You can then call {@link startRemoteSubStreamView} to subscribe to the remote user’s video. If the subscription is successful, you will receive the `onFirstVideoFrame(userid)` callback, which indicates that the first frame of the user is rendered. + * + * @note The API used to display substream images is {@link startRemoteSubStreamView}, not {@link startRemoteView}. + * + * @param userId User ID of the remote user + * @param available Whether the user published (or unpublished) substream video. `YES`: published; `NO`: unpublished + */ +- (void)onUserSubStreamAvailable:(NSString *)userId available:(BOOL)available; + +/** + * 3.5 A remote user published/unpublished audio + * + * If you receive the `onUserAudioAvailable(userId, YES)` callback, it indicates that the user published audio. + * - In auto-subscription mode, the SDK will play the user’s audio automatically. + * - In manual subscription mode, you can call {@link muteRemoteAudio}(userid, NO) to play the user’s audio. + * + * @note The auto-subscription mode is used by default. You can switch to the manual subscription mode by calling {@link setDefaultStreamRecvMode}, but it must be called before room entry for the switch to take effect. + * + * @param userId User ID of the remote user + * @param available Whether the user published (or unpublished) audio. 
`YES`: published; `NO`: unpublished + */ +- (void)onUserAudioAvailable:(NSString *)userId available:(BOOL)available; + +/** + * 3.6 The SDK started rendering the first video frame of the local or a remote user + * + * The SDK returns this event callback when it starts rendering your first video frame or that of a remote user. The `userId` in the callback can help you determine whether the frame is yours or a remote user’s. + * - If `userId` is empty, it indicates that the SDK has started rendering your first video frame. The precondition is that you have called {@link startLocalPreview} or {@link startScreenCapture}. + * - If `userId` is not empty, it indicates that the SDK has started rendering the first video frame of a remote user. The precondition is that you have called {@link startRemoteView} to subscribe to the user’s video. + * + * @note + * 1. The callback of the first local video frame being rendered is triggered only after you call {@link startLocalPreview} or {@link startScreenCapture}. + * 2. The callback of the first video frame of a remote user being rendered is triggered only after you call {@link startRemoteView} or {@link startRemoteSubStreamView}. + * + * @param userId The user ID of the local or a remote user. If it is empty, it indicates that the first local video frame is available; if it is not empty, it indicates that the first video frame of a remote user is available. + * @param streamType Video stream type. The primary stream (`Main`) is usually used for camera images, and the substream (`Sub`) for screen sharing images. + * @param width Video width + * @param height Video height + */ +- (void)onFirstVideoFrame:(NSString *)userId streamType:(TRTCVideoStreamType)streamType width:(int)width height:(int)height; + +/** + * 3.7 The SDK started playing the first audio frame of a remote user + * + * The SDK returns this callback when it plays the first audio frame of a remote user. 
The callback is not returned for the playing of the first audio frame of the local user. + * + * @param userId User ID of the remote user + */ +- (void)onFirstAudioFrame:(NSString *)userId; + +/** + * 3.8 The first local video frame was published + * + * After you enter a room and call {@link startLocalPreview} or {@link startScreenCapture} to enable local video capturing (whichever happens first), + * the SDK will start video encoding and publish the local video data via its network module to the cloud. + * It returns the `onSendFirstLocalVideoFrame` callback after publishing the first local video frame. + * + * @param streamType Video stream type. The primary stream (`Main`) is usually used for camera images, and the substream (`Sub`) for screen sharing images. + */ +- (void)onSendFirstLocalVideoFrame:(TRTCVideoStreamType)streamType; + +/** + * 3.9 The first local audio frame was published + * + * After you enter a room and call {@link startLocalAudio} to enable audio capturing (whichever happens first), + * the SDK will start audio encoding and publish the local audio data via its network module to the cloud. + * The SDK returns the `onSendFirstLocalAudioFrame` callback after sending the first local audio frame. + */ +- (void)onSendFirstLocalAudioFrame; + +/** + * 3.10 Change of remote video status + * + * You can use this callback to get the status (`Playing`, `Loading`, or `Stopped`) of the video of each remote user and display it on the UI. + * @param userId User ID + * @param streamType Video stream type. The primary stream (`Main`) is usually used for camera images, and the substream (`Sub`) for screen sharing images. 
+ * @param status Video status, which may be `Playing`, `Loading`, or `Stopped` + * @param reason Reason for the change of status + * @param extraInfo Extra information + */ +- (void)onRemoteVideoStatusUpdated:(NSString *)userId streamType:(TRTCVideoStreamType)streamType streamStatus:(TRTCAVStatusType)status reason:(TRTCAVStatusChangeReason)reason extrainfo:(nullable NSDictionary *)info; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Callback of statistics on network and technical metrics +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Callback of statistics on network and technical metrics +/// @{ + +/** + * 4.1 Real-time network quality statistics + * + * This callback is returned every 2 seconds and notifies you of the upstream and downstream network quality detected by the SDK. + * The SDK uses a built-in proprietary algorithm to assess the current latency, bandwidth, and stability of the network and returns a result. + * If the result is `1` (excellent), it means that the current network conditions are excellent; if it is `6` (down), it means that the current network conditions are too bad to support TRTC calls. + * + * @note In the returned parameters `localQuality` and `remoteQuality`, if `userId` is empty, it indicates that the network quality statistics of the local user are returned. Otherwise, the network quality statistics of a remote user are returned. + * + * @param localQuality Upstream network quality + * @param remoteQuality Downstream network quality + */ +- (void)onNetworkQuality:(TRTCQualityInfo *)localQuality remoteQuality:(NSArray *)remoteQuality; + +/** + * 4.2 Real-time statistics on technical metrics + * + * This callback is returned every 2 seconds and notifies you of the statistics on technical metrics related to video, audio, and network. 
The metrics are listed in {@link TRTCStatistics}: + * - Video statistics: video resolution (`resolution`), frame rate (`FPS`), bitrate (`bitrate`), etc. + * - Audio statistics: audio sample rate (`samplerate`), number of audio channels (`channel`), bitrate (`bitrate`), etc. + * - Network statistics: the round trip time (`rtt`) between the SDK and the cloud (SDK -> Cloud -> SDK), package loss rate (`loss`), upstream traffic (`sentBytes`), downstream traffic (`receivedBytes`), etc. + * + * @note If you want to learn about only the current network quality and do not want to spend much time analyzing the statistics returned by this callback, we recommend you use {@link onNetworkQuality}. + * @param statistics Statistics, including local statistics and the statistics of remote users. For details, please see {@link TRTCStatistics}. + */ +- (void)onStatistics:(TRTCStatistics *)statistics; + +/** + * 4.3 Callback of network speed test + * + * The callback is triggered by {@link startSpeedTest:}. + * + * @param result Speed test data, including loss rates, rtt and bandwidth rates, please refer to {@link TRTCSpeedTestResult} for details. + */ +- (void)onSpeedTestResult:(TRTCSpeedTestResult *)result; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Callback of connection to the cloud +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Callback of connection to the cloud +/// @{ + +/** + * 5.1 The SDK was disconnected from the cloud + * + * The SDK returns this callback when it is disconnected from the cloud, which may be caused by network unavailability or change of network, for example, when the user walks into an elevator. + * After returning this callback, the SDK will attempt to reconnect to the cloud, and will return the {@link onTryToReconnect} callback. When it is reconnected, it will return the {@link onConnectionRecovery} callback. 
+ * In other words, the SDK proceeds from one event to the next in the following order: + *
+ *         [onConnectionLost] =====> [onTryToReconnect] =====> [onConnectionRecovery]
+ *               /|\                                                     |
+ *                |------------------------------------------------------|
+ * 
+ */ +- (void)onConnectionLost; + +/** + * 5.2 The SDK is reconnecting to the cloud + * + * When the SDK is disconnected from the cloud, it returns the {@link onConnectionLost} callback. It then attempts to reconnect and returns this callback ({@link onTryToReconnect}). After it is reconnected, it returns the {@link onConnectionRecovery} + * callback. + */ +- (void)onTryToReconnect; + +/** + * 5.3 The SDK is reconnected to the cloud + * + * When the SDK is disconnected from the cloud, it returns the {@link onConnectionLost} callback. It then attempts to reconnect and returns the {@link onTryToReconnect} callback. After it is reconnected, it returns this callback ({@link + * onConnectionRecovery}). + */ +- (void)onConnectionRecovery; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Callback of hardware events +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Callback of hardware events +/// @{ + +/** + * 6.1 The camera is ready + * + * After you call {@link startLocalPreview}, the SDK will try to start the camera and return this callback if the camera is started. + * If it fails to start the camera, it’s probably because the application does not have access to the camera or the camera is being used. + * You can capture the {@link onError} callback to learn about the exception and let users know via UI messages. + */ +- (void)onCameraDidReady; + +/** + * 6.2 The mic is ready + * + * After you call {@link startLocalAudio}, the SDK will try to start the mic and return this callback if the mic is started. + * If it fails to start the mic, it’s probably because the application does not have access to the mic or the mic is being used. + * You can capture the {@link onError} callback to learn about the exception and let users know via UI messages. 
+ */ +- (void)onMicDidReady; + +/** + * 6.3 The audio route changed (for mobile devices only) + * + * Audio route is the route (speaker or receiver) through which audio is played. + * - When audio is played through the receiver, the volume is relatively low, and the sound can be heard only when the phone is put near the ear. This mode has a high level of privacy and is suitable for answering calls. + * - When audio is played through the speaker, the volume is relatively high, and there is no need to put the phone near the ear. This mode enables the "hands-free" feature. + * + * @param route Audio route, i.e., the route (speaker or receiver) through which audio is played + * @param fromRoute The audio route used before the change + */ +#if TARGET_OS_IPHONE +- (void)onAudioRouteChanged:(TRTCAudioRoute)route fromRoute:(TRTCAudioRoute)fromRoute; +#endif + +/** + * 6.4 Volume + * + * The SDK can assess the volume of each channel and return this callback on a regular basis. You can display, for example, a waveform or volume bar on the UI based on the statistics returned. + * You need to first call {@link enableAudioVolumeEvaluation} to enable the feature and set the interval for the callback. + * Note that the SDK returns this callback at the specified interval regardless of whether someone is speaking in the room. When no one is speaking in the room, `userVolumes` is empty, and `totalVolume` is `0`. + * + * @note `userVolumes` is an array. If `userId` is empty, the elements in the array represent the volume of the local user’s audio. Otherwise, they represent the volume of a remote user’s audio. + * + * @param userVolumes An array that represents the volume of all users who are speaking in the room. Value range: 0-100 + * @param totalVolume The total volume of all remote users. 
Value range: 0-100 + */ +- (void)onUserVoiceVolume:(NSArray *)userVolumes totalVolume:(NSInteger)totalVolume; + +/** + * 6.5 The status of a local device changed (for desktop OS only) + * + * The SDK returns this callback when a local device (camera, mic, or speaker) is connected or disconnected. + * + * @param deviceId Device ID + * @param deviceType Device type + * @param state Device status. `0`: disconnected; `1`: connected + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)onDevice:(NSString *)deviceId type:(TRTCMediaDeviceType)deviceType stateChanged:(NSInteger)state; +#endif + +/** + * 6.6 The capturing volume of the mic changed + * + * On desktop OS such as macOS and Windows, users can set the capturing volume of the mic in the audio control panel. + * The higher volume a user sets, the higher the volume of raw audio captured by the mic. + * On some keyboards and laptops, users can also mute the mic by pressing a key (whose icon is a crossed out mic). + * + * When users set the mic capturing volume via the UI or a keyboard shortcut, the SDK will return this callback. + * + * @note You need to call {@link enableAudioVolumeEvaluation} and set the callback interval (`interval` > 0) to enable the callback. To disable the callback, set `interval` to `0`. + * + * @param volume System audio capturing volume, which users can set in the audio control panel. Value range: 0-100 + * @param muted Whether the mic is muted. `YES`: muted; `NO`: unmuted + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)onAudioDeviceCaptureVolumeChanged:(NSInteger)volume muted:(BOOL)muted; +#endif + +/** + * 6.7 The playback volume changed + * + * On desktop OS such as macOS and Windows, users can set the system’s playback volume in the audio control panel. + * On some keyboards and laptops, users can also mute the speaker by pressing a key (whose icon is a crossed out speaker). 
+ * + * When users set the system’s playback volume via the UI or a keyboard shortcut, the SDK will return this callback. + * + * @note You need to call {@link enableAudioVolumeEvaluation} and set the callback interval (`interval` > 0) to enable the callback. To disable the callback, set `interval` to `0`. + * + * @param volume The system playback volume, which users can set in the audio control panel. Value range: 0-100 + * @param muted Whether the speaker is muted. `YES`: muted; `NO`: unmuted + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)onAudioDevicePlayoutVolumeChanged:(NSInteger)volume muted:(BOOL)muted; +#endif + +/** + * 6.8 Whether system audio capturing is enabled successfully (for macOS only) + * + * On macOS, you can call {@link startSystemAudioLoopback} to install an audio driver and have the SDK capture the audio played back by the system. + * In use cases such as video teaching and music live streaming, the teacher can use this feature to let the SDK capture the sound of the video played by his or her computer, so that students in the room can hear the sound too. + * The SDK returns this callback after trying to enable system audio capturing. To determine whether it is actually enabled, pay attention to the error parameter in the callback. + * + * @param err If it is `ERR_NULL`, system audio capturing is enabled successfully. Otherwise, it is not. 
+ */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (void)onSystemAudioLoopbackError:(TXLiteAVError)err; +#endif + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Callback of the receipt of a custom message +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Callback of the receipt of a custom message +/// @{ + +/** + * 7.1 Receipt of custom message + * + * When a user in a room uses {@link sendCustomCmdMsg} to send a custom message, other users in the room can receive the message through the `onRecvCustomCmdMsg` callback. + * + * @param userId User ID + * @param cmdID Command ID + * @param seq Message serial number + * @param message Message data + */ +- (void)onRecvCustomCmdMsgUserId:(NSString *)userId cmdID:(NSInteger)cmdID seq:(UInt32)seq message:(NSData *)message; + +/** + * 7.2 Loss of custom message + * + * When you use {@link sendCustomCmdMsg} to send a custom UDP message, even if you enable reliable transfer (by setting `reliable` to `YES`), there is still a chance of message loss. Reliable transfer only helps maintain a low probability of message + * loss, which meets the reliability requirements in most cases. If the sender sets `reliable` to `YES`, the SDK will use this callback to notify the recipient of the number of custom messages lost during a specified time period (usually 5s) in the + * past. + * + * @note The recipient receives this callback only if the sender sets `reliable` to `YES`. + * @param userId User ID + * @param cmdID Command ID + * @param errCode Error code + * @param missed Number of lost messages + */ +- (void)onMissCustomCmdMsgUserId:(NSString *)userId cmdID:(NSInteger)cmdID errCode:(NSInteger)errCode missed:(NSInteger)missed; + +/** + * 7.3 Receipt of SEI message + * + * If a user in the room uses {@link sendSEIMsg} to send an SEI message via video frames, other users in the room can receive the message through the `onRecvSEIMsg` callback. 
+ * + * @param userId User ID + * @param message Data + */ +- (void)onRecvSEIMsg:(NSString *)userId message:(NSData *)message; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// CDN event callback +// +///////////////////////////////////////////////////////////////////////////////// +/// @name CDN event callback +/// @{ + +/** + * 8.1 Started publishing to Tencent Cloud CSS CDN + * + * When you call {@link startPublishing} to publish streams to Tencent Cloud CSS CDN, the SDK will sync the command to the CVM immediately. + * The SDK will then receive the execution result from the CVM and return the result to you via this callback. + * + * @param err `0`: successful; other values: failed + * @param errMsg Error message + */ +- (void)onStartPublishing:(int)err errMsg:(NSString *)errMsg; + +/** + * 8.2 Stopped publishing to Tencent Cloud CSS CDN + * + * When you call {@link stopPublishing} to stop publishing streams to Tencent Cloud CSS CDN, the SDK will sync the command to the CVM immediately. + * The SDK will then receive the execution result from the CVM and return the result to you via this callback. + * + * @param err `0`: successful; other values: failed + * @param errMsg Error message + */ +- (void)onStopPublishing:(int)err errMsg:(NSString *)errMsg; + +/** + * 8.3 Started publishing to non-Tencent Cloud’s live streaming CDN + * + * When you call {@link startPublishCDNStream} to start publishing streams to a non-Tencent Cloud’s live streaming CDN, the SDK will sync the command to the CVM immediately. + * The SDK will then receive the execution result from the CVM and return the result to you via this callback. + * + * @note If you receive a callback that the command is executed successfully, it only means that your command was sent to Tencent Cloud’s backend server. If the CDN vendor does not accept your streams, the publishing will still fail. 
+ * @param err `0`: successful; other values: failed + * @param errMsg Error message + */ +- (void)onStartPublishCDNStream:(int)err errMsg:(NSString *)errMsg; + +/** + * 8.4 Stopped publishing to non-Tencent Cloud’s live streaming CDN + * + * When you call {@link stopPublishCDNStream} to stop publishing to a non-Tencent Cloud’s live streaming CDN, the SDK will sync the command to the CVM immediately. + * The SDK will then receive the execution result from the CVM and return the result to you via this callback. + * + * @param err `0`: successful; other values: failed + * @param errMsg Error message + */ +- (void)onStopPublishCDNStream:(int)err errMsg:(NSString *)errMsg; + +/** + * 8.5 Set the layout and transcoding parameters for On-Cloud MixTranscoding + * + * When you call {@link setMixTranscodingConfig} to modify the layout and transcoding parameters for On-Cloud MixTranscoding, the SDK will sync the command to the CVM immediately. + * The SDK will then receive the execution result from the CVM and return the result to you via this callback. + * + * @param err `0`: successful; other values: failed + * @param errMsg Error message + */ +- (void)onSetMixTranscodingConfig:(int)err errMsg:(NSString *)errMsg; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Screen sharing event callback +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Screen sharing event callback +/// @{ + +/** + * 9.1 Screen sharing started + * + * The SDK returns this callback when you call {@link startScreenCapture} and other APIs to start screen sharing. + */ +- (void)onScreenCaptureStarted; + +/** + * 9.2 Screen sharing was paused + * + * The SDK returns this callback when you call {@link pauseScreenCapture} to pause screen sharing. + * @param reason Reason. + * - `0`: the user paused screen sharing. + * - `1`: screen sharing was paused because the shared window became invisible(Mac). 
screen sharing was paused because setting parameters(Windows). + * - `2`: screen sharing was paused because the shared window became minimum(only for Windows). + * - `3`: screen sharing was paused because the shared window became invisible(only for Windows). + */ +- (void)onScreenCapturePaused:(int)reason; + +/** + * 9.3 Screen sharing was resumed + * + * The SDK returns this callback when you call {@link resumeScreenCapture} to resume screen sharing. + * @param reason Reason. + * - `0`: the user resumed screen sharing. + * - `1`: screen sharing was resumed automatically after the shared window became visible again(Mac). screen sharing was resumed automatically after setting parameters(Windows). + * - `2`: screen sharing was resumed automatically after the shared window became minimize recovery(only for Windows). + * - `3`: screen sharing was resumed automatically after the shared window became visible again(only for Windows). + */ +- (void)onScreenCaptureResumed:(int)reason; + +/** + * 9.4 Screen sharing stopped + * + * The SDK returns this callback when you call {@link stopScreenCapture} to stop screen sharing. + * @param reason Reason. `0`: the user stopped screen sharing; `1`: screen sharing stopped because the shared window was closed. + */ +- (void)onScreenCaptureStoped:(int)reason; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Callback of local recording and screenshot events +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Callback of local recording and screenshot events +/// @{ + +/** + * 10.1 Local recording started + * + * When you call {@link startLocalRecording} to start local recording, the SDK returns this callback to notify you whether recording is started successfully. + * @param errCode Error code. 
`0`: recording started successfully; `-1`: failed to start recording; `-2`: incorrect file extension + * @param storagePath Storage path of recording file + */ +- (void)onLocalRecordBegin:(NSInteger)errCode storagePath:(NSString *)storagePath; + +/** + * 10.2 Local media is being recorded + * + * The SDK returns this callback regularly after local recording is started successfully via the calling of {@link startLocalRecording}. + * You can capture this callback to stay up to date with the status of the recording task. + * You can set the callback interval when calling {@link startLocalRecording}. + * + * @param duration Cumulative duration of recording, in milliseconds + * @param storagePath Storage path of recording file + */ +- (void)onLocalRecording:(NSInteger)duration storagePath:(NSString *)storagePath; + +/** + * 10.3 Local recording stopped + * + * When you call {@link stopLocalRecording} to stop local recording, the SDK returns this callback to notify you of the recording result. + * @param errCode Error code. `0`: recording succeeded; `-1`: recording failed; `-2`: recording was ended due to change of resolution or switch between the landscape and portrait mode. + * @param storagePath Storage path of recording file + */ +- (void)onLocalRecordComplete:(NSInteger)errCode storagePath:(NSString *)storagePath; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Disused callbacks (please use the new ones) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Disused callbacks (please use the new ones) +/// @{ + +/** + * An anchor entered the room (disused) + * + * @deprecated This callback is not recommended in the new version. Please use {@link onRemoteUserEnterRoom} instead. 
+ */ +- (void)onUserEnter:(NSString *)userId __attribute__((deprecated("use onRemoteUserLeaveRoom instead"))); + +/** + * An anchor left the room (disused) + * + * @deprecated This callback is not recommended in the new version. Please use {@link onRemoteUserLeaveRoom} instead. + */ +- (void)onUserExit:(NSString *)userId reason:(NSInteger)reason __attribute__((deprecated("use onRemoteUserLeaveRoom instead"))); + +/** + * Audio effects ended (disused) + * + * @deprecated This callback is not recommended in the new version. Please use {@link ITXAudioEffectManager} instead. + * Audio effects and background music can be started using the same API ({@link startPlayMusic}) now instead of separate ones. + */ +- (void)onAudioEffectFinished:(int)effectId code:(int)code __attribute__((deprecated("use ITXAudioEffectManager.startPlayMusic instead"))); + +/// @} +@end // End of class TRTCCloudDelegate + +///////////////////////////////////////////////////////////////////////////////// +// +// Callback of custom video processing +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Callback of custom video processing +/// @{ + +@protocol TRTCVideoRenderDelegate + +/** + * Custom video rendering + * + * If you have configured the callback of custom rendering for local or remote video, the SDK will return to you via this callback video frames that are otherwise sent to the rendering control, so that you can customize rendering. + * @param frame Video frames to be rendered + * @param userId `userId` of the video source. This parameter can be ignored if the callback is for local video (`setLocalVideoRenderDelegate`). + * @param streamType Stream type. The primary stream (`Main`) is usually used for camera images, and the substream (`Sub`) for screen sharing images. 
+ */ +@optional +- (void)onRenderVideoFrame:(TRTCVideoFrame *_Nonnull)frame userId:(NSString *__nullable)userId streamType:(TRTCVideoStreamType)streamType; + +@end // End of class TRTCVideoRenderDelegate + +@protocol TRTCVideoFrameDelegate + +/** + * Video processing by third-party beauty filters + * + * If you use a third-party beauty filter component, you need to configure this callback in `TRTCCloud` to have the SDK return to you video frames that are otherwise pre-processed by TRTC. + * You can then send the video frames to the third-party beauty filter component for processing. As the data returned can be read and modified, the result of processing can be synced to TRTC for subsequent encoding and publishing. + * + * @param srcFrame Used to carry images captured by TRTC via the camera + * @param dstFrame Used to receive video images processed by third-party beauty filters + * @note Currently, only the OpenGL texture scheme is supported(PC supports TRTCVideoBufferType_Buffer format Only) + * + * Case 1: the beauty filter component generates new textures + * If the beauty filter component you use generates a frame of new texture (for the processed image) during image processing, please set `dstFrame.textureId` to the ID of the new texture in the callback function. + *
+ * - (uint32_t)onProcessVideoFrame:(TRTCVideoFrame * _Nonnull)srcFrame dstFrame:(TRTCVideoFrame * _Nonnull)dstFrame {
+ *     self.frameID += 1;
+ *     dstFrame.pixelBuffer = [[FURenderer shareRenderer] renderPixelBuffer:srcFrame.pixelBuffer
+ *                                                              withFrameId:self.frameID
+ *                                                                    items:self.renderItems
+ *                                                                itemCount:self.renderItems.count];
+ *     return 0;
+ * }
+ * 
+ * + * + * Case 2: you need to provide target textures to the beauty filter component + * If the third-party beauty filter component you use does not generate new textures and you need to manually set an input texture and an output texture for the component, you can consider the following scheme: + * ```ObjectiveC + * uint32_t onProcessVideoFrame(TRTCVideoFrame * _Nonnull)srcFrame dstFrame:(TRTCVideoFrame * _Nonnull)dstFrame{ + * thirdparty_process(srcFrame.textureId, srcFrame.width, srcFrame.height, dstFrame.textureId); + * return 0; + * } + * ``` + * ```java + * int onProcessVideoFrame(TRTCCloudDef.TRTCVideoFrame srcFrame, TRTCCloudDef.TRTCVideoFrame dstFrame) { + * thirdparty_process(srcFrame.texture.textureId, srcFrame.width, srcFrame.height, dstFrame.texture.textureId); + * return 0; + * } + * ``` + */ +@optional +- (uint32_t)onProcessVideoFrame:(TRTCVideoFrame *_Nonnull)srcFrame dstFrame:(TRTCVideoFrame *_Nonnull)dstFrame; + +/** + * The OpenGL context in the SDK was destroyed + */ +@optional +- (void)onGLContextDestory; + +@end // End of class TRTCVideoFrameDelegate + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Callback of custom audio processing +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Callback of custom audio processing +/// @{ + +@protocol TRTCAudioFrameDelegate +@optional + +/** + * Audio data captured by the local mic and pre-processed by the audio module + * + * After you configure the callback of custom audio processing, the SDK will return via this callback the data captured and pre-processed (ANS, AEC, and AGC) in PCM format. + * - The audio returned is in PCM format and has a fixed frame length (time) of 0.02s. + * - The formula to convert a frame length in seconds to one in bytes is **sample rate * frame length in seconds * number of sound channels * audio bit depth**. 
+ * - Assume that the audio is recorded on a single channel with a sample rate of 48,000 Hz and audio bit depth of 16 bits, which are the default settings of TRTC. The frame length in bytes will be **48000 * 0.02s * 1 * 16 bits = 15360 bits = 1920 + * bytes**. + * + * @param frame Audio frames in PCM format + * @note + * 1. Please avoid time-consuming operations in this callback function. The SDK processes an audio frame every 20 ms, so if your operation takes more than 20 ms, it will cause audio exceptions. + * 2. The audio data returned via this callback can be read and modified, but please keep the duration of your operation short. + * 3. The audio data is returned via this callback after ANS, AEC and AGC, but it **does not include** pre-processing effects like background music, audio effects, or reverb, and therefore has a short delay. + */ +- (void)onCapturedRawAudioFrame:(TRTCAudioFrame *)frame; + +/** + * Audio data captured by the local mic, pre-processed by the audio module, effect-processed and BGM-mixed + * + * After you configure the callback of custom audio processing, the SDK will return via this callback the data captured, pre-processed (ANS, AEC, and AGC), effect-processed and BGM-mixed in PCM format, before it is submitted to the network module for + * encoding. + * - The audio data returned via this callback is in PCM format and has a fixed frame length (time) of 0.02s. + * - The formula to convert a frame length in seconds to one in bytes is **sample rate * frame length in seconds * number of sound channels * audio bit depth**. + * - Assume that the audio is recorded on a single channel with a sample rate of 48,000 Hz and audio bit depth of 16 bits, which are the default settings of TRTC. The frame length in bytes will be **48000 * 0.02s * 1 * 16 bits = 15360 bits = 1920 + * bytes**. + * + * Instructions: + * You could write data to the `TRTCAudioFrame.extraData` filed, in order to achieve the purpose of transmitting signaling. 
+ * Because the data block of the audio frame header cannot be too large, we recommend you limit the size of the signaling data to only a few bytes when using this API. If extra data more than 100 bytes, it won't be sent. + * Other users in the room can receive the message through the `TRTCAudioFrame.extraData` in `onRemoteUserAudioFrame` callback in {@link TRTCAudioFrameDelegate}. + * + * @param frame Audio frames in PCM format + * @note + * 1. Please avoid time-consuming operations in this callback function. The SDK processes an audio frame every 20 ms, so if your operation takes more than 20 ms, it will cause audio exceptions. + * 2. The audio data returned via this callback can be read and modified, but please keep the duration of your operation short. + * 3. Audio data is returned via this callback after ANS, AEC, AGC, effect-processing and BGM-mixing, and therefore the delay is longer than that with {@link onCapturedRawAudioFrame}. + */ +- (void)onLocalProcessedAudioFrame:(TRTCAudioFrame *)frame; + +/** + * Audio data of each remote user before audio mixing + * + * After you configure the callback of custom audio processing, the SDK will return via this callback the raw audio data (PCM format) of each remote user before mixing. + * - The audio data returned via this callback is in PCM format and has a fixed frame length (time) of 0.02s. + * - The formula to convert a frame length in seconds to one in bytes is **sample rate * frame length in seconds * number of sound channels * audio bit depth**. + * - Assume that the audio is recorded on a single channel with a sample rate of 48,000 Hz and audio bit depth of 16 bits, which are the default settings of TRTC. The frame length in bytes will be **48000 * 0.02s * 1 * 16 bits = 15360 bits = 1920 + * bytes**. + * + * @param frame Audio frames in PCM format + * @param userId User ID + * @note The audio data returned via this callback can be read but not modified. 
+ */ +- (void)onRemoteUserAudioFrame:(TRTCAudioFrame *)frame userId:(NSString *)userId; + +/** + * Data mixed from each channel before being submitted to the system for playback + * + * After you configure the callback of custom audio processing, the SDK will return to you via this callback the data (PCM format) mixed from each channel before it is submitted to the system for playback. + * - The audio data returned via this callback is in PCM format and has a fixed frame length (time) of 0.02s. + * - The formula to convert a frame length in seconds to one in bytes is **sample rate * frame length in seconds * number of sound channels * audio bit depth**. + * - Assume that the audio is recorded on a single channel with a sample rate of 48,000 Hz and audio bit depth of 16 bits, which are the default settings of TRTC. The frame length in bytes will be **48000 * 0.02s * 1 * 16 bits = 15360 bits = 1920 + * bytes**. + * + * @param frame Audio frames in PCM format + * @note + * 1. Please avoid time-consuming operations in this callback function. The SDK processes an audio frame every 20 ms, so if your operation takes more than 20 ms, it will cause audio exceptions. + * 2. The audio data returned via this callback can be read and modified, but please keep the duration of your operation short. + * 3. The audio data returned via this callback is the audio data mixed from each channel before it is played. It does not include the in-ear monitoring data. + */ +- (void)onMixedPlayAudioFrame:(TRTCAudioFrame *)frame; + +/** + * Data mixed from all the captured and to-be-played audio in the SDK + * + * After you configure the callback of custom audio processing, the SDK will return via this callback the data (PCM format) mixed from all captured and to-be-played audio in the SDK, so that you can customize recording. + * - The audio data returned via this callback is in PCM format and has a fixed frame length (time) of 0.02s. 
+ * - The formula to convert a frame length in seconds to one in bytes is **sample rate * frame length in seconds * number of sound channels * audio bit depth**. + * - Assume that the audio is recorded on a single channel with a sample rate of 48,000 Hz and audio bit depth of 16 bits, which are the default settings of TRTC. The frame length in bytes will be **48000 * 0.02s * 1 * 16 bits = 15360 bits = 1920 + * bytes**. + * + * @param frame Audio frames in PCM format + * @note + * 1. This data returned via this callback is mixed from all audio in the SDK, including local audio after pre-processing (ANS, AEC, and AGC), special effects application, and music mixing, as well as all remote audio, but it does not include the + * in-ear monitoring data. + * 2. The audio data returned via this callback cannot be modified. + */ +- (void)onMixedAllAudioFrame:(TRTCAudioFrame *)frame; + +@end // End of class TRTCAudioFrameDelegate + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Other event callbacks +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Other event callbacks +/// @{ + +@protocol TRTCLogDelegate + +@optional + +/** + * Printing of local log + * + * If you want to capture the local log printing event, you can configure the log callback to have the SDK return to you via this callback all logs that are to be printed. + * @param log Log content + * @param level Log level. For more information, please see `TRTC_LOG_LEVEL`. + * @param module Reserved field, which is not defined at the moment and has a fixed value of `TXLiteAVSDK`. 
+ */ +- (void)onLog:(nullable NSString *)log LogLevel:(TRTCLogLevel)level WhichModule:(nullable NSString *)module; + +@end // End of class TRTCLogDelegate +/// @} +NS_ASSUME_NONNULL_END + +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TRTCStatistics.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TRTCStatistics.h new file mode 100644 index 0000000..b3141d8 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TRTCStatistics.h @@ -0,0 +1,177 @@ +/** + * Module: TRTC audio/video metrics (read-only) + * Function: the TRTC SDK reports to you the current real-time audio/video metrics (frame rate, bitrate, lag, etc.) once every two seconds + */ +/// @defgroup TRTCStatisic_ios TRTCStatisic +/// Tencent Cloud TRTC : audio, video and network related statistical indicators +/// @{ + +///////////////////////////////////////////////////////////////////////////////// +// +// Local audio/video metrics +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Local audio/video metrics +/// @{ + +@interface TRTCLocalStatistics : NSObject + +///**Field description:** local video width in px +@property(nonatomic, assign) uint32_t width; + +///**Field description:** local video height in px +@property(nonatomic, assign) uint32_t height; + +///**Field description:** local video frame rate in fps, i.e., how many video frames there are per second +@property(nonatomic, assign) uint32_t frameRate; + +///**Field description:** remote video bitrate in Kbps, i.e., how much video data is generated per second +@property(nonatomic, assign) uint32_t videoBitrate; + +///**Field description:** remote audio sample rate (Hz) +@property(nonatomic, assign) uint32_t audioSampleRate; + +///**Field description:** local audio bitrate in Kbps, i.e., how much audio data is generated per second +@property(nonatomic, assign) uint32_t audioBitrate; + +///**Field description:** video stream type (HD big image | smooth small image | substream 
image) +@property(nonatomic, assign) TRTCVideoStreamType streamType; + +///**Field description:**Audio equipment collection status( +/// 0:Normal;1:Long silence detected;2:Broken sound detected;3:Abnormal intermittent sound detected;) +@property(nonatomic, assign) uint32_t audioCaptureState; +@end + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Remote audio/video metrics +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Remote audio/video metrics +/// @{ + +@interface TRTCRemoteStatistics : NSObject + +///**Field description:** user ID +@property(nonatomic, retain) NSString* userId; + +///**Field description:** total packet loss rate (%) of the audio stream +///`audioPacketLoss ` represents the packet loss rate eventually calculated on the audience side after the audio/video stream goes through the complete transfer linkage of "anchor -> cloud -> audience". +/// The smaller the `audioPacketLoss `, the better. The packet loss rate of 0 indicates that all data of the audio stream has entirely reached the audience. +/// If `downLoss` is `0` but `audioPacketLoss ` isn't, there is no packet loss on the linkage of "cloud -> audience" for the audiostream, but there are unrecoverable packet losses on the linkage of "anchor -> cloud". +@property(nonatomic, assign) uint32_t audioPacketLoss; + +///**Field description:** total packet loss rate (%) of the video stream +///`videoPacketLoss ` represents the packet loss rate eventually calculated on the audience side after the audio/video stream goes through the complete transfer linkage of "anchor -> cloud -> audience". +/// The smaller the `videoPacketLoss `, the better. The packet loss rate of 0 indicates that all data of the video stream has entirely reached the audience. 
+/// If `downLoss` is `0` but `videoPacketLoss ` isn't, there is no packet loss on the linkage of "cloud -> audience" for the video stream, but there are unrecoverable packet losses on the linkage of "anchor -> cloud". +@property(nonatomic, assign) uint32_t videoPacketLoss; + +///**Field description:** remote video width in px +@property(nonatomic, assign) uint32_t width; + +///**Field description:** remote video height in px +@property(nonatomic, assign) uint32_t height; + +///**Field description:** remote video frame rate (fps) +@property(nonatomic, assign) uint32_t frameRate; + +///**Field description:** remote video bitrate (Kbps) +@property(nonatomic, assign) uint32_t videoBitrate; + +///**Field description:** local audio sample rate (Hz) +@property(nonatomic, assign) uint32_t audioSampleRate; + +///**Field description:** local audio bitrate (Kbps) +@property(nonatomic, assign) uint32_t audioBitrate; + +///**Field description:** playback delay (ms) +/// In order to avoid audio/video lags caused by network jitters and network packet disorders, TRTC maintains a playback buffer on the playback side to organize the received network data packets. +/// The size of the buffer is adaptively adjusted according to the current network quality and converted to the length of time in milliseconds, i.e., `jitterBufferDelay`. +@property(nonatomic, assign) uint32_t jitterBufferDelay; + +///**Field description:** end-to-end delay (ms) +///`point2PointDelay` represents the delay of "anchor -> cloud -> audience". To be more precise, it represents the delay of the entire linkage of "collection -> encoding -> network transfer -> receiving -> buffering -> decoding -> playback". +///`point2PointDelay` works only if both the local and remote SDKs are on version 8.5 or above. If the remote SDK is on a version below 8.5, this value will always be 0 and thus meaningless. 
+@property(nonatomic, assign) uint32_t point2PointDelay; + +///**Field description:** cumulative audio playback lag duration (ms) +@property(nonatomic, assign) uint32_t audioTotalBlockTime; + +///**Field description:** audio playback lag rate (%) +/// Audio playback lag rate (audioBlockRate) = cumulative audio playback lag duration (audioTotalBlockTime)/total audio playback duration +@property(nonatomic, assign) uint32_t audioBlockRate; + +///**Field description:** cumulative video playback lag duration (ms) +@property(nonatomic, assign) uint32_t videoTotalBlockTime; + +///**Field description:** video playback lag rate (%) +/// Video playback lag rate (videoBlockRate) = cumulative video playback lag duration (videoTotalBlockTime)/total video playback duration +@property(nonatomic, assign) uint32_t videoBlockRate; + +///**Field description:** total packet loss rate (%) of the audio/video stream +/// Deprecated, please use audioPacketLoss and videoPacketLoss instead. +@property(nonatomic, assign) uint32_t finalLoss __attribute__((deprecated("Use audioPacketLoss and videoPacketLoss instead."))); + +///**Field description:** video stream type (HD big image | smooth small image | substream image) +@property(nonatomic, assign) TRTCVideoStreamType streamType; +@end + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Network and performance metrics +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Network and performance metrics +/// @{ + +@interface TRTCStatistics : NSObject + +///**Field description:** CPU utilization (%) of the current application +@property(nonatomic, assign) uint32_t appCpu; + +///**Field description:** CPU utilization (%) of the current system +@property(nonatomic, assign) uint32_t systemCpu; + +///**Field description:** upstream packet loss rate (%) from the SDK to cloud +/// The smaller the value, the better. 
If `upLoss` is `0%`, the upstream network quality is very good, and the data packets uploaded to the cloud are basically not lost. +/// If `upLoss` is `30%`, 30% of the audio/video data packets sent to the cloud by the SDK are lost on the transfer linkage. +@property(nonatomic, assign) uint32_t upLoss; + +///**Field description:** downstream packet loss rate (%) from cloud to the SDK +/// The smaller the value, the better. If `downLoss` is `0%`, the downstream network quality is very good, and the data packets received from the cloud are basically not lost. +/// If `downLoss` is `30%`, 30% of the audio/video data packets sent to the SDK by the cloud are lost on the transfer linkage. +@property(nonatomic, assign) uint32_t downLoss; + +///**Field description:** round-trip delay (ms) from the SDK to cloud +/// This value represents the total time it takes to send a network packet from the SDK to the cloud and then send a network packet back from the cloud to the SDK, i.e., the total time it takes for a network packet to go through the linkage of "SDK +/// -> cloud -> SDK". The smaller the value, the better. If `rtt` is below 50 ms, it means a short audio/video call delay; if `rtt` is above 200 ms, it means a long audio/video call delay. It should be explained that `rtt` represents the total time +/// spent on the linkage of "SDK -> cloud -> SDK"; therefore, there is no need to distinguish between `upRtt` and `downRtt`. +@property(nonatomic, assign) uint32_t rtt; + +///**Field description:** round-trip delay (ms) from the SDK to gateway +/// This value represents the total time it takes to send a network packet from the SDK to the gateway and then send a network packet back from the gateway to the SDK, i.e., the total time it takes for a network packet to go through the linkage of +/// "SDK -> gateway -> SDK". The smaller the value, the better. 
If `gatewayRtt` is below 50 ms, it means a short audio/video call delay; if `gatewayRtt` is above 200 ms, it means a long audio/video call delay. It should be explained that `gatewayRtt` +/// is invalid for cellular network. +@property(nonatomic, assign) uint32_t gatewayRtt; + +///**Field description:** total number of sent bytes (including signaling data and audio/video data) +@property(nonatomic, assign) uint64_t sentBytes; + +///**Field description:** total number of received bytes (including signaling data and audio/video data) +@property(nonatomic, assign) uint64_t receivedBytes; + +///**Field description:** local audio/video statistics +/// As there may be three local audio/video streams (i.e., HD big image, smooth small image, and substream image), the local audio/video statistics are an array. +@property(nonatomic, strong) NSArray* localStatistics; + +///**Field description:** remote audio/video statistics +/// As there may be multiple concurrent remote users, and each of them may have multiple concurrent audio/video streams (i.e., HD big image, smooth small image, and substream image), the remote audio/video statistics are an array. +@property(nonatomic, strong) NSArray* remoteStatistics; + +@end +/// @} + +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXAudioCustomProcessDelegate.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXAudioCustomProcessDelegate.h new file mode 100644 index 0000000..9155c38 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXAudioCustomProcessDelegate.h @@ -0,0 +1,38 @@ +// +// TXAudioCustomProcessDelegate.h +// TXLiteAVSDK +// +// Created by realingzhou on 2018/1/15. +// Copyright © 2018年 Tencent. All rights reserved. 
+// + +#ifndef TXAudioCustomProcessDelegate_h +#define TXAudioCustomProcessDelegate_h +#import + +@protocol TXAudioCustomProcessDelegate + +/** + * 原始声音的回调 + * @param data pcm数据 + * @param timeStamp 时间戳 + * @param sampleRate 采样率 + * @param channels 声道数 + * @param withBgm 回调的数据是否包含bgm,当不开启回声消除时,回调的raw pcm会包含bgm + */ +@optional +- (void)onRecordRawPcmData:(NSData *)data timeStamp:(unsigned long long)timeStamp sampleRate:(int)sampleRate channels:(int)channels withBgm:(BOOL)withBgm; + +/** + * 经过特效处理的声音回调 + * @param data pcm数据 + * @param timeStamp 时间戳 + * @param sampleRate 采样率 + * @param channels 声道数 + */ +@optional +- (void)onRecordPcmData:(NSData *)data timeStamp:(unsigned long long)timeStamp sampleRate:(int)sampleRate channels:(int)channels; + +@end + +#endif /* TXAudioCustomProcessDelegate_h */ diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXAudioEffectManager.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXAudioEffectManager.h new file mode 100644 index 0000000..13ce621 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXAudioEffectManager.h @@ -0,0 +1,331 @@ +/** + * Module: management class for background music, short audio effects, and voice effects + * Description: sets background music, short audio effects, and voice effects + */ +/// @defgroup TXAudioEffectManager_ios TXAudioEffectManager +/// Tencent Cloud Audio Effect Management Module +/// @{ +#import + +///////////////////////////////////////////////////////////////////////////////// +// +// Definitions of enumerated values related to audio effects +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Definitions of enumerated values related to audio effects +/// @{ + +/** + * 1.1 Reverb effects + * + * Reverb effects can be applied to human voice. Based on acoustic algorithms, they can mimic voice in different environments. 
The following effects are supported currently: + * 0: original; 1: karaoke; 2: room; 3: hall; 4: low and deep; 5: resonant; 6: metal; 7: husky; 8: ethereal; 9: studio; 10: melodious; 11: phonograph; 12: nature + */ +typedef NS_ENUM(NSInteger, TXVoiceReverbType) { + TXVoiceReverbType_0 = 0, ///< Disable + TXVoiceReverbType_1 = 1, ///< Karaoke + TXVoiceReverbType_2 = 2, ///< Room + TXVoiceReverbType_3 = 3, ///< Hall + TXVoiceReverbType_4 = 4, ///< Low and deep + TXVoiceReverbType_5 = 5, ///< Resonant + TXVoiceReverbType_6 = 6, ///< Metal + TXVoiceReverbType_7 = 7, ///< Husky + TXVoiceReverbType_8 = 8, ///< ethereal + TXVoiceReverbType_9 = 9, ///< studio + TXVoiceReverbType_10 = 10, ///< melodious + TXVoiceReverbType_11 = 11, ///< phonograph + TXVoiceReverbType_12 = 12, ///< nature +}; + +/** + * 1.2 Voice changing effects + * + * Voice changing effects can be applied to human voice. Based on acoustic algorithms, they change the tone of voice. The following effects are supported currently: + * 0: original; 1: child; 2: little girl; 3: middle-aged man; 4: metal; 5: nasal; 6: foreign accent; 7: trapped beast; 8: otaku; 9: electric; 10: robot; 11: ethereal + */ +typedef NS_ENUM(NSInteger, TXVoiceChangeType) { + TXVoiceChangeType_0 = 0, ///< Disable + TXVoiceChangeType_1 = 1, ///< Child + TXVoiceChangeType_2 = 2, ///< Little girl + TXVoiceChangeType_3 = 3, ///< Middle-aged man + TXVoiceChangeType_4 = 4, ///< Metal + TXVoiceChangeType_5 = 5, ///< Nasal + TXVoiceChangeType_6 = 6, ///< Foreign accent + TXVoiceChangeType_7 = 7, ///< Trapped beast + TXVoiceChangeType_8 = 8, ///< Otaku + TXVoiceChangeType_9 = 9, ///< Electric + TXVoiceChangeType_10 = 10, ///< Robot + TXVoiceChangeType_11 = 11, ///< Ethereal +}; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Callback of playing background music +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Callback of playing background 
music +/// @{ + +// Playback progress block of background music + +/// Background music started. +typedef void (^TXAudioMusicStartBlock)(NSInteger errCode); + +/// Playback progress of background music +typedef void (^TXAudioMusicProgressBlock)(NSInteger progressMs, NSInteger durationMs); + +/// Background music ended +typedef void (^TXAudioMusicCompleteBlock)(NSInteger errCode); + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Background music playback information +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Background music playback information +/// @{ + +/** + * Background music playback information + * + * The information, including playback ID, file path, and loop times, is passed in the {@link startPlayMusic} API. + * 1. If you play the same music track multiple times, please use the same ID instead of a separate ID for each playback. + * 2. If you want to play different music tracks at the same time, use different IDs for them. + * 3. If you use the same ID to play a music track different from the current one, the SDK will stop the current one before playing the new one. + */ +@interface TXAudioMusicParam : NSObject + +///**Field description:** music ID
+///**Note:** the SDK supports playing multiple music tracks. IDs are used to distinguish different music tracks and control their start, end, volume, etc. +@property(nonatomic) int32_t ID; + +///**Field description:** absolute path or URL of the music file. Supported formats: MP3, AAC, M4A, and WAV. +@property(nonatomic, copy) NSString *path; + +///**Field description:** number of times the music track is looped
+///**Valid values:** 0 or any positive integer. 0 (default) indicates that the music is played once, 1 twice, and so on. +@property(nonatomic) NSInteger loopCount; + +///**Field description:** whether to send the music to remote users
+///**Valid values:** `YES`: remote users can hear the music played locally; `NO` (default): only the local user can hear the music. +@property(nonatomic) BOOL publish; + +///**Field description:** whether the music played is a short music track
+///**Valid values:** `YES`: short music track that needs to be looped; `NO` (default): normal-length music track +@property(nonatomic) BOOL isShortFile; + +///**Field description:** the point in time in milliseconds for starting music playback +@property(nonatomic) NSInteger startTimeMS; + +///**Field description:** the point in time in milliseconds for ending music playback. 0 indicates that playback continues till the end of the music track. +@property(nonatomic) NSInteger endTimeMS; +@end +/// @} + +// Definition of audio effect management module +@interface TXAudioEffectManager : NSObject + +/** + * You cannot create a `TXAudioEffectManager` object. + * You need to obtain the object using the `getAudioEffectManager` API of `TRTCCloud` or `TXLivePush`. + */ +- (instancetype)init NS_UNAVAILABLE; + +///////////////////////////////////////////////////////////////////////////////// +// +// Voice effect APIs +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Voice effect APIs +/// @{ + +/** + * 1.1 Enabling in-ear monitoring + * + * After enabling in-ear monitoring, anchors can hear in earphones their own voice captured by the mic. This is designed for singing scenarios. + * + * In-ear monitoring cannot be enabled for Bluetooth earphones. This is because Bluetooth earphones have high latency. Please ask anchors to use wired earphones via a UI reminder. + * Given that not all phones deliver excellent in-ear monitoring effects, we have blocked this feature on some phones. + * + * @note In-ear monitoring can be enabled only when earphones are used. Please remind anchors to use wired earphones. + * @param enable `YES:` enable; `NO`: disable + */ +- (void)enableVoiceEarMonitor:(BOOL)enable; + +/** + * 1.2 Setting in-ear monitoring volume + * + * This API is used to set the volume of in-ear monitoring. + * + * @param volume Volume. 
Value range: 0-100; default: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. + */ +- (void)setVoiceEarMonitorVolume:(NSInteger)volume; + +/** + * 1.3 Setting voice reverb effects + * + * This API is used to set reverb effects for human voice. For the effects supported, please see {@link TXVoiceReverbType}. + * + * @note Effects become invalid after room exit. If you want to use the same effect after you enter the room again, you need to set the effect again using this API. + */ +- (void)setVoiceReverbType:(TXVoiceReverbType)reverbType; + +/** + * 1.4 Setting voice changing effects + * + * This API is used to set voice changing effects. For the effects supported, please see {@link TXVoiceChangeType}. + * + * @note Effects become invalid after room exit. If you want to use the same effect after you enter the room again, you need to set the effect again using this API. + */ +- (void)setVoiceChangerType:(TXVoiceChangeType)changerType; + +/** + * 1.5 Setting speech volume + * + * This API is used to set the volume of speech. It is often used together with the music volume setting API {@link setAllMusicVolume} to balance between the volume of music and speech. + * + * @param volume Volume. Value range: 0-100; default: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. + */ +- (void)setVoiceVolume:(NSInteger)volume; + +/** + * 1.6 Setting speech pitch + * + * This API is used to set the pitch of speech. 
+ * + * @param pitch Pitch. Value range: -1.0f to 1.0f; default: 0.0f. + */ +- (void)setVoicePitch:(double)pitch; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Background music APIs +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Background music APIs +/// @{ + +/** + * 2.1 Starting background music + * + * You must assign an ID to each music track so that you can start, stop, or set the volume of music tracks by ID. + * + * @note + * 1. If you play the same music track multiple times, please use the same ID instead of a separate ID for each playback. + * 2. If you want to play different music tracks at the same time, use different IDs for them. + * 3. If you use the same ID to play a music track different from the current one, the SDK will stop the current one before playing the new one. + * + * @param musicParam Music parameter + * @param startBlock Callback of starting music + * @param progressBlock Callback of playback progress + * @param completeBlock Callback of ending music + */ +- (void)startPlayMusic:(TXAudioMusicParam *)musicParam onStart:(TXAudioMusicStartBlock _Nullable)startBlock onProgress:(TXAudioMusicProgressBlock _Nullable)progressBlock onComplete:(TXAudioMusicCompleteBlock _Nullable)completeBlock; + +/** + * 2.2 Stopping background music + * + * @param id Music ID + */ +- (void)stopPlayMusic:(int32_t)id; + +/** + * 2.3 Pausing background music + * + * @param id Music ID + */ +- (void)pausePlayMusic:(int32_t)id; + +/** + * 2.4 Resuming background music + * + * @param id Music ID + */ +- (void)resumePlayMusic:(int32_t)id; + +/** + * 2.5 Setting the local and remote playback volume of background music + * + * This API is used to set the local and remote playback volume of background music. + * - Local volume: the volume of music heard by anchors + * - Remote volume: the volume of music heard by audience + * + * @param volume Volume. 
Value range: 0-100; default: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. + */ +- (void)setAllMusicVolume:(NSInteger)volume; + +/** + * 2.6 Setting the remote playback volume of a specific music track + * + * This API is used to control the remote playback volume (the volume heard by audience) of a specific music track. + * + * @param id Music ID + * @param volume Volume. Value range: 0-100; default: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. + */ +- (void)setMusicPublishVolume:(int32_t)id volume:(NSInteger)volume; + +/** + * 2.7 Setting the local playback volume of a specific music track + * + * This API is used to control the local playback volume (the volume heard by anchors) of a specific music track. + * + * @param id Music ID + * @param volume Volume. Value range: 0-100. default: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. + */ +- (void)setMusicPlayoutVolume:(int32_t)id volume:(NSInteger)volume; + +/** + * 2.8 Adjusting the pitch of background music + * + * @param id Music ID + * @param pitch Pitch. Value range: floating point numbers in the range of [-1, 1]; default: 0.0f + */ +- (void)setMusicPitch:(int32_t)id pitch:(double)pitch; + +/** + * 2.9 Changing the speed of background music + * + * @param id Music ID + * @param speedRate Music speed. Value range: floating point numbers in the range of [0.5, 2]; default: 1.0f + */ +- (void)setMusicSpeedRate:(int32_t)id speedRate:(double)speedRate; + +/** + * 2.10 Getting the playback progress (ms) of background music + * + * @param id Music ID + * @return The milliseconds that have passed since playback started. -1 indicates failure to get the playback progress. 
+ */ +- (NSInteger)getMusicCurrentPosInMS:(int32_t)id; + +/** + * 2.11 Getting the total length (ms) of background music + * + * @param path Path of the music file. + * @return The length of the specified music file is returned. -1 indicates failure to get the length. + */ +- (NSInteger)getMusicDurationInMS:(NSString *)path; + +/** + * 2.12 Setting the playback progress (ms) of background music + * + * @note Do not call this API frequently as the music file may be read and written to each time the API is called, which can be time-consuming. + * Wait till users finish dragging the progress bar before you call this API. + * The progress bar controller on the UI tends to update the progress at a high frequency as users drag the progress bar. This will result in poor user experience unless you limit the frequency. + * + * @param id Music ID + * @param pts Unit: millisecond + */ +- (void)seekMusicToPosInMS:(int32_t)id pts:(NSInteger)pts; + +/// @} +@end // End of interface TXAudioEffectManager +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXAudioRawDataDelegate.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXAudioRawDataDelegate.h new file mode 100644 index 0000000..9d834fe --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXAudioRawDataDelegate.h @@ -0,0 +1,38 @@ +// +// TXAudioRawDataDelegate.h +// TXLiteAVSDK +// +// Created by realingzhou on 2018/2/24. +// Copyright © 2018年 Tencent. All rights reserved. 
+// + +#ifndef TXAudioRawDataDelegate_h +#define TXAudioRawDataDelegate_h + +@protocol TXAudioRawDataDelegate + +/** + * 音频播放信息回调 + * + * @param sampleRate 采样率 + * @param channels 声道数 + */ +@optional +- (void)onAudioInfoChanged:(int)sampleRate channels:(int)channels; + +/** + * 音频播放数据回调,数据格式 :PCM + * + * <!!!注意!!!> 该函数内不要做耗时操作<!!!注意!!!> + * 音频播放器会在播放数据的前一刻,调用此函数,同步回调将要播放的数据。因此在函数内部做耗时操作可能会影响播放 + * + * + * @param data pcm数据 + * @param timestamp 时间戳。注 :会有连续相同的时间戳回调出来,超过2048字节,时间戳才会变化。 + */ +@optional +- (void)onPcmDataAvailable:(NSData *)data pts:(unsigned long long)timestamp; + +@end + +#endif /* TXAudioRawDataDelegate_h */ diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXBeautyManager.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXBeautyManager.h new file mode 100644 index 0000000..ff08ddb --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXBeautyManager.h @@ -0,0 +1,328 @@ +/** + * Module: beauty filter and image processing parameter configurations + * Function: you can modify parameters such as beautification, filter, and green screen + */ +#import +#import +#if TARGET_OS_IPHONE +#import +typedef UIImage TXImage; +#else +#import +typedef NSImage TXImage; +#endif + +NS_ASSUME_NONNULL_BEGIN + +/// @defgroup TXBeautyManager_ios TXBeautyManager +/// Beauty filter and animated effect parameter management +/// @{ + +/** + * Beauty (skin smoothing) filter algorithm + * TRTC has multiple built-in skin smoothing algorithms. You can select the one most suitable for your product needs. + */ +typedef NS_ENUM(NSInteger, TXBeautyStyle) { + + /// Smooth style, which uses a more radical algorithm for more obvious effect and is suitable for show live streaming. + TXBeautyStyleSmooth = 0, + + /// Natural style, which retains more facial details for more natural effect and is suitable for most live streaming use cases. + TXBeautyStyleNature = 1, + + /// Pitu style, which is provided by YouTu Lab. 
Its skin smoothing effect is between the smooth style and the natural style, that is, it retains more skin details than the smooth style and has a higher skin smoothing degree than the natural + /// style. + TXBeautyStylePitu = 2 +}; + +@interface TXBeautyManager : NSObject + +/** + * Sets the beauty (skin smoothing) filter algorithm. + * + * TRTC has multiple built-in skin smoothing algorithms. You can select the one most suitable for your product needs: + * + * @param beautyStyle Beauty filter style. `TXBeautyStyleSmooth`: smooth; `TXBeautyStyleNature`: natural; `TXBeautyStylePitu`: Pitu + */ +- (void)setBeautyStyle:(TXBeautyStyle)beautyStyle; + +/** + * Sets the strength of the beauty filter. + * + * @param beautyLevel Strength of the beauty filter. Value range: 0–9. `0` indicates to disable the filter, and `9` indicates the most obvious effect. + */ +- (void)setBeautyLevel:(float)beautyLevel; + +/** + * Sets the strength of the brightening filter. + * + * @param whitenessLevel Strength of the brightening filter. Value range: 0–9. `0` indicates to disable the filter, and `9` indicates the most obvious effect. + */ +- (void)setWhitenessLevel:(float)whitenessLevel; + +/** + * Enables clarity enhancement. + */ +- (void)enableSharpnessEnhancement:(BOOL)enable; + +/** + * Sets the strength of the rosy skin filter. + * + * @param ruddyLevel Strength of the rosy skin filter. Value range: 0–9. `0` indicates to disable the filter, and `9` indicates the most obvious effect. + */ +- (void)setRuddyLevel:(float)ruddyLevel; + +/** + * Sets color filter. + * + * The color filter is a color lookup table image containing color mapping relationships. You can find several predefined filter images in the official demo we provide. + * The SDK performs secondary processing on the original video image captured by the camera according to the mapping relationships in the lookup table to achieve the expected filter effect. 
+ * @param image Color lookup table containing color mapping relationships. The image must be in PNG format. + */ +- (void)setFilter:(nullable TXImage *)image; + +/** + * Sets the strength of color filter. + * + * The larger this value, the more obvious the effect of the color filter, and the greater the color difference between the video image processed by the filter and the original video image. + * The default strength is 0.5, and if it is not sufficient, it can be adjusted to a value above 0.5. The maximum value is 1. + * + * @param strength Value range: 0–1. The greater the value, the more obvious the effect. Default value: 0.5 + */ +- (void)setFilterStrength:(float)strength; + +/** + * Sets green screen video. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * The green screen feature enabled by this API is not capable of intelligent keying. It requires that there be a green screen behind the videoed person or object for further chroma keying. + * + * @param path Path of the video file in MP4 format. An empty value indicates to disable the effect. + * @return 0: Success; -5: feature of license not supported. + */ +- (int)setGreenScreenFile:(nullable NSString *)path; + +/** + * Sets the strength of the eye enlarging filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param eyeScaleLevel Strength of the eye enlarging filter. Value range: 0–9. `0` indicates to disable the filter, and `9` indicates the most obvious effect. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setEyeScaleLevel:(float)eyeScaleLevel; +#endif + +/** + * Sets the strength of the face slimming filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). 
+ * + * @param faceSlimLevel Strength of the face slimming filter. Value range: 0–9. `0` indicates to disable the filter, and `9` indicates the most obvious effect. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setFaceSlimLevel:(float)faceSlimLevel; +#endif + +/** + * Sets the strength of the chin slimming filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param faceVLevel Strength of the chin slimming filter. Value range: 0–9. `0` indicates to disable the filter, and `9` indicates the most obvious effect. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setFaceVLevel:(float)faceVLevel; +#endif + +/** + * Sets the strength of the chin lengthening/shortening filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param chinLevel Strength of the chin lengthening/shortening filter. Value range: -9–9. `0` indicates to disable the filter, a value smaller than 0 indicates that the chin is shortened, and a value greater than 0 indicates that the chin is + * lengthened. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setChinLevel:(float)chinLevel; +#endif + +/** + * Sets the strength of the face shortening filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param faceShortLevel Strength of the face shortening filter. Value range: 0–9. `0` indicates to disable the filter, and `9` indicates the most obvious effect. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setFaceShortLevel:(float)faceShortLevel; +#endif + +/** + * Sets the strength of the face narrowing filter. 
This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param level Strength of the face narrowing filter. Value range: 0–9. `0` indicates to disable the filter, and `9` indicates the most obvious effect. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setFaceNarrowLevel:(float)faceNarrowLevel; +#endif + +/** + * Sets the strength of the nose slimming filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param noseSlimLevel Strength of the nose slimming filter. Value range: 0–9. `0` indicates to disable the filter, and `9` indicates the most obvious effect. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setNoseSlimLevel:(float)noseSlimLevel; +#endif + +/** + * Sets the strength of the eye brightening filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param eyeLightenLevel Strength of the eye brightening filter. Value range: 0–9. `0` indicates to disable the filter, and `9` indicates the most obvious effect. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setEyeLightenLevel:(float)eyeLightenLevel; +#endif + +/** + * Sets the strength of the teeth whitening filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param toothWhitenLevel Strength of the teeth whitening filter. Value range: 0–9. `0` indicates to disable the filter, and `9` indicates the most obvious effect. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setToothWhitenLevel:(float)toothWhitenLevel; +#endif + +/** + * Sets the strength of the wrinkle removal filter. 
This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param wrinkleRemoveLevel Strength of the wrinkle removal filter. Value range: 0–9. `0` indicates to disable the filter, and `9` indicates the most obvious effect. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setWrinkleRemoveLevel:(float)wrinkleRemoveLevel; +#endif + +/** + * Sets the strength of the eye bag removal filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param pounchRemoveLevel Strength of the eye bag removal filter. Value range: 0–9. `0` indicates to disable the filter, and `9` indicates the most obvious effect. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setPounchRemoveLevel:(float)pounchRemoveLevel; +#endif + +/** + * Sets the strength of the smile line removal filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param smileLinesRemoveLevel Strength of the smile line removal filter. Value range: 0–9. `0` indicates to disable the filter, and `9` indicates the most obvious effect. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setSmileLinesRemoveLevel:(float)smileLinesRemoveLevel; +#endif + +/** + * Sets the strength of the hairline adjustment filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param foreheadLevel Strength of the hairline adjustment filter. Value range: -9–9. `0` indicates to disable the filter, and `9` indicates the most obvious effect. + * @return 0: Success; -5: feature of license not supported. 
+ */ +#if TARGET_OS_IPHONE +- (int)setForeheadLevel:(float)foreheadLevel; +#endif + +/** + * Sets the strength of the eye distance adjustment filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param eyeDistanceLevel Strength of the eye distance adjustment filter. Value range: -9–9. `0` indicates to disable the filter, a value smaller than 0 indicates to widen, and a value greater than 0 indicates to narrow. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setEyeDistanceLevel:(float)eyeDistanceLevel; +#endif + +/** + * Sets the strength of the eye corner adjustment filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param eyeAngleLevel Strength of the eye corner adjustment filter. Value range: -9–9. `0` indicates to disable the filter, and `9` indicates the most obvious effect. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setEyeAngleLevel:(float)eyeAngleLevel; +#endif + +/** + * Sets the strength of the mouth shape adjustment filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param mouthShapeLevel Strength of the mouth shape adjustment filter. Value range: -9–9. `0` indicates to disable the filter, a value smaller than 0 indicates to widen, and a value greater than 0 indicates to narrow. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setMouthShapeLevel:(float)mouthShapeLevel; +#endif + +/** + * Sets the strength of the nose wing narrowing filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param noseWingLevel Strength of the nose wing adjustment filter. 
Value range: -9–9. `0` indicates to disable the filter, a value smaller than 0 indicates to widen, and a value greater than 0 indicates to narrow. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setNoseWingLevel:(float)noseWingLevel; +#endif + +/** + * Sets the strength of the nose position adjustment filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param nosePositionLevel Strength of the nose position adjustment filter. Value range: -9–9. `0` indicates to disable the filter, a value smaller than 0 indicates to lift, and a value greater than 0 indicates to lower. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setNosePositionLevel:(float)nosePositionLevel; +#endif + +/** + * Sets the strength of the lip thickness adjustment filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param lipsThicknessLevel Strength of the lip thickness adjustment filter. Value range: -9–9. `0` indicates to disable the filter, a value smaller than 0 indicates to thicken, and a value greater than 0 indicates to thin. + * @return 0: Success; -5: feature of license not supported. + */ +#if TARGET_OS_IPHONE +- (int)setLipsThicknessLevel:(float)lipsThicknessLevel; +#endif + +/** + * Sets the strength of the face shape adjustment filter. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param faceBeautyLevel Strength of the face shape adjustment filter. Value range: 0–9. `0` indicates to disable the filter, and the greater the value, the more obvious the effect. + * @return 0: Success; -5: feature of license not supported. 
+ */ +#if TARGET_OS_IPHONE +- (int)setFaceBeautyLevel:(float)faceBeautyLevel; +#endif + +/** + * Selects the AI animated effect pendant. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * + * @param tmplName Animated effect pendant name + * @param tmplDir Directory of the animated effect material file + */ +#if TARGET_OS_IPHONE +- (void)setMotionTmpl:(nullable NSString *)tmplName inDir:(nullable NSString *)tmplDir; +#endif + +/** + * Sets whether to mute during animated effect playback. This API takes effect only in the [Enterprise Edition SDK](https://cloud.tencent.com/document/product/647/32689#Enterprise). + * Some animated effects have audio effects, which can be disabled through this API when they are played back. + * + * @param motionMute `YES`: mute; `NO`: unmute + */ +#if TARGET_OS_IPHONE +- (void)setMotionMute:(BOOL)motionMute; +#endif + +@end +/// @} + +NS_ASSUME_NONNULL_END diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXDeviceManager.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXDeviceManager.h new file mode 100644 index 0000000..30bf73c --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXDeviceManager.h @@ -0,0 +1,354 @@ +/** + * Module: audio/video device management module + * Description: manages audio/video devices such as camera, mic, and speaker. + */ +/// @defgroup TXDeviceManager_ios TXDeviceManager +/// Tencent Cloud Device Management Module +/// @{ +#import +#if TARGET_OS_IPHONE +#import +#elif TARGET_OS_MAC +#import +#endif + +///////////////////////////////////////////////////////////////////////////////// +// +// Type definitions of audio/video devices +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Type definitions of audio/video devices +/// @{ + +/** + * System volume type (for mobile devices only) + * + * @deprecated This API is not recommended after v9.5. 
+ * + * Smartphones usually have two types of system volume: call volume and media volume. + * - Call volume is designed for call scenarios. It comes with acoustic echo cancellation (AEC) and supports audio capturing by Bluetooth earphones, but its sound quality is average. + * If you cannot turn the volume down to 0 (i.e., mute the phone) using the volume buttons, then your phone is using call volume. + * - Media volume is designed for media scenarios such as music playback. AEC does not work when media volume is used, and Bluetooth earphones cannot be used for audio capturing. However, media volume delivers better music listening experience. + * If you are able to mute your phone using the volume buttons, then your phone is using media volume. + * + * The SDK offers three system volume control modes: auto, call volume, and media volume. + */ +#if TARGET_OS_IPHONE +typedef NS_ENUM(NSInteger, TXSystemVolumeType) { + + /// Auto + TXSystemVolumeTypeAuto = 0, + + /// Media volume + TXSystemVolumeTypeMedia = 1, + + /// Call volume + TXSystemVolumeTypeVOIP = 2, + +}; +#endif + +/** + * Audio route (the route via which audio is played) + * + * Audio route is the route (speaker or receiver) via which audio is played. It applies only to mobile devices such as mobile phones. + * A mobile phone has two speakers: one at the top (receiver) and the other the bottom. + * - If the audio route is set to the receiver, the volume is relatively low, and audio can be heard only when the phone is put near the ear. This mode has a high level of privacy and is suitable for answering calls. + * - If the audio route is set to the speaker, the volume is relatively high, and there is no need to put the phone near the ear. This mode enables the "hands-free" feature. + */ +#if TARGET_OS_IPHONE +typedef NS_ENUM(NSInteger, TXAudioRoute) { + + /// Speakerphone: the speaker at the bottom is used for playback (hands-free). With relatively high volume, it is used to play music out loud. 
+ TXAudioRouteSpeakerphone = 0, + + /// Earpiece: the receiver at the top is used for playback. With relatively low volume, it is suitable for call scenarios that require privacy. + TXAudioRouteEarpiece = 1, + +}; +#endif + +/** + * Device type (for desktop OS) + * + * This enumerated type defines three types of audio/video devices, namely camera, mic and speaker, so that you can use the same device management API to manage three types of devices. + */ +#if TARGET_OS_MAC && !TARGET_OS_IPHONE +typedef NS_ENUM(NSInteger, TXMediaDeviceType) { + TXMediaDeviceTypeUnknown = -1, ///< undefined device type + TXMediaDeviceTypeAudioInput = 0, ///< microphone + TXMediaDeviceTypeAudioOutput = 1, ///< speaker or earpiece + TXMediaDeviceTypeVideoCamera = 2, ///< camera +}; +#endif + +/** + * Device operation + * + * This enumerated value is used to notify the status change of the local device {@link onDeviceChanged}. + */ +#if TARGET_OS_MAC && !TARGET_OS_IPHONE +typedef NS_ENUM(NSInteger, TXMediaDeviceState) { + + /// The device has been plugged in + TXMediaDeviceStateAdd = 0, + + /// The device has been removed + TXMediaDeviceStateRemove = 1, + + /// The device has been enabled + TXMediaDeviceStateActive = 2, + +}; +#endif + +/** + * Audio/Video device information (for desktop OS) + * + * This structure describes key information (such as device ID and device name) of an audio/video device, so that users can choose on the UI the device to use. 
+ */ +#if TARGET_OS_MAC && !TARGET_OS_IPHONE +@interface TXMediaDeviceInfo : NSObject +/// device type +@property(assign, nonatomic) TXMediaDeviceType type; +/// device id +@property(copy, nonatomic, nullable) NSString *deviceId; +/// device name +@property(copy, nonatomic, nullable) NSString *deviceName; +/// device properties +@property(copy, nonatomic, nullable) NSString *deviceProperties; +@end +#endif +/// @} + +/** + * The status of a local device changed (for desktop OS only) + * + * The SDK returns this callback when a local device (camera, mic, or speaker) is connected or disconnected. + * + * @param deviceId Device ID + * @param type Device type + * @param state Device status. `0`: connected; `1`: disconnected; `2`: started + */ +#if TARGET_OS_MAC && !TARGET_OS_IPHONE +@protocol TXDeviceObserver + +- (void)onDeviceChanged:(NSString *)deviceId type:(TXMediaDeviceType)mediaType state:(TXMediaDeviceState)mediaState; + +@end +#endif + +@interface TXDeviceManager : NSObject + +///////////////////////////////////////////////////////////////////////////////// +// +// Device APIs for mobile OS (iOS and Android) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Device APIs for mobile OS +/// @{ + +/** + * 1.1 Querying whether the front camera is being used + */ +#if TARGET_OS_IPHONE +- (BOOL)isFrontCamera; + +/** + * 1.2 Switching to the front/rear camera (for mobile OS) + */ +- (NSInteger)switchCamera:(BOOL)frontCamera; + +/** + * 1.3 Querying whether the current camera supports zooming (for mobile OS) + */ +- (BOOL)isCameraZoomSupported; + +/** + * 1.3 Getting the maximum zoom ratio of the camera (for mobile OS) + */ +- (CGFloat)getCameraZoomMaxRatio; + +/** + * 1.4 Setting the camera zoom ratio (for mobile OS) + * + * @param zoomRatio Value range: 1-5. 1 indicates the widest angle of view (original), and 5 the narrowest angle of view (zoomed in). 
+ */ +- (NSInteger)setCameraZoomRatio:(CGFloat)zoomRatio; + +/** + * 1.5 Querying whether automatic face detection is supported (for mobile OS) + */ +- (BOOL)isAutoFocusEnabled; + +/** + * 1.6 Enabling auto focus (for mobile OS) + * + * After auto focus is enabled, the camera will automatically detect and always focus on faces. + */ +- (NSInteger)enableCameraAutoFocus:(BOOL)enabled; + +/** + * 1.7 Adjusting the focus (for mobile OS) + * + * This API can be used to achieve the following: + * 1. A user can tap on the camera preview. + * 2. A rectangle will appear where the user taps, indicating the spot the camera will focus on. + * 3. The user passes the coordinates of the spot to the SDK using this API, and the SDK will instruct the camera to focus as required. + * @note Before using this API, you must first disable auto focus using {@link enableCameraAutoFocus}. + * @param position The spot to focus on. Pass in the coordinates of the spot you want to focus on. + * @return 0: operation successful; negative number: operation failed. + */ +- (NSInteger)setCameraFocusPosition:(CGPoint)position; + +/** + * 1.8 Querying whether flash is supported (for mobile OS) + */ +- (BOOL)isCameraTorchSupported; + +/** + * 1.8 Enabling/Disabling flash, i.e., the torch mode (for mobile OS) + */ +- (NSInteger)enableCameraTorch:(BOOL)enabled; + +/** + * 1.9 Setting the audio route (for mobile OS) + * + * A mobile phone has two audio playback devices: the receiver at the top and the speaker at the bottom. + * If the audio route is set to the receiver, the volume is relatively low, and audio can be heard only when the phone is put near the ear. This mode has a high level of privacy and is suitable for answering calls. + * If the audio route is set to the speaker, the volume is relatively high, and there is no need to put the phone near the ear. This mode enables the "hands-free" feature. 
+ */ +- (NSInteger)setAudioRoute:(TXAudioRoute)route; +#endif + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Device APIs for desktop OS (Windows & macOS) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Device APIs for desktop OS +/// @{ + +/** + * 2.1 Getting the device list (for desktop OS) + * + * @param type Device type. Set it to the type of device you want to get. For details, please see the definition of `TXMediaDeviceType`. + * @note + * - To ensure that the SDK can manage the lifecycle of the `ITXDeviceCollection` object, after using this API, please call the `release` method to release the resources. + * - Do not use `delete` to release the Collection object returned as deleting the ITXDeviceCollection* pointer will cause crash. + * - The valid values of `type` are `TXMediaDeviceTypeMic`, `TXMediaDeviceTypeSpeaker`, and `TXMediaDeviceTypeCamera`. + * - This API can be used only on macOS and Windows. + */ +#if !TARGET_OS_IPHONE && TARGET_OS_MAC +- (NSArray *_Nullable)getDevicesList:(TXMediaDeviceType)type; + +/** + * 2.2 Setting the device to use (for desktop OS) + * + * @param type Device type. For details, please see the definition of `TXMediaDeviceType`. + * @param deviceId Device ID. You can get the ID of a device using the {@link getDevicesList} API. + * @return 0: operation successful; negative number: operation failed. + */ +- (NSInteger)setCurrentDevice:(TXMediaDeviceType)type deviceId:(NSString *)deviceId; + +/** + * 2.3 Getting the device currently in use (for desktop OS) + */ +- (TXMediaDeviceInfo *_Nullable)getCurrentDevice:(TXMediaDeviceType)type; + +/** + * 2.4 Setting the volume of the current device (for desktop OS) + * + * This API is used to set the capturing volume of the mic or playback volume of the speaker, but not the volume of the camera. + * @param volume Volume. 
Value range: 0-100; default: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. + */ +- (NSInteger)setCurrentDeviceVolume:(NSInteger)volume deviceType:(TXMediaDeviceType)type; + +/** + * 2.5 Getting the volume of the current device (for desktop OS) + * + * This API is used to get the capturing volume of the mic or playback volume of the speaker, but not the volume of the camera. + */ +- (NSInteger)getCurrentDeviceVolume:(TXMediaDeviceType)type; + +/** + * 2.6 Muting the current device (for desktop OS) + * + * This API is used to mute the mic or speaker, but not the camera. + */ +- (NSInteger)setCurrentDeviceMute:(BOOL)mute deviceType:(TXMediaDeviceType)type; + +/** + * 2.7 Querying whether the current device is muted (for desktop OS) + * + * This API is used to query whether the mic or speaker is muted. Camera muting is not supported. + */ +- (BOOL)getCurrentDeviceMute:(TXMediaDeviceType)type; + +/** + * 2.8 Starting camera testing (for desktop OS) + * + * @note You can use the {@link setCurrentDevice} API to switch between cameras during testing. + */ +- (NSInteger)startCameraDeviceTest:(NSView *)view; + +/** + * 2.9 Ending camera testing (for desktop OS) + */ +- (NSInteger)stopCameraDeviceTest; + +/** + * 2.10 Starting mic testing (for desktop OS) + * + * This API is used to test whether the mic functions properly. The mic volume detected (value range: 0-100) is returned via a callback. + * @param interval Interval of volume callbacks + */ +- (NSInteger)startMicDeviceTest:(NSInteger)interval testEcho:(void (^)(NSInteger volume))testEcho; + +/** + * 2.11 Ending mic testing (for desktop OS) + */ +- (NSInteger)stopMicDeviceTest; + +/** + * 2.12 Starting speaker testing (for desktop OS) + * + * This API is used to test whether the audio playback device functions properly by playing a specified audio file. If users can hear audio during testing, the device functions properly. 
+ * @param filePath Path of the audio file + */ +- (NSInteger)startSpeakerDeviceTest:(NSString *)audioFilePath onVolumeChanged:(void (^)(NSInteger volume, BOOL isLastFrame))volumeBlock; + +/** + * 2.13 Ending speaker testing (for desktop OS) + */ +- (NSInteger)stopSpeakerDeviceTest; + +/** + * 2.14 set onDeviceChanged callback (for Mac) + */ +- (void)setObserver:(nullable id)observer; +#endif + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Disused APIs (the corresponding new APIs are recommended) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Disused APIs (the corresponding new APIs are recommended) +/// @{ + +/** + * Setting the system volume type (for mobile OS) + * + * @deprecated This API is not recommended after v9.5. Please use the `startLocalAudio(quality)` API in {@link TRTCCloud} instead, which param `quality` is used to decide audio quality. + */ +#if TARGET_OS_IPHONE +- (NSInteger)setSystemVolumeType:(TXSystemVolumeType)type __attribute__((deprecated("use TRTCCloud#startLocalAudio:quality instead"))); +#endif + +/// @} +@end +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVBuffer.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVBuffer.h new file mode 100644 index 0000000..5ca84ea --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVBuffer.h @@ -0,0 +1,49 @@ + + +#ifndef TXLiteAVBuffer_h +#define TXLiteAVBuffer_h + +#include +#include +#include + +namespace liteav { + +/** + * Buffer 数据类型 + */ +class TXLiteAVBuffer { + +public: + virtual ~TXLiteAVBuffer() {} + + /** + * 获取 buffer 的内存地址 + */ + virtual uint8_t * data() = 0; + + /** + * 获取 buffer 的内存地址 + */ + virtual const uint8_t * cdata() const = 0; + + /** + * 获取 buffer 的内存size + */ + virtual size_t size() const = 0; + + /** + * 设置 buffe 的有效数据 size + * 如果此 size 超过当前 capacity,会造成重新分配内存,并复制数据 + */ + virtual void SetSize(size_t size) = 0; + + /** + * 确保 buffer 
分配的内存空间足够,不用多次分配拷贝内存。此方法会引起内存分配,data / cdata 方法获取的指针失效 + * @param capacity buffer 预分配的内存size + */ + virtual void EnsureCapacity(size_t capacity) = 0; +}; +} + +#endif /* TXLiteAVBuffer_h */ diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVCode.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVCode.h new file mode 100644 index 0000000..d518a35 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVCode.h @@ -0,0 +1,430 @@ +#ifndef __TXLITEAVCODE_H__ +#define __TXLITEAVCODE_H__ + +///////////////////////////////////////////////////////////////////////////////// +// +// 错误码 +// +///////////////////////////////////////////////////////////////////////////////// + +typedef enum TXLiteAVError +{ + ///////////////////////////////////////////////////////////////////////////////// + // + // 基础错误码 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_NULL = 0, ///< 无错误 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 进房(enterRoom)相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##onEnterRoom() 和 TRTCCloudDelegate##OnError() 通知 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_ROOM_ENTER_FAIL = -3301, ///< 进入房间失败,请查看 onError 中的 -3301 对应的 msg 提示确认失败原因 + ERR_ROOM_REQUEST_IP_TIMEOUT = -3307, ///< 请求 IP 和 sig 超时,请检查网络是否正常,或网络防火墙是否放行 UDP。可尝试访问下列 IP:162.14.22.165:8000 162.14.6.105:8000 和域名:default-query.trtc.tencent-cloud.com:8000 + ERR_ROOM_REQUEST_ENTER_ROOM_TIMEOUT = -3308, ///< 请求进房超时,请检查是否断网或者是否开启vpn,您也可以切换4G进行测试确认 + ERR_ENTER_ROOM_PARAM_NULL = -3316, ///< 进房参数为空,请检查: enterRoom:appScene: 接口调用是否传入有效的 param + ERR_SDK_APPID_INVALID = -3317, ///< 进房参数 sdkAppId 错误,请检查 TRTCParams.sdkAppId 是否为空 + ERR_ROOM_ID_INVALID = -3318, ///< 进房参数 roomId 错误,请检查 TRTCParams.roomId 或 TRTCParams.strRoomId 是否为空,注意 roomId 和 strRoomId 不可混用 + ERR_USER_ID_INVALID = -3319, ///< 进房参数 userId 不正确,请检查 TRTCParams.userId 是否为空 + ERR_USER_SIG_INVALID = 
-3320, ///< 进房参数 userSig 不正确,请检查 TRTCParams.userSig 是否为空 + ERR_ROOM_REQUEST_ENTER_ROOM_REFUSED = -3340, ///< 进房请求被拒绝,请检查是否连续调用 enterRoom 进入相同 Id 的房间 + ERR_SERVER_INFO_PRIVILEGE_FLAG_ERROR = -100006, ///< 您开启了高级权限控制,但参数 TRTCParams.privateMapKey 校验失败,您可参考 https://cloud.tencent.com/document/product/647/32240 进行检查 + ERR_SERVER_INFO_SERVICE_SUSPENDED = -100013, ///< 服务不可用。请检查:套餐包剩余分钟数是否大于0,腾讯云账号是否欠费。您可参考 https://cloud.tencent.com/document/product/647/50492 进行查看与配置 + ERR_SERVER_INFO_ECDH_GET_TINYID = -100018, ///< UserSig 校验失败,请检查参数 TRTCParams.userSig 是否填写正确,或是否已经过期。您可参考 https://cloud.tencent.com/document/product/647/50686 进行校验 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 退房(exitRoom)相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_ROOM_REQUEST_QUIT_ROOM_TIMEOUT = -3325, ///< 请求退房超时 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 设备(摄像头、麦克风、扬声器)相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 + // 区段:-6000 ~ -6999 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_CAMERA_START_FAIL = -1301, ///< 打开摄像头失败,例如在 Windows 或 Mac 设备,摄像头的配置程序(驱动程序)异常,禁用后重新启用设备,或者重启机器,或者更新配置程序 + ERR_CAMERA_NOT_AUTHORIZED = -1314, ///< 摄像头设备未授权,通常在移动设备出现,可能是权限被用户拒绝了 + ERR_CAMERA_SET_PARAM_FAIL = -1315, ///< 摄像头参数设置出错(参数不支持或其它) + ERR_CAMERA_OCCUPY = -1316, ///< 摄像头正在被占用中,可尝试打开其他摄像头 + ERR_MIC_START_FAIL = -1302, ///< 打开麦克风失败,例如在 Windows 或 Mac 设备,麦克风的配置程序(驱动程序)异常,禁用后重新启用设备,或者重启机器,或者更新配置程序 + ERR_MIC_NOT_AUTHORIZED = -1317, ///< 麦克风设备未授权,通常在移动设备出现,可能是权限被用户拒绝了 + ERR_MIC_SET_PARAM_FAIL = -1318, ///< 麦克风设置参数失败 + ERR_MIC_OCCUPY = -1319, ///< 麦克风正在被占用中,例如移动设备正在通话时,打开麦克风会失败 + ERR_MIC_STOP_FAIL = -1320, ///< 停止麦克风失败 + ERR_SPEAKER_START_FAIL = -1321, ///< 打开扬声器失败,例如在 Windows 或 Mac 设备,扬声器的配置程序(驱动程序)异常,禁用后重新启用设备,或者重启机器,或者更新配置程序 + ERR_SPEAKER_SET_PARAM_FAIL = -1322, 
///< 扬声器设置参数失败 + ERR_SPEAKER_STOP_FAIL = -1323, ///< 停止扬声器失败 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 系统声音采集相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##onSystemAudioLoopbackError() 通知 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_AUDIO_PLUGIN_START_FAIL = -1330, ///< 开启系统声音录制失败,例如音频驱动插件不可用 + ERR_AUDIO_PLUGIN_INSTALL_NOT_AUTHORIZED = -1331, ///< 安装音频驱动插件未授权 + ERR_AUDIO_PLUGIN_INSTALL_FAILED = -1332, ///< 安装音频驱动插件失败 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 屏幕分享相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_SCREEN_CAPTURE_START_FAIL = -1308, ///< 开始录屏失败,如果在移动设备出现,可能是权限被用户拒绝了,如果在 Windows 或 Mac 系统的设备出现,请检查录屏接口的参数是否符合要求 + ERR_SCREEN_CAPTURE_UNSURPORT = -1309, ///< 录屏失败,在 Android 平台,需要5.0以上的系统,在 iOS 平台,需要11.0以上的系统 + ERR_SERVER_CENTER_NO_PRIVILEDGE_PUSH_SUB_VIDEO = -102015, ///< 没有权限上行辅路 + ERR_SERVER_CENTER_ANOTHER_USER_PUSH_SUB_VIDEO = -102016, ///< 其他用户正在上行辅路 + ERR_SCREEN_CAPTURE_STOPPED = -7001, ///< 录屏被系统中止 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 编解码相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_VIDEO_ENCODE_FAIL = -1303, ///< 视频帧编码失败,例如 iOS 设备切换到其他应用时,硬编码器可能被系统释放,再切换回来时,硬编码器重启前,可能会抛出 + ERR_UNSUPPORTED_RESOLUTION = -1305, ///< 不支持的视频分辨率 + ERR_AUDIO_ENCODE_FAIL = -1304, ///< 音频帧编码失败,例如传入自定义音频数据,SDK 无法处理 + ERR_UNSUPPORTED_SAMPLERATE = -1306, ///< 不支持的音频采样率 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 自定义采集相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_PIXEL_FORMAT_UNSUPPORTED = -1327, ///< 设置的 pixel 
format 不支持 + ERR_BUFFER_TYPE_UNSUPPORTED = -1328, ///< 设置的 buffer type 不支持 + + ///////////////////////////////////////////////////////////////////////////////// + // + // CDN 绑定和混流相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##onStartPublishing() 和 TRTCCloudDelegate##onSetMixTranscodingConfig 通知。 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_PUBLISH_CDN_STREAM_REQUEST_TIME_OUT = -3321, ///< 旁路转推请求超时 + ERR_CLOUD_MIX_TRANSCODING_REQUEST_TIME_OUT = -3322, ///< 云端混流请求超时 + ERR_PUBLISH_CDN_STREAM_SERVER_FAILED = -3323, ///< 旁路转推回包异常 + ERR_CLOUD_MIX_TRANSCODING_SERVER_FAILED = -3324, ///< 云端混流回包异常 + ERR_ROOM_REQUEST_START_PUBLISHING_TIMEOUT = -3333, ///< 开始向腾讯云的直播 CDN 推流信令超时 + ERR_ROOM_REQUEST_START_PUBLISHING_ERROR = -3334, ///< 开始向腾讯云的直播 CDN 推流信令异常 + ERR_ROOM_REQUEST_STOP_PUBLISHING_TIMEOUT = -3335, ///< 停止向腾讯云的直播 CDN 推流信令超时 + ERR_ROOM_REQUEST_STOP_PUBLISHING_ERROR = -3336, ///< 停止向腾讯云的直播 CDN 推流信令异常 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 跨房连麦(ConnectOtherRoom)相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##onConnectOtherRoom() 通知。 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_ROOM_REQUEST_CONN_ROOM_TIMEOUT = -3326, ///< 请求连麦超时 + ERR_ROOM_REQUEST_DISCONN_ROOM_TIMEOUT = -3327, ///< 请求退出连麦超时 + ERR_ROOM_REQUEST_CONN_ROOM_INVALID_PARAM = -3328, ///< 无效参数 + ERR_CONNECT_OTHER_ROOM_AS_AUDIENCE = -3330, ///< 当前是观众角色,不能请求或断开跨房连麦,需要先 switchRole() 到主播 + ERR_SERVER_CENTER_CONN_ROOM_NOT_SUPPORT = -102031, ///< 不支持跨房间连麦 + ERR_SERVER_CENTER_CONN_ROOM_REACH_MAX_NUM = -102032, ///< 达到跨房间连麦上限 + ERR_SERVER_CENTER_CONN_ROOM_REACH_MAX_RETRY_TIMES = -102033, ///< 跨房间连麦重试次数耗尽 + ERR_SERVER_CENTER_CONN_ROOM_REQ_TIMEOUT = -102034, ///< 跨房间连麦请求超时 + ERR_SERVER_CENTER_CONN_ROOM_REQ = -102035, ///< 跨房间连麦请求格式错误 + ERR_SERVER_CENTER_CONN_ROOM_NO_SIG = -102036, ///< 跨房间连麦无签名 + ERR_SERVER_CENTER_CONN_ROOM_DECRYPT_SIG = -102037, ///< 跨房间连麦签名解密失败 + 
ERR_SERVER_CENTER_CONN_ROOM_NO_KEY = -102038, ///< 未找到跨房间连麦签名解密密钥 + ERR_SERVER_CENTER_CONN_ROOM_PARSE_SIG = -102039, ///< 跨房间连麦签名解析错误 + ERR_SERVER_CENTER_CONN_ROOM_INVALID_SIG_TIME = -102040, ///< 跨房间连麦签名时间戳错误 + ERR_SERVER_CENTER_CONN_ROOM_SIG_GROUPID = -102041, ///< 跨房间连麦签名不匹配 + ERR_SERVER_CENTER_CONN_ROOM_NOT_CONNED = -102042, ///< 本房间无连麦 + ERR_SERVER_CENTER_CONN_ROOM_USER_NOT_CONNED = -102043, ///< 本用户未发起连麦 + ERR_SERVER_CENTER_CONN_ROOM_FAILED = -102044, ///< 跨房间连麦失败 + ERR_SERVER_CENTER_CONN_ROOM_CANCEL_FAILED = -102045, ///< 取消跨房间连麦失败 + ERR_SERVER_CENTER_CONN_ROOM_CONNED_ROOM_NOT_EXIST = -102046, ///< 被连麦房间不存在 + ERR_SERVER_CENTER_CONN_ROOM_CONNED_REACH_MAX_ROOM = -102047, ///< 被连麦房间达到连麦上限 + ERR_SERVER_CENTER_CONN_ROOM_CONNED_USER_NOT_EXIST = -102048, ///< 被连麦用户不存在 + ERR_SERVER_CENTER_CONN_ROOM_CONNED_USER_DELETED = -102049, ///< 被连麦用户已被删除 + ERR_SERVER_CENTER_CONN_ROOM_CONNED_USER_FULL = -102050, ///< 被连麦用户达到资源上限 + ERR_SERVER_CENTER_CONN_ROOM_INVALID_SEQ = -102051, ///< 连麦请求序号错乱 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 客户无需关心的内部错误码 + // + ///////////////////////////////////////////////////////////////////////////////// + + // - Remove From Head + ERR_RTMP_PUSH_NET_DISCONNECT = -1307, ///< 直播,推流出现网络断开,且经过多次重试无法恢复 + ERR_RTMP_PUSH_INVALID_ADDRESS = -1313, ///< 直播,推流地址非法,例如不是 RTMP 协议的地址 + ERR_RTMP_PUSH_NET_ALLADDRESS_FAIL = -1324, ///< 直播,连接推流服务器失败(若支持智能选路,IP 全部失败) + ERR_RTMP_PUSH_NO_NETWORK = -1325, ///< 直播,网络不可用,请确认 Wi-Fi、移动数据或者有线网络是否正常 + ERR_RTMP_PUSH_SERVER_REFUSE = -1326, ///< 直播,服务器拒绝连接请求,可能是该推流地址已经被占用,或者 TXSecret 校验失败,或者是过期了,或者是欠费了 + + ERR_PLAY_LIVE_STREAM_NET_DISCONNECT = -2301, ///< 直播,网络断连,且经多次重连抢救无效,可以放弃治疗,更多重试请自行重启播放 + ERR_GET_RTMP_ACC_URL_FAIL = -2302, ///< 直播,获取加速拉流的地址失败 + ERR_FILE_NOT_FOUND = -2303, ///< 播放的文件不存在 + ERR_HEVC_DECODE_FAIL = -2304, ///< H265 解码失败 + ERR_VOD_DECRYPT_FAIL = -2305, ///< 点播,音视频流解密失败 + ERR_GET_VODFILE_MEDIAINFO_FAIL = -2306, ///< 点播,获取点播文件信息失败 + 
ERR_PLAY_LIVE_STREAM_SWITCH_FAIL = -2307, ///< 直播,切流失败(切流可以播放不同画面大小的视频) + ERR_PLAY_LIVE_STREAM_SERVER_REFUSE = -2308, ///< 直播,服务器拒绝连接请求 + ERR_RTMP_ACC_FETCH_STREAM_FAIL = -2309, ///< 直播,RTMPACC 低延时拉流失败,且经过多次重试无法恢复 + ERR_HEVC_ENCODE_FAIL = -2310, ///< 265编码失败 + ERR_HEVC_ENCODE_NOT_SUPPORT = -2311, ///< 265编码判断不支持 + ERR_HEVC_SOFTDECODER_START_FAIL = -2312, ///< 265软解启动失败 + + ERR_ROOM_HEARTBEAT_FAIL = -3302, ///< 心跳失败,客户端定时向服务器发送数据包,告诉服务器自己活着,这个错误通常是发包超时 + ERR_ROOM_REQUEST_IP_FAIL = -3303, ///< 拉取接口机服务器地址失败 + ERR_ROOM_CONNECT_FAIL = -3304, ///< 连接接口机服务器失败 + ERR_ROOM_REQUEST_AVSEAT_FAIL = -3305, ///< 请求视频位失败 + ERR_ROOM_REQUEST_TOKEN_HTTPS_TIMEOUT = -3306, ///< 请求 token HTTPS 超时,请检查网络是否正常,或网络防火墙是否放行 HTTPS 访问 official.opensso.tencent-cloud.com:443 + ERR_ROOM_REQUEST_VIDEO_FLAG_TIMEOUT = -3309, ///< 请求视频位超时 + ERR_ROOM_REQUEST_VIDEO_DATA_ROOM_TIMEOUT = -3310, ///< 请求视频数据超时 + ERR_ROOM_REQUEST_CHANGE_ABILITY_TIMEOUT = -3311, ///< 请求修改视频能力项超时 + ERR_ROOM_REQUEST_STATUS_REPORT_TIMEOUT = -3312, ///< 请求状态上报超时 + ERR_ROOM_REQUEST_CLOSE_VIDEO_TIMEOUT = -3313, ///< 请求关闭视频超时 + ERR_ROOM_REQUEST_SET_RECEIVE_TIMEOUT = -3314, ///< 请求接收视频项超时 + ERR_ROOM_REQUEST_TOKEN_INVALID_PARAMETER = -3315, ///< 请求 token 无效参数,请检查 TRTCParams.userSig 是否填写正确 + ERR_ROOM_REQUEST_EXIT_ROOM_WHEN_ENTERING_ROOM = -3341, ///< 进房尚未成功时,收到了退房请求 + + ERR_ROOM_REQUEST_AES_TOKEN_RETURN_ERROR = -3329, ///< 请求 AES TOKEN 时,server 返回的内容是空的 + ERR_ACCIP_LIST_EMPTY = -3331, ///< 请求接口机 IP 返回的列表为空的 + ERR_ROOM_REQUEST_SEND_JSON_CMD_TIMEOUT = -3332, ///< 请求发送Json 信令超时 + + // Info 服务器(查询接口机 IP), 服务器错误码,数值范围[-100000, -110000] + ERR_SERVER_INFO_UNPACKING_ERROR = -100000, ///< server 解包错误,可能请求数据被篡改 + ERR_SERVER_INFO_TOKEN_ERROR = -100001, ///< TOKEN 错误 + ERR_SERVER_INFO_ALLOCATE_ACCESS_FAILED = -100002, ///< 分配接口机错误 + ERR_SERVER_INFO_GENERATE_SIGN_FAILED = -100003, ///< 生成签名错误 + ERR_SERVER_INFO_TOKEN_TIMEOUT = -100004, ///< HTTPS token 超时 + ERR_SERVER_INFO_INVALID_COMMAND = -100005, ///< 无效的命令字 + ERR_SERVER_INFO_GENERATE_KEN_ERROR = 
-100007, ///< HTTPS 请求时,生成加密 key 错误 + ERR_SERVER_INFO_GENERATE_TOKEN_ERROR = -100008, ///< HTTPS 请求时,生成 token 错误 + ERR_SERVER_INFO_DATABASE = -100009, ///< 数据库查询失败(房间相关存储信息) + ERR_SERVER_INFO_BAD_ROOMID = -100010, ///< 房间号错误 + ERR_SERVER_INFO_BAD_SCENE_OR_ROLE = -100011, ///< 场景或角色错误 + ERR_SERVER_INFO_ROOMID_EXCHANGE_FAILED = -100012, ///< 房间号转换出错 + ERR_SERVER_INFO_STRGROUP_HAS_INVALID_CHARS = -100014, ///< 房间号非法 + ERR_SERVER_INFO_LACK_SDKAPPID = -100015, ///< 非法SDKAppid + ERR_SERVER_INFO_INVALID = -100016, ///< 无效请求, 分配接口机失败 + ERR_SERVER_INFO_ECDH_GET_KEY = -100017, ///< 生成公钥失败 + + // Access 接口机 + ERR_SERVER_ACC_TOKEN_TIMEOUT = -101000, ///< token 过期 + ERR_SERVER_ACC_SIGN_ERROR = -101001, ///< 签名错误 + ERR_SERVER_ACC_SIGN_TIMEOUT = -101002, ///< 签名超时 + ERR_SERVER_ACC_ROOM_NOT_EXIST = -101003, ///< 房间不存在 + ERR_SERVER_ACC_ROOMID = -101004, ///< 后台房间标识 roomId 错误 + ERR_SERVER_ACC_LOCATIONID = -101005, ///< 后台用户位置标识 locationId 错误 + ERR_SERVER_ACC_TOKEN_EORROR = -101006, ///< token里面的tinyid和进房信令tinyid不同 或是 进房信令没有token + + // Center 服务器(信令和流控处理等任务) + ERR_SERVER_CENTER_SYSTEM_ERROR = -102000, ///< 后台错误 + + ERR_SERVER_CENTER_INVALID_ROOMID = -102001, ///< 无效的房间 Id + ERR_SERVER_CENTER_CREATE_ROOM_FAILED = -102002, ///< 创建房间失败 + ERR_SERVER_CENTER_SIGN_ERROR = -102003, ///< 签名错误 + ERR_SERVER_CENTER_SIGN_TIMEOUT = -102004, ///< 签名过期 + ERR_SERVER_CENTER_ROOM_NOT_EXIST = -102005, ///< 房间不存在 + ERR_SERVER_CENTER_ADD_USER_FAILED = -102006, ///< 房间添加用户失败 + ERR_SERVER_CENTER_FIND_USER_FAILED = -102007, ///< 查找用户失败 + ERR_SERVER_CENTER_SWITCH_TERMINATION_FREQUENTLY = -102008, ///< 频繁切换终端 + ERR_SERVER_CENTER_LOCATION_NOT_EXIST = -102009, ///< locationid 错误 + ERR_SERVER_CENTER_NO_PRIVILEDGE_CREATE_ROOM = -102010, ///< 没有权限创建房间 + ERR_SERVER_CENTER_NO_PRIVILEDGE_ENTER_ROOM = -102011, ///< 没有权限进入房间 + ERR_SERVER_CENTER_INVALID_PARAMETER_SUB_VIDEO = -102012, ///< 辅路抢视频位、申请辅路请求类型参数错误 + ERR_SERVER_CENTER_NO_PRIVILEDGE_PUSH_VIDEO = -102013, ///< 没有权限上视频 + ERR_SERVER_CENTER_ROUTE_TABLE_ERROR = 
-102014, ///< 没有空闲路由表 + ERR_SERVER_CENTER_NOT_PUSH_SUB_VIDEO = -102017, ///< 当前用户没有上行辅路 + ERR_SERVER_CENTER_USER_WAS_DELETED = -102018, ///< 用户被删除状态 + ERR_SERVER_CENTER_NO_PRIVILEDGE_REQUEST_VIDEO = -102019, ///< 没有权限请求视频 + ERR_SERVER_CENTER_INVALID_PARAMETER = -102023, ///< 进房参数 bussInfo 错误 + ERR_SERVER_CENTER_I_FRAME_UNKNOW_TYPE = -102024, ///< 请求 I 帧未知 opType + ERR_SERVER_CENTER_I_FRAME_INVALID_PACKET = -102025, ///< 请求 I 帧包格式错误 + ERR_SERVER_CENTER_I_FRAME_DEST_USER_NOT_EXIST = -102026, ///< 请求 I 帧目标用户不存在 + ERR_SERVER_CENTER_I_FRAME_ROOM_TOO_BIG = -102027, ///< 请求 I 帧房间用户太多 + ERR_SERVER_CENTER_I_FRAME_RPS_INVALID_PARAMETER = -102028, ///< 请求 I 帧参数错误 + ERR_SERVER_CENTER_INVALID_ROOM_ID = -102029, ///< 房间号非法 + ERR_SERVER_CENTER_ROOM_ID_TOO_LONG = -102030, ///< 房间号超过限制 + ERR_SERVER_CENTER_ROOM_FULL = -102052, ///< 房间满员 + ERR_SERVER_CENTER_DECODE_JSON_FAIL = -102053, ///< JSON 串解析失败 + ERR_SERVER_CENTER_UNKNOWN_SUB_CMD = -102054, ///< 未定义命令字 + ERR_SERVER_CENTER_INVALID_ROLE = -102055, ///< 未定义角色 + ERR_SERVER_CENTER_REACH_PROXY_MAX = -102056, ///< 代理机超出限制 + ERR_SERVER_CENTER_RECORDID_STORE = -102057, ///< 无法保存用户自定义 recordId + ERR_SERVER_CENTER_PB_SERIALIZE = -102058, ///< Protobuf 序列化错误 + + ERR_SERVER_SSO_SIG_EXPIRED = -70001, ///< sig 过期,请尝试重新生成。如果是刚生成,就过期,请检查有效期填写的是否过小,或者填的 0 + ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_1 = -70003, ///< sig 校验失败,请确认下 sig 内容是否被截断,如缓冲区长度不够导致的内容截断 + ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_2 = -70004, ///< sig 校验失败,请确认下 sig 内容是否被截断,如缓冲区长度不够导致的内容截断 + ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_3 = -70005, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 + ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_4 = -70006, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 + ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_5 = -70007, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 + ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_6 = -70008, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 + ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_7 = -70009, ///< 用业务公钥验证 sig 失败,请确认生成的 usersig 使用的私钥和 sdkAppId 是否对应 + ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_8 
= -70010, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 + ERR_SERVER_SSO_SIG_VERIFICATION_ID_NOT_MATCH = -70013, ///< sig 中 identifier 与请求时的 identifier 不匹配,请检查登录时填写的 identifier 与 sig 中的是否一致 + ERR_SERVER_SSO_APPID_NOT_MATCH = -70014, ///< sig 中 sdkAppId 与请求时的 sdkAppId 不匹配,请检查登录时填写的 sdkAppId 与 sig 中的是否一致 + ERR_SERVER_SSO_VERIFICATION_EXPIRED = -70017, ///< 内部第三方票据验证超时,请重试,如多次重试不成功,请@TLS 帐号支持,QQ 3268519604 + ERR_SERVER_SSO_VERIFICATION_FAILED = -70018, ///< 内部第三方票据验证超时,请重试,如多次重试不成功,请@TLS 帐号支持,QQ 3268519604 + + ERR_SERVER_SSO_APPID_NOT_FOUND = -70020, ///< sdkAppId 未找到,请确认是否已经在腾讯云上配置 + ERR_SERVER_SSO_ACCOUNT_IN_BLACKLIST = -70051, ///< 帐号已被拉入黑名单,请联系 TLS 帐号支持 QQ 3268519604 + ERR_SERVER_SSO_SIG_INVALID = -70052, ///< usersig 已经失效,请重新生成,再次尝试 + ERR_SERVER_SSO_LIMITED_BY_SECURITY = -70114, ///< 安全原因被限制 + ERR_SERVER_SSO_INVALID_LOGIN_STATUS = -70221, ///< 登录状态无效,请使用 usersig 重新鉴权 + ERR_SERVER_SSO_APPID_ERROR = -70252, ///< sdkAppId 填写错误 + ERR_SERVER_SSO_TICKET_VERIFICATION_FAILED = -70346, ///< 票据校验失败,请检查各项参数是否正确 + ERR_SERVER_SSO_TICKET_EXPIRED = -70347, ///< 票据因过期原因校验失败 + ERR_SERVER_SSO_ACCOUNT_EXCEED_PURCHASES = -70398, ///< 创建账号数量超过已购买预付费数量限制 + ERR_SERVER_SSO_INTERNAL_ERROR = -70500, ///< 服务器内部错误,请重试 + + //秒级监控上报错误码 + ERR_REQUEST_QUERY_CONFIG_TIMEOUT = -4001, ///< 请求通用配置超时 + ERR_CUSTOM_STREAM_INVALID = -4002, ///< 自定义流id错误 + ERR_USER_DEFINE_RECORD_ID_INVALID = -4003, ///< userDefineRecordId错误 + ERR_MIX_PARAM_INVALID = -4004, ///< 混流参数校验失败 + ERR_REQUEST_ACC_BY_HOST_IP = -4005, ///< 通过域名进行0x1请求 + // - /Remove From Head +} TXLiteAVError; + +///////////////////////////////////////////////////////////////////////////////// +// +// 警告码 +// +//> 不需要特别关注,但您可以根据其中某些感兴趣的警告码,对当前用户进行相应的提示 +// +///////////////////////////////////////////////////////////////////////////////// + +typedef enum TXLiteAVWarning +{ + WARNING_HW_ENCODER_START_FAIL = 1103, ///< 硬编码启动出现问题,自动切换到软编码 + WARNING_CURRENT_ENCODE_TYPE_CHANGED = 1104, ///< 当前编码格式, 通过key 为type获取,值为1时是265编码,值为0时是264编码 + 
WARNING_VIDEO_ENCODER_SW_TO_HW = 1107, ///< 当前 CPU 使用率太高,无法满足软件编码需求,自动切换到硬件编码 + WARNING_INSUFFICIENT_CAPTURE_FPS = 1108, ///< 摄像头采集帧率不足,部分自带美颜算法的 Android 手机上会出现 + WARNING_SW_ENCODER_START_FAIL = 1109, ///< 软编码启动失败 + WARNING_REDUCE_CAPTURE_RESOLUTION = 1110, ///< 摄像头采集分辨率被降低,以满足当前帧率和性能最优解。 + WARNING_CAMERA_DEVICE_EMPTY = 1111, ///< 没有检测到可用的摄像头设备 + WARNING_CAMERA_NOT_AUTHORIZED = 1112, ///< 用户未授权当前应用使用摄像头 + WARNING_MICROPHONE_DEVICE_EMPTY = 1201, ///< 没有检测到可用的麦克风设备 + WARNING_SPEAKER_DEVICE_EMPTY = 1202, ///< 没有检测到可用的扬声器设备 + WARNING_MICROPHONE_NOT_AUTHORIZED = 1203, ///< 用户未授权当前应用使用麦克风 + WARNING_MICROPHONE_DEVICE_ABNORMAL = 1204, ///< 音频采集设备不可用(例如被占用或者PC判定无效设备) + WARNING_SPEAKER_DEVICE_ABNORMAL = 1205, ///< 音频播放设备不可用(例如被占用或者PC判定无效设备) + WARNING_SCREEN_CAPTURE_NOT_AUTHORIZED = 1206, ///< 用户未授权当前应用使用屏幕录制 + WARNING_VIDEO_FRAME_DECODE_FAIL = 2101, ///< 当前视频帧解码失败 + WARNING_AUDIO_FRAME_DECODE_FAIL = 2102, ///< 当前音频帧解码失败 + WARNING_VIDEO_PLAY_LAG = 2105, ///< 当前视频播放出现卡顿 + WARNING_HW_DECODER_START_FAIL = 2106, ///< 硬解启动失败,采用软解码 + WARNING_VIDEO_DECODER_HW_TO_SW = 2108, ///< 当前流硬解第一个 I 帧失败,SDK 自动切软解 + WARNING_SW_DECODER_START_FAIL = 2109, ///< 软解码器启动失败 + WARNING_VIDEO_RENDER_FAIL = 2110, ///< 视频渲染失败 + WARNING_START_CAPTURE_IGNORED = 4000, ///< 已经在采集,启动采集被忽略 + WARNING_AUDIO_RECORDING_WRITE_FAIL = 7001, ///< 音频录制写入文件失败 + WARNING_ROOM_DISCONNECT = 5101, ///< 网络断开连接 + WARNING_IGNORE_UPSTREAM_FOR_AUDIENCE = 6001, ///< 当前是观众角色,忽略上行音视频数据 + WARNING_MICROPHONE_HOWLING_DETECTED = 7002, ///< 录制音频时监测到啸叫。请调节两台客户端之间的距离或降低播放音量,检测到啸叫后,5s后会再次进行重新检测 + + // - Remove From Head + WARNING_NET_BUSY = 1101, ///< 网络状况不佳:上行带宽太小,上传数据受阻 + WARNING_RTMP_SERVER_RECONNECT = 1102, ///< 直播,网络断连, 已启动自动重连(自动重连连续失败超过三次会放弃) + WARNING_LIVE_STREAM_SERVER_RECONNECT = 2103, ///< 直播,网络断连, 已启动自动重连(自动重连连续失败超过三次会放弃) + WARNING_RECV_DATA_LAG = 2104, ///< 网络来包不稳:可能是下行带宽不足,或由于主播端出流不均匀 + WARNING_RTMP_DNS_FAIL = 3001, ///< 直播,DNS 解析失败 + WARNING_RTMP_SEVER_CONN_FAIL = 3002, ///< 直播,服务器连接失败 + WARNING_RTMP_SHAKE_FAIL = 3003, ///< 
直播,与 RTMP 服务器握手失败 + WARNING_RTMP_SERVER_BREAK_CONNECT = 3004, ///< 直播,服务器主动断开 + WARNING_RTMP_READ_WRITE_FAIL = 3005, ///< 直播,RTMP 读/写失败,将会断开连接 + WARNING_RTMP_WRITE_FAIL = 3006, ///< 直播,RTMP 写失败(SDK 内部错误码,不会对外抛出) + WARNING_RTMP_READ_FAIL = 3007, ///< 直播,RTMP 读失败(SDK 内部错误码,不会对外抛出) + WARNING_RTMP_NO_DATA = 3008, ///< 直播,超过30s 没有数据发送,主动断开连接 + WARNING_PLAY_LIVE_STREAM_INFO_CONNECT_FAIL = 3009, ///< 直播,connect 服务器调用失败(SDK 内部错误码,不会对外抛出) + WARNING_NO_STEAM_SOURCE_FAIL = 3010, ///< 直播,连接失败,该流地址无视频(SDK 内部错误码,不会对外抛出) + WARNING_ROOM_RECONNECT = 5102, ///< 网络断连,已启动自动重连 + WARNING_ROOM_NET_BUSY = 5103, ///< 网络状况不佳:上行带宽太小,上传数据受阻 + // - /Remove From Head +} TXLiteAVWarning; + +// - Remove From Head +///////////////////////////////////////////////////////////////////////////////// +// +// (三)事件列表 +// +///////////////////////////////////////////////////////////////////////////////// + +typedef enum TXLiteAVEvent +{ + EVT_RTMP_PUSH_CONNECT_SUCC = 1001, ///< 直播,已经连接 RTMP 推流服务器 + EVT_RTMP_PUSH_BEGIN = 1002, ///< 直播,已经与 RTMP 服务器握手完毕,开始推流 + EVT_CAMERA_START_SUCC = 1003, ///< 打开摄像头成功 + EVT_SCREEN_CAPTURE_SUCC = 1004, ///< 录屏启动成功 + EVT_UP_CHANGE_RESOLUTION = 1005, ///< 上行动态调整分辨率 + EVT_UP_CHANGE_BITRATE = 1006, ///< 码率动态调整 + EVT_FIRST_FRAME_AVAILABLE = 1007, ///< 首帧画面采集完成 + EVT_START_VIDEO_ENCODER = 1008, ///< 编码器启动成功 + EVT_SNAPSHOT_COMPLETE = 1022, ///< 一帧截图完成 + EVT_CAMERA_REMOVED = 1023, ///< 摄像头设备已被移出(Windows 和 Mac 版 SDK 使用) + EVT_CAMERA_AVAILABLE = 1024, ///< 摄像头设备重新可用(Windows 和 Mac 版 SDK 使用) + EVT_CAMERA_CLOSE = 1025, ///< 关闭摄像头完成(Windows 和 Mac 版 SDK 使用) + EVT_RTMP_PUSH_PUBLISH_START = 1026, ///< 直播,与 RTMP 服务器连接后,收到 NetStream.Publish.Start 消息,表明流发布成功(SDK 内部事件,不会对外抛出) + EVT_HW_ENCODER_START_SUCC = 1027, ///< 硬编码器启动成功 + EVT_SW_ENCODER_START_SUCC = 1028, ///< 软编码器启动成功 + EVT_LOCAL_RECORD_RESULT = 1029, ///< 本地录制结果 + EVT_LOCAL_RECORD_PROGRESS = 1030, ///< 本地录制状态通知 + + EVT_PLAY_LIVE_STREAM_CONNECT_SUCC = 2001, ///< 直播,已经连接 RTMP 拉流服务器 + EVT_PLAY_LIVE_STREAM_BEGIN = 2002, ///< 直播,已经与 RTMP 
服务器握手完毕,开始拉流 + EVT_RENDER_FIRST_I_FRAME = 2003, ///< 渲染首个视频数据包(IDR) + EVT_VIDEO_PLAY_BEGIN = 2004, ///< 视频播放开始 + EVT_VIDEO_PLAY_PROGRESS = 2005, ///< 视频播放进度 + EVT_VIDEO_PLAY_END = 2006, ///< 视频播放结束 + EVT_VIDEO_PLAY_LOADING = 2007, ///< 视频播放 loading + EVT_START_VIDEO_DECODER = 2008, ///< 解码器启动 + EVT_DOWN_CHANGE_RESOLUTION = 2009, ///< 下行视频分辨率改变 + EVT_GET_VODFILE_MEDIAINFO_SUCC = 2010, ///< 点播,获取点播文件信息成功 + EVT_VIDEO_CHANGE_ROTATION = 2011, ///< 视频旋转角度发生改变 + EVT_PLAY_GET_MESSAGE = 2012, ///< 消息事件 + EVT_VOD_PLAY_PREPARED = 2013, ///< 点播,视频加载完毕 + EVT_VOD_PLAY_LOADING_END = 2014, ///< 点播,loading 结束 + EVT_PLAY_LIVE_STREAM_SWITCH_SUCC = 2015, ///< 直播,切流成功(切流可以播放不同画面大小的视频) + EVT_VOD_PLAY_TCP_CONNECT_SUCC = 2016, ///< 点播,TCP 连接成功(SDK 内部事件,不会对外抛出) + EVT_VOD_PLAY_FIRST_VIDEO_PACKET = 2017, ///< 点播,收到首帧数据(SDK 内部事件,不会对外抛出) + EVT_VOD_PLAY_DNS_RESOLVED = 2018, ///< 点播,DNS 解析完成(SDK 内部事件,不会对外抛出) + EVT_VOD_PLAY_SEEK_COMPLETE = 2019, ///< 点播,视频播放 Seek 完成(SDK 内部事件,不会对外抛出) + EVT_VIDEO_DECODER_CACHE_TOO_MANY_FRAMES = 2020, ///< 视频解码器缓存帧数过多,超过40帧(SDK 内部事件,不会对外抛出) + EVT_HW_DECODER_START_SUCC = 2021, ///< 硬解码器启动成功(SDK 内部事件,不会对外抛出) + EVT_SW_DECODER_START_SUCC = 2022, ///< 软解码器启动成功(SDK 内部事件,不会对外抛出) + EVT_AUDIO_JITTER_STATE_FIRST_LOADING = 2023, ///< 音频首次加载(SDK 内部事件,不会对外抛出) + EVT_AUDIO_JITTER_STATE_LOADING = 2024, ///< 音频正在加载(SDK 内部事件,不会对外抛出) + EVT_AUDIO_JITTER_STATE_PLAYING = 2025, ///< 音频正在播放(SDK 内部事件,不会对外抛出) + EVT_AUDIO_JITTER_STATE_FIRST_PLAY = 2026, ///< 音频首次播放(SDK 内部事件,不会对外抛出) + EVT_MIC_START_SUCC = 2027, ///< 麦克风启动成功 + EVT_PLAY_GET_METADATA = 2028, ///< 视频流MetaData事件 + EVT_MIC_RELEASE_SUCC = 2029, ///< 释放麦克风占用 + EVT_AUDIO_DEVICE_ROUTE_CHANGED = 2030, ///< 音频设备的route发生改变,即当前的输入输出设备发生改变,比如耳机被拔出 + EVT_PLAY_GET_FLVSESSIONKEY = 2031, ///< TXLivePlayer 接收到http响应头中的 flvSessionKey 信息 + EVT_AUDIO_SESSION_INTERRUPT = 2032, ///< Audio Session Interrupt事件 + + + EVT_ROOM_ENTER = 1018, ///< 进入房间成功 + EVT_ROOM_EXIT = 1019, ///< 退出房间 + EVT_ROOM_USERLIST = 1020, ///< 下发房间成员列表(不包括自己) + 
EVT_ROOM_NEED_REENTER = 1021, ///< WiFi 切换到4G 会触发断线重连,此时需要重新进入房间(拉取最优的服务器地址) + EVT_ROOM_ENTER_FAILED = 1022, ///< 自己进入房间失败 + EVT_ROOM_USER_ENTER = 1031, ///< 进房通知 + EVT_ROOM_USER_EXIT = 1032, ///< 退房通知 + EVT_ROOM_USER_VIDEO_STATE = 1033, ///< 视频状态位变化通知 + EVT_ROOM_USER_AUDIO_STATE = 1034, ///< 音频状态位变化通知 + + EVT_ROOM_REQUEST_IP_SUCC = 8001, ///< 拉取接口机服务器地址成功 + EVT_ROOM_CONNECT_SUCC = 8002, ///< 连接接口机服务器成功 + EVT_ROOM_REQUEST_AVSEAT_SUCC = 8003, ///< 请求视频位成功 +} TXLiteAVEvent; +// - /Remove From Head + +#endif /* __TXLITEAVCODE_H__ */ diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVEncodedDataProcessingListener.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVEncodedDataProcessingListener.h new file mode 100644 index 0000000..33e79d2 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVEncodedDataProcessingListener.h @@ -0,0 +1,54 @@ +/* +* Module: live 编码数据回调 +* +* Function: 回调推流端编码完,和 播放端解码前的数据 +* +*/ + + +#ifndef TXLiteAVEncodedDataProcessingListener_h +#define TXLiteAVEncodedDataProcessingListener_h + +#include +#include "TXLiteAVBuffer.h" + +namespace liteav { + +struct TXLiteAVEncodedData { + const char * userId; // didEncodeVideo 和 didEncodeAudio 回调时,此字段为null; + int streamType; // 视频流类型,参考 TRTCVideoStreamType,audio时,此字段为0 + const liteav::TXLiteAVBuffer * originData; // 原始数据 + liteav::TXLiteAVBuffer * processedData; // 写回处理后的数据 +}; + +class ITXLiteAVEncodedDataProcessingListener { +public: + virtual ~ITXLiteAVEncodedDataProcessingListener() {} + + /** + * 回调编码完的视频数据。 + * @note videoData.userId = nullptr + */ + virtual bool didEncodeVideo(TXLiteAVEncodedData & videoData) { return false; } + + /** + * 回调解码前的视频数据。 + * @note videoData.userId 表示对应的user,当userId 为 nullptr时,表示此时先接收到数据了,对应的userId还未完成同步。获取到userId之后会回调正确的userId + */ + virtual bool willDecodeVideo(TXLiteAVEncodedData & videoData) { return false; } + + /** + * 回调编码完的音频数据。 + * @note audioData.userId = nullptr + */ + virtual bool didEncodeAudio(TXLiteAVEncodedData & 
audioData) { return false; } + + /** + * 回调解码前的音频数据。 + * @note audioData.userId 表示对应的user,当userId 为 nullptr时,表示此时先接收到数据了,对应的userId还未完成同步。获取到userId之后会回调正确的userId + */ + virtual bool willDecodeAudio(TXLiteAVEncodedData & audioData) { return false; } +}; +} + +#endif /* TXLiteAVEncodedDataProcessingListener_h */ diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVSDK.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVSDK.h new file mode 100644 index 0000000..a949948 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiteAVSDK.h @@ -0,0 +1,35 @@ +// +// TXLiteAVSDK.h +// TXLiteAVSDK +// +// Created by alderzhang on 2017/6/9. +// Copyright © 2017年 Tencent. All rights reserved. +// + + + +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveAudioSessionDelegate.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveAudioSessionDelegate.h new file mode 100644 index 0000000..b59fcb7 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveAudioSessionDelegate.h @@ -0,0 +1,43 @@ +#ifndef TXLiveAudioSessionDelegate_h +#define TXLiveAudioSessionDelegate_h + +#import + +@protocol TXLiveAudioSessionDelegate +#if TARGET_OS_IPHONE + +@optional +- (BOOL)setActive:(BOOL)active error:(NSError **)outError; + +@optional +- (BOOL)setActive:(BOOL)active withOptions:(AVAudioSessionSetActiveOptions)options error:(NSError **)outError; + +@optional +- (BOOL)setMode:(NSString *)mode error:(NSError **)outError; + +@optional +- (BOOL)setCategory:(NSString *)category error:(NSError **)outError; + +@optional +- (BOOL)setCategory:(NSString *)category withOptions:(AVAudioSessionCategoryOptions)options error:(NSError **)outError; + +@optional +- (BOOL)setCategory:(NSString *)category mode:(NSString *)mode 
options:(AVAudioSessionCategoryOptions)options error:(NSError **)outError; +@optional +- (BOOL)setPreferredIOBufferDuration:(NSTimeInterval)duration error:(NSError **)outError; + +@optional +- (BOOL)setPreferredSampleRate:(double)sampleRate error:(NSError **)outError; + +@optional +- (BOOL)setPreferredOutputNumberOfChannels:(NSInteger)count error:(NSError **)outError; + +@optional +- (BOOL)overrideOutputAudioPort:(AVAudioSessionPortOverride)portOverride error:(NSError **)outError; + +@optional +- (BOOL)setPreferredInput:(nullable AVAudioSessionPortDescription *)inPort error:(NSError **)outError; + +#endif +@end +#endif /* TXLiveAudioSessionDelegate_h */ diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveBase.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveBase.h new file mode 100644 index 0000000..c72695b --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveBase.h @@ -0,0 +1,141 @@ +#import "TXLiveAudioSessionDelegate.h" + +typedef NS_ENUM(NSInteger, TX_Enum_Type_LogLevel) { + ///输出所有级别的log + LOGLEVEL_VERBOSE = 0, + /// 输出 DEBUG,INFO,WARNING,ERROR 和 FATAL 级别的log + LOGLEVEL_DEBUG = 1, + /// 输出 INFO,WARNING,ERROR 和 FATAL 级别的log + LOGLEVEL_INFO = 2, + /// 只输出WARNING,ERROR 和 FATAL 级别的log + LOGLEVEL_WARN = 3, + /// 只输出ERROR 和 FATAL 级别的log + LOGLEVEL_ERROR = 4, + /// 只输出 FATAL 级别的log + LOGLEVEL_FATAL = 5, + /// 不输出任何sdk log + LOGLEVEL_NULL = 6, +}; + +@protocol TXLiveBaseDelegate +@optional + +/** + @brief Log回调  + @discussion + 1.实现TXLiveBaseDelegate,建议在一个比较早的初始化类中如AppDelegate + 2.在初始化中设置此回调,eg:[TXLiveBase sharedInstance].delegate = self; + 3.level类型参见TX_Enum_Type_LogLevel + 4.module值暂无具体意义,目前为固定值TXLiteAVSDK + */ +- (void)onLog:(NSString*)log LogLevel:(int)level WhichModule:(NSString*)module; + +/** + * @brief NTP 校时回调,调用 TXLiveBase updateNetworkTime 后会触发 + * @param errCode 0:表示校时成功且偏差在30ms以内,1:表示校时成功但偏差可能在 30ms 以上,-1:表示校时失败 + */ +- (void)onUpdateNetworkTime:(int)errCode message:(NSString *)errMsg; + +/** + @brief setLicenceURL 接口回调, 
result = 0 成功,负数失败。 + @discussion + 需在调用 setLicenceURL 前设置 delegate + */ +- (void)onLicenceLoaded:(int)result Reason:(NSString *)reason; + +@end + +@interface TXLiveBase : NSObject + +/// 通过这个delegate将全部log回调给SDK使用者,由SDK使用者来决定log如何处理 +@property (nonatomic, weak) id delegate; + ++ (instancetype)sharedInstance; + +/** + * 设置 liteav SDK 接入的环境。 + * 腾讯云在全球各地区部署的环境,按照各地区政策法规要求,需要接入不同地区接入点。 + * + * @param env_config 需要接入的环境,SDK 默认接入的环境是:默认正式环境。 + * @return 0:成功;其他:错误 + * + * @note 目标市场为中国大陆的客户请不要调用此接口,如果目标市场为海外用户,请通过技术支持联系我们,了解 env_config 的配置方法,以确保 App 遵守 GDPR 标准。 + */ ++ (int)setGlobalEnv:(const char *)env_config; + +/** + * 设置 log 输出级别 + * @param level 参见 LOGLEVEL + */ ++ (void)setLogLevel:(TX_Enum_Type_LogLevel)level; + +/** + * 启用或禁用控制台日志打印 + * @param enabled 指定是否启用 + */ ++ (void)setConsoleEnabled:(BOOL)enabled; + ++ (void)setAppVersion:(NSString *)verNum; + ++ (void)setAudioSessionDelegate:(id)delegate; + +/** + * @brief 获取 SDK 版本信息 + * @return SDK 版本信息 + */ ++ (NSString *)getSDKVersionStr; + +/** + * @brief 获取 pitu 版本信息 + * @return pitu 版本信息 + */ ++ (NSString *)getPituSDKVersion; + +/** + * @brief 设置 appID,云控使用 + */ ++ (void)setAppID:(NSString*)appID; + +/** + * @brief 设置 sdk 的 Licence 下载 url 和 key + */ ++ (void)setLicenceURL:(NSString *)url key:(NSString *)key; + +/** + * @brief 设置 userId,用于数据上报 + */ ++ (void)setUserId:(NSString *)userId; + +/** + * @brief 获取 Licence 信息 + * @return Licence 信息 + */ ++ (NSString *)getLicenceInfo; + +/** + * @brief 设置外部扩展Dev ID + * @brief 采用键、值对的方式来进行数据传输 + * @return 不合法的 ‘extKey’ 会返回 NO + */ ++ (BOOL)setExtDevID:(NSString *)extKey value:(NSString *)extValue; + +/** + * @brief 设置 HEVC 外部解码器工厂实例 + */ ++ (void)setExternalDecoderFactory:(id)decoderFactory; + +/** + * 启动 NTP 校时服务 + * + * @return 0:启动成功;< 0:启动失败 + */ ++ (NSInteger)updateNetworkTime; + +/** + * 获取 NTP 时间戳(毫秒),请在收到 onUpdateNetworkTime 回调后使用 + * + * @return NTP 时间戳(毫秒),若返回 0:未启动 NTP 校时或校时失败,请重启校时 + */ ++ (NSInteger)getNetworkTimestamp; + +@end diff --git 
a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayConfig.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayConfig.h new file mode 100644 index 0000000..c86260b --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayConfig.h @@ -0,0 +1,103 @@ +/* + * Module: TXLivePlayConfig @ TXLiteAVSDK + * + * Function: 腾讯云直播播放器的参数配置模块 + * + * Version: <:Version:> + */ + +#import + +/// @defgroup TXLivePlayConfig_ios TXLivePlayConfig +/// 腾讯云直播播放器的参数配置模块 +/// @{ + +/** + * 腾讯云直播播放器的参数配置模块 + * + * 主要负责 TXLivePlayer 对应的参数设置,其中绝大多数设置项在播放开始之后再设置是无效的。 + */ +@interface TXLivePlayConfig : NSObject + +///////////////////////////////////////////////////////////////////////////////// +// +// 常用设置项 +// +///////////////////////////////////////////////////////////////////////////////// + +///【字段含义】播放器缓存时间,单位秒,取值需要大于0,默认值:5 +@property(nonatomic, assign) float cacheTime; + +///【字段含义】是否自动调整播放器缓存时间,默认值:YES +/// YES:启用自动调整,自动调整的最大值和最小值可以分别通过修改 maxCacheTime 和 minCacheTime 来设置 +/// NO:关闭自动调整,采用默认的指定缓存时间(1s),可以通过修改 cacheTime 来调整缓存时间 +@property(nonatomic, assign) BOOL bAutoAdjustCacheTime; + +///【字段含义】播放器缓存自动调整的最大时间,单位秒,取值需要大于0,默认值:5 +@property(nonatomic, assign) float maxAutoAdjustCacheTime; + +///【字段含义】播放器缓存自动调整的最小时间,单位秒,取值需要大于0,默认值为1 +@property(nonatomic, assign) float minAutoAdjustCacheTime; + +///【字段含义】播放器视频卡顿报警阈值,单位毫秒 +///【推荐取值】800 +///【特别说明】只有渲染间隔超过这个阈值的卡顿才会有 PLAY_WARNING_VIDEO_PLAY_LAG 通知 +@property(nonatomic, assign) int videoBlockThreshold; + +///【字段含义】播放器遭遇网络连接断开时 SDK 默认重试的次数,取值范围1 - 10,默认值:3。 +@property(nonatomic, assign) int connectRetryCount; + +///【字段含义】网络重连的时间间隔,单位秒,取值范围3 - 30,默认值:3。 +@property(nonatomic, assign) int connectRetryInterval; + +///【字段含义】是否开启回声消除, 默认值为 NO +@property(nonatomic, assign) BOOL enableAEC; + +///【字段含义】是否开启消息通道, 默认值为 NO +@property(nonatomic, assign) BOOL enableMessage; + +///【字段含义】是否开启 MetaData 数据回调,默认值为 NO。 +/// YES:SDK 通过 EVT_PLAY_GET_METADATA 消息抛出视频流的 MetaData 数据; +/// NO:SDK 不抛出视频流的 MetaData 数据。 +/// 标准直播流都会在最开始的阶段有一个 MetaData 
数据头,该数据头支持定制。 +/// 您可以通过 TXLivePushConfig 中的 metaData 属性设置一些自定义数据,再通过 TXLivePlayListener 中的 +/// onPlayEvent(EVT_PLAY_GET_METADATA) 消息接收到这些数据。 +///【特别说明】每条音视频流中只能设置一个 MetaData 数据头,除非断网重连,否则 TXLivePlayer 的 EVT_PLAY_GET_METADATA 消息也只会收到一次。 +@property(nonatomic, assign) BOOL enableMetaData; + +///【字段含义】是否开启 HTTP 头信息回调,默认值为 @“” +/// HTTP 响应头中除了“content-length”、“content-type”等标准字段,不同云服务商还可能会添加一些非标准字段。 +/// 比如腾讯云会在直播 CDN 的 HTTP-FLV 格式的直播流中增加 “X-Tlive-SpanId” 响应头,并在其中设置一个随机字符串,用来唯一标识一次直播。 +/// +/// 如果您在使用腾讯云的直播 CDN,可以设置 flvSessionKey 为 @“X-Tlive-SpanId”,SDK 会在 HTTP 响应头里解析这个字段, +/// 并通过 TXLivePlayListener 中的 onPlayEvent(EVT_PLAY_GET_FLVSESSIONKEY) 事件通知给您的 App。 +/// +///【特别说明】每条音视频流中只能解析一个 flvSessionKey,除非断网重连,否则 EVT_PLAY_GET_FLVSESSIONKEY 只会抛送一次。 +@property(nonatomic, copy) NSString* flvSessionKey; + +///【字段含义】视频渲染对象回调的视频格式,默认值:kCVPixelFormatType_420YpCbCr8Planar +///【特别说明】支持:kCVPixelFormatType_420YpCbCr8Planar 和 kCVPixelFormatType_420YpCbCr8BiPlanarFullRange +@property(nonatomic, assign) OSType playerPixelFormatType; + + +///////////////////////////////////////////////////////////////////////////////// +// +// 待废弃设置项 +// +///////////////////////////////////////////////////////////////////////////////// + +///【字段含义】是否开启就近选路,待废弃,默认值:YES +@property(nonatomic, assign) BOOL enableNearestIP; + +///【字段含义】RTMP 传输通道的类型,待废弃,默认值为:RTMP_CHANNEL_TYPE_AUTO +@property (nonatomic, assign) int rtmpChannelType; + +///【字段含义】视频缓存目录,点播 MP4、HLS 有效 +@property NSString *cacheFolderPath; +///【字段含义】最多缓存文件个数,默认值:0 +@property int maxCacheItems; +///【字段含义】自定义 HTTP Headers +@property NSDictionary *headers; + +@end +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayListener.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayListener.h new file mode 100644 index 0000000..03e8629 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayListener.h @@ -0,0 +1,31 @@ +/* + * Module: TXLivePlayListener @ TXLiteAVSDK + * + * Function: 腾讯云直播播放的回调通知 + * + * Version: 
<:Version:> + */ + +#import +#import "TXLiveSDKTypeDef.h" + +/// @defgroup TXLivePlayListener_ios TXLivePlayListener +/// 腾讯云直播播放的回调通知 +/// @{ +@protocol TXLivePlayListener + +/** + * 直播事件通知 + * @param EvtID 参见 TXLiveSDKEventDef.h + * @param param 参见 TXLiveSDKTypeDef.h + */ +- (void)onPlayEvent:(int)EvtID withParam:(NSDictionary *)param; + +/** + * 网络状态通知 + * @param param 参见 TXLiveSDKTypeDef.h + */ +- (void)onNetStatus:(NSDictionary *)param; + +@end +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayer.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayer.h new file mode 100644 index 0000000..80f5b16 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLivePlayer.h @@ -0,0 +1,373 @@ +/* + * + * Module: TXLivePlayer @ TXLiteAVSDK + * + * Function: 腾讯云直播播放器 + * + * Version: <:Version:> + */ + +#import +#import "TXLiveSDKTypeDef.h" +#import "TXLivePlayListener.h" +#import "TXLivePlayConfig.h" +#import "TXVideoCustomProcessDelegate.h" +#import "TXLiveRecordTypeDef.h" +#import "TXLiveRecordListener.h" +#import "TXAudioRawDataDelegate.h" + +/// @defgroup TXLivePlayer_ios TXLivePlayer +/// 腾讯云直播播放器接口类 +/// @{ + +/** + * 支持的直播和点播类型 + * + * @note 新版本的点播 SDK,推荐参考 TXVodPlayer.h + */ +typedef NS_ENUM(NSInteger, TX_Enum_PlayType) { + /// RTMP 直播 + PLAY_TYPE_LIVE_RTMP = 0, + /// FLV 直播 + PLAY_TYPE_LIVE_FLV = 1, + /// FLV 点播 + PLAY_TYPE_VOD_FLV = 2, + /// HLS 点播 + PLAY_TYPE_VOD_HLS = 3, + /// MP4点播 + PLAY_TYPE_VOD_MP4 = 4, + /// RTMP 直播加速播放 + PLAY_TYPE_LIVE_RTMP_ACC = 5, + /// 本地视频文件 + PLAY_TYPE_LOCAL_VIDEO = 6, +}; + + +/** + * 视频播放器 + * + * 主要负责将直播流的音视频画面进行解码和本地渲染,包含如下技术特点: + * - 针对腾讯云的拉流地址,可使用低延时拉流,实现直播连麦等相关场景。 + * - 针对腾讯云的拉流地址,可使用直播时移功能,能够实现直播观看与时移观看的无缝切换。 + * - 支持自定义的音视频数据处理,让您可以根据项目需要处理直播流中的音视频数据后,进行渲染以及播放。 + */ +@interface TXLivePlayer : NSObject + +///////////////////////////////////////////////////////////////////////////////// +// +// (一)SDK 基础函数 +// 
+///////////////////////////////////////////////////////////////////////////////// + +/// @name SDK 基础函数 +/// @{ +/** + * 1.1 设置播放回调,见 “TXLivePlayListener.h” 文件中的详细定义 + */ +@property(nonatomic, weak) id delegate; + +/** + * 1.2 设置视频处理回调,见 “TXVideoCustomProcessDelegate.h” 文件中的详细定义 + */ +@property(nonatomic, weak) id videoProcessDelegate; + +/** + * 1.3 设置音频处理回调,见 “TXAudioRawDataDelegate.h” 文件中的详细定义 + */ +@property(nonatomic, weak) id audioRawDataDelegate; + +/** + * 1.4 是否开启硬件加速,默认值:NO + */ +@property(nonatomic, assign) BOOL enableHWAcceleration; + +/** + * 1.5 设置 TXLivePlayConfig 播放配置项,见 “TXLivePlayConfig.h” 文件中的详细定义 + */ +@property(nonatomic, copy) TXLivePlayConfig *config; + +/** + * 1.6 设置短视频录制回调,见 “TXLiveRecordListener.h” 文件中的详细定义 + */ +@property (nonatomic, weak) id recordDelegate; + +/** + * 1.7 startPlay 后是否立即播放,默认 YES,只有点播有效 + */ +@property (nonatomic) BOOL isAutoPlay; + + +/// @} + +///////////////////////////////////////////////////////////////////////////////// +// +// (二)播放基础接口 +// +///////////////////////////////////////////////////////////////////////////////// + +/// @name 播放基础接口 +/// @{ +/** + * 2.1 创建 Video 渲染 View,该控件承载着视频内容的展示。 + * + * 变更历史:1.5.2版本将参数 frame 废弃,设置此参数无效,控件大小与参数 view 的大小保持一致,如需修改控件的大小及位置,请调整父 view 的大小及位置。 参考文档:[绑定渲染界面](https://www.qcloud.com/doc/api/258/4736#step-3.3A-.E7.BB.91.E5.AE.9A.E6.B8.B2.E6.9F.93.E7.95.8C.E9.9D.A2) + * + * @param frame Widget 在父 view 中的 frame + * @param view 父 view + * @param idx Widget 在父 view 上 的层级位置 + */ +- (void)setupVideoWidget:(CGRect)frame containView:(TXView *)view insertIndex:(unsigned int)idx; + +/* + * 修改 VideoWidget frame + * 变更历史:1.5.2版本将此方法废弃,调用此方法无效,如需修改控件的大小及位置,请调整父 view 的大小及位置 + * 参考文档:https://www.qcloud.com/doc/api/258/4736#step-3.3A-.E7.BB.91.E5.AE.9A.E6.B8.B2.E6.9F.93.E7.95.8C.E9.9D.A2 + */ +//- (void)resetVideoWidgetFrame:(CGRect)frame; + +/** + * 2.2 移除 Video 渲染 Widget + */ +- (void)removeVideoWidget; + +/** + * 2.3 启动从指定 URL 播放 RTMP 音视频流 + * + * @param url 完整的 
URL(如果播放的是本地视频文件,这里传本地视频文件的完整路径) + * @param playType 播放类型 + * @return 0表示成功,其它为失败 + */ +- (int)startPlay:(NSString *)url type:(TX_Enum_PlayType)playType; + +/** + * 2.4 停止播放音视频流 + * + * @return 0:成功;其它:失败 + */ +- (int)stopPlay; + +/** + * 2.5 是否正在播放 + * + * @return YES 拉流中,NO 没有拉流 + */ +- (BOOL)isPlaying; + +/** + * 2.6 暂停播放 + * + * 适用于点播,直播(此接口会暂停数据拉流,不会销毁播放器,暂停后,播放器会显示最后一帧数据图像) + */ +- (void)pause; + +/** + * 2.6 继续播放,适用于点播,直播 + */ +- (void)resume; + +/// @} + +///////////////////////////////////////////////////////////////////////////////// +// +// (三)视频相关接口 +// +///////////////////////////////////////////////////////////////////////////////// + +/// @name 视频相关接口 +/// @{ +/** + * 3.1 设置画面的方向 + * + * @param rotation 方向 + * @see TX_Enum_Type_HomeOrientation + */ +- (void)setRenderRotation:(TX_Enum_Type_HomeOrientation)rotation; + +/** + * 3.2 设置画面的裁剪模式 + * + * @param renderMode 裁剪 + * @see TX_Enum_Type_RenderMode + */ +- (void)setRenderMode:(TX_Enum_Type_RenderMode)renderMode; + +/** + * 3.3 截屏 + * + * @param snapshotCompletionBlock 通过回调返回当前图像 + */ +- (void)snapshot:(void (^)(TXImage *))snapshotCompletionBlock; + +/** + * 3.4 获取当前渲染帧 pts + * + * @return 0:当前未处于正在播放状态(例如:未起播) + * >0:当前渲染视频帧的 pts,处于正在播放状态 (单位: 毫秒) + */ +- (uint64_t)getCurrentRenderPts; + +/// @} + +///////////////////////////////////////////////////////////////////////////////// +// +// (四)音频相关接口 +// +///////////////////////////////////////////////////////////////////////////////// + +/// @name 音频相关接口 +/// @{ +/** + * 4.1 设置静音 + */ +- (void)setMute:(BOOL)bEnable; + +/** + * 4.2 设置音量 + * + * @param volume 音量大小,取值范围0 - 100 + */ +- (void)setVolume:(int)volume; + +#if TARGET_OS_IPHONE +/** + * 4.3 设置声音播放模式(切换扬声器,听筒) + * @param audioRoute 声音播放模式 + */ ++ (void)setAudioRoute:(TXAudioRouteType)audioRoute; +#endif + +/** + * 4.4 设置音量大小回调接口 + * + * @param volumeEvaluationListener 音量大小回调接口,音量取值范围0 - 100 + */ +- (void)setAudioVolumeEvaluationListener:(void(^)(int))volumeEvaluationListener; + +/** + * 4.5 
启用音量大小提示 + * + * 开启后会在 volumeEvaluationListener 中获取到 SDK 对音量大小值的评估。 + * + * @param interval 决定了 volumeEvaluationListener 回调的触发间隔,单位为ms,最小间隔为100ms,如果小于等于0则会关闭回调,建议设置为300ms; + */ +- (void)enableAudioVolumeEvaluation:(NSUInteger)interval; + +/// @} + +///////////////////////////////////////////////////////////////////////////////// +// +// (五)直播时移相关接口 +// +///////////////////////////////////////////////////////////////////////////////// + +/// @name 直播时移相关接口 +/// @{ +/** + * 5.1 直播时移准备,拉取该直播流的起始播放时间。 + * + * 使用时移功能需在播放开始后调用此方法,否则时移失败。时移的使用请参考文档 [超级播放器](https://cloud.tencent.com/document/product/881/20208#.E6.97.B6.E7.A7.BB.E6.92.AD.E6.94.BE) + * + * @warning 非腾讯云直播地址不能时移 + * + * @param domain 时移域名 + * @param bizId 流 bizId + * + * @return 0:OK;-1:无播放地址;-2:appId 未配置 + */ +- (int)prepareLiveSeek:(NSString*)domain bizId:(NSInteger)bizId; + +/** + * 5.2 停止时移播放,返回直播 + * + * @return 0:成功;其它:失败 + */ +- (int)resumeLive; + +#if TARGET_OS_IPHONE +/** + * 5.3 播放跳转到音视频流某个时间 + * @param time 流时间,单位为秒 + * @return 0:成功;其它:失败 + */ +- (int)seek:(float)time; +#endif + +/// @} + +///////////////////////////////////////////////////////////////////////////////// +// +// (六)视频录制相关接口 +// +///////////////////////////////////////////////////////////////////////////////// + +/// @name 视频录制相关接口 +/// @{ +#if TARGET_OS_IPHONE +/** + * 6.1 开始录制短视频 + * + * @param recordType 参见 TXRecordType 定义 + * @return 0:成功;1:正在录制短视频;-2:videoRecorder 初始化失败。 + */ +- (int)startRecord:(TXRecordType)recordType; + +/** + * 6.2 结束录制短视频 + * + * @return 0:成功;1:不存在录制任务;-2:videoRecorder 未初始化。 + */ +- (int)stopRecord; + +/** + * 6.3 设置播放速率 + * + * @param rate 正常速度为1.0;小于为慢速;大于为快速。最大建议不超过2.0 + */ +- (void)setRate:(float)rate; +#endif + +/// @} + +///////////////////////////////////////////////////////////////////////////////// +// +// (七)更多实用接口 +// +///////////////////////////////////////////////////////////////////////////////// + +/// @name 更多实用接口 +/// @{ +/** + * 7.1 设置状态浮层 view 在渲染 view 上的边距 + * + * @param margin 边距 + */ 
+- (void)setLogViewMargin:(TXEdgeInsets)margin; + +/** + * 7.2 是否显示播放状态统计及事件消息浮层 view + * + * @param isShow 是否显示 + */ +- (void)showVideoDebugLog:(BOOL)isShow; + +/** + * 7.3 FLV 直播无缝切换 + * + * @param playUrl 播放地址 + * @return 0:成功;其它:失败 + * @warning playUrl 必须是当前播放直播流的不同清晰度,切换到无关流地址可能会失败 + */ +- (int)switchStream:(NSString *)playUrl; + +/** + * 7.4 调用实验性 API 接口 + * + * @note 该接口用于调用一些实验性功能 + * @param jsonStr 接口及参数描述的 JSON 字符串 + */ +- (void)callExperimentalAPI:(NSString*)jsonStr; + +/// @} + +@end +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveRecordListener.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveRecordListener.h new file mode 100644 index 0000000..fd86b08 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveRecordListener.h @@ -0,0 +1,27 @@ +#import "TXLiveRecordTypeDef.h" + + +/** + * 短视频录制回调定义 + */ +@protocol TXLiveRecordListener +@optional + +/** + * 短视频录制进度 + */ +-(void) onRecordProgress:(NSInteger)milliSecond; + +/** + * 短视频录制完成 + */ +-(void) onRecordComplete:(TXRecordResult*)result; + +/** + * 短视频录制事件通知 + */ +-(void) onRecordEvent:(NSDictionary*)evt; + +@end + + diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveRecordTypeDef.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveRecordTypeDef.h new file mode 100644 index 0000000..7159c30 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveRecordTypeDef.h @@ -0,0 +1,36 @@ +#import +#import "TXLiveSDKTypeDef.h" + +/// PlayRecord 录制类型定义 +typedef NS_ENUM(NSInteger, TXRecordType) +{ + ///视频源为正在播放的视频流 + RECORD_TYPE_STREAM_SOURCE = 1, +}; + + + +/// 录制结果错误码定义 +typedef NS_ENUM(NSInteger, TXRecordResultCode) +{ + /// 录制成功(业务层主动结束录制) + RECORD_RESULT_OK = 0, + /// 录制成功(sdk自动结束录制,可能原因:1,app进入后台,2,app被闹钟或电话打断,3,网络断连接) + RECORD_RESULT_OK_INTERRUPT = 1, + /// 录制失败 + RECORD_RESULT_FAILED = 1001, +}; + + +/// 录制结果 +@interface TXRecordResult : NSObject +/// 错误码 +@property (nonatomic, assign) TXRecordResultCode retCode; +/// 错误描述信息 +@property 
(nonatomic, strong) NSString* descMsg; +/// 视频文件path +@property (nonatomic, strong) NSString* videoPath; +/// 视频封面 +@property (nonatomic, strong) TXImage* coverImage; +@end + diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveSDKEventDef.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveSDKEventDef.h new file mode 100644 index 0000000..6c6b0af --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveSDKEventDef.h @@ -0,0 +1,106 @@ +#ifndef __TX_LIVE_SDK_EVENT_DEF_H__ +#define __TX_LIVE_SDK_EVENT_DEF_H__ + +#include "TXLiteAVCode.h" + +enum EventID +{ + /********************************************************************************** + * 推流事件列表 + **********************************************************************************/ + PUSH_EVT_CONNECT_SUCC = EVT_RTMP_PUSH_CONNECT_SUCC, ///< 直播: 已经连接RTMP推流服务器 + PUSH_EVT_PUSH_BEGIN = EVT_RTMP_PUSH_BEGIN, ///< 直播: 已经与RTMP服务器握手完毕,开始推流 + PUSH_EVT_OPEN_CAMERA_SUCC = EVT_CAMERA_START_SUCC, ///< 打开摄像头成功 + PUSH_EVT_CHANGE_RESOLUTION = EVT_UP_CHANGE_RESOLUTION, ///< 推流动态调整分辨率 + PUSH_EVT_CHANGE_BITRATE = EVT_UP_CHANGE_BITRATE, ///< 推流动态调整码率 + PUSH_EVT_FIRST_FRAME_AVAILABLE = EVT_FIRST_FRAME_AVAILABLE, ///< 首帧画面采集完成 + PUSH_EVT_START_VIDEO_ENCODER = EVT_START_VIDEO_ENCODER, ///< 编码器启动 + PUSH_EVT_ROOM_IN = EVT_ROOM_ENTER, ///< 已经在webrtc房间里面,进房成功后通知 + PUSH_EVT_ROOM_IN_FAILED = EVT_ROOM_ENTER_FAILED, ///< 进房失败通知 + PUSH_EVT_ROOM_OUT = EVT_ROOM_EXIT, ///< 不在webrtc房间里面,进房失败或者中途退出房间时通知 + PUSH_EVT_ROOM_USERLIST = EVT_ROOM_USERLIST, ///< 下发webrtc房间成员列表(不包括自己) + PUSH_EVT_ROOM_NEED_REENTER = EVT_ROOM_NEED_REENTER, ///< WiFi切换到4G会触发断线重连,此时需要重新进入webrtc房间(拉取最优的服务器地址) + PUSH_EVT_ROOM_USER_ENTER = EVT_ROOM_USER_ENTER, ///< 进房通知 + PUSH_EVT_ROOM_USER_EXIT = EVT_ROOM_USER_EXIT, ///< 退房通知 + PUSH_EVT_ROOM_USER_VIDEO_STATE = EVT_ROOM_USER_VIDEO_STATE, ///< 视频状态位变化通知 + PUSH_EVT_ROOM_USER_AUDIO_STATE = EVT_ROOM_USER_AUDIO_STATE, ///< 音频状态位变化通知 + + PUSH_ERR_OPEN_CAMERA_FAIL = ERR_CAMERA_START_FAIL, ///< 打开摄像头失败 + 
PUSH_ERR_OPEN_MIC_FAIL = ERR_MIC_START_FAIL, ///< 打开麦克风失败 + PUSH_ERR_VIDEO_ENCODE_FAIL = ERR_VIDEO_ENCODE_FAIL, ///< 视频编码失败 + PUSH_ERR_AUDIO_ENCODE_FAIL = ERR_AUDIO_ENCODE_FAIL, ///< 音频编码失败 + PUSH_ERR_UNSUPPORTED_RESOLUTION = ERR_UNSUPPORTED_RESOLUTION, ///< 不支持的视频分辨率 + PUSH_ERR_UNSUPPORTED_SAMPLERATE = ERR_UNSUPPORTED_SAMPLERATE, ///< 不支持的音频采样率 + PUSH_ERR_NET_DISCONNECT = ERR_RTMP_PUSH_NET_DISCONNECT, ///< 网络断连,且经多次重连抢救无效,可以放弃治疗,更多重试请自行重启推流 + PUSH_ERR_AUDIO_SYSTEM_NOT_WORK = -1308, ///< 系统异常,录音失败 + PUSH_ERR_INVALID_ADDRESS = ERR_RTMP_PUSH_INVALID_ADDRESS, ///< 推流地址非法 + + PUSH_WARNING_NET_BUSY = WARNING_NET_BUSY, ///< 网络状况不佳:上行带宽太小,上传数据受阻 + PUSH_WARNING_RECONNECT = WARNING_RTMP_SERVER_RECONNECT, ///< 网络断连, 已启动自动重连 (自动重连连续失败超过三次会放弃) + PUSH_WARNING_HW_ACCELERATION_FAIL = WARNING_HW_ENCODER_START_FAIL, ///< 硬编码启动失败,采用软编码 + PUSH_WARNING_VIDEO_ENCODE_FAIL = 1104, ///< 视频编码失败,非致命错,内部会重启编码器 + PUSH_WARNING_BEAUTYSURFACE_VIEW_INIT_FAIL = 1105, ///< 视频编码码率异常,警告 + PUSH_WARNING_VIDEO_ENCODE_BITRATE_OVERFLOW = 1106, ///< 视频编码码率异常,警告 + PUSH_WARNING_DNS_FAIL = WARNING_RTMP_DNS_FAIL, ///< RTMP -DNS解析失败 + PUSH_WARNING_SEVER_CONN_FAIL = WARNING_RTMP_SEVER_CONN_FAIL, ///< RTMP服务器连接失败 + PUSH_WARNING_SHAKE_FAIL = WARNING_RTMP_SHAKE_FAIL, ///< RTMP服务器握手失败 + PUSH_WARNING_SERVER_DISCONNECT = WARNING_RTMP_SERVER_BREAK_CONNECT, ///< RTMP服务器主动断开,请检查推流地址的合法性或防盗链有效期 + PUSH_WARNING_READ_WRITE_FAIL = WARNING_RTMP_READ_WRITE_FAIL, ///< RTMP 读/写失败,将会断开连接。 + PUSH_WARNING_HEVC_ENCODE_NOT_SUPPORT = ERR_HEVC_ENCODE_NOT_SUPPORT, ///< 265编码判断不支持 + + /*内部事件*/INNER_EVT_SET_BITRATE_4_SCREEN_CAPTURE = 100001, ///< 动态设置录屏编码码率 + /*内部事件*/INNER_EVT_BGM_PLAY_FINISH = 100002, ///< BGM播放完毕 + + + + /********************************************************************************** + * 播放事件列表 + **********************************************************************************/ + PLAY_EVT_CONNECT_SUCC = EVT_PLAY_LIVE_STREAM_CONNECT_SUCC, ///< 直播,已经连接RTMP拉流服务器 + PLAY_EVT_RTMP_STREAM_BEGIN = EVT_PLAY_LIVE_STREAM_BEGIN, 
///< 直播,已经与RTMP服务器握手完毕,开始拉流 + PLAY_EVT_RCV_FIRST_I_FRAME = EVT_RENDER_FIRST_I_FRAME, ///< 渲染首个视频数据包(IDR) + PLAY_EVT_RCV_FIRST_AUDIO_FRAME = EVT_AUDIO_JITTER_STATE_FIRST_PLAY, ///< 音频首次播放 + PLAY_EVT_PLAY_BEGIN = EVT_VIDEO_PLAY_BEGIN, ///< 视频播放开始 + PLAY_EVT_PLAY_PROGRESS = EVT_VIDEO_PLAY_PROGRESS, ///< 视频播放进度 + PLAY_EVT_PLAY_END = EVT_VIDEO_PLAY_END, ///< 视频播放结束 + PLAY_EVT_PLAY_LOADING = EVT_VIDEO_PLAY_LOADING, ///< 视频播放loading + PLAY_EVT_START_VIDEO_DECODER = EVT_START_VIDEO_DECODER, ///< 解码器启动 + PLAY_EVT_CHANGE_RESOLUTION = EVT_DOWN_CHANGE_RESOLUTION, ///< 视频分辨率改变 + PLAY_EVT_GET_PLAYINFO_SUCC = EVT_GET_VODFILE_MEDIAINFO_SUCC, ///< 获取点播文件信息成功 + PLAY_EVT_CHANGE_ROTATION = EVT_VIDEO_CHANGE_ROTATION, ///< MP4视频旋转角度 + PLAY_EVT_GET_MESSAGE = EVT_PLAY_GET_MESSAGE, ///< 消息事件 + PLAY_EVT_VOD_PLAY_PREPARED = EVT_VOD_PLAY_PREPARED, ///< 点播,视频加载完毕 + PLAY_EVT_VOD_LOADING_END = EVT_VOD_PLAY_LOADING_END, ///< 点播,loading结束 + PLAY_EVT_STREAM_SWITCH_SUCC = EVT_PLAY_LIVE_STREAM_SWITCH_SUCC, ///< 直播,切流成功(切流可以播放不同画面大小的视频) + PLAY_EVT_GET_METADATA = EVT_PLAY_GET_METADATA, ///< TXLivePlayer 接收到视频流中的 metadata 头信息(一条视频流仅触发一次) + PLAY_EVT_GET_FLVSESSIONKEY = EVT_PLAY_GET_FLVSESSIONKEY, ///< TXLivePlayer 接收到http响应头中的 flvSessionKey 信息 + PLAY_EVT_AUDIO_SESSION_INTERRUPT = EVT_AUDIO_SESSION_INTERRUPT, ///< Audio Session Interrupt事件 + + + PLAY_ERR_NET_DISCONNECT = ERR_PLAY_LIVE_STREAM_NET_DISCONNECT, ///< 直播,网络断连,且经多次重连抢救无效,可以放弃治疗,更多重试请自行重启播放 + + PLAY_ERR_GET_RTMP_ACC_URL_FAIL = ERR_GET_RTMP_ACC_URL_FAIL, ///< 直播,获取加速拉流地址失败。这是由于您传给 liveplayer 的加速流地址中没有携带 txTime 和 txSecret 签名,或者是签名计算的不对。出现这个错误时,liveplayer 会放弃拉取加速流转而拉取 CDN 上的视频流,从而导致延迟很大。 + PLAY_ERR_FILE_NOT_FOUND = ERR_FILE_NOT_FOUND, ///< 播放文件不存在 + PLAY_ERR_HEVC_DECODE_FAIL = ERR_HEVC_DECODE_FAIL, ///< H265解码失败 + PLAY_ERR_HLS_KEY = ERR_VOD_DECRYPT_FAIL, ///< HLS解码key获取失败 + PLAY_ERR_GET_PLAYINFO_FAIL = ERR_GET_VODFILE_MEDIAINFO_FAIL, ///< 获取点播文件信息失败 + PLAY_ERR_STREAM_SWITCH_FAIL = ERR_PLAY_LIVE_STREAM_SWITCH_FAIL, ///< 直播,切流失败(切流可以播放不同画面大小的视频) + + 
PLAY_WARNING_VIDEO_DECODE_FAIL = WARNING_VIDEO_FRAME_DECODE_FAIL, ///< 当前视频帧解码失败 + PLAY_WARNING_AUDIO_DECODE_FAIL = WARNING_AUDIO_FRAME_DECODE_FAIL, ///< 当前音频帧解码失败 + PLAY_WARNING_RECONNECT = WARNING_LIVE_STREAM_SERVER_RECONNECT, ///< 网络断连, 已启动自动重连 (自动重连连续失败超过三次会放弃) + PLAY_WARNING_RECV_DATA_LAG = WARNING_RECV_DATA_LAG, ///< 网络来包不稳:可能是下行带宽不足,或由于主播端出流不均匀 + PLAY_WARNING_VIDEO_PLAY_LAG = WARNING_VIDEO_PLAY_LAG, ///< 当前视频播放出现卡顿(用户直观感受) + PLAY_WARNING_HW_ACCELERATION_FAIL = WARNING_HW_DECODER_START_FAIL, ///< 硬解启动失败,采用软解 + PLAY_WARNING_VIDEO_DISCONTINUITY = 2107, ///< 当前视频帧不连续,可能丢帧 + PLAY_WARNING_FIRST_IDR_HW_DECODE_FAIL = WARNING_VIDEO_DECODER_HW_TO_SW, ///< 当前流硬解第一个I帧失败,SDK自动切软解 + PLAY_WARNING_DNS_FAIL = WARNING_RTMP_DNS_FAIL, ///< RTMP -DNS解析失败 + PLAY_WARNING_SEVER_CONN_FAIL = WARNING_RTMP_SEVER_CONN_FAIL, ///< RTMP服务器连接失败 + PLAY_WARNING_SHAKE_FAIL = WARNING_RTMP_SHAKE_FAIL, ///< RTMP服务器握手失败 + PLAY_WARNING_SERVER_DISCONNECT = WARNING_RTMP_SERVER_BREAK_CONNECT, ///< RTMP服务器主动断开 + PLAY_WARNING_READ_WRITE_FAIL = WARNING_RTMP_READ_WRITE_FAIL, ///< RTMP 读/写失败,将会断开连接。 + + /*UGC*/UGC_WRITE_FILE_FAIL = 4001, ///< UGC写文件失败 +}; + +#endif // __TX_LIVE_SDK_TYPE_DEF_H__ diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveSDKTypeDef.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveSDKTypeDef.h new file mode 100644 index 0000000..f37a6d2 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXLiveSDKTypeDef.h @@ -0,0 +1,357 @@ +#ifndef __TX_LIVE_SDK_TYPE_DEF_H__ +#define __TX_LIVE_SDK_TYPE_DEF_H__ + +#include "TXLiveSDKEventDef.h" +#import + +#if TARGET_OS_IPHONE +#import +typedef UIView TXView; +typedef UIImage TXImage; +typedef UIEdgeInsets TXEdgeInsets; +#elif TARGET_OS_MAC +#import +typedef NSView TXView; +typedef NSImage TXImage; +typedef NSEdgeInsets TXEdgeInsets; +#endif + + + +///////////////////////////////////////////////////////////////////////////////// +// +// 【视频相关枚举值定义】 +// 
+///////////////////////////////////////////////////////////////////////////////// + +/** + * 1.1 视频分辨率 + * + * 在普通模式下,TXLivePusher 只支持三种固定的分辨率,即:360 × 640、540 × 960 以及 720 × 1280。 + * + *【如何横屏推流】 + * 如果希望使用 640 × 360、960 × 540、1280 × 720 这样的横屏分辨率,需要设置 TXLivePushConfig 中的 homeOrientation 属性, + * 并使用 TXLivePusher 中的 setRenderRotation 接口进行画面旋转。 + * + *【自定义分辨率】 + * 如果希望使用其他分辨率,可以设置 TXLivePushConfig 中的 customModeType 为 CUSTOM_MODE_VIDEO_CAPTURE, + * 自己采集 SampleBuffer 送给 TXLivePusher 的 sendVideoSampleBuffer 接口。 + * + *【建议的分辨率】 + * 手机直播场景下最常用的分辨率为 9:16 的竖屏分辨率 540 × 960。 + * 从清晰的角度,540 × 960 比 360 × 640 要清晰,同时跟 720 × 1280 相当。 + * 从性能的角度,540 × 960 可以避免前置摄像头开启 720 × 1280 的采集分辨率,对于美颜开销很大的场景能节省不少的计算量。 + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_VideoResolution) { + + /// 竖屏分辨率,宽高比为 9:16 + VIDEO_RESOLUTION_TYPE_360_640 = 0, ///< 建议码率 800kbps + VIDEO_RESOLUTION_TYPE_540_960 = 1, ///< 建议码率 1200kbps + VIDEO_RESOLUTION_TYPE_720_1280 = 2, ///< 建议码率 1800kbps + VIDEO_RESOLUTION_TYPE_1080_1920 = 30, ///< 建议码率 3000kbps + + + /// 如下均为内建分辨率,为 SDK 内部使用,不支持通过接口进行设置 + VIDEO_RESOLUTION_TYPE_640_360 = 3, + VIDEO_RESOLUTION_TYPE_960_540 = 4, + VIDEO_RESOLUTION_TYPE_1280_720 = 5, + VIDEO_RESOLUTION_TYPE_1920_1080 = 31, + + VIDEO_RESOLUTION_TYPE_320_480 = 6, + VIDEO_RESOLUTION_TYPE_180_320 = 7, + VIDEO_RESOLUTION_TYPE_270_480 = 8, + VIDEO_RESOLUTION_TYPE_320_180 = 9, + VIDEO_RESOLUTION_TYPE_480_270 = 10, + + VIDEO_RESOLUTION_TYPE_240_320 = 11, + VIDEO_RESOLUTION_TYPE_360_480 = 12, + VIDEO_RESOLUTION_TYPE_480_640 = 13, + VIDEO_RESOLUTION_TYPE_320_240 = 14, + VIDEO_RESOLUTION_TYPE_480_360 = 15, + VIDEO_RESOLUTION_TYPE_640_480 = 16, + + VIDEO_RESOLUTION_TYPE_480_480 = 17, + VIDEO_RESOLUTION_TYPE_270_270 = 18, + VIDEO_RESOLUTION_TYPE_160_160 = 19, +}; + +/** + * 1.2 画面质量挡位 + * + * 如果您希望调整直播的编码参数,建议您直接使用 TXLivePusher 提供的 setVideoQuality 接口。 + * 由于视频编码参数中的分辨率,码率和帧率对最终效果都有着复杂的影响,如果您之前没有相关操作经验,不建议直接修改这些编码参数。 + * 我们在 setVideoQuality 接口中提供了如下几个挡位供您选择: + * + * 1. 
标清:采用 360 × 640 的分辨率,码率调控范围 300kbps - 800kbps,关闭网络自适应时的码率为 800kbps,适合网络较差的直播环境。 + * 2. 高清:采用 540 × 960 的分辨率,码率调控范围 600kbps - 1500kbps,关闭网络自适应时的码率为 1200kbps,常规手机直播的推荐挡位。 + * 3. 超清:采用 720 × 1280 的分辨率,码率调控范围 600kbps - 1800kbps,关闭网络自适应时的码率为 1800kbps,能耗高,但清晰度较标清提升并不明显。 + * 4. 连麦(大主播):主播从原来的“推流状态”进入“连麦状态”后,可以通过 setVideoQuality 接口调整自 MAIN_PUBLISHER 挡位。 + * 5. 连麦(小主播):观众从原来的“播放状态”进入“连麦状态”后,可以通过 setVideoQuality 接口调整自 SUB_PUBLISHER 挡位。 + * 6. 视频通话:该选项后续会逐步废弃,如果您希望实现纯视频通话而非直播功能,推荐使用腾讯云 [TRTC](https://cloud.tencent.com/product/trtc) 服务。 + * + * 【推荐设置】如果您对整个平台的清晰度要求比较高,推荐使用 setVideoQuality(HIGH_DEFINITION, NO, NO) 的组合。 + * 如果您的主播有很多三四线城市的网络适配要求,推荐使用 setVideoQuality(HIGH_DEFINITION, YES, NO) 的组合。 + * + * @note 在开启硬件加速后,您可能会发现诸如 368 × 640 或者 544 × 960 这样的“不完美”分辨率。 + * 这是由于部分硬编码器要求像素能被 16 整除所致,属于正常现象,您可以通过播放端的填充模式解决“小黑边”问题。 + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_VideoQuality) { + VIDEO_QUALITY_STANDARD_DEFINITION = 1, ///< 标清:采用 360 × 640 的分辨率 + VIDEO_QUALITY_HIGH_DEFINITION = 2, ///< 高清:采用 540 × 960 的分辨率 + VIDEO_QUALITY_SUPER_DEFINITION = 3, ///< 超清:采用 720 × 1280 的分辨率 + VIDEO_QUALITY_ULTRA_DEFINITION = 7, ///< 蓝光:采用 1080 × 1920 的分辨率 + VIDEO_QUALITY_LINKMIC_MAIN_PUBLISHER = 4, ///< 连麦场景下的大主播使用 + VIDEO_QUALITY_LINKMIC_SUB_PUBLISHER = 5, ///< 连麦场景下的小主播(连麦的观众)使用 + VIDEO_QUALITY_REALTIME_VIDEOCHAT = 6, ///< 纯视频通话场景使用(已废弃) +}; + +/** + * 1.3 画面旋转方向 + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_HomeOrientation) { + HOME_ORIENTATION_RIGHT = 0, ///< HOME 键在右边,横屏模式 + HOME_ORIENTATION_DOWN = 1, ///< HOME 键在下面,手机直播中最常见的竖屏直播模式 + HOME_ORIENTATION_LEFT = 2, ///< HOME 键在左边,横屏模式 + HOME_ORIENTATION_UP = 3, ///< HOME 键在上边,竖屏直播(适合小米 MIX2) +}; + +/** + * 1.4 画面填充模式 + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_RenderMode) { + + RENDER_MODE_FILL_SCREEN = 0, ///< 图像铺满屏幕,不留黑边,如果图像宽高比不同于屏幕宽高比,部分画面内容会被裁剪掉。 + RENDER_MODE_FILL_EDGE = 1, ///< 图像适应屏幕,保持画面完整,但如果图像宽高比不同于屏幕宽高比,会有黑边的存在。 +}; + +/** + * 1.5 美颜风格 + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_BeautyStyle) { + BEAUTY_STYLE_SMOOTH = 0, ///< 
光滑,磨皮程度较高,更适合秀场直播类场景下使用。 + BEAUTY_STYLE_NATURE = 1, ///< 自然,磨皮算法会最大限度保留皮肤细节。 + BEAUTY_STYLE_PITU = 2, ///< 由上海优图实验室提供的美颜算法,磨皮效果介于光滑和自然之间,比光滑保留更多皮肤细节,比自然磨皮程度更高。 +}; + +/** + * 1.6 美颜程度,取值范围1 - 9,该枚举值定义了关闭和最大值。 + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_BeautyFilterDepth) { + BEAUTY_FILTER_DEPTH_CLOSE = 0, ///< 关闭美颜 + BEAUTY_FILTER_DEPTH_MAX = 9, ///< 最大美颜强度 +}; + + +/** + * 1.6 网络自适应算法,推荐选项:AUTO_ADJUST_LIVEPUSH_STRATEGY + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_AutoAdjustStrategy) { + AUTO_ADJUST_NONE = -1, ///< 非法数值,用于 SDK 内部做合法性检查 + + AUTO_ADJUST_LIVEPUSH_STRATEGY = 0, ///< 最适合直播模式下的流控算法 + AUTO_ADJUST_LIVEPUSH_RESOLUTION_STRATEGY = 1, ///< 不推荐:SDK 内部会调整视频分辨率,如果有 H5 分享的需求请勿使用 + AUTO_ADJUST_REALTIME_VIDEOCHAT_STRATEGY = 5, ///< 待废弃,请使用腾讯云 TRTC 服务 + + AUTO_ADJUST_BITRATE_STRATEGY_1 = 0, ///< 已经废弃 + AUTO_ADJUST_BITRATE_RESOLUTION_STRATEGY_1 = 1, ///< 已经废弃 + AUTO_ADJUST_BITRATE_STRATEGY_2 = 2, ///< 已经废弃 + AUTO_ADJUST_BITRATE_RESOLUTION_STRATEGY_2 = 3, ///< 已经废弃 + AUTO_ADJUST_REALTIME_BITRATE_STRATEGY = 4, ///< 已经废弃 + AUTO_ADJUST_REALTIME_BITRATE_RESOLUTION_STRATEGY = 5, ///< 已经废弃 +}; + +/** + * 1.7 视频帧的数据格式(未压缩前的) + */ +typedef NS_ENUM(NSInteger, TXVideoType) { + + VIDEO_TYPE_420SP = 1, ///< Android 视频采集格式,PixelFormat.YCbCr_420_SP 17 + VIDEO_TYPE_420YpCbCr = 2, ///< iOS 视频采集格式,kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange + VIDEO_TYPE_420P = 3, ///< yuv420p格式 + VIDEO_TYPE_BGRA8888 = 4, ///< BGRA8888 + VIDEO_TYPE_RGBA8888 = 5, ///< RGBA8888 + VIDEO_TYPE_NV12 = 6, ///< NV12(iOS) +}; + +/** + * 1.8 本地视频预览镜像类型 + * + * iOS 的本地画面提供三种设置模式 + */ +typedef NS_ENUM(NSUInteger, TXLocalVideoMirrorType) { + LocalVideoMirrorType_Auto = 0, ///< 前置摄像头镜像,后置摄像头不镜像 + LocalVideoMirrorType_Enable = 1, ///< 前后置摄像头画面均镜像 + LocalVideoMirrorType_Disable = 2, ///< 前后置摄像头画面均不镜像 +}; + +///////////////////////////////////////////////////////////////////////////////// +// +// 【音频相关枚举值定义】 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 2.1 
音频采样率 + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_AudioSampleRate) { + + AUDIO_SAMPLE_RATE_8000 = 0, ///< 8k采样率 + AUDIO_SAMPLE_RATE_16000 = 1, ///< 16k采样率 + AUDIO_SAMPLE_RATE_32000 = 2, ///< 32k采样率 + AUDIO_SAMPLE_RATE_44100 = 3, ///< 44.1k采样率 + AUDIO_SAMPLE_RATE_48000 = 4, ///< 48k采样率 +}; + +/** + * 2.2 混响类型 + */ +typedef NS_ENUM(NSInteger, TXReverbType) { + REVERB_TYPE_0 = 0, ///< 关闭混响 + REVERB_TYPE_1 = 1, ///< KTV + REVERB_TYPE_2 = 2, ///< 小房间 + REVERB_TYPE_3 = 3, ///< 大会堂 + REVERB_TYPE_4 = 4, ///< 低沉 + REVERB_TYPE_5 = 5, ///< 洪亮 + REVERB_TYPE_6 = 6, ///< 金属声 + REVERB_TYPE_7 = 7, ///< 磁性 +}; + +/** + * 2.3 变声选项 + */ +typedef NS_ENUM(NSInteger, TXVoiceChangerType) { + + VOICECHANGER_TYPE_0 = 0, ///< 关闭变声 + VOICECHANGER_TYPE_1 = 1, ///< 熊孩子 + VOICECHANGER_TYPE_2 = 2, ///< 萝莉 + VOICECHANGER_TYPE_3 = 3, ///< 大叔 + VOICECHANGER_TYPE_4 = 4, ///< 重金属 + VOICECHANGER_TYPE_5 = 5, ///< 感冒 + VOICECHANGER_TYPE_6 = 6, ///< 外国人 + VOICECHANGER_TYPE_7 = 7, ///< 困兽 + VOICECHANGER_TYPE_8 = 8, ///< 死肥仔 + VOICECHANGER_TYPE_9 = 9, ///< 强电流 + VOICECHANGER_TYPE_10 = 10, ///< 重机械 + VOICECHANGER_TYPE_11 = 11, ///< 空灵 +}; + +/** + * 2.4 声音播放模式(音频路由) + * + * 一般手机都有两个扬声器,设置音频路由的作用就是要决定声音从哪个扬声器播放出来。 + * - Speakerphone:扬声器,位于手机底部,声音偏大,适合外放音乐。 + * - Earpiece:听筒,位于手机顶部,声音偏小,适合通话。 + */ +typedef NS_ENUM(NSInteger, TXAudioRouteType) { + AUDIO_ROUTE_SPEAKER = 0, ///< 扬声器,位于手机底部,声音偏大,适合外放音乐。 + AUDIO_ROUTE_RECEIVER = 1, ///< 听筒,位于手机顶部,声音偏小,适合通话。 +}; + +/** + * 2.5 系统音量类型 + * + * 该枚举值用于控制推流过程中使用何种系统音量类型 + */ +typedef NS_ENUM(NSInteger, TXSystemAudioVolumeType) { + SYSTEM_AUDIO_VOLUME_TYPE_AUTO = 0, ///< 默认类型,SDK会自动选择合适的音量类型 + SYSTEM_AUDIO_VOLUME_TYPE_MEDIA = 1, ///< 仅使用媒体音量,SDK不再使用通话音量 + SYSTEM_AUDIO_VOLUME_TYPE_VOIP = 2, ///< 仅使用通话音量,SDK一直使用通话音量 +}; + +/** + * 2.6 推流用网络通道(待废弃) + */ +typedef NS_ENUM(NSInteger, TX_Enum_Type_RTMPChannel) { + + RTMP_CHANNEL_TYPE_AUTO = 0, ///< 自动:推腾讯云使用加速协议,推友商云使用标准 RTMP 协议。 + RTMP_CHANNEL_TYPE_STANDARD = 1, ///< 标准 RTMP 协议 + RTMP_CHANNEL_TYPE_PRIVATE = 2, ///< 腾讯云专属加速协议 
+}; + + +/** + * 2.7 屏幕采集源(用于录屏推流) + */ +#if TARGET_OS_OSX +typedef NS_ENUM(NSInteger, TXCaptureVideoInputSource) { + TXCaptureVideoInputSourceCamera, + TXCaptureVideoInputSourceScreen, + TXCaptureVideoInputSourceWindow +}; +#endif + + + + +///////////////////////////////////////////////////////////////////////////////// +// +// 【状态通知字段名 onNetStatus】 +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * TXLivePushListener 和 TXLivePlayListener 的 onNetStatus() 会以 2s 一次的时间间隔,定时通知网络状态和内部指标, + * 这些数值采用 key-value 的组织格式,其中 key 值的定义如下: + */ + +#define NET_STATUS_CPU_USAGE @"CPU_USAGE" ///> 进程 CPU 占用率 +#define NET_STATUS_CPU_USAGE_D @"CPU_USAGE_DEVICE" ///> 系统 CPU 占用率 + +#define NET_STATUS_VIDEO_WIDTH @"VIDEO_WIDTH" ///> 视频分辨率宽度 +#define NET_STATUS_VIDEO_HEIGHT @"VIDEO_HEIGHT" ///> 视频分辨率高度 +#define NET_STATUS_VIDEO_FPS @"VIDEO_FPS" ///> 视频帧率:也就是视频编码器每秒生产了多少帧画面。 +#define NET_STATUS_VIDEO_GOP @"VIDEO_GOP" ///> 关键帧间隔:即每两个关键帧(I帧)间隔时长,单位:秒。 +#define NET_STATUS_VIDEO_BITRATE @"VIDEO_BITRATE" ///> 视频码率:即视频编码器每秒生产了多少视频数据,单位:kbps。 +#define NET_STATUS_AUDIO_BITRATE @"AUDIO_BITRATE" ///> 音频码率:即音频编码器每秒生产了多少音频数据,单位:kbps。 +#define NET_STATUS_NET_SPEED @"NET_SPEED" ///> 传输速度:即每秒钟发送或接收了多少字节的数据。 + +#define NET_STATUS_VIDEO_CACHE @"VIDEO_CACHE" ///> TXLivePusher:主播端堆积的视频帧数;TXLivePlayer:播放端缓冲的视频总时长。 +#define NET_STATUS_AUDIO_CACHE @"AUDIO_CACHE" ///> TXLivePusher:主播端堆积的音频帧数;TXLivePlayer:播放端缓冲的音频总时长。 +#define NET_STATUS_VIDEO_DROP @"VIDEO_DROP" ///> TXLivePusher:主播端主动丢弃的视频帧数;TXLivePlayer: N/A。 +#define NET_STATUS_AUDIO_DROP @"AUDIO_DROP" ///> 暂未使用 + +#define NET_STATUS_V_DEC_CACHE_SIZE @"V_DEC_CACHE_SIZE" ///> TXLivePlayer:播放端解码器中缓存的视频帧数(Android 端硬解码时存在)。 +#define NET_STATUS_V_SUM_CACHE_SIZE @"V_SUM_CACHE_SIZE" ///> TXLivePlayer:播放端缓冲的总视频帧数,该数值越大,播放延迟越高。 +#define NET_STATUS_AV_PLAY_INTERVAL @"AV_PLAY_INTERVAL" ///> TXLivePlayer:音画同步错位时间(播放),单位 ms,此数值越小,音画同步越好。 +#define NET_STATUS_AV_RECV_INTERVAL @"AV_RECV_INTERVAL" ///> TXLivePlayer:音画同步错位时间(网络),单位 
ms,此数值越小,音画同步越好。 +#define NET_STATUS_AUDIO_CACHE_THRESHOLD @"AUDIO_CACHE_THRESHOLD" ///> TXLivePlayer:音频缓冲时长阈值,缓冲超过该阈值后,播放器会开始调控延时。 +#define NET_STATUS_AUDIO_BLOCK_TIME @"AUDIO_BLOCK_TIME" ///> 拉流专用:音频卡顿时长,单位ms +#define NET_STATUS_AUDIO_INFO @"AUDIO_INFO" ///> 音频信息:包括采样率信息和声道数信息 +#define NET_STATUS_NET_JITTER @"NET_JITTER" ///> 网络抖动:数值越大表示抖动越大,网络越不稳定 +#define NET_STATUS_QUALITY_LEVEL @"NET_QUALITY_LEVEL" ///> 网络质量:0:未定义 1:最好 2:好 3:一般 4:差 5:很差 6:不可用 +#define NET_STATUS_SERVER_IP @"SERVER_IP" ///> 连接的Server IP地址 + + +///////////////////////////////////////////////////////////////////////////////// +// +// 【事件通知字段名 onPushEvent onPlayEvent】 +// +///////////////////////////////////////////////////////////////////////////////// + + +/** + * 腾讯云 LiteAVSDK 通过 TXLivePushListener 中的 onPushEvent(),TXLivePlayListener 中的 onPlayEvent() 向您通知内部错误、警告和事件: + * - 错误:严重且不可恢复的错误,会中断 SDK 的正常逻辑。 + * - 警告:非致命性的提醒和警告,可以不理会。 + * - 事件:SDK 的流程和状态通知,比如开始推流,开始播放,等等。 + * + * 这些数值采用 key-value 的组织格式,其中 key 值的定义如下: + */ +#define EVT_MSG @"EVT_MSG" ///> 事件ID +#define EVT_TIME @"EVT_TIME" ///> 事件发生的UTC毫秒时间戳 +#define EVT_UTC_TIME @"EVT_UTC_TIME" ///> 事件发生的UTC毫秒时间戳(兼容性) +#define EVT_BLOCK_DURATION @"EVT_BLOCK_DURATION" ///> 卡顿时间(毫秒) +#define EVT_PARAM1 @"EVT_PARAM1" ///> 事件参数1 +#define EVT_PARAM2 @"EVT_PARAM2" ///> 事件参数2 +#define EVT_GET_MSG @"EVT_GET_MSG" ///> 消息内容,收到PLAY_EVT_GET_MESSAGE事件时,通过该字段获取消息内容 +#define EVT_PLAY_PROGRESS @"EVT_PLAY_PROGRESS" ///> 点播:视频播放进度 +#define EVT_PLAY_DURATION @"EVT_PLAY_DURATION" ///> 点播:视频总时长 +#define EVT_PLAYABLE_DURATION @"PLAYABLE_DURATION" ///> 点播:视频可播放时长 +#define EVT_PLAY_COVER_URL @"EVT_PLAY_COVER_URL" ///> 点播:视频封面 +#define EVT_PLAY_URL @"EVT_PLAY_URL" ///> 点播:视频播放地址 +#define EVT_PLAY_NAME @"EVT_PLAY_NAME" ///> 点播:视频名称 +#define EVT_PLAY_DESCRIPTION @"EVT_PLAY_DESCRIPTION" ///> 点播:视频简介 + +#define STREAM_ID @"STREAM_ID" + +#endif diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXVideoCustomProcessDelegate.h 
b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXVideoCustomProcessDelegate.h new file mode 100644 index 0000000..98be4fa --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/TXVideoCustomProcessDelegate.h @@ -0,0 +1,51 @@ +// +// TXVideoCustomProcessDelegate.h +// TXRTMPSDK +// +// Created by annidyfeng on 2017/3/20. +// +// + +#import +#if TARGET_OS_IPHONE +#include +#include +#elif TARGET_OS_MAC +#import +#import +#endif + +@protocol TXVideoCustomProcessDelegate +@optional +#pragma mark - Pusher & UGC Record +/** + * 在OpenGL线程中回调,在这里可以进行采集图像的二次处理 + * @param texture 纹理ID + * @param width 纹理的宽度 + * @param height 纹理的高度 + * @return 返回给SDK的纹理 + * 说明:SDK回调出来的纹理类型是GL_TEXTURE_2D,接口返回给SDK的纹理类型也必须是GL_TEXTURE_2D; 该回调在SDK美颜之后. 纹理格式为GL_RGBA + */ +- (GLuint)onPreProcessTexture:(GLuint)texture width:(CGFloat)width height:(CGFloat)height; + +/** + * 在OpenGL线程中回调,可以在这里释放创建的OpenGL资源 + */ +- (void)onTextureDestoryed; + +/** + * 人脸数据回调(增值版且启用了pitu模块才有效) + * @param points 人脸坐标 + * 说明:开启pitu模块必须是打开动效或大眼瘦脸。此回调在onPreProcessTexture:width:height:之前 + */ +- (void)onDetectFacePoints:(NSArray *)points; + +#pragma mark - Player +/** + * 视频渲染对象回调 + * @param pixelBuffer 渲染图像 + * @return 返回YES则SDK不再显示;返回NO则SDK渲染模块继续渲染 + * 说明:渲染图像的数据类型为config中设置的renderPixelFormatType + */ +- (BOOL)onPlayerPixelBuffer:(CVPixelBufferRef)pixelBuffer; +@end diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveCode.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveCode.h new file mode 100644 index 0000000..d6d529c --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveCode.h @@ -0,0 +1,103 @@ +// +// Copyright © 2020 Tencent. All rights reserved. +// +// Module: V2TXLive +// + +#import + +/// @defgroup V2TXLiveCode_ios V2TXLiveCode +/// Definitions of error codes and warning codes of Tencent Cloud LVB +/// @{ + +/** + * @brief Error codes and warning codes. + */ +typedef NS_ENUM(NSInteger, V2TXLiveCode) { + + /// No error. + V2TXLIVE_OK = 0, + + /// Unclassified error. 
+ V2TXLIVE_ERROR_FAILED = -1, + + /// An invalid parameter was input during the API call. + V2TXLIVE_ERROR_INVALID_PARAMETER = -2, + + /// The API call was rejected. + V2TXLIVE_ERROR_REFUSED = -3, + + /// The API is currently not supported. + V2TXLIVE_ERROR_NOT_SUPPORTED = -4, + + /// Failed to call the API because the license was invalid. + V2TXLIVE_ERROR_INVALID_LICENSE = -5, + + /// The server request timed out. + V2TXLIVE_ERROR_REQUEST_TIMEOUT = -6, + + /// The server cannot process the request. + V2TXLIVE_ERROR_SERVER_PROCESS_FAILED = -7, + + /// Disconnect. + V2TXLIVE_ERROR_DISCONNECTED = -8, + + ///////////////////////////////////////////////////////////////////////////////// + // + // Network warning codes + // + ///////////////////////////////////////////////////////////////////////////////// + + /// Data upload was jammed because the upstream bandwidth was too low. + V2TXLIVE_WARNING_NETWORK_BUSY = 1101, + + /// Blocking occurred during video playback. + V2TXLIVE_WARNING_VIDEO_BLOCK = 2105, + + ///////////////////////////////////////////////////////////////////////////////// + // + // Camera-related warning codes + // + ///////////////////////////////////////////////////////////////////////////////// + + /// Failed to start the camera. + V2TXLIVE_WARNING_CAMERA_START_FAILED = -1301, + + /// The camera is being occupied. + V2TXLIVE_WARNING_CAMERA_OCCUPIED = -1316, + + /// The camera is not authorized. This warning usually occurs on mobile devices due to the camera permission is denied by the user. + V2TXLIVE_WARNING_CAMERA_NO_PERMISSION = -1314, + + ///////////////////////////////////////////////////////////////////////////////// + // + // Mic-related warning codes + // + ///////////////////////////////////////////////////////////////////////////////// + + /// Failed to enable the mic. + V2TXLIVE_WARNING_MICROPHONE_START_FAILED = -1302, + + /// The mic is being used. If a call is in progress on the mobile device, the mic cannot be enabled. 
+ V2TXLIVE_WARNING_MICROPHONE_OCCUPIED = -1319, + + /// The mic is not authorized. This warning usually occurs on mobile devices due to the mic permission is denied by the user. + V2TXLIVE_WARNING_MICROPHONE_NO_PERMISSION = -1317, + + ///////////////////////////////////////////////////////////////////////////////// + // + // ScreenCapture-related warning codes + // + ///////////////////////////////////////////////////////////////////////////////// + + /// Screen capture is not supported in current system. + V2TXLIVE_WARNING_SCREEN_CAPTURE_NOT_SUPPORTED = -1309, + + /// Failed to enable the screen capture. + V2TXLIVE_WARNING_SCREEN_CAPTURE_START_FAILED = -1308, + + /// Screen capture is interrupted by system. + V2TXLIVE_WARNING_SCREEN_CAPTURE_INTERRUPTED = -7001, + +}; +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveDef.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveDef.h new file mode 100644 index 0000000..98a7009 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/V2TXLiveDef.h @@ -0,0 +1,591 @@ +// +// Copyright © 2020 Tencent. All rights reserved. +// +// Module: V2TXLive +// +/// @defgroup V2TXLiveDef_ios V2TXLiveDef +/// Key type definitions for Tencent Cloud LVB. +/// @{ +#import "V2TXLiveCode.h" + +#if TARGET_OS_IPHONE +#import +typedef UIView TXView; +typedef UIImage TXImage; +#elif TARGET_OS_MAC +#import +typedef NSView TXView; +typedef NSImage TXImage; +#endif + +/** + * @brief Supported protocol + */ +typedef NS_ENUM(NSUInteger, V2TXLiveMode) { + + /// RTMP protocol + V2TXLiveMode_RTMP, + + /// TRTC protocol + V2TXLiveMode_RTC + +}; + +///////////////////////////////////////////////////////////////////////////////// +// +// (1) Video type definitions +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Video type definitions +/// @{ + +/** + * @brief Video resolution. + */ +typedef NS_ENUM(NSInteger, V2TXLiveVideoResolution) { + + /// Resolution: 160×160. 
Bitrate range: 100 Kbps to 150 Kbps. Frame rate: 15 fps. + V2TXLiveVideoResolution160x160, + + /// Resolution: 270×270. Bitrate range: 200 Kbps to 300 Kbps. Frame rate: 15 fps. + V2TXLiveVideoResolution270x270, + + /// Resolution: 480×480. Bitrate range: 350 Kbps to 525 Kbps. Frame rate: 15 fps. + V2TXLiveVideoResolution480x480, + + /// Resolution: 320×240. Bitrate range: 250 Kbps to 375 Kbps. Frame rate: 15 fps. + V2TXLiveVideoResolution320x240, + + /// Resolution: 480×360. Bitrate range: 400 Kbps to 600 Kbps. Frame rate: 15 fps. + V2TXLiveVideoResolution480x360, + + /// Resolution: 640×480. Bitrate range: 600 Kbps to 900 Kbps. Frame rate: 15 fps. + V2TXLiveVideoResolution640x480, + + /// Resolution: 320×180. Bitrate range: 250 Kbps to 400 Kbps. Frame rate: 15 fps. + V2TXLiveVideoResolution320x180, + + /// Resolution: 480×270. Bitrate range: 350 Kbps to 550 Kbps. Frame rate: 15 fps. + V2TXLiveVideoResolution480x270, + + /// Resolution: 640×360. Bitrate range: 500 Kbps to 900 Kbps. Frame rate: 15 fps. + V2TXLiveVideoResolution640x360, + + /// Resolution: 960×540. Bitrate range: 800 Kbps to 1500 Kbps. Frame rate: 15 fps. + V2TXLiveVideoResolution960x540, + + /// Resolution: 1280×720. Bitrate range: 1000 Kbps to 1800 Kbps. Frame rate: 15 fps. + V2TXLiveVideoResolution1280x720, + + /// Resolution: 1920×1080. Bitrate range: 2500 Kbps to 3000 Kbps. Frame rate: 15 fps. + V2TXLiveVideoResolution1920x1080 + +}; + +/** + * @brief Video aspect ratio mode. + * + * @note + * - Landscape resolution: V2TXLiveVideoResolution640x360 + V2TXLiveVideoResolutionModeLandscape = 640 × 360 + * - Portrait resolution: V2TXLiveVideoResolution640x360 + V2TXLiveVideoResolutionModePortrait = 360 × 640 + */ +typedef NS_ENUM(NSInteger, V2TXLiveVideoResolutionMode) { + + /// Landscape resolution. + V2TXLiveVideoResolutionModeLandscape = 0, + + /// Portrait resolution. 
+ V2TXLiveVideoResolutionModePortrait = 1, + +}; + +/** + * Video encoding parameters + * + * These settings determine the quality of image viewed by remote users + */ +@interface V2TXLiveVideoEncoderParam : NSObject + +///**Field description:** video resolution +/// **Recommended value** +/// - For desktop platforms (Windows and macOS), we recommend you select a resolution of 640x360 or above and select `Landscape` (landscape resolution) for `videoResolutionMode`. +///**Note:** to use a portrait resolution, please specify `videoResolutionMode` as `Portrait`; for example, when used together with `Portrait`, 640x360 represents 360x640. +@property(nonatomic, assign) V2TXLiveVideoResolution videoResolution; + +///**Field description:** resolution mode (landscape/portrait) +///**Recommended value:** for desktop platforms (Windows and macOS), `Landscape` is recommended. +///**Note:** to use a portrait resolution, please specify `videoResolutionMode` as `Portrait`; for example, when used together with `Portrait`, 640x360 represents 360x640. +@property(nonatomic, assign) V2TXLiveVideoResolutionMode videoResolutionMode; + +///**Field description:** video capturing frame rate +///**Recommended value:** 15 or 20 fps. If the frame rate is lower than 5 fps, there will be obvious lagging; if lower than 10 fps but higher than 5 fps, there will be slight lagging; if higher than 20 fps, the bandwidth will be wasted (the frame +/// rate of movies is generally 24 fps). +@property(nonatomic, assign) int videoFps; + +///**Field description:** target video bitrate. The SDK encodes streams at the target video bitrate and will actively reduce the bitrate only in weak network environments. +///**Recommended value:** please see the optimal bitrate for each specification in `V2TXLiveVideoResolution`. You can also slightly increase the optimal bitrate. +/// For example, `V2TXLiveVideoResolution1280x720` corresponds to the target bitrate of 1,200 Kbps. 
You can also set the bitrate to 1,500 Kbps for higher definition. +///**Note:** you can set the `videoBitrate` and `minVideoBitrate` parameters at the same time to restrict the SDK's adjustment range of the video bitrate: +/// - If you set `videoBitrate` and `minVideoBitrate` to the same value, it is equivalent to disabling the adaptive adjustment capability of the SDK for the video bitrate. +@property(nonatomic, assign) int videoBitrate; + +///**Field description:** minimum video bitrate. The SDK will reduce the bitrate to as low as the value specified by `minVideoBitrate` to ensure the smoothness only if the network conditions are poor. +///**Recommended value:** you can set the `videoBitrate` and `minVideoBitrate` parameters at the same time to restrict the SDK's adjustment range of the video bitrate: +/// - If you set `videoBitrate` and `minVideoBitrate` to the same value, it is equivalent to disabling the adaptive adjustment capability of the SDK for the video bitrate. +@property(nonatomic, assign) int minVideoBitrate; + +- (instancetype _Nonnull)initWith:(V2TXLiveVideoResolution)resolution; + +@end + +/** + * @brief Local camera mirror type. + */ +typedef NS_ENUM(NSInteger, V2TXLiveMirrorType) { + + /// Default mirror type. Images from the front camera are mirrored, and images from the rear camera are not mirrored. + V2TXLiveMirrorTypeAuto, + + /// Both the front and rear cameras are switched to the mirror mode. + V2TXLiveMirrorTypeEnable, + + /// Both the front and rear cameras are switched to the non-mirror mode. + V2TXLiveMirrorTypeDisable + +}; + +/** + * @brief Image fill mode. + */ +typedef NS_ENUM(NSInteger, V2TXLiveFillMode) { + + /// The entire screen is covered by the image, without black edges. If the aspect ratio of the image is different from that of the screen, part of the image will be cropped. + V2TXLiveFillModeFill, + + /// The image adapts to the screen and is not cropped. 
If the aspect ratio of the image is different from that of the screen, black edges will appear. + V2TXLiveFillModeFit + +}; + +/** + * @brief Clockwise rotation of the video image. + */ +typedef NS_ENUM(NSInteger, V2TXLiveRotation) { + + /// No rotation. + V2TXLiveRotation0, + + /// Rotate 90 degrees clockwise. + V2TXLiveRotation90, + + /// Rotate 180 degrees clockwise. + V2TXLiveRotation180, + + /// Rotate 270 degrees clockwise. + V2TXLiveRotation270 + +}; + +/** + * @brief Pixel format of video frames. + */ +typedef NS_ENUM(NSInteger, V2TXLivePixelFormat) { + + /// Unknown + V2TXLivePixelFormatUnknown, + + /// YUV420P I420 + V2TXLivePixelFormatI420, + + /// YUV420SP NV12 + V2TXLivePixelFormatNV12, + + /// BGRA8888 + V2TXLivePixelFormatBGRA32, + + /// Texture2D + V2TXLivePixelFormatTexture2D + +}; + +/** + * @brief Video data container format. + * + * @note In the custom capture and rendering features, you need to use the following enumerated values to specify the format for containing video data. + * - PixelBuffer: this is most efficient when used directly. The iOS system provides various APIs to obtain or process PixelBuffer. + * - NSData: when this is applied to custom rendering, PixelBuffer is copied once to NSData. When it is applied to custom capture, NSData is copied once to PixelBuffer. Therefore, the performance is affected to some extent. + */ +typedef NS_ENUM(NSInteger, V2TXLiveBufferType) { + + /// Unknown + V2TXLiveBufferTypeUnknown, + + /// This is most efficient when used directly. The iOS system provides various APIs to obtain or process PixelBuffer. + V2TXLiveBufferTypePixelBuffer, + + /// The performance is affected to some extent. As the SDK internally processes PixelBuffer directly, type switching between NSData and PixelBuffer results in memory copy overhead. + V2TXLiveBufferTypeNSData, + + /// Texture + V2TXLiveBufferTypeTexture + +}; + +/** + * @brief Video frame information. 
+ * V2TXLiveVideoFrame describes the raw data of a video image frame, which can be the image before frame encoding or the image after frame decoding. + * @note Used during custom capture and rendering. During custom capture, you need to use V2TXLiveVideoFrame to contain the video frame to be sent. During custom rendering, the video frame contained by V2TXLiveVideoFrame will be returned. + */ +@interface V2TXLiveVideoFrame : NSObject + +/// **Field description:** Video pixel format. +/// **Recommended value:** V2TXLivePixelFormatNV12 +@property(nonatomic, assign) V2TXLivePixelFormat pixelFormat; + +/// **Field description:** Video data container format. +/// **Recommended value:** V2TXLiveBufferTypePixelBuffer +@property(nonatomic, assign) V2TXLiveBufferType bufferType; + +/// **Field description:** Video data when bufferType is V2TXLiveBufferTypeNSData. +@property(nonatomic, strong, nullable) NSData *data; + +/// **Field description:** Video data when bufferType is V2TXLiveBufferTypePixelBuffer. +@property(nonatomic, assign, nullable) CVPixelBufferRef pixelBuffer; + +/// **Field description:** Video width +@property(nonatomic, assign) NSUInteger width; + +/// **Field description:** Video height. +@property(nonatomic, assign) NSUInteger height; + +/// **Field description:** Clockwise rotation angle of video frames. +@property(nonatomic, assign) V2TXLiveRotation rotation; + +/// **Field description:** Texture ID +@property(nonatomic, assign) GLuint textureId; + +@end + +/// @} + +///////////////////////////////////////////////////////////////////////////////// +// +// (2) Audio type definitions +// +///////////////////////////////////////////////////////////////////////////////// + +/// @name Audio type definitions +/// @{ + +/** + * @brief Audio quality. + */ +typedef NS_ENUM(NSInteger, V2TXLiveAudioQuality) { + + /// Audio: 16k sample rate, mono-channel, 16 Kbps audio raw bitrate. 
This quality is suitable for scenarios that mainly involve voice calls, such as online meetings and voice calls. + V2TXLiveAudioQualitySpeech, + + /// General: 48k sample rate, mono-channel, 50 Kbps audio raw bitrate. This quality is the default audio quality of the SDK. We recommend that you choose this option unless you have special requirements. + V2TXLiveAudioQualityDefault, + + /// Music: 48k sample rate, dual-channel + full-band, 128 Kbps audio raw bitrate. This quality is suitable for scenarios that require Hi-Fi music transmission, such as karaoke and music livestreams. + V2TXLiveAudioQualityMusic + +}; +/// @} + +/** + * @brief audio frame + */ +@interface V2TXLiveAudioFrame : NSObject + +/// **Field description:** audio data +@property(nonatomic, strong, nullable) NSData *data; + +/// **Field description:** audio sample rate +@property(nonatomic, assign) int sampleRate; + +/// **Field description:** number of sound channels +@property(nonatomic, assign) int channel; + +@end + +///////////////////////////////////////////////////////////////////////////////// +// +// (3) Definitions of statistical metrics for pushers and players +// +///////////////////////////////////////////////////////////////////////////////// + +/// @name Definitions of statistical metrics for pushers and players +/// @{ + +/** + * @brief Pusher statistics. 
+ */ +@interface V2TXLivePusherStatistics : NSObject + +/// **Field description:** CPU utilization of the current app (%) +@property(nonatomic, assign) NSUInteger appCpu; + +/// **Field description:** CPU utilization of the current system (%) +@property(nonatomic, assign) NSUInteger systemCpu; + +/// **Field description:** Video width +@property(nonatomic, assign) NSUInteger width; + +/// **Field description:** Video height +@property(nonatomic, assign) NSUInteger height; + +/// **Field description:** Frame rate (fps) +@property(nonatomic, assign) NSUInteger fps; + +/// **Field description:** Video bitrate (Kbps) +@property(nonatomic, assign) NSUInteger videoBitrate; + +/// **Field description:** Audio bitrate (Kbps) +@property(nonatomic, assign) NSUInteger audioBitrate; + +@end + +/** + * @brief Player statistics. + */ +@interface V2TXLivePlayerStatistics : NSObject + +/// **Field description:** CPU utilization of the current app (%) +@property(nonatomic, assign) NSUInteger appCpu; + +/// **Field description:** CPU utilization of the current system (%) +@property(nonatomic, assign) NSUInteger systemCpu; + +/// **Field description:** Video width +@property(nonatomic, assign) NSUInteger width; + +/// **Field description:** Video height +@property(nonatomic, assign) NSUInteger height; + +/// **Field description:** Frame rate (fps) +@property(nonatomic, assign) NSUInteger fps; + +/// **Field description:** Video bitrate (Kbps) +@property(nonatomic, assign) NSUInteger videoBitrate; + +/// **Field description:** Audio bitrate (Kbps) +@property(nonatomic, assign) NSUInteger audioBitrate; + +@end +/// @} + +///////////////////////////////////////////////////////////////////////////////// +// +// (4) Definitions of connection-status-related enumerated values +// +///////////////////////////////////////////////////////////////////////////////// + +/// @name Definitions of connection-status-related enumerated values +/// @{ + +/** + * @brief Livestream connection status. 
+ */ +typedef NS_ENUM(NSInteger, V2TXLivePushStatus) { + + /// Disconnected from the server + V2TXLivePushStatusDisconnected, + + /// Connecting to the server + V2TXLivePushStatusConnecting, + + /// Connected to the server successfully + V2TXLivePushStatusConnectSuccess, + + /// Reconnecting to the server + V2TXLivePushStatusReconnecting, + +}; + +/** + * @brief Playback mode. + */ +typedef NS_ENUM(NSInteger, V2TXAudioRoute) { + + /// Speaker + V2TXAudioModeSpeakerphone, + + /// Earpiece + V2TXAudioModeEarpiece, + +}; + +/** + * @brief Specify the type of streams to mix + */ +typedef NS_ENUM(NSInteger, V2TXLiveMixInputType) { + + /// Audio and video + V2TXLiveMixInputTypeAudioVideo, + + /// Video only + V2TXLiveMixInputTypePureVideo, + + /// Audio only + V2TXLiveMixInputTypePureAudio, + +}; + +/** + * @brief Position of each subimage in On-Cloud MixTranscoding + */ +@interface V2TXLiveMixStream : NSObject + +/// **Field description:** `userId` of users whose streams are mixed +@property(nonatomic, copy, nonnull) NSString *userId; + +/// **Field description:** push `streamId` of users whose streams are mixed. `nil` indicates the current push `streamId`. 
+@property(nonatomic, copy, nullable) NSString *streamId; + +/// **Field description:** x-axis (absolute pixels) of the image layer +@property(nonatomic, assign) NSInteger x; + +/// **Field description:** y-axis (absolute pixels) of the image layer +@property(nonatomic, assign) NSInteger y; + +/// **Field description:** width (absolute pixels) of the image layer +@property(nonatomic, assign) NSInteger width; + +/// **Field description:** height (absolute pixels) of the image layer +@property(nonatomic, assign) NSInteger height; + +/// **Field description:** layer number (1-15), which must be unique +@property(nonatomic, assign) NSUInteger zOrder; + +/// **Field description:** input type of the live stream +@property(nonatomic, assign) V2TXLiveMixInputType inputType; + +@end + +/** + * @brief Configure On-Cloud MixTranscoding + */ +@interface V2TXLiveTranscodingConfig : NSObject + +/// **Field description:** width of transcoded video +/// **Recommended value:** 360 px. If audio-only streams are mixed, the mixing result will carry a video stream that shows a canvas background. To avoid this, set both the width and height to 0 px. +@property(nonatomic, assign) NSUInteger videoWidth; + +/// **Field description:** height of transcoded video +/// **Recommended value:** 640 px. If audio-only streams are mixed, the mixing result will carry a video stream that shows a canvas background. To avoid this, set both the width and height to 0 px. +@property(nonatomic, assign) NSUInteger videoHeight; + +/// **Field description:** bitrate (Kbps) for the resolution of the transcoded video +/// **Recommended value:** if you set it to 0, the backend will calculate a bitrate based on `videoWidth` and `videoHeight`. You can also refer to the remarks for the enumerated value `V2TXLiveVideoResolution`. 
+@property(nonatomic, assign) NSUInteger videoBitrate; + +/// **Field description:** frame rate (fps) for the resolution of the transcoded video +/// **Value range:** (0,30]; default: 15 +@property(nonatomic, assign) NSUInteger videoFramerate; + +/// **Field description:** keyframe interval (GOP) for the resolution of the transcoded video +/// **Value range:** [1,8]; default value: 2 (sec) +@property(nonatomic, assign) NSUInteger videoGOP; + +/// **Field description:** background color of the mixed video image. The default color is black, and the value is a hex number. For example: "0x61B9F1" represents the RGB color (97,158,241). +/// **Default value:** 0x000000 (black) +@property(nonatomic, assign) NSUInteger backgroundColor; + +/// **Field description:** background image of the mixed video +/// **Default value:** `nil`, which means that no background image is set +/// **Note:** you need to first upload the image in **Application Management** > **Function Configuration** > **Material Management** in the [console](https://console.cloud.tencent.com/trtc). +/// You will get an image ID for the image uploaded, which you need to convert to a string and use it as the value of `backgroundImage`. +/// For example, if the image ID is 63, you should set `backgroundImage` to `63`. 
+@property(nonatomic, copy, nullable) NSString *backgroundImage; + +/// **Field description:** audio sample rate of the transcoded stream +/// **Valid values:** 12000 Hz, 16000 Hz, 22050 Hz, 24000 Hz, 32000 Hz, 44100 Hz, 48000 Hz (default) +@property(nonatomic, assign) NSUInteger audioSampleRate; + +/// **Field description:** audio bitrate of the transcoded stream +/// **Value range:** [32,192]; default value: 64 (Kbps) +@property(nonatomic, assign) NSUInteger audioBitrate; + +/// **Field description:** number of sound channels of the transcoded stream +/// **Valid values:** 1 (default), 2 +@property(nonatomic, assign) NSUInteger audioChannels; + +/// **Field description:** position of each channel of subimage +@property(nonatomic, copy, nonnull) NSArray *mixStreams; + +/// **Field description:** ID of the live stream pushed to CDN +/// If you do not set this parameter, the SDK will execute the default logic, that is, it will mix multiple streams in the room into the video stream of the API caller, i.e., A + B => A. +/// If you set this parameter, the SDK will mix multiple streams in the room into the live stream whose ID you have specified, i.e., A + B => C. 
+/// **Default value**: `nil`, which indicates that multiple streams in the room are mixed into the video stream of the API caller +@property(nonatomic, copy, nullable) NSString *outputStreamId; + +@end +/// @} + +///////////////////////////////////////////////////////////////////////////////// +// +// (5) Definitions of common configuration components +// +///////////////////////////////////////////////////////////////////////////////// + +/// @name Definitions of common configuration components +/// @{ + +/** + * @brief Log level + */ +typedef NS_ENUM(NSInteger, V2TXLiveLogLevel) { + + /// Output all levels of log + V2TXLiveLogLevelAll = 0, + + /// Output DEBUG, INFO, WARNING, ERROR and FATAL level log + V2TXLiveLogLevelDebug = 1, + + /// Output INFO, WARNING, ERROR and FATAL level log + V2TXLiveLogLevelInfo = 2, + + /// Output WARNING, ERROR and FATAL level log + V2TXLiveLogLevelWarning = 3, + + /// Output ERROR and FATAL level log + V2TXLiveLogLevelError = 4, + + /// Only output FATAL level log + V2TXLiveLogLevelFatal = 5, + + /// Does not output any sdk log + V2TXLiveLogLevelNULL = 6, + +}; + +@interface V2TXLiveLogConfig : NSObject + +/// **Field description:** Set Log level +/// **Recommended value:** Default value: V2TXLiveLogLevelAll +@property(nonatomic, assign) V2TXLiveLogLevel logLevel; + +/// **Field description:** Whether to receive the log information to be printed through V2TXLivePremierObserver +/// **Special Instructions:** If you want to implement Log writing by yourself, you can turn on this switch, Log information will be called back to you V2TXLivePremierObserver#onLog. +/// **Recommended value:** Default value: NO +@property(nonatomic, assign) BOOL enableObserver; + +/// **Field description:** Whether to allow the SDK to print Log on the console of the editor (XCoder, Android Studio, Visual Studio, etc.) 
+/// **Recommended value:** Default value: NO +@property(nonatomic, assign) BOOL enableConsole; + +/// **Field description:** Whether to enable local log file +/// **Special Instructions:** If not for special needs, please do not close the local log file, otherwise the Tencent Cloud technical team will not be able to track and locate problems when they occur. +/// **Recommended value:** Default value: YES +@property(nonatomic, assign) BOOL enableLogFile; + +/// **Field description:** Set the storage directory of the local log, default Log storage location: +/// iOS & Mac: sandbox Documents/log +@property(nonatomic, copy, nullable) NSString *logPath; + +@end +/// @} + +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePlayer.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePlayer.h new file mode 100644 index 0000000..cc9340d --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePlayer.h @@ -0,0 +1,224 @@ +// +// Copyright © 2020 Tencent. All rights reserved. +// +// Module: V2TXLive +// + +#import "V2TXLivePlayerObserver.h" + +/// @defgroup V2TXLivePlayer_ios V2TXLivePlayer +/// Tencent Cloud live player.
+/// This player pulls audio and video data from the specified livestreaming URL and plays the data after decoding and local rendering. +/// +/// The player has the following capabilities: +/// - Supports RTMP, HTTP-FLV, TRTC and WebRTC. +/// - View capturing, which allows you to capture the video images of the current livestream. +/// - Delay adjustment, which allows you to set the minimum time and maximum time for auto adjustment of the player cache. +/// - Custom video data processing, which allows you to perform rendering and play video data after processing video data in the livestream based on the project requirements. +/// +/// @{ + +@protocol V2TXLivePlayer + +///////////////////////////////////////////////////////////////////////////////// +// +// V2TXLivePlayer Interface +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * Sets the player callback. + * + * By setting the callback, you can listen to some callback events of V2TXLivePlayer, + * including the player status, playback volume callback, first frame audio/video callback, statistics, warnings, and error messages. + * + * @param observer Callback target of the player. For more information, see {@link V2TXLivePlayerObserver}. + */ +- (void)setObserver:(id)observer; + +/** + * Sets the rendering view of the player. This control is responsible for presenting the video content. + * + * @param view Player rendering view. + * @return Return code {@link V2TXLiveCode} + * - V2TXLIVE_OK: successful + */ +- (V2TXLiveCode)setRenderView:(TXView *)view; + +/** + * Sets the rotation angle of the player view. + * + * @param rotation Rotation angle of the view {@link V2TXLiveRotation} + * - V2TXLiveRotation0 **[Default]**: 0 degrees, which means the view is not rotated. + * - V2TXLiveRotation90: rotate 90 degrees clockwise. + * - V2TXLiveRotation180: rotate 180 degrees clockwise. + * - V2TXLiveRotation270: rotate 270 degrees clockwise. 
+ * @return Return code {@link V2TXLiveCode}
+ * - V2TXLIVE_OK: successful
+ */
+- (V2TXLiveCode)setRenderRotation:(V2TXLiveRotation)rotation;
+
+/**
+ * Sets the fill mode of the view.
+ *
+ * @param mode Fill mode of the view {@link V2TXLiveFillMode}.
+ * - V2TXLiveFillModeFill: **[Default]**: fill the screen with the image without leaving any black edges. If the aspect ratio of the view is different from that of the screen, part of the view will be cropped.
+ * - V2TXLiveFillModeFit: make the view fit the screen without cropping. If the aspect ratio of the view is different from that of the screen, black edges will appear.
+ * @return Return code {@link V2TXLiveCode}
+ * - V2TXLIVE_OK: successful
+ */
+- (V2TXLiveCode)setRenderFillMode:(V2TXLiveFillMode)mode;
+
+/**
+ * Starts playing the audio and video streams.
+ *
+ * @param url URL of the audio and video streams to be played. The RTMP, HTTP-FLV and TRTC streaming protocols are supported.
+ * @return Return code {@link V2TXLiveCode}
+ * - V2TXLIVE_OK: operation succeeded. The player starts connecting to the URL and playing the audio and video streams.
+ * - V2TXLIVE_ERROR_INVALID_PARAMETER: operation failed. The URL is invalid.
+ * - V2TXLIVE_ERROR_REFUSED: operation failed. Duplicate streamId, please ensure that no other player or pusher is using this streamId now.
+ */
+- (V2TXLiveCode)startPlay:(NSString *)url;
+
+/**
+ * Stops playing the audio and video streams.
+ *
+ * @return Return code {@link V2TXLiveCode}
+ * - V2TXLIVE_OK: successful
+ */
+- (V2TXLiveCode)stopPlay;
+
+/**
+ * Indicates whether the player is playing the audio and video streams.
+ *
+ * @return Indicates whether the player is playing the audio and video streams.
+ * - 1: yes
+ * - 0: no
+ */
+- (int)isPlaying;
+
+/**
+ * Pauses the audio stream of the player.
+ *
+ * @return Return code {@link V2TXLiveCode}
+ * - V2TXLIVE_OK: successful
+ */
+- (V2TXLiveCode)pauseAudio;
+
+/**
+ * Resumes the audio stream of the player.
+ *
+ * @return Return code {@link V2TXLiveCode}
+ * - V2TXLIVE_OK: successful
+ */
+- (V2TXLiveCode)resumeAudio;
+
+/**
+ * Pauses the video stream of the player.
+ *
+ * @return Return code {@link V2TXLiveCode}
+ * - V2TXLIVE_OK: successful
+ */
+- (V2TXLiveCode)pauseVideo;
+
+/**
+ * Resumes the video stream of the player.
+ *
+ * @return Return code {@link V2TXLiveCode}
+ * - V2TXLIVE_OK: successful
+ */
+- (V2TXLiveCode)resumeVideo;
+
+/**
+ * Sets the volume.
+ *
+ * @param volume Volume. Valid range: 0 - 100. **[Default]**: 100
+ * @return Return code {@link V2TXLiveCode}
+ * - V2TXLIVE_OK: successful
+ */
+- (V2TXLiveCode)setPlayoutVolume:(NSUInteger)volume;
+
+/**
+ * Set the minimum time and maximum time (unit: s) for auto adjustment of the player cache.
+ *
+ * @param minTime Minimum time for auto cache adjustment. The value must be greater than 0. **[Default]**: 1
+ * @param maxTime Maximum time for auto cache adjustment. The value must be greater than 0. **[Default]**: 5
+ * @return Return code {@link V2TXLiveCode}
+ * - V2TXLIVE_OK: successful
+ * - V2TXLIVE_ERROR_INVALID_PARAMETER: operation failed. MinTime and maxTime must be greater than 0.
+ * - V2TXLIVE_ERROR_REFUSED: operation failed. Change of cache is not supported when playing.
+ */
+- (V2TXLiveCode)setCacheParams:(CGFloat)minTime maxTime:(CGFloat)maxTime;
+
+/**
+ * Enables playback volume update.
+ *
+ * After this feature is enabled, you can obtain the SDK’s volume evaluation through the [onPlayoutVolumeUpdate](@ref V2TXLivePlayerObserver#onPlayoutVolumeUpdate:volume:) callback.
+ *
+ * @param intervalMs Interval for triggering the volume callback. The unit is ms. The minimum interval is 100 ms. If the value is equal to or smaller than 0, the callback is disabled. We recommend that you set this parameter to 300 ms. **[Default]**:
+ * 0.
+ * @return Return code {@link V2TXLiveCode}
+ * - V2TXLIVE_OK: successful
+ */
+- (V2TXLiveCode)enableVolumeEvaluation:(NSUInteger)intervalMs;
+
+/**
+ * Captures the video view in the playback process.
+ *
+ * @return Return code {@link V2TXLiveCode}
+ * - V2TXLIVE_OK: successful
+ * - V2TXLIVE_ERROR_REFUSED: playback is stopped, the snapshot operation cannot be called.
+ */
+- (V2TXLiveCode)snapshot;
+
+/**
+ * Turn on/off the monitoring callback of the video frame.
+ *
+ * The SDK will no longer render the video after you turn on this switch. You can get the video frame through V2TXLivePlayerObserver and execute custom rendering logic.
+ *
+ * @param enable Whether to enable custom rendering. **[Default]**: NO
+ * @param pixelFormat Video pixel format for custom rendering callback {@link V2TXLivePixelFormat}.
+ * @param bufferType Video data format for custom rendering callback {@link V2TXLiveBufferType}.
+ * @return Return code {@link V2TXLiveCode}
+ * - V2TXLIVE_OK: successful
+ * - V2TXLIVE_ERROR_NOT_SUPPORTED: the pixel format or data format is not supported.
+ */
+- (V2TXLiveCode)enableObserveVideoFrame:(BOOL)enable pixelFormat:(V2TXLivePixelFormat)pixelFormat bufferType:(V2TXLiveBufferType)bufferType;
+
+/**
+ * Enables the receiving of SEI messages.
+ *
+ * @param enable `YES`: enable; `NO` (**default**): disable
+ * @param payloadType The payload type of SEI messages. Valid values: `5`, `242`, please be consistent with the payload type of the sender
+ *
+ * @return Return code {@link V2TXLiveCode}
+ * - V2TXLIVE_OK: successful
+ */
+- (V2TXLiveCode)enableReceiveSeiMessage:(BOOL)enable payloadType:(int)payloadType;
+
+/**
+ * Indicates whether the debug view of the player video status information is displayed.
+ *
+ * @param isShow Specifies whether to display the debug view. **[Default]**: NO.
+ */
+- (void)showDebugView:(BOOL)isShow;
+
+/**
+ * Calls the advanced API of V2TXLivePlayer.
+ *
+ * @note This API is used to call some advanced features.
+ * @param key Key of the advanced API. + * @param value Parameter needed to call the advanced API corresponding to the key. + * @return Return code {@link V2TXLiveCode} + * - V2TXLIVE_OK: successful + * - V2TXLIVE_ERROR_INVALID_PARAMETER: operation failed. The key cannot be nil. + */ +- (V2TXLiveCode)setProperty:(NSString *)key value:(NSObject *)value; + +@end + +/// @} + +@interface V2TXLivePlayer : NSObject + +@end diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePlayerObserver.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePlayerObserver.h new file mode 100644 index 0000000..41766db --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePlayerObserver.h @@ -0,0 +1,145 @@ +// +// Copyright © 2020 Tencent. All rights reserved. +// +// Module: V2TXLive +// + +#import "V2TXLiveDef.h" + +@protocol V2TXLivePlayer; + +/// @defgroup V2TXLivePlayerObserver_ios V2TXLivePlayerObserver +/// Tencent Cloud live player callback notification
+/// Some V2TXLivePlayer callback notifications can be received, including the player status, playback volume callback, audio/video first-frame callback, statistics, warning, and error messages. +/// @{ + +@protocol V2TXLivePlayerObserver + +@optional + +///////////////////////////////////////////////////////////////////////////////// +// +// Live Player Event Callback +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * live player error notification, which is called back when the player encounters an error + * + * @param player Player object that calls back this notification + * @param code Error code {@link V2TXLiveCode} + * @param msg Error message + * @param extraInfo Extended information + */ +- (void)onError:(id)player code:(V2TXLiveCode)code message:(NSString *)msg extraInfo:(NSDictionary *)extraInfo; + +/** + * live player warning notification + * + * @param player Player object that calls back this notification + * @param code Warning code {@link V2TXLiveCode} + * @param msg Warning message + * @param extraInfo Extended information + */ +- (void)onWarning:(id)player code:(V2TXLiveCode)code message:(NSString *)msg extraInfo:(NSDictionary *)extraInfo; + +/** + * live player resolution change notification + * + * @param player Player object that calls back this notification + * @param width Video width + * @param height Video height + */ +- (void)onVideoResolutionChanged:(id)player width:(NSInteger)width height:(NSInteger)height; + +/** + * live player has successfully connected to the server notification + * + * @param player Player object that calls back this notification + * @param extraInfo Extended information + */ +- (void)onConnected:(id)player extraInfo:(NSDictionary *)extraInfo; + +/** + * Video playback event + * + * @param player Player object that calls back this notification + * @param firstPlay Play for the first time + * @param extraInfo Extended information + */ +- (void)onVideoPlaying:(id)player 
firstPlay:(BOOL)firstPlay extraInfo:(NSDictionary *)extraInfo; + +/** + * Audio playback event + * + * @param player Player object that calls back this notification + * @param firstPlay Play for the first time + * @param extraInfo Extended information + */ +- (void)onAudioPlaying:(id)player firstPlay:(BOOL)firstPlay extraInfo:(NSDictionary *)extraInfo; + +/** + * Video loading event + * + * @param player Player object that calls back this notification + * @param extraInfo Extended information + */ +- (void)onVideoLoading:(id)player extraInfo:(NSDictionary *)extraInfo; + +/** + * Audio loading event + * + * @param player Player object that calls back this notification + * @param extraInfo Extended information + */ +- (void)onAudioLoading:(id)player extraInfo:(NSDictionary *)extraInfo; + +/** + * Player playback volume callback. + * + * @note This callback notification is received after [enableVolumeEvaluation](@ref V2TXLivePlayer#enableVolumeEvaluation:) is called to enable playback volume display. + * @param player Player object that calls back this notification + * @param volume Current playback volume + */ +- (void)onPlayoutVolumeUpdate:(id)player volume:(NSInteger)volume; + +/** + * Live player statistics callback. + * + * @param player Player object that calls back this notification + * @param statistics Player statistics {@link V2TXLivePlayerStatistics} + */ +- (void)onStatisticsUpdate:(id)player statistics:(V2TXLivePlayerStatistics *)statistics; + +/** + * Screenshot callback + * + * @note This callback notification is received after [snapshot](@ref V2TXLivePlayer#snapshot) is called to snapshot. + * @param player Player object that calls back this notification + * @param image Captured video image + */ +- (void)onSnapshotComplete:(id)player image:(TXImage *)image; + +/** + * Custom video rendering callback + * + * @note Need you call [enableObserveVideoFrame](@ref V2TXLivePlayer#enableObserveVideoFrame:pixelFormat:bufferType:) to turn on the callback switch. 
+ * @param player Player object that calls back this notification + * @param videoFrame Video frame data {@link V2TXLiveVideoFrame} + */ +- (void)onRenderVideoFrame:(id)player frame:(V2TXLiveVideoFrame *)videoFrame; + +/** + * Callback of receiving an SEI message. The sender calls `sendSeiMessage` in {@link V2TXLivePusher} to send an SEI + * message. + * + * @note You will receive this callback after calling `enableReceiveSeiMessage` in {@link V2TXLivePlayer} to enable the receiving of SEI + * + * @param player Player object that calls back this notification + * @param payloadType The payload type of the received SEI message + * @param data sei message data + */ +- (void)onReceiveSeiMessage:(id)player payloadType:(int)payloadType data:(NSData *)data; + +@end +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePremier.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePremier.h new file mode 100644 index 0000000..c2d23eb --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/V2TXLivePremier.h @@ -0,0 +1,101 @@ +// +// Copyright © 2020 Tencent. All rights reserved. +// +// Module: V2TXLive +// +#import "V2TXLiveDef.h" + +NS_ASSUME_NONNULL_BEGIN + +/// @defgroup V2TXLivePremier_ios V2TXLivePremier +/// +/// @{ + +///////////////////////////////////////////////////////////////////////////////// +// +// V2TXLive High-level interface +// +///////////////////////////////////////////////////////////////////////////////// + +@protocol V2TXLivePremierObserver; +@protocol V2TXLivePremier + +/** + * Get the SDK version number + */ ++ (NSString *)getSDKVersionStr; + +/** + * Set V2TXLivePremier callback interface + */ ++ (void)setObserver:(id)observer; + +/** + * Set Log configuration information + */ ++ (V2TXLiveCode)setLogConfig:(V2TXLiveLogConfig *)config; + +/** + * Set up SDK access environment + * + * @note If your application has no special requirements, please do not call this interface for setting. 
+ * @param env currently supports two parameters "default" and "GDPR" + * - default: In the default environment, the SDK will find the best access point in the world for access. + * - GDPR: All audio and video data and quality statistics will not pass through servers in mainland China. + */ ++ (V2TXLiveCode)setEnvironment:(const char *)env; + +/** + * Set SDK authorization license + * + * Try and Purchase a License: https://intl.cloud.tencent.com/document/product/1071/38546 + * @param url the url of licence + * @param key the key of licence + */ +#if TARGET_OS_IPHONE ++ (void)setLicence:(NSString *)url key:(NSString *)key; +#endif + +/** + * Set SDK sock5 proxy config + * + * @param host sock5 sock5 proxy host + * @param port sock5 sock5 proxy port + * @param username sock5 sock5 proxy username + * @param password sock5 sock5 proxy password + */ ++ (V2TXLiveCode)setSocks5Proxy:(NSString *)host port:(NSInteger)port username:(NSString *)username password:(NSString *)password; + +@end + +///////////////////////////////////////////////////////////////////////////////// +// +// V2TXLive Advanced callback interface +// +///////////////////////////////////////////////////////////////////////////////// + +@protocol V2TXLivePremierObserver +@optional + +/** + * Custom Log output callback interface + */ +- (void)onLog:(V2TXLiveLogLevel)level log:(NSString *)log; + +/** + * setLicence result callback interface + * + * @param result the result of setLicence interface, 0 succeeds, negative number fails + * @param reason the reason for failure + */ +- (void)onLicenceLoaded:(int)result Reason:(NSString *)reason; + +@end + +@interface V2TXLivePremier : NSObject + +@end + +NS_ASSUME_NONNULL_END + +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITRTCCloud.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITRTCCloud.h new file mode 100644 index 0000000..ec38db7 --- /dev/null +++ 
b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITRTCCloud.h @@ -0,0 +1,1503 @@ +/** + * Module: TRTCCloud @ TXLiteAVSDK + * Function: TRTC's main feature API + * Version: <:Version:> + */ +#ifndef __ITRTCCLOUD_H__ +#define __ITRTCCLOUD_H__ +#include "TRTCCloudCallback.h" +#include "TRTCTypeDef.h" +#include "ITXAudioEffectManager.h" +#include "ITXDeviceManager.h" +#ifdef _WIN32 +#include "IDeprecatedTRTCCloud.h" +#include "TXLiteAVBase.h" +#endif + +/// @defgroup TRTCCloud_cplusplus TRTCCloud +/// Tencent Cloud TRTC Core Function Interface +/// @{ +namespace liteav { +class ITRTCCloud; +} + +/// Export the following C-style interface to facilitate “LoadLibrary()” +/// You can use the following methods to create and destroy TRTCCloud instance: +///
+///    ITRTCCloud *trtcCloud = getTRTCShareInstance();
+///    if(trtcCloud) {
+///        std::string version(trtcCloud->getSDKVersion());
+///    }
+///    //
+///    //
+///    destroyTRTCShareInstance();
+///    trtcCloud = nullptr;
+/// 
+/// +extern "C" { +/// @name Exported C function +/// @{ +#ifdef __ANDROID__ +TRTC_API liteav::ITRTCCloud* getTRTCShareInstance(void* context); +#else +TRTC_API liteav::ITRTCCloud* getTRTCShareInstance(); +#endif +TRTC_API void destroyTRTCShareInstance(); +/// @} +} +namespace liteav { + +class ITRTCCloud +#ifdef _WIN32 + : public IDeprecatedTRTCCloud +#endif // _WIN32 +{ + protected: + virtual ~ITRTCCloud(){}; + + public: +///////////////////////////////////////////////////////////////////////////////// +// +// Create Instance And Event Callback +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Create Instance And Event Callback +/// @{ + +/** + * 1.1 Create `TRTCCloud` instance (singleton mode) + * + * @param context It is only applicable to the Android platform. The SDK internally converts it into the `ApplicationContext` of Android to call the Android system API. + * @note + * 1. If you use `delete ITRTCCloud*`, a compilation error will occur. Please use `destroyTRTCCloud` to release the object pointer. + * 2. On Windows, macOS, or iOS, please call the `getTRTCShareInstance()` API. + * 3. On Android, please call the `getTRTCShareInstance(void *context)` API. + */ +#ifdef __ANDROID__ + TRTC_API static liteav::ITRTCCloud* getTRTCShareInstance(void* context); +#else + TRTC_API static liteav::ITRTCCloud* getTRTCShareInstance(); +#endif + + /** + * 1.2 Terminate `TRTCCloud` instance (singleton mode) + */ + TRTC_API static void destroyTRTCShareInstance(); + + /** + * 1.3 Set TRTC event callback + * + * You can use {@link TRTCCloudDelegate} to get various event notifications from the SDK, such as error codes, warning codes, and audio/video status parameters. 
+ */ + virtual void addCallback(ITRTCCloudCallback* callback) = 0; + + /** + * 1.4 Remove TRTC event callback + * + * @param callback + */ + virtual void removeCallback(ITRTCCloudCallback* callback) = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Room APIs + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Room APIs + /// @{ + + /** + * 2.1 Enter room + * + * All TRTC users need to enter a room before they can "publish" or "subscribe to" audio/video streams. "Publishing" refers to pushing their own streams to the cloud, and "subscribing to" refers to pulling the streams of other users in the room + * from the cloud. When calling this API, you need to specify your application scenario ({@link TRTCAppScene}) to get the best audio/video transfer experience. We provide the following four scenarios for your choice: + * - {@link TRTCAppSceneVideoCall}: + * Video call scenario. Use cases: [one-to-one video call], [video conferencing with up to 300 participants], [online medical diagnosis], [small class], [video interview], etc. + * In this scenario, each room supports up to 300 concurrent online users, and up to 50 of them can speak simultaneously. + * - {@link TRTCAppSceneAudioCall}: + * Audio call scenario. Use cases: [one-to-one audio call], [audio conferencing with up to 300 participants], [audio chat], [online Werewolf], etc. + * In this scenario, each room supports up to 300 concurrent online users, and up to 50 of them can speak simultaneously. + * - {@link TRTCAppSceneLIVE}: + * Live streaming scenario. Use cases: [low-latency video live streaming], [interactive classroom for up to 100,000 participants], [live video competition], [video dating room], [remote training], [large-scale conferencing], etc. 
+ * In this scenario, each room supports up to 100,000 concurrent online users, but you should specify the user roles: anchor ({@link TRTCRoleAnchor }) or audience ({@link TRTCRoleAudience }). + * - {@link TRTCAppSceneVoiceChatRoom}: + * Audio chat room scenario. Use cases: [Clubhouse], [online karaoke room], [music live room], [FM radio], etc. + * In this scenario, each room supports up to 100,000 concurrent online users, but you should specify the user roles: anchor ({@link TRTCRoleAnchor }) or audience ({@link TRTCRoleAudience }). + * After calling this API, you will receive the `onEnterRoom(result)` callback from {@link TRTCCloudDelegate}: + * - If room entry succeeded, the `result` parameter will be a positive number (`result` > 0), indicating the time in milliseconds (ms) between function call and room entry. + * - If room entry failed, the `result` parameter will be a negative number (`result` < 0), indicating the [error code](https://cloud.tencent.com/document/product/647/32257) for room entry failure. + * @param param Room entry parameter, which is used to specify the user's identity, role, authentication credentials, and other information. For more information, please see {@link TRTCParams}. + * @param scene Application scenario, which is used to specify the use case. The same {@link TRTCAppScene} should be configured for all users in the same room. + * @note + * 1. If `scene` is specified as {@link TRTCAppSceneLIVE} or {@link TRTCAppSceneVoiceChatRoom}, you must use the `role` field in {@link TRTCParams} to specify the role of the current user in the room. + * 2. The same `scene` should be configured for all users in the same room. + * 3. Please try to ensure that {@link enterRoom} and {@link exitRoom} are used in pair; that is, please make sure that "the previous room is exited before the next room is entered"; otherwise, many issues may occur. 
+ */ + virtual void enterRoom(const TRTCParams& param, TRTCAppScene scene) = 0; + + /** + * 2.2 Exit room + * + * Calling this API will allow the user to leave the current audio or video room and release the camera, mic, speaker, and other device resources. + * After resources are released, the SDK will use the `onExitRoom()` callback in {@link TRTCCloudDelegate} to notify you. + * If you need to call {@link enterRoom} again or switch to the SDK of another provider, we recommend you wait until you receive the `onExitRoom()` callback, so as to avoid the problem of the camera or mic being occupied. + */ + virtual void exitRoom() = 0; + + /** + * 2.3 Switch role + * + * This API is used to switch the user role between "anchor" and "audience". + * As video live rooms and audio chat rooms need to support an audience of up to 100,000 concurrent online users, the rule "only anchors can publish their audio/video streams" has been set. Therefore, when some users want to publish their streams + * (so that they can interact with anchors), they need to switch their role to "anchor" first. You can use the `role` field in {@link TRTCParams} during room entry to specify the user role in advance or use the `switchRole` API to switch roles + * after room entry. + * @param role Role, which is "anchor" by default: + * - {@link TRTCRoleAnchor}: anchor, who can publish their audio/video streams. Up to 50 anchors are allowed to publish streams at the same time in one room. + * - {@link TRTCRoleAudience}: audience, who cannot publish their audio/video streams, but can only watch streams of anchors in the room. If they want to publish their streams, they need to switch to the "anchor" role first through {@link + * switchRole}. One room supports an audience of up to 100,000 concurrent online users. + * @note + * 1. This API is only applicable to two scenarios: live streaming ({@link TRTCAppSceneLIVE}) and audio chat room ({@link TRTCAppSceneVoiceChatRoom}). + * 2. 
If the `scene` you specify in {@link enterRoom} is {@link TRTCAppSceneVideoCall} or {@link TRTCAppSceneAudioCall}, please do not call this API. + */ + virtual void switchRole(TRTCRoleType role) = 0; + + /** + * 2.4 Switch room + * + * This API is used to quickly switch a user from one room to another. + * - If the user's role is "audience", calling this API is equivalent to `exitRoom` (current room) + `enterRoom` (new room). + * - If the user's role is "anchor", the API will retain the current audio/video publishing status while switching the room; therefore, during the room switch, camera preview and sound capturing will not be interrupted. + * This API is suitable for the online education scenario where the supervising teacher can perform fast room switch across multiple rooms. In this scenario, using `switchRoom` can get better smoothness and use less code than `exitRoom + + * enterRoom`. The API call result will be called back through `onSwitchRoom(errCode, errMsg)` in {@link TRTCCloudDelegate}. + * @param config Room parameter. For more information, please see {@link TRTCSwitchRoomConfig}. + * @note Due to the requirement for compatibility with legacy versions of the SDK, the `config` parameter contains both `roomId` and `strRoomId` parameters. You should pay special attention as detailed below when specifying these two parameters: + * 1. If you decide to use `strRoomId`, then set `roomId` to 0. If both are specified, `roomId` will be used. + * 2. All rooms need to use either `strRoomId` or `roomId` at the same time. They cannot be mixed; otherwise, there will be many unexpected bugs. + */ + virtual void switchRoom(const TRTCSwitchRoomConfig& config) = 0; + + /** + * 2.5 Request cross-room call + * + * By default, only users in the same room can make audio/video calls with each other, and the audio/video streams in different rooms are isolated from each other. 
+ * However, you can publish the audio/video streams of an anchor in another room to the current room by calling this API. At the same time, this API will also publish the local audio/video streams to the target anchor's room. + * In other words, you can use this API to share the audio/video streams of two anchors in two different rooms, so that the audience in each room can watch the streams of these two anchors. This feature can be used to implement anchor + * competition. The result of requesting cross-room call will be returned through the `onConnectOtherRoom()` callback in {@link TRTCCloudDelegate}. For example, after anchor A in room "101" uses `connectOtherRoom()` to successfully call anchor B + * in room "102": + * - All users in room "101" will receive the `onRemoteUserEnterRoom(B)` and `onUserVideoAvailable(B,true)` event callbacks of anchor B; that is, all users in room "101" can subscribe to the audio/video streams of anchor B. + * - All users in room "102" will receive the `onRemoteUserEnterRoom(A)` and `onUserVideoAvailable(A,true)` event callbacks of anchor A; that is, all users in room "102" can subscribe to the audio/video streams of anchor A. + *
+     *                                   Room 101                          Room 102
+     *                             ---------------------               ---------------------
+     *  Before cross-room call:   | Anchor:     A       |             | Anchor:     B       |
+     *                            | Users :   U, V, W   |             | Users:   X, Y, Z    |
+     *                             ---------------------               ---------------------
+     *
+     *                                   Room 101                           Room 102
+     *                             ---------------------               ---------------------
+     *  After cross-room call:    | Anchors: A and B    |             | Anchors: B and A    |
+     *                            | Users  : U, V, W    |             | Users  : X, Y, Z    |
+     *                             ---------------------               ---------------------
+     * 
+ * For compatibility with subsequent extended fields for cross-room call, parameters in JSON format are used currently. + * Case 1: numeric room ID + * If anchor A in room "101" wants to co-anchor with anchor B in room "102", then anchor A needs to pass in {"roomId": 102, "userId": "userB"} when calling this API. + * Below is the sample code: + *
+     *   Json::Value jsonObj;
+     *   jsonObj["roomId"] = 102;
+     *   jsonObj["userId"] = "userB";
+     *   Json::FastWriter writer;
+     *   std::string params = writer.write(jsonObj);
+     *   trtc.ConnectOtherRoom(params.c_str());
+     * 
+ * + * Case 2: string room ID + * If you use a string room ID, please be sure to replace the `roomId` in JSON with `strRoomId`, such as {"strRoomId": "102", "userId": "userB"} + * Below is the sample code: + *
+     *   Json::Value jsonObj;
+     *   jsonObj["strRoomId"] = "102";
+     *   jsonObj["userId"] = "userB";
+     *   Json::FastWriter writer;
+     *   std::string params = writer.write(jsonObj);
+     *   trtc.ConnectOtherRoom(params.c_str());
+     * 
+ * + * @param param You need to pass in a string parameter in JSON format: `roomId` represents the room ID in numeric format, `strRoomId` represents the room ID in string format, and `userId` represents the user ID of the target anchor. + */ + virtual void connectOtherRoom(const char* param) = 0; + + /** + * 2.6 Exit cross-room call + * + * The result will be returned through the `onDisconnectOtherRoom()` callback in {@link TRTCCloudDelegate}. + */ + virtual void disconnectOtherRoom() = 0; + + /** + * 2.7 Set subscription mode (which must be set before room entry for it to take effect) + * + * You can switch between the "automatic subscription" and "manual subscription" modes through this API: + * - Automatic subscription: this is the default mode, where the user will immediately receive the audio/video streams in the room after room entry, so that the audio will be automatically played back, and the video will be automatically decoded + * (you still need to bind the rendering control through the `startRemoteView` API). + * - Manual subscription: after room entry, the user needs to manually call the {@link startRemoteView} API to start subscribing to and decoding the video stream and call the `{@link muteRemoteAudio} (false)` API to start playing back the audio stream. + * In most scenarios, users will subscribe to the audio/video streams of all anchors in the room after room entry. Therefore, TRTC adopts the automatic subscription mode by default in order to achieve the best "instant streaming experience". + * In your application scenario, if there are many audio/video streams being published at the same time in each room, and each user only wants to subscribe to 1–2 streams of them, we recommend you use the "manual subscription" mode to reduce the + * traffic costs. + * @param autoRecvAudio true: automatic subscription to audio; false: manual subscription to audio by calling `muteRemoteAudio(false)`. 
Default value: true + * @param autoRecvVideo true: automatic subscription to video; false: manual subscription to video by calling `startRemoteView`. Default value: true + * @note + * 1. The configuration takes effect only if this API is called before room entry (enterRoom). + * 2. In the automatic subscription mode, if the user does not call {@link startRemoteView} to subscribe to the video stream after room entry, the SDK will automatically stop subscribing to the video stream in order to reduce the traffic consumption. + */ + virtual void setDefaultStreamRecvMode(bool autoRecvAudio, bool autoRecvVideo) = 0; + +/** + * 2.8 Create room subinstance (for concurrent multi-room listen/watch) + * + * `TRTCCloud` was originally designed to work in the singleton mode, which limited the ability to watch concurrently in multiple rooms. + * By calling this API, you can create multiple `TRTCCloud` instances, so that you can enter multiple different rooms at the same time to listen/watch audio/video streams. + * However, it should be noted that because there is still only one camera and one mic available, you can exist as an "anchor" in only one `TRTCCloud` instance at any time; that is, you can only publish your audio/video streams in one `TRTCCloud` + * instance at any time. This feature is mainly used in the "super small class" use case in the online education scenario to break the limit that "only up to 50 users can publish their audio/video streams simultaneously in one TRTC room". Below is + * the sample code:
+ *     ITRTCCloud *mainCloud = getTRTCShareInstance();
+ *     mainCloud->enterRoom(params1, TRTCAppSceneLIVE);
+ *     //...
+ *     //Switch the role from "anchor" to "audience" in your own room
+ *     mainCloud->switchRole(TRTCRoleAudience);
+ *     mainCloud->muteLocalVideo(true);
+ *     mainCloud->muteLocalAudio(true);
+ *     //...
+ *     //Use subcloud to enter another room and switch the role from "audience" to "anchor"
+ *     ITRTCCloud *subCloud = mainCloud->createSubCloud();
+ *     subCloud->enterRoom(params2, TRTCAppSceneLIVE);
+ *     subCloud->switchRole(TRTCRoleAnchor);
+ *     subCloud->muteLocalVideo(false);
+ *     subCloud->muteLocalAudio(false);
+ *     //...
+ *     //Exit from new room and release it.
+ *     subCloud->exitRoom();
+ *     mainCloud->destroySubCloud(subCloud);
+ * 
+ * + * @note + * - The same user can enter multiple rooms with different `roomId` values by using the same `userId`. + * - Two devices cannot use the same `userId` to enter the same room with a specified `roomId`. + * - The same user can push a stream in only one `TRTCCloud` instance at any time. If streams are pushed simultaneously in different rooms, a status mess will be caused in the cloud, leading to various bugs. + * - The `TRTCCloud` instance created by the `createSubCloud` API cannot call APIs related to the local audio/video in the subinstance, except `switchRole`, `muteLocalVideo`, and `muteLocalAudio`. To use APIs such as the beauty filter, please use the + * original `TRTCCloud` instance object. + * @return `TRTCCloud` subinstance + */ +#if _WIN32 || __APPLE__ + virtual ITRTCCloud* createSubCloud() = 0; +#endif + +/** + * 2.9 Terminate room subinstance + * + * @param subCloud + */ +#if _WIN32 || __APPLE__ + virtual void destroySubCloud(ITRTCCloud* subCloud) = 0; +#endif + + ///////////////////////////////////////////////////////////////////////////////// + // + // CDN APIs + // + ///////////////////////////////////////////////////////////////////////////////// + + /** + * 3.1 Start publishing audio/video streams to Tencent Cloud CSS CDN + * + * This API sends a command to the TRTC server, requesting it to relay the current user's audio/video streams to CSS CDN. + * You can set the `StreamId` of the live stream through the `streamId` parameter, so as to specify the playback address of the user's audio/video streams on CSS CDN. + * For example, if you specify the current user's live stream ID as `user_stream_001` through this API, then the corresponding CDN playback address is: + * "http://yourdomain/live/user_stream_001.flv", where `yourdomain` is your playback domain name with an ICP filing. + * You can configure your playback domain name in the [CSS console](https://console.cloud.tencent.com/live). 
Tencent Cloud does not provide a default playback domain name. + * You can also specify the `streamId` when setting the `TRTCParams` parameter of `enterRoom`, which is the recommended approach. + * @param streamId Custom stream ID. + * @param streamType Only `TRTCVideoStreamTypeBig` and `TRTCVideoStreamTypeSub` are supported. + * @note You need to enable the "Enable Relayed Push" option on the "Function Configuration" page in the [TRTC console](https://console.cloud.tencent.com/trtc/) in advance. + * - If you select "Specified stream for relayed push", you can use this API to push the corresponding audio/video stream to Tencent Cloud CDN and specify the entered stream ID. + * - If you select "Global auto-relayed push", you can use this API to adjust the default stream ID. + */ + virtual void startPublishing(const char* streamId, TRTCVideoStreamType streamType) = 0; + + /** + * 3.2 Stop publishing audio/video streams to Tencent Cloud CSS CDN + */ + virtual void stopPublishing() = 0; + + /** + * 3.3 Start publishing audio/video streams to non-Tencent Cloud CDN + * + * This API is similar to the `startPublishing` API. The difference is that `startPublishing` can only publish audio/video streams to Tencent Cloud CDN, while this API can relay streams to live streaming CDN services of other cloud providers. + * @param param CDN relaying parameter. For more information, please see {@link TRTCPublishCDNParam} + * @note + * - Using the `startPublishing` API to publish audio/video streams to Tencent Cloud CSS CDN does not incur additional fees. + * - Using the `startPublishCDNStream` API to publish audio/video streams to non-Tencent Cloud CDN incurs additional relaying bandwidth fees. 
+ */ + virtual void startPublishCDNStream(const TRTCPublishCDNParam& param) = 0; + + /** + * 3.4 Stop publishing audio/video streams to non-Tencent Cloud CDN + */ + virtual void stopPublishCDNStream() = 0; + + /** + * 3.5 Set the layout and transcoding parameters of On-Cloud MixTranscoding + * + * In a live room, there may be multiple anchors publishing their audio/video streams at the same time, but for audience on CSS CDN, they only need to watch one video stream in HTTP-FLV or HLS format. + * When you call this API, the SDK will send a command to the TRTC mixtranscoding server to combine multiple audio/video streams in the room into one stream. + * You can use the {@link TRTCTranscodingConfig} parameter to set the layout of each channel of image. You can also set the encoding parameters of the mixed audio/video streams. + * For more information, please see [On-Cloud MixTranscoding](https://cloud.tencent.com/document/product/647/16827). + *
+     *     **Image 1** => decoding ====> \\
+     *                                    \\
+     *     **Image 2** => decoding => image mixing => encoding => **mixed image**
+     *                                    //
+     *     **Image 3** => decoding ====> //
+     *
+     *     **Audio 1** => decoding ====> \\
+     *                                    \\
+     *     **Audio 2** => decoding => audio mixing => encoding => **mixed audio**
+     *                                    //
+     *     **Audio 3** => decoding ====> //
+     * 
+ * @param config If `config` is not empty, On-Cloud MixTranscoding will be started; otherwise, it will be stopped. For more information, please see {@link TRTCTranscodingConfig}. + * @note Notes on On-Cloud MixTranscoding: + * - Mixed-stream transcoding is a chargeable function, calling the interface will incur cloud-based mixed-stream transcoding fees, see https://intl.cloud.tencent.com/document/product/647/38929. + * - If the user calling this API does not set `streamId` in the `config` parameter, TRTC will mix the multiple channels of images in the room into the audio/video streams corresponding to the current user, i.e., A + B => A. + * - If the user calling this API sets `streamId` in the `config` parameter, TRTC will mix the multiple channels of images in the room into the specified `streamId`, i.e., A + B => streamId. + * - Please note that if you are still in the room but do not need mixtranscoding anymore, be sure to call this API again and leave `config` empty to cancel it; otherwise, additional fees may be incurred. + * - Please rest assured that TRTC will automatically cancel the mixtranscoding status upon room exit. + */ + virtual void setMixTranscodingConfig(TRTCTranscodingConfig* config) = 0; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Video APIs +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Video APIs +/// @{ + +/** + * 4.1 Enable the preview image of local camera (mobile) + * + * If this API is called before `enterRoom`, the SDK will only enable the camera and wait until `enterRoom` is called before starting push. + * If it is called after `enterRoom`, the SDK will enable the camera and automatically start pushing the video stream. + * When the first camera video frame starts to be rendered, you will receive the `onCameraDidReady` callback in {@link TRTCCloudDelegate}. 
+ * @param frontCamera true: front camera; false: rear camera + * @param view Control that carries the video image + * @note If you want to preview the camera image and adjust the beauty filter parameters through `BeautyManager` before going live, you can: + * - Scheme 1. Call `startLocalPreview` before calling `enterRoom` + * - Scheme 2. Call `startLocalPreview` and `muteLocalVideo(true)` after calling `enterRoom` + */ +#if TARGET_PLATFORM_PHONE + virtual void startLocalPreview(bool frontCamera, TXView view) = 0; +#endif + +/** + * 4.2 Enable the preview image of local camera (desktop) + * + * Before this API is called, `setCurrentCameraDevice` can be called first to select whether to use the macOS device's built-in camera or an external camera. + * If this API is called before `enterRoom`, the SDK will only enable the camera and wait until `enterRoom` is called before starting push. + * If it is called after `enterRoom`, the SDK will enable the camera and automatically start pushing the video stream. + * When the first camera video frame starts to be rendered, you will receive the `onCameraDidReady` callback in {@link TRTCCloudDelegate}. + * @param view Control that carries the video image + * @note If you want to preview the camera image and adjust the beauty filter parameters through `BeautyManager` before going live, you can: + * - Scheme 1. Call `startLocalPreview` before calling `enterRoom` + * - Scheme 2. Call `startLocalPreview` and `muteLocalVideo(true)` after calling `enterRoom` + */ +#if TARGET_PLATFORM_DESKTOP + virtual void startLocalPreview(TXView view) = 0; +#endif + + /** + * 4.3 Update the preview image of local camera + */ + virtual void updateLocalView(TXView view) = 0; + + /** + * 4.4 Stop camera preview + */ + virtual void stopLocalPreview() = 0; + + /** + * 4.5 Pause/Resume publishing local video stream + * + * This API can pause (or resume) publishing the local video image. 
After the pause, other users in the same room will not be able to see the local image. + * This API is equivalent to the two APIs of `startLocalPreview/stopLocalPreview` when TRTCVideoStreamTypeBig is specified, but has higher performance and response speed. + * The `startLocalPreview/stopLocalPreview` APIs need to enable/disable the camera, which are hardware device-related operations, so they are very time-consuming. + * In contrast, `muteLocalVideo` only needs to pause or allow the data stream at the software level, so it is more efficient and more suitable for scenarios where frequent enabling/disabling are needed. + * After local video publishing is paused, other members in the same room will receive the `onUserVideoAvailable(userId, false)` callback notification. + * After local video publishing is resumed, other members in the same room will receive the `onUserVideoAvailable(userId, true)` callback notification. + * @param streamType Specify for which video stream to pause (or resume). Only {@link TRTCVideoStreamTypeBig} and {@link TRTCVideoStreamTypeSub} are supported + * @param mute true: pause; false: resume + */ + virtual void muteLocalVideo(TRTCVideoStreamType streamType, bool mute) = 0; + + /** + * 4.7 Subscribe to remote user's video stream and bind video rendering control + * + * Calling this API allows the SDK to pull the video stream of the specified `userId` and render it to the rendering control specified by the `view` parameter. You can set the display mode of the video image through {@link setRemoteRenderParams}. + * - If you already know the `userId` of a user who has a video stream in the room, you can directly call `startRemoteView` to subscribe to the user's video image. + * - If you don't know which users in the room are publishing video streams, you can wait for the notification from {@link onUserVideoAvailable} after `enterRoom`. 
+ * + * Calling this API only starts pulling the video stream, and the image needs to be loaded and buffered at this time. After the buffering is completed, you will receive a notification from {@link onFirstVideoFrame}. + * @param userId ID of the specified remote user + * @param streamType Video stream type of the `userId` specified for watching: + * - HD big image: {@link TRTCVideoStreamTypeBig} + * - Smooth small image: {@link TRTCVideoStreamTypeSmall} (the remote user should enable dual-channel encoding through {@link enableSmallVideoStream} for this parameter to take effect) + * - Substream image (usually used for screen sharing): {@link TRTCVideoStreamTypeSub} + * + * @param view Rendering control that carries the video image + * @note The following requires your attention: + * 1. The SDK supports watching the big image and substream image or small image and substream image of a `userId` at the same time, but does not support watching the big image and small image at the same time. + * 2. Only when the specified `userId` enables dual-channel encoding through {@link enableSmallVideoStream} can the user's small image be viewed. + * 3. If the small image of the specified `userId` does not exist, the SDK will switch to the big image of the user by default. + */ + virtual void startRemoteView(const char* userId, TRTCVideoStreamType streamType, TXView view) = 0; + + /** + * 4.8 Update remote user's video rendering control + * + * This API can be used to update the rendering control of the remote video image. It is often used in interactive scenarios where the display area needs to be switched. 
+ * @param view Control that carries the video image + * @param streamType Type of the stream for which to set the preview window (only {@link TRTCVideoStreamTypeBig} and {@link TRTCVideoStreamTypeSub} are supported) + * @param userId ID of the specified remote user + */ + virtual void updateRemoteView(const char* userId, TRTCVideoStreamType streamType, TXView view) = 0; + + /** + * 4.9 Stop subscribing to remote user's video stream and release rendering control + * + * Calling this API will cause the SDK to stop receiving the user's video stream and release the decoding and rendering resources for the stream. + * @param userId ID of the specified remote user + * @param streamType Video stream type of the `userId` specified for watching: + * - HD big image: {@link TRTCVideoStreamTypeBig} + * - Smooth small image: {@link TRTCVideoStreamTypeSmall} + * - Substream image (usually used for screen sharing): {@link TRTCVideoStreamTypeSub} + */ + virtual void stopRemoteView(const char* userId, TRTCVideoStreamType streamType) = 0; + + /** + * 4.10 Stop subscribing to all remote users' video streams and release all rendering resources + * + * Calling this API will cause the SDK to stop receiving all remote video streams and release all decoding and rendering resources. + * @note If a substream image (screen sharing) is being displayed, it will also be stopped. + */ + virtual void stopAllRemoteView() = 0; + + /** + * 4.11 Pause/Resume subscribing to remote user's video stream + * + * This API only pauses/resumes receiving the specified user's video stream but does not release displaying resources; therefore, the video image will freeze at the last frame before it is called. + * @param userId ID of the specified remote user + * @param streamType Specify for which video stream to pause (or resume). 
Only {@link TRTCVideoStreamTypeBig} and {@link TRTCVideoStreamTypeSub} are supported + * @param mute Whether to pause receiving + * @note This API can be called before room entry (enterRoom), and the pause status will be reset after room exit (exitRoom). + */ + virtual void muteRemoteVideoStream(const char* userId, TRTCVideoStreamType streamType, bool mute) = 0; + + /** + * 4.12 Pause/Resume subscribing to all remote users' video streams + * + * This API only pauses/resumes receiving all users' video streams but does not release displaying resources; therefore, the video image will freeze at the last frame before it is called. + * @param mute Whether to pause receiving + * @note This API can be called before room entry (enterRoom), and the pause status will be reset after room exit (exitRoom). + */ + virtual void muteAllRemoteVideoStreams(bool mute) = 0; + + /** + * 4.13 Set the encoding parameters of video encoder + * + * This setting can determine the quality of image viewed by remote users, which is also the image quality of on-cloud recording files. + * @param param It is used to set relevant parameters for the video encoder. For more information, please see {@link TRTCVideoEncParam}. + */ + virtual void setVideoEncoderParam(const TRTCVideoEncParam& param) = 0; + + /** + * 4.14 Set network quality control parameters + * + * This setting determines the quality control policy in a poor network environment, such as "image quality preferred" or "smoothness preferred". + * @param param It is used to set relevant parameters for network quality control. For details, please refer to {@link TRTCNetworkQosParam}. + */ + virtual void setNetworkQosParam(const TRTCNetworkQosParam& param) = 0; + + /** + * 4.15 Set the rendering parameters of local video image + * + * The parameters that can be set include video image rotation angle, fill mode, and mirror mode. + * @param params Video image rendering parameters. For more information, please see {@link TRTCRenderParams}. 
+ */ + virtual void setLocalRenderParams(const TRTCRenderParams& params) = 0; + + /** + * 4.16 Set the rendering mode of remote video image + * + * The parameters that can be set include video image rotation angle, fill mode, and mirror mode. + * @param userId ID of the specified remote user + * @param streamType It can be set to the primary stream image (TRTCVideoStreamTypeBig) or substream image (TRTCVideoStreamTypeSub). + * @param params Video image rendering parameters. For more information, please see {@link TRTCRenderParams}. + */ + virtual void setRemoteRenderParams(const char* userId, TRTCVideoStreamType streamType, const TRTCRenderParams& params) = 0; + + /** + * 4.17 Set the direction of image output by video encoder + * + * This setting does not affect the preview direction of the local video image, but affects the direction of the image viewed by other users in the room (and on-cloud recording files). + * When a phone or tablet is rotated upside down, as the capturing direction of the camera does not change, the video image viewed by other users in the room will become upside-down. + * In this case, you can call this API to rotate the image encoded by the SDK 180 degrees, so that other users in the room can view the image in the normal direction. + * If you want to achieve the aforementioned user-friendly interactive experience, we recommend you directly call {@link setGSensorMode} to implement smarter direction adaptation, with no need to call this API manually. + * @param rotation Currently, rotation angles of 0 and 180 degrees are supported. Default value: TRTCVideoRotation_0 (no rotation) + */ + virtual void setVideoEncoderRotation(TRTCVideoRotation rotation) = 0; + + /** + * 4.18 Set the mirror mode of image output by encoder + * + * This setting does not affect the mirror mode of the local video image, but affects the mirror mode of the image viewed by other users in the room (and on-cloud recording files). 
+ * @param mirror Whether to enable remote mirror mode. true: yes; false: no. Default value: false + */ + virtual void setVideoEncoderMirror(bool mirror) = 0; + + /** + * 4.20 Enable dual-channel encoding mode with big and small images + * + * In this mode, the current user's encoder will output two channels of video streams, i.e., **HD big image** and **Smooth small image**, at the same time (only one channel of audio stream will be output though). + * In this way, other users in the room can choose to subscribe to the **HD big image** or **Smooth small image** according to their own network conditions or screen size. + * @note Dual-channel encoding will consume more CPU resources and network bandwidth; therefore, this feature can be enabled on macOS, Windows, or high-spec tablets, but is not recommended for phones. + * @param enable Whether to enable small image encoding. Default value: false + * @param smallVideoEncParam Video parameters of small image stream + * @return 0: success; -1: the current big image has been set to a lower quality, and it is not necessary to enable dual-channel encoding + */ + virtual void enableSmallVideoStream(bool enable, const TRTCVideoEncParam& smallVideoEncParam) = 0; + + /** + * 4.21 Switch the big/small image of specified remote user + * + * After an anchor in a room enables dual-channel encoding, the video image that other users in the room subscribe to through {@link startRemoteView} will be **HD big image** by default. + * You can use this API to select whether the image subscribed to is the big image or small image. The API can take effect before or after {@link startRemoteView} is called. + * @note To implement this feature, the target user must have enabled the dual-channel encoding mode through {@link enableSmallVideoStream}; otherwise, this API will not work. + * @param userId ID of the specified remote user + * @param streamType Video stream type, i.e., big image or small image. 
Default value: big image + */ + virtual void setRemoteVideoStreamType(const char* userId, TRTCVideoStreamType streamType) = 0; + +/** + * 4.22 Screencapture video + * + * You can use this API to screencapture the local video image or the primary stream image and substream (screen sharing) image of a remote user. + * @param userId User ID. A null value indicates to screencapture the local video. + * @param streamType Video stream type, which can be the primary stream image ({@link TRTCVideoStreamTypeBig}, generally for camera) or substream image ({@link TRTCVideoStreamTypeSub}, generally for screen sharing) + * @param sourceType Video image source, which can be the video stream image ({@link TRTCSnapshotSourceTypeStream}, generally in higher definition) or the video rendering image ({@link TRTCSnapshotSourceTypeView}) + * @note On Windows, only video image from the {@link TRTCSnapshotSourceTypeStream} source can be screencaptured currently. + */ +#if _WIN32 || __APPLE__ + virtual void snapshotVideo(const char* userId, TRTCVideoStreamType streamType, TRTCSnapshotSourceType sourceType) = 0; +#endif + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Audio APIs + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Audio APIs + /// @{ + + /** + * 5.1 Enable local audio capturing and publishing + * + * The SDK does not enable the mic by default. When a user wants to publish the local audio, the user needs to call this API to enable mic capturing and encode and publish the audio to the current room. + * After local audio capturing and publishing is enabled, other users in the room will receive the {@link onUserAudioAvailable}(userId, true) notification. + * @param quality Sound quality + * - {@link TRTCAudioQualitySpeech} - Smooth: sample rate: 16 kHz; mono channel; audio bitrate: 16 Kbps. This is suitable for audio call scenarios, such as online meeting and audio call. 
+ * - {@link TRTCAudioQualityDefault} - Default: sample rate: 48 kHz; mono channel; audio bitrate: 50 Kbps. This is the default sound quality of the SDK and recommended if there are no special requirements. + * - {@link TRTCAudioQualityMusic} - HD: sample rate: 48 kHz; dual channel + full band; audio bitrate: 128 Kbps. This is suitable for scenarios where Hi-Fi music transfer is required, such as online karaoke and music live streaming. + * @note This API will check the mic permission. If the current application does not have permission to use the mic, the SDK will automatically ask the user to grant the mic permission. + */ + virtual void startLocalAudio(TRTCAudioQuality quality) = 0; + + /** + * 5.2 Stop local audio capturing and publishing + * + * After local audio capturing and publishing is stopped, other users in the room will receive the {@link onUserAudioAvailable}(userId, false) notification. + */ + virtual void stopLocalAudio() = 0; + + /** + * 5.3 Pause/Resume publishing local audio stream + * + * After local audio publishing is paused, other users in the room will receive the {@link onUserAudioAvailable}(userId, false) notification. + * After local audio publishing is resumed, other users in the room will receive the {@link onUserAudioAvailable}(userId, true) notification. + * Different from {@link stopLocalAudio}, `muteLocalAudio(true)` does not release the mic permission; instead, it continues to send mute packets with extremely low bitrate. + * This is very suitable for scenarios that require on-cloud recording, as video file formats such as MP4 have a high requirement for audio continuity, while an MP4 recording file cannot be played back smoothly if {@link stopLocalAudio} is used. + * Therefore, `muteLocalAudio` instead of `stopLocalAudio` is recommended in scenarios where the requirement for recording file quality is high. 
+ * @param mute true: mute; false: unmute + */ + virtual void muteLocalAudio(bool mute) = 0; + + /** + * 5.4 Pause/Resume playing back remote audio stream + * + * When you mute the remote audio of a specified user, the SDK will stop playing back the user's audio and pulling the user's audio data. + * @param userId ID of the specified remote user + * @param mute true: mute; false: unmute + * @note This API works when called either before or after room entry (enterRoom), and the mute status will be reset to `false` after room exit (exitRoom). + */ + virtual void muteRemoteAudio(const char* userId, bool mute) = 0; + + /** + * 5.5 Pause/Resume playing back all remote users' audio streams + * + * When you mute the audio of all remote users, the SDK will stop playing back all their audio streams and pulling all their audio data. + * @param mute true: mute; false: unmute + * @note This API works when called either before or after room entry (enterRoom), and the mute status will be reset to `false` after room exit (exitRoom). + */ + virtual void muteAllRemoteAudio(bool mute) = 0; + + /** + * 5.7 Set the audio playback volume of remote user + * + * You can mute the audio of a remote user through `setRemoteAudioVolume(userId, 0)`. + * @param userId ID of the specified remote user + * @param volume Volume. 100 is the original volume. Value range: [0,150]. Default value: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. + */ + virtual void setRemoteAudioVolume(const char* userId, int volume) = 0; + + /** + * 5.8 Set the capturing volume of local audio + * + * @param volume Volume. 100 is the original volume. Value range: [0,150]. Default value: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. 
+ */ + virtual void setAudioCaptureVolume(int volume) = 0; + + /** + * 5.9 Get the capturing volume of local audio + */ + virtual int getAudioCaptureVolume() = 0; + + /** + * 5.10 Set the playback volume of remote audio + * + * This API controls the volume of the sound ultimately delivered by the SDK to the system for playback. It affects the volume of the recorded local audio file but not the volume of in-ear monitoring. + * @param volume Volume. 100 is the original volume. Value range: [0,150]. Default value: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. + */ + virtual void setAudioPlayoutVolume(int volume) = 0; + + /** + * 5.11 Get the playback volume of remote audio + */ + virtual int getAudioPlayoutVolume() = 0; + + /** + * 5.12 Enable volume reminder + * + * After this feature is enabled, the SDK will return the remote audio volume in the {@link onUserVoiceVolume} callback of {@link TRTCCloudDelegate}. + * @note To enable this feature, call this API before calling `startLocalAudio`. + * @param interval Set the interval in ms for triggering the `onUserVoiceVolume` callback. The minimum interval is 100 ms. If the value is smaller than or equal to 0, the callback will be disabled. We recommend you set this parameter to 300 ms. + */ + virtual void enableAudioVolumeEvaluation(uint32_t interval) = 0; + + /** + * 5.13 Start audio recording + * + * After you call this API, the SDK will selectively record local and remote audio streams (such as local audio, remote audio, background music, and sound effects) into a local file. + * This API works when called either before or after room entry. If a recording task has not been stopped through `stopAudioRecording` before room exit, it will be automatically stopped after room exit. + * @param param Recording parameter. 
For more information, please see {@link TRTCAudioRecordingParams} + * @return 0: success; -1: audio recording has been started; -2: failed to create file or directory; -3: the audio format of the specified file extension is not supported + */ + virtual int startAudioRecording(const TRTCAudioRecordingParams& param) = 0; + + /** + * 5.14 Stop audio recording + * + * If a recording task has not been stopped through this API before room exit, it will be automatically stopped after room exit. + */ + virtual void stopAudioRecording() = 0; + +/** + * 5.15 Start local media recording + * + * This API records the audio/video content during live streaming into a local file. + * @param params Recording parameter. For more information, please see {@link TRTCLocalRecordingParams} + */ +#if _WIN32 + virtual void startLocalRecording(const TRTCLocalRecordingParams& params) = 0; +#endif + +/** + * 5.16 Stop local media recording + * + * If a recording task has not been stopped through this API before room exit, it will be automatically stopped after room exit. + */ +#if _WIN32 + virtual void stopLocalRecording() = 0; +#endif + + /** + * 5.18 Set the parallel strategy of remote audio streams + * + * For room with many speakers. + * @param params Audio parallel parameter. 
For more information, please see {@link TRTCAudioParallelParams} + */ + virtual void setRemoteAudioParallelParams(const TRTCAudioParallelParams& params) = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Device management APIs + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Device management APIs + /// @{ + + /** + * 6.1 Get device management class (TXDeviceManager) + */ + virtual ITXDeviceManager* getDeviceManager() = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Beauty filter and watermark APIs + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Beauty filter and watermark APIs + /// @{ + + /** + * 7.1 Set special effects such as beauty, brightening, and rosy skin filters + * + * The SDK is integrated with two skin smoothing algorithms of different styles: + * - "Smooth" style, which uses a more radical algorithm for more obvious effect and is suitable for show live streaming. + * - "Natural" style, which retains more facial details for more natural effect and is suitable for most live streaming use cases. + * @param style Skin smoothening algorithm ("smooth" or "natural") + * @param beautyLevel Strength of the beauty filter. Value range: 0–9; 0 indicates that the filter is disabled, and the greater the value, the more obvious the effect. + * @param whitenessLevel Strength of the brightening filter. Value range: 0–9; 0 indicates that the filter is disabled, and the greater the value, the more obvious the effect. + * @param ruddinessLevel Strength of the rosy skin filter. Value range: 0–9; 0 indicates that the filter is disabled, and the greater the value, the more obvious the effect. 
+ */ + virtual void setBeautyStyle(TRTCBeautyStyle style, uint32_t beautyLevel, uint32_t whitenessLevel, uint32_t ruddinessLevel) = 0; + + /** + * 7.2 Add watermark + * + * The watermark position is determined by the `xOffset`, `yOffset`, and `fWidthRatio` parameters. + * - `xOffset`: X coordinate of watermark, which is a floating-point number between 0 and 1. + * - `yOffset`: Y coordinate of watermark, which is a floating-point number between 0 and 1. + * - `fWidthRatio`: watermark dimensions ratio, which is a floating-point number between 0 and 1. + * + * @param streamType Stream type of the watermark to be set (`TRTCVideoStreamTypeBig` or `TRTCVideoStreamTypeSub`) + * @param srcData Source data of watermark image (if `nullptr` is passed in, the watermark will be removed) + * @param srcType Source data type of watermark image + * @param nWidth Pixel width of watermark image (this parameter will be ignored if the source data is a file path) + * @param nHeight Pixel height of watermark image (this parameter will be ignored if the source data is a file path) + * @param xOffset Top-left offset on the X axis of watermark + * @param yOffset Top-left offset on the Y axis of watermark + * @param fWidthRatio Ratio of watermark width to image width (the watermark will be scaled according to this parameter) + * @note This API only supports adding an image watermark to the primary stream + */ + virtual void setWaterMark(TRTCVideoStreamType streamType, const char* srcData, TRTCWaterMarkSrcType srcType, uint32_t nWidth, uint32_t nHeight, float xOffset, float yOffset, float fWidthRatio) = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Background music and sound effect APIs + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Background music and sound effect APIs + /// @{ + + /** + * 8.1 Get sound effect management class (TXAudioEffectManager) + * + * 
`TXAudioEffectManager` is a sound effect management API, through which you can implement the following features: + * - Background music: both online music and local music can be played back with various features such as speed adjustment, pitch adjustment, original voice, accompaniment, and loop. + * - In-ear monitoring: the sound captured by the mic is played back in the headphones in real time, which is generally used for music live streaming. + * - Reverb effect: karaoke room, small room, big hall, deep, resonant, and other effects. + * - Voice changing effect: young girl, middle-aged man, heavy metal, and other effects. + * - Short sound effect: short sound effect files such as applause and laughter are supported (for files less than 10 seconds in length, please set the `isShortFile` parameter to `true`). + */ + virtual ITXAudioEffectManager* getAudioEffectManager() = 0; + +/** + * 8.2 Enable system audio capturing (for desktop systems only) + * + * This API captures audio data from the sound card of the anchor’s computer and mixes it into the current audio stream of the SDK. This ensures that other users in the room hear the audio played back by the anchor’s computer. + * In online education scenarios, a teacher can use this API to have the SDK capture the audio of instructional videos and broadcast it to students in the room. + * In live music scenarios, an anchor can use this API to have the SDK capture the music played back by his or her player so as to add background music to the room. + * @param deviceName If this parameter is empty, the audio of the entire system is captured. On Windows, if the parameter is a speaker name, you can capture this speaker. About speaker device name you can see {@link TXDeviceManager} + * On Windows, you can also set `deviceName` to the deviceName of an executable file (such as `QQMusic.exe`) to have the SDK capture only the audio of the application. 
+ * @note You can specify `deviceName` only on Windows and with 32-bit TRTC SDK. + */ +#if TARGET_PLATFORM_DESKTOP + virtual void startSystemAudioLoopback(const char* deviceName = nullptr) = 0; +#endif + +/** + * 8.3 Stop system audio capturing (for desktop systems only) + */ +#if TARGET_PLATFORM_DESKTOP + virtual void stopSystemAudioLoopback() = 0; +#endif + +/** + * 8.4 Set the volume of system audio capturing + * + * @param volume Set volume. Value range: [0, 150]. Default value: 100 + */ +#if TARGET_PLATFORM_DESKTOP || TARGET_OS_IPHONE + virtual void setSystemAudioLoopbackVolume(uint32_t volume) = 0; +#endif + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Screen sharing APIs +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Screen sharing APIs +/// @{ + +/** + * 9.1 Start desktop screen sharing + * + * This API can capture the screen content or a specified application(desktop only) and share it with other users in the same room. + * @param view Parent control of the rendering control, which can be set to a null value, indicating not to display the preview of the shared screen.(desktop only) + * @param streamType Channel used for screen sharing, which can be the primary stream ({@link TRTCVideoStreamTypeBig}) or substream ({@link TRTCVideoStreamTypeSub}). + * @param encParam Image encoding parameters used for screen sharing, which can be set to `nil`, indicating to let the SDK choose the optimal encoding parameters (such as resolution and bitrate). + * + * @note + * 1. A user can publish at most one primary stream ({@link TRTCVideoStreamTypeBig}) and one substream ({@link TRTCVideoStreamTypeSub}) at the same time. + * 2. By default, screen sharing uses the substream image. If you want to use the primary stream for screen sharing, you need to stop camera capturing (through {@link stopLocalPreview}) in advance to avoid conflicts. + * 3. 
Only one user can use the substream for screen sharing in the same room at any time; that is, only one user is allowed to enable the substream in the same room at any time. + * 4. When there is already a user in the room using the substream for screen sharing, calling this API will return the `onError(ERR_SERVER_CENTER_ANOTHER_USER_PUSH_SUB_VIDEO)` callback from {@link TRTCCloudDelegate}. + */ + virtual void startScreenCapture(TXView view, TRTCVideoStreamType streamType, TRTCVideoEncParam* encParam) = 0; + +/** + * 9.2 Stop screen sharing + */ + virtual void stopScreenCapture() = 0; + +/** + * 9.3 Pause screen sharing + */ + virtual void pauseScreenCapture() = 0; + +/** + * 9.4 Resume screen sharing + */ + virtual void resumeScreenCapture() = 0; + +/** + * 9.5 Enumerate shareable screens and windows (for desktop systems only) + * + * When you integrate the screen sharing feature of a desktop system, you generally need to display a UI for selecting the sharing target, so that users can use the UI to choose whether to share the entire screen or a certain window. + * Through this API, you can query the IDs, names, and thumbnails of sharable windows on the current system. We provide a default UI implementation in the demo for your reference. + * @note + * 1. The returned list contains the screen and the application windows. The screen is the first element in the list. If the user has multiple displays, then each display is a sharing target. + * 2. Please do not use `delete ITRTCScreenCaptureSourceList*` to delete the `SourceList`; otherwise, crashes may occur. Instead, please use the `release` method in `ITRTCScreenCaptureSourceList` to release the list. + * @param thumbnailSize Specify the thumbnail size of the window to be obtained. The thumbnail can be drawn on the window selection UI. + * @param iconSize Specify the icon size of the window to be obtained. 
+ * @return List of windows (including the screen) + */ +#if TARGET_PLATFORM_DESKTOP + virtual ITRTCScreenCaptureSourceList* getScreenCaptureSources(const SIZE& thumbnailSize, const SIZE& iconSize) = 0; +#endif + +/** + * 9.6 Select the screen or window to share (for desktop systems only) + * + * After you get the sharable screens and windows through `getScreenCaptureSources`, you can call this API to select the target screen or window you want to share. + * During the screen sharing process, you can also call this API at any time to switch the sharing target. + * The following four sharing modes are supported: + * - Sharing the entire screen: for `source` whose `type` is `Screen` in `sourceInfoList`, set `captureRect` to `{ 0, 0, 0, 0 }`. + * - Sharing a specified area: for `source` whose `type` is `Screen` in `sourceInfoList`, set `captureRect` to a non-nullptr value, e.g., `{ 100, 100, 300, 300 }`. + * - Sharing an entire window: for `source` whose `type` is `Window` in `sourceInfoList`, set `captureRect` to `{ 0, 0, 0, 0 }`. + * - Sharing a specified window area: for `source` whose `type` is `Window` in `sourceInfoList`, set `captureRect` to a non-nullptr value, e.g., `{ 100, 100, 300, 300 }`. + * @param source Specify sharing source + * @param captureRect Specify the area to be captured + * @param property Specify the attributes of the screen sharing target, such as capturing the cursor and highlighting the captured window. For more information, please see the definition of `TRTCScreenCaptureProperty` + * @note Setting the highlight border color and width parameters does not take effect on macOS. 
+ */ +#if TARGET_PLATFORM_DESKTOP + virtual void selectScreenCaptureTarget(const TRTCScreenCaptureSourceInfo& source, const RECT& captureRect, const TRTCScreenCaptureProperty& property) = 0; +#endif + +/** + * 9.7 Set the video encoding parameters of screen sharing (i.e., substream) (for desktop and mobile systems) + * + * This API can set the image quality of screen sharing (i.e., the substream) viewed by remote users, which is also the image quality of screen sharing in on-cloud recording files. + * Please note the differences between the following two APIs: + * - {@link setVideoEncoderParam} is used to set the video encoding parameters of the primary stream image ({@link TRTCVideoStreamTypeBig}, generally for camera). + * - {@link setSubStreamEncoderParam} is used to set the video encoding parameters of the substream image ({@link TRTCVideoStreamTypeSub}, generally for screen sharing). + * + * @param param Substream encoding parameters. For more information, please see {@link TRTCVideoEncParam}. + * @note Even if you use the primary stream to transfer screen sharing data (set `type=TRTCVideoStreamTypeBig` when calling `startScreenCapture`), you still need to call the {@link setSubStreamEncoderParam} API instead of the {@link + * setVideoEncoderParam} API to set the screen sharing encoding parameters. + */ + virtual void setSubStreamEncoderParam(const TRTCVideoEncParam& param) = 0; + +/** + * 9.8 Set the audio mixing volume of screen sharing (for desktop systems only) + * + * The greater the value, the larger the ratio of the screen sharing volume to the mic volume. We recommend you not set a high value for this parameter as a high volume will cover the mic sound. + * @param volume Set audio mixing volume. 
Value range: 0–100 + */ +#if TARGET_PLATFORM_DESKTOP + virtual void setSubStreamMixVolume(uint32_t volume) = 0; +#endif + +/** + * 9.9 Add specified windows to the exclusion list of screen sharing (for desktop systems only) + * + * The excluded windows will not be shared. This feature is generally used to add a certain application's window to the exclusion list to avoid privacy issues. + * You can set the filtered windows before starting screen sharing or dynamically add the filtered windows during screen sharing. + * @param window Window not to be shared + * @note + * 1. This API takes effect only if the `type` in {@link TRTCScreenCaptureSourceInfo} is specified as {@link TRTCScreenCaptureSourceTypeScreen}; that is, the feature of excluding specified windows works only when the entire screen is shared. + * 2. The windows added to the exclusion list through this API will be automatically cleared by the SDK after room exit. + * 3. On macOS, please pass in the window ID (CGWindowID), which can be obtained through the `sourceId` member in {@link TRTCScreenCaptureSourceInfo}. + */ +#if TARGET_PLATFORM_DESKTOP + virtual void addExcludedShareWindow(TXView windowID) = 0; +#endif + +/** + * 9.10 Remove specified windows from the exclusion list of screen sharing (for desktop systems only) + * + * @param windowID + */ +#if TARGET_PLATFORM_DESKTOP + virtual void removeExcludedShareWindow(TXView windowID) = 0; +#endif + +/** + * 9.11 Remove all windows from the exclusion list of screen sharing (for desktop systems only) + */ +#if TARGET_PLATFORM_DESKTOP + virtual void removeAllExcludedShareWindow() = 0; +#endif + +/** + * 9.12 Add specified windows to the inclusion list of screen sharing (for desktop systems only) + * + * This API takes effect only if the `type` in {@link TRTCScreenCaptureSourceInfo} is specified as {@link TRTCScreenCaptureSourceTypeWindow}; that is, the feature of additionally including specified windows works only when a window is shared. 
+ * You can call it before or after {@link startScreenCapture}. + * @param windowID Window to be shared (which is a window handle `HWND` on Windows) + * @note The windows added to the inclusion list by this method will be automatically cleared by the SDK after room exit. + */ +#if TARGET_PLATFORM_DESKTOP + virtual void addIncludedShareWindow(TXView windowID) = 0; +#endif + +/** + * 9.13 Remove specified windows from the inclusion list of screen sharing (for desktop systems only) + * + * This API takes effect only if the `type` in {@link TRTCScreenCaptureSourceInfo} is specified as {@link TRTCScreenCaptureSourceTypeWindow}. + * That is, the feature of additionally including specified windows works only when a window is shared. + * @param windowID Window to be shared (window ID on macOS or HWND on Windows) + */ +#if TARGET_PLATFORM_DESKTOP + virtual void removeIncludedShareWindow(TXView windowID) = 0; +#endif + +/** + * 9.14 Remove all windows from the inclusion list of screen sharing (for desktop systems only) + * + * This API takes effect only if the `type` in {@link TRTCScreenCaptureSourceInfo} is specified as {@link TRTCScreenCaptureSourceTypeWindow}. + * That is, the feature of additionally including specified windows works only when a window is shared. + */ +#if TARGET_PLATFORM_DESKTOP + virtual void removeAllIncludedShareWindow() = 0; +#endif + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Custom capturing and rendering APIs + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Custom capturing and rendering APIs + /// @{ + + /** + * 10.1 Enable/Disable custom video capturing mode + * + * After this mode is enabled, the SDK will not run the original video capturing process (i.e., stopping camera data capturing and beauty filter operations) and will retain only the video encoding and sending capabilities. 
+ * You need to use {@link sendCustomVideoData} to continuously insert the captured video image into the SDK. + * @param streamType Specify video stream type ({@link TRTCVideoStreamTypeBig}: HD big image; {@link TRTCVideoStreamTypeSub}: substream image). + * @param enable Whether to enable. Default value: false + */ + virtual void enableCustomVideoCapture(TRTCVideoStreamType streamType, bool enable) = 0; + + /** + * 10.2 Deliver captured video frames to SDK + * + * You can use this API to deliver video frames you capture to the SDK, and the SDK will encode and transfer them through its own network module. + * We recommend you enter the following information for the {@link TRTCVideoFrame} parameter (other fields can be left empty): + * - pixelFormat: on Windows and Android, only {@link TRTCVideoPixelFormat_I420} is supported; on iOS and macOS, {@link TRTCVideoPixelFormat_I420} and {@link TRTCVideoPixelFormat_BGRA32} are supported. + * - bufferType: {@link TRTCVideoBufferType_Buffer} is recommended. + * - data: buffer used to carry video frame data. + * - length: video frame data length. If `pixelFormat` is set to I420, `length` can be calculated according to the following formula: length = width * height * 3 / 2. + * - width: video image width, such as 640 px. + * - height: video image height, such as 480 px. + * - timestamp (ms): Set it to the timestamp when video frames are captured, which you can obtain by calling {@link generateCustomPTS} after getting a video frame. + * + * For more information, please see [Custom Capturing and Rendering](https://cloud.tencent.com/document/product/647/34066). + * @param streamType Specify video stream type ({@link TRTCVideoStreamTypeBig}: HD big image; {@link TRTCVideoStreamTypeSub}: substream image). + * @param frame Video data, which can be in I420 format. + * @note + * 1. 
We recommend you call the {@link generateCustomPTS} API to get the `timestamp` value of a video frame immediately after capturing it, so as to achieve the best audio/video sync effect. + * 2. The video frame rate eventually encoded by the SDK is not determined by the frequency at which you call this API, but by the FPS you set in {@link setVideoEncoderParam}. + * 3. Please try to keep the calling interval of this API even; otherwise, problems will occur, such as unstable output frame rate of the encoder or out-of-sync audio/video. + * 4. On iOS and macOS, video frames in {@link TRTCVideoPixelFormat_I420} or {@link TRTCVideoPixelFormat_BGRA32} format can be passed in currently. + * 5. On Windows and Android, only video frames in {@link TRTCVideoPixelFormat_I420} format can be passed in currently. + */ + virtual void sendCustomVideoData(TRTCVideoStreamType streamType, TRTCVideoFrame* frame) = 0; + + /** + * 10.3 Enable custom audio capturing mode + * + * After this mode is enabled, the SDK will not run the original audio capturing process (i.e., stopping mic data capturing) and will retain only the audio encoding and sending capabilities. + * You need to use {@link sendCustomAudioData} to continuously insert the captured audio data into the SDK. + * @param enable Whether to enable. Default value: false + * @note As acoustic echo cancellation (AEC) requires strict control over the audio capturing and playback time, after custom audio capturing is enabled, AEC may fail. + */ + virtual void enableCustomAudioCapture(bool enable) = 0; + + /** + * 10.4 Deliver captured audio data to SDK + * + * We recommend you enter the following information for the {@link TRTCAudioFrame} parameter (other fields can be left empty): + * - audioFormat: audio data format, which can only be `TRTCAudioFrameFormatPCM`. + * - data: audio frame buffer. Audio frame data must be in PCM format, and it supports a frame length of 5–100 ms (20 ms is recommended). 
Length calculation method: **for example, if the sample rate is 48000, then the frame length for mono + * channel will be `48000 * 0.02s * 1 * 16 bit = 15360 bit = 1920 bytes`.** + * - sampleRate: sample rate. Valid values: 16000, 24000, 32000, 44100, 48000. + * - channel: number of channels (if stereo is used, data is interwoven). Valid values: 1: mono channel; 2: dual channel. + * - timestamp (ms): Set it to the timestamp when audio frames are captured, which you can obtain by calling {@link generateCustomPTS} after getting an audio frame. + * + * For more information, please see [Custom Capturing and Rendering](https://cloud.tencent.com/document/product/647/34066). + * @param frame Audio data + * @note Please call this API accurately at intervals of the frame length; otherwise, sound lag may occur due to uneven data delivery intervals. + */ + virtual void sendCustomAudioData(TRTCAudioFrame* frame) = 0; + + /** + * 10.5 Enable/Disable custom audio track + * + * After this feature is enabled, you can mix a custom audio track into the SDK through this API. With two boolean parameters, you can control whether to play back this track remotely or locally. + * @param enablePublish Whether the mixed audio track should be played back remotely. Default value: false + * @param enablePlayout Whether the mixed audio track should be played back locally. Default value: false + * @note If you specify both `enablePublish` and `enablePlayout` as `false`, the custom audio track will be completely closed. + */ + virtual void enableMixExternalAudioFrame(bool enablePublish, bool enablePlayout) = 0; + + /** + * 10.6 Mix custom audio track into SDK + * + * Before you use this API to mix custom PCM audio into the SDK, you need to first enable custom audio tracks through {@link enableMixExternalAudioFrame}. + * You are expected to feed audio data into the SDK at an even pace, but we understand that it can be challenging to call an API at absolutely regular intervals. 
+ * Given this, we have provided a buffer pool in the SDK, which can cache the audio data you pass in to reduce the fluctuations in intervals between API calls. + * The value returned by this API indicates the size (ms) of the buffer pool. For example, if `50` is returned, it indicates that the buffer pool has 50 ms of audio data. As long as you call this API again within 50 ms, the SDK can make sure that + * continuous audio data is mixed. If the value returned is `100` or greater, you can wait after an audio frame is played to call the API again. If the value returned is smaller than `100`, then there isn’t enough data in the buffer pool, and you + * should feed more audio data into the SDK until the data in the buffer pool is above the safety level. Fill the fields in {@link TRTCAudioFrame} as follows (other fields are not required). + * - `data`: audio frame buffer. Audio frames must be in PCM format. Each frame can be 5-100 ms (20 ms is recommended) in duration. Assume that the sample rate is 48000, and sound channels mono-channel. Then the **frame size would be 48000 x + * 0.02s x 1 x 16 bit = 15360 bit = 1920 bytes**. + * - `sampleRate`: sample rate. Valid values: 16000, 24000, 32000, 44100, 48000 + * - `channel`: number of sound channels (if dual-channel is used, data is interleaved). Valid values: `1` (mono-channel); `2` (dual channel) + * - `timestamp`: timestamp (ms). Set it to the timestamp when audio frames are captured, which you can obtain by calling {@link generateCustomPTS} after getting an audio frame. + * + * @param frame Audio data + * @return If the value returned is `0` or greater, the value represents the current size of the buffer pool; if the value returned is smaller than `0`, it means that an error occurred. `-1` indicates that you didn’t call {@link + * enableMixExternalAudioFrame} to enable custom audio tracks. 
+ */ + virtual int mixExternalAudioFrame(TRTCAudioFrame* frame) = 0; + + /** + * 10.7 Set the publish volume and playback volume of mixed custom audio track + * + * @param publishVolume set the publish volume, from 0 to 100, -1 means no change + * @param playoutVolume set the play volume, from 0 to 100, -1 means no change + */ + virtual void setMixExternalAudioVolume(int publishVolume, int playoutVolume) = 0; + + /** + * 10.8 Generate custom capturing timestamp + * + * This API is only suitable for the custom capturing mode and is used to solve the problem of out-of-sync audio/video caused by the inconsistency between the capturing time and delivery time of audio/video frames. + * When you call APIs such as {@link sendCustomVideoData} or {@link sendCustomAudioData} for custom video or audio capturing, please use this API as instructed below: + * 1. First, when a video or audio frame is captured, call this API to get the corresponding PTS timestamp. + * 2. Then, send the video or audio frame to the preprocessing module you use (such as a third-party beauty filter or sound effect component). + * 3. When you actually call {@link sendCustomVideoData} or {@link sendCustomAudioData} for delivery, assign the PTS timestamp recorded when the frame was captured to the `timestamp` field in {@link TRTCVideoFrame} or {@link TRTCAudioFrame}. + * + * @return Timestamp in ms + */ + virtual uint64_t generateCustomPTS() = 0; + + /** + * 10.9 Set video data callback for third-party beauty filters + * + * After this callback is set, the SDK will call back the captured video frames through the `callback` you set and use them for further processing by a third-party beauty filter component. Then, the SDK will encode and send the processed video + * frames. + * @param callback Custom preprocessing callback. 
For more information, please see {@link ITRTCVideoFrameCallback} + * @return 0: success; values smaller than 0: error + */ + virtual int setLocalVideoProcessCallback(TRTCVideoPixelFormat pixelFormat, TRTCVideoBufferType bufferType, ITRTCVideoFrameCallback* callback) = 0; + + /** + * 10.10 Set the callback of custom rendering for local video + * + * After this callback is set, the SDK will skip its own rendering process and call back the captured data. Therefore, you need to complete image rendering on your own. + * - You can call `setLocalVideoRenderCallback(TRTCVideoPixelFormat_Unknown, TRTCVideoBufferType_Unknown, nullptr)` to stop the callback. + * - On iOS, macOS, and Windows, only video frames in {@link TRTCVideoPixelFormat_I420} or {@link TRTCVideoPixelFormat_BGRA32} pixel format can be called back currently. + * - On Android, only video frames in {@link TRTCVideoPixelFormat_I420} pixel format can be passed in currently. + * + * @param pixelFormat Specify the format of the pixel called back + * @param bufferType Specify video data structure type. Only {@link TRTCVideoBufferType_Buffer} is supported currently + * @param callback Callback for custom rendering + * @return 0: success; values smaller than 0: error + */ + virtual int setLocalVideoRenderCallback(TRTCVideoPixelFormat pixelFormat, TRTCVideoBufferType bufferType, ITRTCVideoRenderCallback* callback) = 0; + + /** + * 10.11 Set the callback of custom rendering for remote video + * + * After this callback is set, the SDK will skip its own rendering process and call back the captured data. Therefore, you need to complete image rendering on your own. + * - You can call `setLocalVideoRenderCallback(TRTCVideoPixelFormat_Unknown, TRTCVideoBufferType_Unknown, nullptr)` to stop the callback. + * - On iOS, macOS, and Windows, only video frames in {@link TRTCVideoPixelFormat_I420} or {@link TRTCVideoPixelFormat_BGRA32} pixel format can be called back currently. 
+ * - On Android, only video frames in {@link TRTCVideoPixelFormat_I420} pixel format can be passed in currently. + * + * @note In actual use, you need to call `startRemoteView(userid, nullptr)` to get the video stream of the remote user first (set `view` to `nullptr`); otherwise, there will be no data called back. + * @param userId remote user id + * @param pixelFormat Specify the format of the pixel called back + * @param bufferType Specify video data structure type. Only {@link TRTCVideoBufferType_Buffer} is supported currently + * @param callback Callback for custom rendering + * @return 0: success; values smaller than 0: error + */ + virtual int setRemoteVideoRenderCallback(const char* userId, TRTCVideoPixelFormat pixelFormat, TRTCVideoBufferType bufferType, ITRTCVideoRenderCallback* callback) = 0; + + /** + * 10.12 Set custom audio data callback + * + * After this callback is set, the SDK will internally call back the audio data (in PCM format), including: + * - {@link onCapturedRawAudioFrame}: callback of the original audio data captured by the local mic + * - {@link onLocalProcessedAudioFrame}: callback of the audio data captured by the local mic and preprocessed by the audio module + * - {@link onRemoteUserAudioFrame}: audio data from each remote user before audio mixing + * - {@link onMixedPlayAudioFrame}: callback of the audio data that will be played back by the system after audio streams are mixed + * + * @note Setting the callback to null indicates to stop the custom audio callback, while setting it to a non-null value indicates to start the custom audio callback. + */ + virtual int setAudioFrameCallback(ITRTCAudioFrameCallback* callback) = 0; + + /** + * 10.13 Set the callback format of original audio frames captured by local mic + * + * This API is used to set the `AudioFrame` format called back by {@link onCapturedRawAudioFrame}: + * - sampleRate: sample rate. 
Valid values: 16000, 32000, 44100, 48000 + * - channel: number of channels (if stereo is used, data is interwoven). Valid values: 1: mono channel; 2: dual channel + * - samplesPerCall: number of sample points, which defines the frame length of the callback data. The frame length must be an integer multiple of 10 ms. + * + * If you want to calculate the callback frame length in milliseconds, the formula for converting the number of milliseconds into the number of sample points is as follows: number of sample points = number of milliseconds * sample rate / 1000 + * For example, if you want to call back the data of 20 ms frame length with 48000 sample rate, then the number of sample points should be entered as 960 = 20 * 48000 / 1000 + * Note that the frame length of the final callback is in bytes, and the calculation formula for converting the number of sample points into the number of bytes is as follows: number of bytes = number of sample points * number of channels * 2 + * (bit width) For example, if the parameters are 48000 sample rate, dual channel, 20 ms frame length, and 960 sample points, then the number of bytes is 3840 = 960 * 2 * 2 + * @param format Audio data callback format + * @return 0: success; values smaller than 0: error + */ + virtual int setCapturedRawAudioFrameCallbackFormat(TRTCAudioFrameCallbackFormat* format) = 0; + + /** + * 10.14 Set the callback format of preprocessed local audio frames + * + * This API is used to set the `AudioFrame` format called back by {@link onLocalProcessedAudioFrame}: + * - sampleRate: sample rate. Valid values: 16000, 32000, 44100, 48000 + * - channel: number of channels (if stereo is used, data is interwoven). Valid values: 1: mono channel; 2: dual channel + * - samplesPerCall: number of sample points, which defines the frame length of the callback data. The frame length must be an integer multiple of 10 ms. 
+ * + * If you want to calculate the callback frame length in milliseconds, the formula for converting the number of milliseconds into the number of sample points is as follows: number of sample points = number of milliseconds * sample rate / 1000 + * For example, if you want to call back the data of 20 ms frame length with 48000 sample rate, then the number of sample points should be entered as 960 = 20 * 48000 / 1000 + * Note that the frame length of the final callback is in bytes, and the calculation formula for converting the number of sample points into the number of bytes is as follows: number of bytes = number of sample points * number of channels * 2 + * (bit width) For example, if the parameters are 48000 sample rate, dual channel, 20 ms frame length, and 960 sample points, then the number of bytes is 3840 = 960 * 2 * 2 + * @param format Audio data callback format + * @return 0: success; values smaller than 0: error + */ + virtual int setLocalProcessedAudioFrameCallbackFormat(TRTCAudioFrameCallbackFormat* format) = 0; + + /** + * 10.15 Set the callback format of audio frames to be played back by system + * + * This API is used to set the `AudioFrame` format called back by {@link onMixedPlayAudioFrame}: + * - sampleRate: sample rate. Valid values: 16000, 32000, 44100, 48000 + * - channel: number of channels (if stereo is used, data is interwoven). Valid values: 1: mono channel; 2: dual channel + * - samplesPerCall: number of sample points, which defines the frame length of the callback data. The frame length must be an integer multiple of 10 ms. 
+ * + * If you want to calculate the callback frame length in milliseconds, the formula for converting the number of milliseconds into the number of sample points is as follows: number of sample points = number of milliseconds * sample rate / 1000 + * For example, if you want to call back the data of 20 ms frame length with 48000 sample rate, then the number of sample points should be entered as 960 = 20 * 48000 / 1000 + * Note that the frame length of the final callback is in bytes, and the calculation formula for converting the number of sample points into the number of bytes is as follows: number of bytes = number of sample points * number of channels * 2 + * (bit width) For example, if the parameters are 48000 sample rate, dual channel, 20 ms frame length, and 960 sample points, then the number of bytes is 3840 = 960 * 2 * 2 + * @param format Audio data callback format + * @return 0: success; values smaller than 0: error + */ + virtual int setMixedPlayAudioFrameCallbackFormat(TRTCAudioFrameCallbackFormat* format) = 0; + + /** + * 10.16 Enabling custom audio playback + * + * You can use this API to enable custom audio playback if you want to connect to an external audio device or control the audio playback logic by yourself. + * After you enable custom audio playback, the SDK will stop using its audio API to play back audio. You need to call {@link getCustomAudioRenderingFrame} to get audio frames and play them by yourself. + * @param enable Whether to enable custom audio playback. It’s disabled by default. + * @note The parameter must be set before room entry to take effect. + */ + virtual void enableCustomAudioRendering(bool enable) = 0; + + /** + * 10.17 Getting playable audio data + * + * Before calling this API, you need to first enable custom audio playback using {@link enableCustomAudioRendering}. + * Fill the fields in {@link TRTCAudioFrame} as follows (other fields are not required): + * - `sampleRate`: sample rate (required). 
Valid values: 16000, 24000, 32000, 44100, 48000 + * - `channel`: number of sound channels (required). `1`: mono-channel; `2`: dual-channel; if dual-channel is used, data is interleaved. + * - `data`: the buffer used to get audio data. You need to allocate memory for the buffer based on the duration of an audio frame. + * The PCM data obtained can have a frame duration of 10 ms or 20 ms. 20 ms is recommended. + * Assume that the sample rate is 48000, and sound channels mono-channel. The buffer size for a 20 ms audio frame would be 48000 x 0.02s x 1 x 16 bit = 15360 bit = 1920 bytes. + * + * @param audioFrame Audio frames + * @note + * 1. You must set `sampleRate` and `channel` in `audioFrame`, and allocate memory for one frame of audio in advance. + * 2. The SDK will fill the data automatically based on `sampleRate` and `channel`. + * 3. We recommend that you use the system’s audio playback thread to drive the calling of this API, so that it is called each time the playback of an audio frame is complete. + * + */ + virtual void getCustomAudioRenderingFrame(TRTCAudioFrame* audioFrame) = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Custom message sending APIs + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Custom message sending APIs + /// @{ + + /** + * 11.1 Use UDP channel to send custom message to all users in room + * + * This API allows you to use TRTC's UDP channel to broadcast custom data to other users in the current room for signaling transfer. + * The UDP channel in TRTC was originally designed to transfer audio/video data. This API works by disguising the signaling data you want to send as audio/video data packets and sending them together with the audio/video data to be sent. + * Other users in the room can receive the message through the `onRecvCustomCmdMsg` callback in {@link TRTCCloudDelegate}. + * @param cmdID Message ID. 
Value range: 1–10 + * @param data Message to be sent. The maximum length of one single message is 1 KB. + * @param reliable Whether reliable sending is enabled. Reliable sending can achieve a higher success rate but with a longer reception delay than unreliable sending. + * @param ordered Whether orderly sending is enabled, i.e., whether the data packets should be received in the same order in which they are sent; if so, a certain delay will be caused. + * @return true: sent the message successfully; false: failed to send the message. + * @note + * 1. Up to 30 messages can be sent per second to all users in the room (this is not supported for web and mini program currently). + * 2. A packet can contain up to 1 KB of data; if the threshold is exceeded, the packet is very likely to be discarded by the intermediate router or server. + * 3. A client can send up to 8 KB of data in total per second. + * 4. `reliable` and `ordered` must be set to the same value (`true` or `false`) and cannot be set to different values currently. + * 5. We strongly recommend you set different `cmdID` values for messages of different types. This can reduce message delay when orderly sending is required. + */ + virtual bool sendCustomCmdMsg(uint32_t cmdId, const uint8_t* data, uint32_t dataSize, bool reliable, bool ordered) = 0; + + /** + * 11.2 Use SEI channel to send custom message to all users in room + * + * This API allows you to use TRTC's SEI channel to broadcast custom data to other users in the current room for signaling transfer. + * The header of a video frame has a header data block called SEI. This API works by embedding the custom signaling data you want to send in the SEI block and sending it together with the video frame. + * Therefore, the SEI channel has a better compatibility than {@link sendCustomCmdMsg} as the signaling data can be transferred to the CSS CDN along with the video frame. 
+ * However, because the data block of the video frame header cannot be too large, we recommend you limit the size of the signaling data to only a few bytes when using this API. + * The most common use is to embed the custom timestamp into video frames through this API so as to implement a perfect alignment between the message and video image (such as between the teaching material and video signal in the education + * scenario). Other users in the room can receive the message through the `onRecvSEIMsg` callback in {@link TRTCCloudDelegate}. + * @param data Data to be sent, which can be up to 1 KB (1,000 bytes) + * @param repeatCount Data sending count + * @return true: the message is allowed and will be sent with subsequent video frames; false: the message is not allowed to be sent + * @note This API has the following restrictions: + * 1. The data will not be instantly sent after this API is called; instead, it will be inserted into the next video frame after the API call. + * 2. Up to 30 messages can be sent per second to all users in the room (this limit is shared with `sendCustomCmdMsg`). + * 3. Each packet can be up to 1 KB (this limit is shared with `sendCustomCmdMsg`). If a large amount of data is sent, the video bitrate will increase, which may reduce the video quality or even cause lagging. + * 4. Each client can send up to 8 KB of data in total per second (this limit is shared with `sendCustomCmdMsg`). + * 5. If multiple times of sending is required (i.e., `repeatCount` > 1), the data will be inserted into subsequent `repeatCount` video frames in a row for sending, which will increase the video bitrate. + * 6. If `repeatCount` is greater than 1, the data will be sent for multiple times, and the same message may be received multiple times in the `onRecvSEIMsg` callback; therefore, deduplication is required. 
+ */ + virtual bool sendSEIMsg(const uint8_t* data, uint32_t dataSize, int32_t repeatCount) = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Network test APIs + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Network test APIs + /// @{ + + /** + * 12.1 Start network speed test (used before room entry) + * + * @param params speed test options + * @return interface call result, <0: failure + * @note + * 1. The speed measurement process will incur a small amount of basic service fees, See [Purchase Guide > Base Services](https://intl.cloud.tencent.com/document/product/647/34610?lang=en&pg=#basic-services). + * 2. Please perform the Network speed test before room entry, because if performed after room entry, the test will affect the normal audio/video transfer, and its result will be inaccurate due to interference in the room. + * 3. Only one network speed test task is allowed to run at the same time. + */ + virtual int startSpeedTest(const TRTCSpeedTestParams& params) = 0; + + /** + * 12.2 Stop network speed test + */ + virtual void stopSpeedTest() = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Debugging APIs + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Debugging APIs + /// @{ + + /** + * 13.1 Get SDK version information + */ + virtual const char* getSDKVersion() = 0; + + /** + * 13.2 Set log output level + * + * @param level For more information, please see {@link TRTCLogLevel}. 
Default value: {@link TRTCLogLevelNone} + */ + virtual void setLogLevel(TRTCLogLevel level) = 0; + + /** + * 13.3 Enable/Disable console log printing + * + * @param enabled Specify whether to enable it, which is disabled by default + */ + virtual void setConsoleEnabled(bool enabled) = 0; + + /** + * 13.4 Enable/Disable local log compression + * + * If compression is enabled, the log size will significantly reduce, but logs can be read only after being decompressed by the Python script provided by Tencent Cloud. + * If compression is disabled, logs will be stored in plaintext and can be read directly in Notepad, but will take up more storage capacity. + * @param enabled Specify whether to enable it, which is enabled by default + */ + virtual void setLogCompressEnabled(bool enabled) = 0; + + /** + * 13.5 Set local log storage path + * + * You can use this API to change the default storage path of the SDK's local logs, which is as follows: + * - Windows: C:/Users/[username]/AppData/Roaming/liteav/log, i.e., under `%appdata%/liteav/log`. + * - iOS or macOS: under `sandbox Documents/log`. + * - Android: under `/app directory/files/log/liteav/`. + * @note Please be sure to call this API before all other APIs and make sure that the directory you specify exists and your application has read/write permissions of the directory. + * @param path Log storage path + */ + virtual void setLogDirPath(const char* path) = 0; + + /** + * 13.6 Set log callback + */ + virtual void setLogCallback(ITRTCLogCallback* callback) = 0; + + /** + * 13.7 Display dashboard + * + * "Dashboard" is a semi-transparent floating layer for debugging information on top of the video rendering control. It is used to display audio/video information and event information to facilitate integration and debugging. + * @param showType 0: does not display; 1: displays lite edition (only with audio/video information); 2: displays full edition (with audio/video information and event information). 
+ */ + virtual void showDebugView(int showType) = 0; + +/** + * 13.9 Call experimental APIs + */ +#ifdef _WIN32 + virtual const char* callExperimentalAPI(const char* jsonStr) = 0; +#else + virtual void callExperimentalAPI(const char* jsonStr) = 0; +#endif + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Disused APIs (the corresponding new APIs are recommended) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Disused APIs (the corresponding new APIs are recommended) +/// @{ + +/** + * Enable custom video capturing mode + * + * @deprecated This API is not recommended after v8.5. Please use `enableCustomVideoCapture(streamType,enable)` instead. + */ +#ifndef _WIN32 + virtual void enableCustomVideoCapture(bool enable) = 0; +#endif + +/** + * Deliver captured video data to SDK + * + * @deprecated This API is not recommended after v8.5. Please use `sendCustomVideoData(streamType, TRTCVideoFrame)` instead. + */ +#ifndef _WIN32 + virtual void sendCustomVideoData(TRTCVideoFrame* frame) = 0; +#endif + +/** + * Pause/Resume publishing local video stream + * + * @deprecated This API is not recommended after v8.9. Please use `muteLocalVideo(streamType, mute)` instead. + */ +#ifndef _WIN32 + virtual void muteLocalVideo(bool mute) = 0; +#endif + +/** + * Pause/Resume subscribing to remote user's video stream + * + * @deprecated This API is not recommended after v8.9. Please use `muteRemoteVideoStream(userId, streamType, mute)` instead. + */ +#ifndef _WIN32 + virtual void muteRemoteVideoStream(const char* userId, bool mute) = 0; +#endif + +/** + * Start network speed test (used before room entry) + * + * @deprecated This API is not recommended after v9.2. Please use `startSpeedTest(params)` instead. 
+ */ +#ifdef __APPLE__ + virtual void startSpeedTest(uint32_t sdkAppId, const char* userId, const char* userSig) __attribute__((deprecated("use startSpeedTest:params instead"))) = 0; +#elif !defined(_WIN32) + virtual void startSpeedTest(uint32_t sdkAppId, const char* userId, const char* userSig) = 0; +#endif + +#ifdef _WIN32 + using IDeprecatedTRTCCloud::enableCustomVideoCapture; + using IDeprecatedTRTCCloud::muteLocalVideo; + using IDeprecatedTRTCCloud::muteRemoteVideoStream; + using IDeprecatedTRTCCloud::selectScreenCaptureTarget; + using IDeprecatedTRTCCloud::sendCustomVideoData; + using IDeprecatedTRTCCloud::startLocalAudio; + using IDeprecatedTRTCCloud::startRemoteView; + using IDeprecatedTRTCCloud::startScreenCapture; + using IDeprecatedTRTCCloud::startSpeedTest; + using IDeprecatedTRTCCloud::stopRemoteView; +#endif + /// @} +}; +} // namespace liteav +/// @} +#endif /* __ITRTCCLOUD_H__ */ diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITRTCStatistics.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITRTCStatistics.h new file mode 100644 index 0000000..64c248a --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITRTCStatistics.h @@ -0,0 +1,215 @@ +/** + * Module: TRTC audio/video metrics (read-only) + * Function: the TRTC SDK reports to you the current real-time audio/video metrics (frame rate, bitrate, lag, etc.) 
once every two seconds + */ +/// @defgroup TRTCStatistic_cplusplus TRTCStatistic +/// Tencent Cloud TRTC : audio, video and network related statistical indicators +/// @{ +#ifndef __TRTCSTATISTIC_H__ +#define __TRTCSTATISTIC_H__ +namespace liteav { + +///////////////////////////////////////////////////////////////////////////////// +// +// Local audio/video metrics +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Local audio/video metrics +/// @{ + +struct TRTCLocalStatistics { + ///**Field description:** local video width in px + uint32_t width; + + ///**Field description:** local video height in px + uint32_t height; + + ///**Field description:** local video frame rate in fps, i.e., how many video frames there are per second + uint32_t frameRate; + + ///**Field description:** local video bitrate in Kbps, i.e., how much video data is generated per second + uint32_t videoBitrate; + + ///**Field description:** local audio sample rate (Hz) + uint32_t audioSampleRate; + + ///**Field description:** local audio bitrate in Kbps, i.e., how much audio data is generated per second + uint32_t audioBitrate; + + ///**Field description:** video stream type (HD big image | smooth small image | substream image) + TRTCVideoStreamType streamType; + + ///**Field description:**Audio equipment collection status( + /// 0:Normal;1:Long silence detected;2:Broken sound detected;3:Abnormal intermittent sound detected;) + uint32_t audioCaptureState; + + TRTCLocalStatistics() : width(0), height(0), frameRate(0), videoBitrate(0), audioSampleRate(0), audioBitrate(0), streamType(TRTCVideoStreamTypeBig), audioCaptureState(0) { + } +}; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Remote audio/video metrics +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Remote audio/video metrics +/// @{ + +struct TRTCRemoteStatistics { + ///**Field 
description:** user ID + const char* userId; + + ///**Field description:** total packet loss rate (%) of the audio stream + ///`audioPacketLoss` represents the packet loss rate eventually calculated on the audience side after the audio/video stream goes through the complete transfer linkage of "anchor -> cloud -> audience". + /// The smaller the `audioPacketLoss`, the better. The packet loss rate of 0 indicates that all data of the audio stream has entirely reached the audience. + /// If `downLoss` is `0` but `audioPacketLoss` isn't, there is no packet loss on the linkage of "cloud -> audience" for the audio stream, but there are unrecoverable packet losses on the linkage of "anchor -> cloud". + uint32_t audioPacketLoss; + + ///**Field description:** total packet loss rate (%) of the video stream + ///`videoPacketLoss` represents the packet loss rate eventually calculated on the audience side after the audio/video stream goes through the complete transfer linkage of "anchor -> cloud -> audience". + /// The smaller the `videoPacketLoss`, the better. The packet loss rate of 0 indicates that all data of the video stream has entirely reached the audience. + /// If `downLoss` is `0` but `videoPacketLoss` isn't, there is no packet loss on the linkage of "cloud -> audience" for the video stream, but there are unrecoverable packet losses on the linkage of "anchor -> cloud". 
+ uint32_t videoPacketLoss; + + ///**Field description:** remote video width in px + uint32_t width; + + ///**Field description:** remote video height in px + uint32_t height; + + ///**Field description:** remote video frame rate (fps) + uint32_t frameRate; + + ///**Field description:** remote video bitrate (Kbps) + uint32_t videoBitrate; + + ///**Field description:** remote audio sample rate (Hz) + uint32_t audioSampleRate; + + ///**Field description:** remote audio bitrate (Kbps) + uint32_t audioBitrate; + + ///**Field description:** playback delay (ms) + /// In order to avoid audio/video lags caused by network jitters and network packet disorders, TRTC maintains a playback buffer on the playback side to organize the received network data packets. + /// The size of the buffer is adaptively adjusted according to the current network quality and converted to the length of time in milliseconds, i.e., `jitterBufferDelay`. + uint32_t jitterBufferDelay; + + ///**Field description:** end-to-end delay (ms) + ///`point2PointDelay` represents the delay of "anchor -> cloud -> audience". To be more precise, it represents the delay of the entire linkage of "collection -> encoding -> network transfer -> receiving -> buffering -> decoding -> playback". + ///`point2PointDelay` works only if both the local and remote SDKs are on version 8.5 or above. If the remote SDK is on a version below 8.5, this value will always be 0 and thus meaningless. 
+ uint32_t point2PointDelay; + + ///**Field description:** cumulative audio playback lag duration (ms) + uint32_t audioTotalBlockTime; + + ///**Field description:** audio playback lag rate (%) + /// Audio playback lag rate (audioBlockRate) = cumulative audio playback lag duration (audioTotalBlockTime)/total audio playback duration + uint32_t audioBlockRate; + + ///**Field description:** cumulative video playback lag duration (ms) + uint32_t videoTotalBlockTime; + + ///**Field description:** video playback lag rate (%) + /// Video playback lag rate (videoBlockRate) = cumulative video playback lag duration (videoTotalBlockTime)/total video playback duration + uint32_t videoBlockRate; + + ///**Field description:** total packet loss rate (%) of the audio/video stream + /// Deprecated, please use audioPacketLoss and videoPacketLoss instead. + uint32_t finalLoss; + + ///**Field description:** video stream type (HD big image | smooth small image | substream image) + TRTCVideoStreamType streamType; + + TRTCRemoteStatistics() + : userId(nullptr), + audioPacketLoss(0), + videoPacketLoss(0), + width(0), + height(0), + frameRate(0), + videoBitrate(0), + audioSampleRate(0), + audioBitrate(0), + jitterBufferDelay(0), + point2PointDelay(0), + audioTotalBlockTime(0), + audioBlockRate(0), + videoTotalBlockTime(0), + videoBlockRate(0), + finalLoss(0), + streamType(TRTCVideoStreamTypeBig) { + } +}; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Network and performance metrics +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Network and performance metrics +/// @{ + +struct TRTCStatistics { + ///**Field description:** CPU utilization (%) of the current application + uint32_t appCpu; + + ///**Field description:** CPU utilization (%) of the current system + uint32_t systemCpu; + + ///**Field description:** upstream packet loss rate (%) from the SDK to cloud + /// The smaller the value, 
the better. If `upLoss` is `0%`, the upstream network quality is very good, and the data packets uploaded to the cloud are basically not lost. + /// If `upLoss` is `30%`, 30% of the audio/video data packets sent to the cloud by the SDK are lost on the transfer linkage. + uint32_t upLoss; + + ///**Field description:** downstream packet loss rate (%) from cloud to the SDK + /// The smaller the value, the better. If `downLoss` is `0%`, the downstream network quality is very good, and the data packets received from the cloud are basically not lost. + /// If `downLoss` is `30%`, 30% of the audio/video data packets sent to the SDK by the cloud are lost on the transfer linkage. + uint32_t downLoss; + + ///**Field description:** round-trip delay (ms) from the SDK to cloud + /// This value represents the total time it takes to send a network packet from the SDK to the cloud and then send a network packet back from the cloud to the SDK, i.e., the total time it takes for a network packet to go through the linkage of + /// "SDK -> cloud -> SDK". The smaller the value, the better. If `rtt` is below 50 ms, it means a short audio/video call delay; if `rtt` is above 200 ms, it means a long audio/video call delay. It should be explained that `rtt` represents the + /// total time spent on the linkage of "SDK -> cloud -> SDK"; therefore, there is no need to distinguish between `upRtt` and `downRtt`. + uint32_t rtt; + + ///**Field description:** round-trip delay (ms) from the SDK to gateway + /// This value represents the total time it takes to send a network packet from the SDK to the gateway and then send a network packet back from the gateway to the SDK, i.e., the total time it takes for a network packet to go through the linkage + /// of "SDK -> gateway -> SDK". The smaller the value, the better. If `gatewayRtt` is below 50 ms, it means a short audio/video call delay; if `gatewayRtt` is above 200 ms, it means a long audio/video call delay. 
It should be explained that + /// `gatewayRtt` is invalid for cellular network. + uint32_t gatewayRtt; + + ///**Field description:** total number of sent bytes (including signaling data and audio/video data) + uint32_t sentBytes; + + ///**Field description:** total number of received bytes (including signaling data and audio/video data) + uint32_t receivedBytes; + + ///**Field description:** local audio/video statistics + /// As there may be three local audio/video streams (i.e., HD big image, smooth small image, and substream image), the local audio/video statistics are an array. + TRTCLocalStatistics* localStatisticsArray; + + ///**Field description:** `localStatisticsArray` array size + uint32_t localStatisticsArraySize; + + ///**Field description:** remote audio/video statistics + /// As there may be multiple concurrent remote users, and each of them may have multiple concurrent audio/video streams (i.e., HD big image, smooth small image, and substream image), the remote audio/video statistics are an array. 
+ TRTCRemoteStatistics* remoteStatisticsArray; + + ///**Field description:** `remoteStatisticsArray` array size + uint32_t remoteStatisticsArraySize; + + TRTCStatistics() : upLoss(0), downLoss(0), appCpu(0), systemCpu(0), rtt(0), gatewayRtt(0), receivedBytes(0), sentBytes(0), localStatisticsArray(nullptr), localStatisticsArraySize(0), remoteStatisticsArray(nullptr), remoteStatisticsArraySize(0) { + } +}; +/// @} + +} // namespace liteav + +#ifdef _WIN32 +using namespace liteav; +#endif + +#endif /* __TRTCSTATISTIC_H__ */ +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITXAudioEffectManager.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITXAudioEffectManager.h new file mode 100644 index 0000000..068e56c --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITXAudioEffectManager.h @@ -0,0 +1,320 @@ +/** + * Module: management class for background music, short audio effects, and voice effects + * Description: sets background music, short audio effects, and voice effects + */ +/// @defgroup TXAudioEffectManager_cplusplus TXAudioEffectManager +/// Tencent Cloud Audio Effect Management Module +/// @{ +#ifndef __ITXAUDIOEFFECTMANAGER_H__ +#define __ITXAUDIOEFFECTMANAGER_H__ + +namespace liteav { + +class ITXMusicPlayObserver; +class AudioMusicParam; + +///////////////////////////////////////////////////////////////////////////////// +// +// Definitions of enumerated values related to audio effects +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Definitions of enumerated values related to audio effects +/// @{ + +/** + * 1.1 Reverb effects + * + * Reverb effects can be applied to human voice. Based on acoustic algorithms, they can mimic voice in different environments. 
The following effects are supported currently: + * 0: original; 1: karaoke; 2: room; 3: hall; 4: low and deep; 5: resonant; 6: metal; 7: husky; 8: ethereal; 9: studio; 10: melodious; 11: phonograph; 12: nature + */ +enum TXVoiceReverbType { + TXLiveVoiceReverbType_0 = 0, ///< Disable + TXLiveVoiceReverbType_1 = 1, ///< Karaoke + TXLiveVoiceReverbType_2 = 2, ///< Room + TXLiveVoiceReverbType_3 = 3, ///< Hall + TXLiveVoiceReverbType_4 = 4, ///< Low and deep + TXLiveVoiceReverbType_5 = 5, ///< Resonant + TXLiveVoiceReverbType_6 = 6, ///< Metal + TXLiveVoiceReverbType_7 = 7, ///< Husky + TXLiveVoiceReverbType_8 = 8, ///< ethereal + TXLiveVoiceReverbType_9 = 9, ///< studio + TXLiveVoiceReverbType_10 = 10, ///< melodious + TXLiveVoiceReverbType_11 = 11, ///< phonograph + TXLiveVoiceReverbType_12 = 12, ///< nature +}; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Callback of playing background music +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Callback of playing background music +/// @{ + +// Playback progress block of background music +class ITXMusicPlayObserver { + public: + virtual ~ITXMusicPlayObserver() { + } + + /// Background music started. + virtual void onStart(int id, int errCode) = 0; + + /// Playback progress of background music + virtual void onPlayProgress(int id, long curPtsMS, long durationMS) = 0; + + /// Background music ended + virtual void onComplete(int id, int errCode) = 0; +}; + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Background music playback information +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Background music playback information +/// @{ + +/** + * Background music playback information + * + * The information, including playback ID, file path, and loop times, is passed in the {@link startPlayMusic} API. + * 1. 
If you play the same music track multiple times, please use the same ID instead of a separate ID for each playback. + * 2. If you want to play different music tracks at the same time, use different IDs for them. + * 3. If you use the same ID to play a music track different from the current one, the SDK will stop the current one before playing the new one. + */ +class AudioMusicParam { + public: + ///**Field description:** music ID
+ ///**Note:** the SDK supports playing multiple music tracks. IDs are used to distinguish different music tracks and control their start, end, volume, etc. + int id; + + ///**Field description:** absolute path of the music file or url.the mp3,aac,m4a,wav supported. + char* path; + + ///**Field description:** number of times the music track is looped
+ ///**Valid values:** 0 or any positive integer. 0 (default) indicates that the music is played once, 1 twice, and so on. + int loopCount; + + ///**Field description:** whether to send the music to remote users
+ ///**Valid values:** `true`: remote users can hear the music played locally; `false` (default): only the local user can hear the music. + bool publish; + + ///**Field description:** whether the music played is a short music track
+ ///**Valid values:** `true`: short music track that needs to be looped; `false` (default): normal-length music track + bool isShortFile; + + ///**Field description:** the point in time in milliseconds for starting music playback + long startTimeMS; + + ///**Field description:** the point in time in milliseconds for ending music playback. 0 indicates that playback continues till the end of the music track. + long endTimeMS; + + AudioMusicParam(int id_, char* path_) { + path = path_; + id = id_; + loopCount = 0; + publish = false; + isShortFile = false; + startTimeMS = 0; + endTimeMS = 0; + } +}; +/// @} + +// Definition of audio effect management module +class ITXAudioEffectManager { + protected: + ITXAudioEffectManager() { + } + virtual ~ITXAudioEffectManager() { + } + + public: + ///////////////////////////////////////////////////////////////////////////////// + // + // Voice effect APIs + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Voice effect APIs + /// @{ + + /** + * 1.3 Setting voice reverb effects + * + * This API is used to set reverb effects for human voice. For the effects supported, please see {@link TXVoiceReverbType}. + * + * @note Effects become invalid after room exit. If you want to use the same effect after you enter the room again, you need to set the effect again using this API. + */ + virtual void setVoiceReverbType(TXVoiceReverbType type) = 0; + + /** + * 1.5 Setting speech volume + * + * This API is used to set the volume of speech. It is often used together with the music volume setting API {@link setAllMusicVolume} to balance between the volume of music and speech. + * + * @param volume Volume. Value range: 0-100; default: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. 
+ */ + virtual void setVoiceCaptureVolume(int volume) = 0; + + /** + * 1.6 Setting speech pitch + * + * This API is used to set the pitch of speech. + * + * @param pitch Ptich,Value range: -1.0f~1.0f; default: 0.0f。 + */ + virtual void setVoicePitch(double pitch) = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Background music APIs + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Background music APIs + /// @{ + + /** + * 2.0 Setting the background music callback + * + * Before playing background music, please use this API to set the music callback, which can inform you of the playback progress. + * + * @param musicId Music ID + * @param observer For more information, please see the APIs defined in `ITXMusicPlayObserver`. + */ + virtual void setMusicObserver(int musicId, ITXMusicPlayObserver* observer) = 0; + + /** + * 2.1 Starting background music + * + * You must assign an ID to each music track so that you can start, stop, or set the volume of music tracks by ID. + * + * @note + * 1. If you play the same music track multiple times, please use the same ID instead of a separate ID for each playback. + * 2. If you want to play different music tracks at the same time, use different IDs for them. + * 3. If you use the same ID to play a music track different from the current one, the SDK will stop the current one before playing the new one. 
+ * + * @param musicParam Music parameter + * @param startBlock Callback of starting music + * @param progressBlock Callback of playback progress + * @param completeBlock Callback of ending music + */ + virtual void startPlayMusic(AudioMusicParam musicParam) = 0; + + /** + * 2.2 Stopping background music + * + * @param id Music ID + */ + virtual void stopPlayMusic(int id) = 0; + + /** + * 2.3 Pausing background music + * + * @param id Music ID + */ + virtual void pausePlayMusic(int id) = 0; + + /** + * 2.4 Resuming background music + * + * @param id Music ID + */ + virtual void resumePlayMusic(int id) = 0; + + /** + * 2.5 Setting the local and remote playback volume of background music + * + * This API is used to set the local and remote playback volume of background music. + * - Local volume: the volume of music heard by anchors + * - Remote volume: the volume of music heard by audience + * + * @param volume Volume. Value range: 0-100; default: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. + */ + virtual void setAllMusicVolume(int volume) = 0; + + /** + * 2.6 Setting the remote playback volume of a specific music track + * + * This API is used to control the remote playback volume (the volume heard by audience) of a specific music track. + * + * @param id Music ID + * @param volume Volume. Value range: 0-100; default: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. + */ + virtual void setMusicPublishVolume(int id, int volume) = 0; + + /** + * 2.7 Setting the local playback volume of a specific music track + * + * This API is used to control the local playback volume (the volume heard by anchors) of a specific music track. + * + * @param id Music ID + * @param volume Volume. Value range: 0-100. 
default: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. + */ + virtual void setMusicPlayoutVolume(int id, int volume) = 0; + + /** + * 2.8 Adjusting the pitch of background music + * + * @param id Music ID + * @param pitch Pitch. Value range: floating point numbers in the range of [-1, 1]; default: 0.0f + */ + virtual void setMusicPitch(int id, float pitch) = 0; + + /** + * 2.9 Changing the speed of background music + * + * @param id Music ID + * @param speedRate Music speed. Value range: floating point numbers in the range of [0.5, 2]; default: 1.0f + */ + virtual void setMusicSpeedRate(int id, float speedRate) = 0; + + /** + * 2.10 Getting the playback progress (ms) of background music + * + * @param id Music ID + * @return The milliseconds that have passed since playback started. -1 indicates failure to get the the playback progress. + */ + virtual long getMusicCurrentPosInMS(int id) = 0; + + /** + * 2.11 Getting the total length (ms) of background music + * + * @param path Path of the music file. + * @return The length of the specified music file is returned. -1 indicates failure to get the length. + */ + virtual long getMusicDurationInMS(char* path) = 0; + + /** + * 2.12 Setting the playback progress (ms) of background music + * + * @note Do not call this API frequently as the music file may be read and written to each time the API is called, which can be time-consuming. + * Wait till users finish dragging the progress bar before you call this API. + * The progress bar controller on the UI tends to update the progress at a high frequency as users drag the progress bar. This will result in poor user experience unless you limit the frequency. + * + * @param id Music ID + * @param pts Unit: millisecond + */ + virtual void seekMusicToPosInTime(int id, int pts) = 0; + + /// @} +}; +} // End of namespace liteav + +// the C++ interface will be declared under the namespace liteav after v9.0. 
To be compatible with the previous usage, trtc will be used as an alias for liteav +namespace trtc = liteav; + +#ifdef _WIN32 +using namespace liteav; +#endif + +#endif /* __ITXAUDIOEFFECTMANAGER_H__ */ + +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITXDeviceManager.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITXDeviceManager.h new file mode 100644 index 0000000..47341dc --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/ITXDeviceManager.h @@ -0,0 +1,501 @@ +/** + * Module: audio/video device management module + * Description: manages audio/video devices such as camera, mic, and speaker. + */ +/// @defgroup TXDeviceManager_cplusplus TXDeviceManager +/// Tencent Cloud Device Management Module +/// @{ +#ifndef __ITXDEVICEMANAGER_H__ +#define __ITXDEVICEMANAGER_H__ + +#include +#ifdef __APPLE__ +#include +#endif + +namespace liteav { +class ITRTCVideoRenderCallback; + +///////////////////////////////////////////////////////////////////////////////// +// +// Type definitions of audio/video devices +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Type definitions of audio/video devices +/// @{ + +/** + * System volume type (for mobile devices only) + * + * @deprecated This API is not recommended after v9.5. + * + * Smartphones usually have two types of system volume: call volume and media volume. + * - Call volume is designed for call scenarios. It comes with acoustic echo cancellation (AEC) and supports audio capturing by Bluetooth earphones, but its sound quality is average. + * If you cannot turn the volume down to 0 (i.e., mute the phone) using the volume buttons, then your phone is using call volume. + * - Media volume is designed for media scenarios such as music playback. AEC does not work when media volume is used, and Bluetooth earphones cannot be used for audio capturing. However, media volume delivers better music listening experience. 
+ * If you are able to mute your phone using the volume buttons, then your phone is using media volume. + * + * The SDK offers three system volume control modes: auto, call volume, and media volume. + */ +enum TXSystemVolumeType { + + /// Auto + TXSystemVolumeTypeAuto = 0, + + /// Media volume + TXSystemVolumeTypeMedia = 1, + + /// Call volume + TXSystemVolumeTypeVOIP = 2, + +}; + +/** + * Audio route (the route via which audio is played) + * + * Audio route is the route (speaker or receiver) via which audio is played. It applies only to mobile devices such as mobile phones. + * A mobile phone has two speakers: one at the top (receiver) and the other the bottom. + * - If the audio route is set to the receiver, the volume is relatively low, and audio can be heard only when the phone is put near the ear. This mode has a high level of privacy and is suitable for answering calls. + * - If the audio route is set to the speaker, the volume is relatively high, and there is no need to put the phone near the ear. This mode enables the "hands-free" feature. + */ +enum TXAudioRoute { + + /// Speakerphone: the speaker at the bottom is used for playback (hands-free). With relatively high volume, it is used to play music out loud. + TXAudioRouteSpeakerphone = 0, + + /// Earpiece: the receiver at the top is used for playback. With relatively low volume, it is suitable for call scenarios that require privacy. + TXAudioRouteEarpiece = 1, + +}; + +/** + * Device type (for desktop OS) + * + * This enumerated type defines three types of audio/video devices, namely camera, mic and speaker, so that you can use the same device management API to manage three types of devices. 
+ */ +enum TXMediaDeviceType { + TXMediaDeviceTypeUnknown = -1, ///< undefined device type + TXMediaDeviceTypeMic = 0, ///< microphone + TXMediaDeviceTypeSpeaker = 1, ///< speaker or earpiece + TXMediaDeviceTypeCamera = 2, ///< camera +}; + +/** + * Device operation + * + * This enumerated value is used to notify the status change of the local device {@link onDeviceChanged}. + */ +enum TXMediaDeviceState { + + /// The device has been plugged in + TXMediaDeviceStateAdd = 0, + + /// The device has been removed + TXMediaDeviceStateRemove = 1, + + /// The device has been enabled + TXMediaDeviceStateActive = 2, + +}; + +/** + * Camera acquisition preferences + * + * This enum is used to set camera acquisition parameters. + */ +#ifdef _WIN32 +enum TXCameraCaptureMode { + + /// Auto adjustment of camera capture parameters. + /// SDK selects the appropriate camera output parameters according to the actual acquisition device performance and network situation, and maintains a balance between device performance and video preview quality. + TXCameraResolutionStrategyAuto = 0, + + /// Give priority to equipment performance. + /// SDK selects the closest camera output parameters according to the user's encoder resolution and frame rate, so as to ensure the performance of the device. + TXCameraResolutionStrategyPerformance = 1, + + /// Give priority to the quality of video preview. + /// SDK selects higher camera output parameters to improve the quality of preview video. In this case, it will consume more CPU and memory to do video preprocessing. + TXCameraResolutionStrategyHighQuality = 2, + + /// Allows the user to set the width and height of the video captured by the local camera. + TXCameraCaptureManual = 3, + +}; + +/** + * Camera acquisition parameters + * + * This setting determines the quality of the local preview image. 
+ */ +struct TXCameraCaptureParam { + ///**Field description:** camera acquisition preferences + TXCameraCaptureMode mode; + + ///**Field description:** width of acquired image + int width; + + ///**Field description:** height of acquired image + int height; + + TXCameraCaptureParam() : mode(TXCameraResolutionStrategyAuto), width(640), height(360) { + } +}; +#endif + +/** + * Audio/Video device information (for desktop OS) + * + * This structure describes key information (such as device ID and device name) of an audio/video device, so that users can choose on the UI the device to use. + */ +class ITXDeviceInfo { + protected: + virtual ~ITXDeviceInfo() { + } + + public: + /// device name (UTF-8) + virtual const char* getDeviceName() = 0; + /// device PID (UTF-8) + virtual const char* getDevicePID() = 0; + /// release function, don't use delete!!! + virtual void release() = 0; +}; + +/** + * Device information list (for desktop OS) + * + * This structure functions as std::vector does. It solves the binary compatibility issue between different versions of STL containers. + */ +class ITXDeviceCollection { + protected: + virtual ~ITXDeviceCollection() { + } + + public: + /** + * Size of this list. + * + * @return Size of this list. + */ + virtual uint32_t getCount() = 0; + + /** + * device name (UTF-8) + * + * @param index value in [0,getCount) + * @return device name (UTF-8) + */ + virtual const char* getDeviceName(uint32_t index) = 0; + + /** + * device PID (UTF-8) + * + * @param index value in [0,getCount) + * @return device PID (UTF-8) + */ + virtual const char* getDevicePID(uint32_t index) = 0; + + /** + * device properties (json format) + * + * @note + * - examples: {"SupportedResolution":[{"width":640,"height":480},{"width":320,"height":240}]} + * @param index value in [0,getCount) + * @return device properties formatted by json + */ + virtual const char* getDeviceProperties(uint32_t index) = 0; + + /** + * release function, don't use delete!!! 
+ */ + virtual void release() = 0; +}; +/// @} + +#if (__APPLE__ && TARGET_OS_MAC && !TARGET_OS_IPHONE) || _WIN32 +class ITXDeviceObserver { + public: + virtual ~ITXDeviceObserver() { + } + + /** + * The status of a local device changed (for desktop OS only) + * + * The SDK returns this callback when a local device (camera, mic, or speaker) is connected or disconnected. + * + * @param deviceId Device ID + * @param type Device type + * @param state Device status. `0`: connected; `1`: disconnected; `2`: started + */ + virtual void onDeviceChanged(const char* deviceId, TXMediaDeviceType type, TXMediaDeviceState state) { + } + +}; // End of class ITXDeviceObserver +#endif + +class ITXDeviceManager { + protected: + ITXDeviceManager() { + } + virtual ~ITXDeviceManager() { + } + + public: +///////////////////////////////////////////////////////////////////////////////// +// +// Device APIs for mobile OS (iOS and Android) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Device APIs for mobile OS +/// @{ + +/** + * 1.1 Querying whether the front camera is being used + */ +#if __ANDROID__ || (__APPLE__ && TARGET_OS_IOS) + virtual bool isFrontCamera() = 0; + + /** + * 1.2 Switching to the front/rear camera (for mobile OS) + */ + virtual int switchCamera(bool frontCamera) = 0; + + /** + * 1.3 Getting the maximum zoom ratio of the camera (for mobile OS) + */ + virtual float getCameraZoomMaxRatio() = 0; + + /** + * 1.4 Setting the camera zoom ratio (for mobile OS) + * + * @param zoomRatio Value range: 1-5. 1 indicates the widest angle of view (original), and 5 the narrowest angle of view (zoomed in). 
+ */ + virtual int setCameraZoomRatio(float zoomRatio) = 0; + + /** + * 1.5 Querying whether automatic face detection is supported (for mobile OS) + */ + virtual bool isAutoFocusEnabled() = 0; + + /** + * 1.6 Enabling auto focus (for mobile OS) + * + * After auto focus is enabled, the camera will automatically detect and always focus on faces. + */ + virtual int enableCameraAutoFocus(bool enabled) = 0; + + /** + * 1.7 Adjusting the focus (for mobile OS) + * + * This API can be used to achieve the following: + * 1. A user can tap on the camera preview. + * 2. A rectangle will appear where the user taps, indicating the spot the camera will focus on. + * 3. The user passes the coordinates of the spot to the SDK using this API, and the SDK will instruct the camera to focus as required. + * @note Before using this API, you must first disable auto focus using {@link enableCameraAutoFocus}. + * @param position The spot to focus on. Pass in the coordinates of the spot you want to focus on. + * @return 0: operation successful; negative number: operation failed. + */ + virtual int setCameraFocusPosition(float x, float y) = 0; + + /** + * 1.8 Enabling/Disabling flash, i.e., the torch mode (for mobile OS) + */ + virtual int enableCameraTorch(bool enabled) = 0; + + /** + * 1.9 Setting the audio route (for mobile OS) + * + * A mobile phone has two audio playback devices: the receiver at the top and the speaker at the bottom. + * If the audio route is set to the receiver, the volume is relatively low, and audio can be heard only when the phone is put near the ear. This mode has a high level of privacy and is suitable for answering calls. + * If the audio route is set to the speaker, the volume is relatively high, and there is no need to put the phone near the ear. This mode enables the "hands-free" feature. 
+ */ + virtual int setAudioRoute(TXAudioRoute route) = 0; +#endif + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Device APIs for desktop OS (Windows & macOS) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Device APIs for desktop OS +/// @{ + +/** + * 2.1 Getting the device list (for desktop OS) + * + * @param type Device type. Set it to the type of device you want to get. For details, please see the definition of `TXMediaDeviceType`. + * @note + * - To ensure that the SDK can manage the lifecycle of the `ITXDeviceCollection` object, after using this API, please call the `release` method to release the resources. + * - Do not use `delete` to release the Collection object returned as deleting the ITXDeviceCollection* pointer will cause crash. + * - The valid values of `type` are `TXMediaDeviceTypeMic`, `TXMediaDeviceTypeSpeaker`, and `TXMediaDeviceTypeCamera`. + * - This API can be used only on macOS and Windows. + */ +#if (__APPLE__ && TARGET_OS_MAC && !TARGET_OS_IPHONE) || _WIN32 + virtual ITXDeviceCollection* getDevicesList(TXMediaDeviceType type) = 0; + + /** + * 2.2 Setting the device to use (for desktop OS) + * + * @param type Device type. For details, please see the definition of `TXMediaDeviceType`. + * @param deviceId Device ID. You can get the ID of a device using the {@link getDevicesList} API. + * @return 0: operation successful; negative number: operation failed. + */ + virtual int setCurrentDevice(TXMediaDeviceType type, const char* deviceId) = 0; + + /** + * 2.3 Getting the device currently in use (for desktop OS) + */ + virtual ITXDeviceInfo* getCurrentDevice(TXMediaDeviceType type) = 0; + + /** + * 2.4 Setting the volume of the current device (for desktop OS) + * + * This API is used to set the capturing volume of the mic or playback volume of the speaker, but not the volume of the camera. + * @param volume Volume. 
Value range: 0-100; default: 100 + * @note If 100 is still not loud enough for you, you can set the volume to up to 150, but there may be side effects. + */ + virtual int setCurrentDeviceVolume(TXMediaDeviceType type, uint32_t volume) = 0; + + /** + * 2.5 Getting the volume of the current device (for desktop OS) + * + * This API is used to get the capturing volume of the mic or playback volume of the speaker, but not the volume of the camera. + */ + virtual uint32_t getCurrentDeviceVolume(TXMediaDeviceType type) = 0; + + /** + * 2.6 Muting the current device (for desktop OS) + * + * This API is used to mute the mic or speaker, but not the camera. + */ + virtual int setCurrentDeviceMute(TXMediaDeviceType type, bool mute) = 0; + + /** + * 2.7 Querying whether the current device is muted (for desktop OS) + * + * This API is used to query whether the mic or speaker is muted. Camera muting is not supported. + */ + virtual bool getCurrentDeviceMute(TXMediaDeviceType type) = 0; + + /** + * 2.8 Starting camera testing (for desktop OS) + * + * @note You can use the {@link setCurrentDevice} API to switch between cameras during testing. + */ + virtual int startCameraDeviceTest(void* view) = 0; + + /** + * 2.9 Ending camera testing (for desktop OS) + */ + virtual int stopCameraDeviceTest() = 0; + + /** + * 2.10 Starting mic testing (for desktop OS) + * + * This API is used to test whether the mic functions properly. The mic volume detected (value range: 0-100) is returned via a callback. + * @param interval Interval of volume callbacks + */ + virtual int startMicDeviceTest(uint32_t interval) = 0; + + /** + * 2.11 Ending mic testing (for desktop OS) + */ + virtual int stopMicDeviceTest() = 0; + + /** + * 2.12 Starting speaker testing (for desktop OS) + * + * This API is used to test whether the audio playback device functions properly by playing a specified audio file. If users can hear audio during testing, the device functions properly. 
+ * @param filePath Path of the audio file + */ + virtual int startSpeakerDeviceTest(const char* filePath) = 0; + + /** + * 2.13 Ending speaker testing (for desktop OS) + */ + virtual int stopSpeakerDeviceTest() = 0; +#endif + +/** + * 2.14 Starting camera testing (for Windows) + * + * This API supports custom rendering, meaning that you can use the callback API `ITRTCVideoRenderCallback` to get the images captured by the camera for custom rendering. + */ +#ifdef _WIN32 + virtual int startCameraDeviceTest(ITRTCVideoRenderCallback* callback) = 0; +#endif + +/** + * 2.15 Setting the volume of the current process in the volume mixer (for Windows) + */ +#ifdef _WIN32 + virtual int setApplicationPlayVolume(int volume) = 0; +#endif + +/** + * 2.16 Getting the volume of the current process in the volume mixer (for Windows) + */ +#ifdef _WIN32 + virtual int getApplicationPlayVolume() = 0; +#endif + +/** + * 2.17 Muting the current process in the volume mixer (for Windows) + */ +#ifdef _WIN32 + virtual int setApplicationMuteState(bool bMute) = 0; +#endif + +/** + * 2.18 Querying whether the current process is muted in the volume mixer (for Windows) + */ +#ifdef _WIN32 + virtual bool getApplicationMuteState() = 0; +#endif + +/** + * 2.19 Set camera acquisition preferences + */ +#ifdef _WIN32 + virtual void setCameraCapturerParam(const TXCameraCaptureParam& params) = 0; +#endif + +/** + * 2.20 set onDeviceChanged callback + */ +#if (__APPLE__ && TARGET_OS_MAC && !TARGET_OS_IPHONE) || _WIN32 + virtual void setDeviceObserver(ITXDeviceObserver* observer) = 0; +#endif + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Disused APIs (the corresponding new APIs are recommended) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Disused APIs (the corresponding new APIs are recommended) +/// @{ + +/** + * Setting the system volume type (for mobile OS) + * + * @deprecated This API is not 
recommended after v9.5. Please use the `startLocalAudio(quality)` API in {@link TRTCCloud} instead, which param `quality` is used to decide audio quality. + */ +#if __ANDROID__ || (__APPLE__ && TARGET_OS_IOS) + virtual int setSystemVolumeType(TXSystemVolumeType type) = 0; +#endif + + /// @} +}; // End of class ITXDeviceManager +} // namespace liteav + +namespace trtc = liteav; + +#ifdef _WIN32 +using namespace liteav; +#endif + +#endif /* __ITXDEVICEMANAGER_H__ */ +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TRTCCloudCallback.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TRTCCloudCallback.h new file mode 100644 index 0000000..f865b3d --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TRTCCloudCallback.h @@ -0,0 +1,1010 @@ +/** + * Module: TRTCCloudDelegate @ TXLiteAVSDK + * Function: event callback APIs for TRTC’s video call feature + */ +/// @defgroup TRTCCloudCallback_cplusplus TRTCCloudCallback +/// Tencent Cloud TRTC Event Notification Interface +/// @{ +#ifndef __TRTCCLOUDCALLBACK_H__ +#define __TRTCCLOUDCALLBACK_H__ + +#include "TRTCTypeDef.h" +#include "ITXDeviceManager.h" +#include "TXLiteAVCode.h" +#include "ITRTCStatistics.h" + +namespace liteav { + +class ITRTCCloudCallback { + public: + virtual ~ITRTCCloudCallback() { + } + + ///////////////////////////////////////////////////////////////////////////////// + // + // Error and warning events + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Error and warning events + /// @{ + + /** + * 1.1 Error event callback + * + * Error event, which indicates that the SDK threw an irrecoverable error such as room entry failure or failure to start device + * For more information, see [Error Codes](https://intl.cloud.tencent.com/document/product/647/35124). + * + * @param errCode Error code + * @param errMsg Error message + * @param extInfo Extended field. 
Certain error codes may carry extra information for troubleshooting. + */ + virtual void onError(TXLiteAVError errCode, const char* errMsg, void* extraInfo) = 0; + + /** + * 1.2 Warning event callback + * + * Warning event, which indicates that the SDK threw an error requiring attention, such as video lag or high CPU usage + * For more information, see [Error Codes](https://intl.cloud.tencent.com/document/product/647/35124). + * + * @param warningCode Warning code + * @param warningMsg Warning message + * @param extInfo Extended field. Certain warning codes may carry extra information for troubleshooting. + */ + virtual void onWarning(TXLiteAVWarning warningCode, const char* warningMsg, void* extraInfo) = 0; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Room event callback + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Room event callback + /// @{ + + /** + * 2.1 Whether room entry is successful + * + * After calling the `enterRoom()` API in `TRTCCloud` to enter a room, you will receive the `onEnterRoom(result)` callback from `TRTCCloudDelegate`. + * - If room entry succeeded, `result` will be a positive number (`result` > 0), indicating the time in milliseconds (ms) the room entry takes. + * - If room entry failed, `result` will be a negative number (result < 0), indicating the error code for the failure. + * For more information on the error codes for room entry failure, see [Error Codes](https://intl.cloud.tencent.com/document/product/647/35124). + * + * @note + * 1. In TRTC versions below 6.6, the `onEnterRoom(result)` callback is returned only if room entry succeeds, and the `onError()` callback is returned if room entry fails. + * 2. In TRTC 6.6 and above, the `onEnterRoom(result)` callback is returned regardless of whether room entry succeeds or fails, and the `onError()` callback is also returned if room entry fails. 
+ * + * @param result If `result` is greater than 0, it indicates the time (in ms) the room entry takes; if `result` is less than 0, it represents the error code for room entry. + */ + virtual void onEnterRoom(int result) = 0; + + /** + * 2.2 Room exit + * + * Calling the `exitRoom()` API in `TRTCCloud` will trigger the execution of room exit-related logic, such as releasing resources of audio/video devices and codecs. + * After all resources occupied by the SDK are released, the SDK will return the `onExitRoom()` callback. + * + * If you need to call `enterRoom()` again or switch to another audio/video SDK, please wait until you receive the `onExitRoom()` callback. + * Otherwise, you may encounter problems such as the camera or mic being occupied. + * + * @param reason Reason for room exit. `0`: the user called `exitRoom` to exit the room; `1`: the user was removed from the room by the server; `2`: the room was dismissed. + */ + virtual void onExitRoom(int reason) = 0; + + /** + * 2.3 Role switching + * + * You can call the `switchRole()` API in `TRTCCloud` to switch between the anchor and audience roles. This is accompanied by a line switching process. + * After the switching, the SDK will return the `onSwitchRole()` event callback. + * + * @param errCode Error code. `ERR_NULL` indicates a successful switch. For more information, please see [Error Codes](https://intl.cloud.tencent.com/document/product/647/35124). + * @param errMsg Error message + */ + virtual void onSwitchRole(TXLiteAVError errCode, const char* errMsg) { + } + + /** + * 2.4 Result of room switching + * + * You can call the `switchRoom()` API in `TRTCCloud` to switch from one room to another. + * After the switching, the SDK will return the `onSwitchRoom()` event callback. + * + * @param errCode Error code. `ERR_NULL` indicates a successful switch. For more information, please see [Error Codes](https://intl.cloud.tencent.com/document/product/647/35124). 
+ * @param errMsg Error message + */ + virtual void onSwitchRoom(TXLiteAVError errCode, const char* errMsg) { + } + + /** + * 2.5 Result of requesting cross-room call + * + * You can call the `connectOtherRoom()` API in `TRTCCloud` to establish a video call with the anchor of another room. This is the “anchor competition” feature. + * The caller will receive the `onConnectOtherRoom()` callback, which can be used to determine whether the cross-room call is successful. + * If it is successful, all users in either room will receive the `onUserVideoAvailable()` callback from the anchor of the other room. + * + * @param userId The user ID of the anchor (in another room) to be called + * @param errCode Error code. `ERR_NULL` indicates that cross-room connection is established successfully. For more information, please see [Error Codes](https://intl.cloud.tencent.com/document/product/647/35124). + * @param errMsg Error message + */ + virtual void onConnectOtherRoom(const char* userId, TXLiteAVError errCode, const char* errMsg) { + } + + /** + * 2.6 Result of ending cross-room call + */ + virtual void onDisconnectOtherRoom(TXLiteAVError errCode, const char* errMsg) { + } + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // User event callback + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name User event callback + /// @{ + + /** + * 3.1 A user entered the room + * + * Due to performance concerns, this callback works differently in different scenarios (i.e., `AppScene`, which you can specify by setting the second parameter when calling `enterRoom`). + * - Live streaming scenarios (`TRTCAppSceneLIVE` or `TRTCAppSceneVoiceChatRoom`): in live streaming scenarios, a user is either in the role of an anchor or audience. The callback is returned only when an anchor enters the room. 
+ * - Call scenarios (`TRTCAppSceneVideoCall` or `TRTCAppSceneAudioCall`): in call scenarios, the concept of roles does not apply (all users can be considered as anchors), and the callback is returned when any user enters the room. + * + * @note + * 1. The `onRemoteUserEnterRoom` callback indicates that a user entered the room, but it does not necessarily mean that the user enabled audio or video. + * 2. If you want to know whether a user enabled video, we recommend you use the `onUserVideoAvailable()` callback. + * @param userId User ID of the remote user + */ + virtual void onRemoteUserEnterRoom(const char* userId) { + } + + /** + * 3.2 A user exited the room + * + * As with `onRemoteUserEnterRoom`, this callback works differently in different scenarios (i.e., `AppScene`, which you can specify by setting the second parameter when calling `enterRoom`). + * - Live streaming scenarios (`TRTCAppSceneLIVE` or `TRTCAppSceneVoiceChatRoom`): the callback is triggered only when an anchor exits the room. + * - Call scenarios (`TRTCAppSceneVideoCall` or `TRTCAppSceneAudioCall`): in call scenarios, the concept of roles does not apply, and the callback is returned when any user exits the room. + * + * @param userId User ID of the remote user + * @param reason Reason for room exit. `0`: the user exited the room voluntarily; `1`: the user exited the room due to timeout; `2`: the user was removed from the room. + */ + virtual void onRemoteUserLeaveRoom(const char* userId, int reason) { + } + + /** + * 3.3 A remote user published/unpublished primary stream video + * + * The primary stream is usually used for camera images. If you receive the `onUserVideoAvailable(userId, true)` callback, it indicates that the user has available primary stream video. + * You can then call {@link startRemoteView} to subscribe to the remote user’s video. 
If the subscription is successful, you will receive the `onFirstVideoFrame(userid)` callback, which indicates that the first video frame of the user is + * rendered. + * + * If you receive the `onUserVideoAvailable(userId, false)` callback, it indicates that the video of the remote user is disabled, which may be because the user called {@link muteLocalVideo} or {@link stopLocalPreview}. + * + * @param userId User ID of the remote user + * @param available Whether the user published (or unpublished) primary stream video. `true`: published; `false`: unpublished + */ + virtual void onUserVideoAvailable(const char* userId, bool available) { + } + + /** + * 3.4 A remote user published/unpublished substream video + * + * The substream is usually used for screen sharing images. If you receive the `onUserSubStreamAvailable(userId, true)` callback, it indicates that the user has available substream video. + * You can then call {@link startRemoteSubStreamView} to subscribe to the remote user’s video. If the subscription is successful, you will receive the `onFirstVideoFrame(userid)` callback, which indicates that the first frame of the user is + * rendered. + * + * @note The API used to display substream images is {@link startRemoteSubStreamView}, not {@link startRemoteView}. + * + * @param userId User ID of the remote user + * @param available Whether the user published (or unpublished) substream video. `true`: published; `false`: unpublished + */ + virtual void onUserSubStreamAvailable(const char* userId, bool available) { + } + + /** + * 3.5 A remote user published/unpublished audio + * + * If you receive the `onUserAudioAvailable(userId, true)` callback, it indicates that the user published audio. + * - In auto-subscription mode, the SDK will play the user’s audio automatically. + * - In manual subscription mode, you can call {@link muteRemoteAudio}(userid, false) to play the user’s audio. + * + * @note The auto-subscription mode is used by default. 
You can switch to the manual subscription mode by calling {@link setDefaultStreamRecvMode}, but it must be called before room entry for the switch to take effect. + * + * @param userId User ID of the remote user + * @param available Whether the user published (or unpublished) audio. `true`: published; `false`: unpublished + */ + virtual void onUserAudioAvailable(const char* userId, bool available) { + } + + /** + * 3.6 The SDK started rendering the first video frame of the local or a remote user + * + * The SDK returns this event callback when it starts rendering your first video frame or that of a remote user. The `userId` in the callback can help you determine whether the frame is yours or a remote user’s. + * - If `userId` is empty, it indicates that the SDK has started rendering your first video frame. The precondition is that you have called {@link startLocalPreview} or {@link startScreenCapture}. + * - If `userId` is not empty, it indicates that the SDK has started rendering the first video frame of a remote user. The precondition is that you have called {@link startRemoteView} to subscribe to the user’s video. + * + * @note + * 1. The callback of the first local video frame being rendered is triggered only after you call {@link startLocalPreview} or {@link startScreenCapture}. + * 2. The callback of the first video frame of a remote user being rendered is triggered only after you call {@link startRemoteView} or {@link startRemoteSubStreamView}. + * + * @param userId The user ID of the local or a remote user. If it is empty, it indicates that the first local video frame is available; if it is not empty, it indicates that the first video frame of a remote user is available. + * @param streamType Video stream type. The primary stream (`Main`) is usually used for camera images, and the substream (`Sub`) for screen sharing images. 
+ * @param width Video width + * @param height Video height + */ + virtual void onFirstVideoFrame(const char* userId, const TRTCVideoStreamType streamType, const int width, const int height) { + } + + /** + * 3.7 The SDK started playing the first audio frame of a remote user + * + * The SDK returns this callback when it plays the first audio frame of a remote user. The callback is not returned for the playing of the first audio frame of the local user. + * + * @param userId User ID of the remote user + */ + virtual void onFirstAudioFrame(const char* userId) { + } + + /** + * 3.8 The first local video frame was published + * + * After you enter a room and call {@link startLocalPreview} or {@link startScreenCapture} to enable local video capturing (whichever happens first), + * the SDK will start video encoding and publish the local video data via its network module to the cloud. + * It returns the `onSendFirstLocalVideoFrame` callback after publishing the first local video frame. + * + * @param streamType Video stream type. The primary stream (`Main`) is usually used for camera images, and the substream (`Sub`) for screen sharing images. + */ + virtual void onSendFirstLocalVideoFrame(const TRTCVideoStreamType streamType) { + } + + /** + * 3.9 The first local audio frame was published + * + * After you enter a room and call {@link startLocalAudio} to enable audio capturing (whichever happens first), + * the SDK will start audio encoding and publish the local audio data via its network module to the cloud. + * The SDK returns the `onSendFirstLocalAudioFrame` callback after sending the first local audio frame. + */ + virtual void onSendFirstLocalAudioFrame() { + } + + /** + * 3.10 Change of remote video status + * + * You can use this callback to get the status (`Playing`, `Loading`, or `Stopped`) of the video of each remote user and display it on the UI. + * @param userId User ID + * @param streamType Video stream type. 
The primary stream (`Main`) is usually used for camera images, and the substream (`Sub`) for screen sharing images. + * @param status Video status, which may be `Playing`, `Loading`, or `Stopped` + * @param reason Reason for the change of status + * @param extraInfo Extra information + */ + virtual void onRemoteVideoStatusUpdated(const char* userId, TRTCVideoStreamType streamType, TRTCAVStatusType status, TRTCAVStatusChangeReason reason, void* extrainfo) { + } + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Callback of statistics on network and technical metrics + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Callback of statistics on network and technical metrics + /// @{ + + /** + * 4.1 Real-time network quality statistics + * + * This callback is returned every 2 seconds and notifies you of the upstream and downstream network quality detected by the SDK. + * The SDK uses a built-in proprietary algorithm to assess the current latency, bandwidth, and stability of the network and returns a result. + * If the result is `1` (excellent), it means that the current network conditions are excellent; if it is `6` (down), it means that the current network conditions are too bad to support TRTC calls. + * + * @note In the returned parameters `localQuality` and `remoteQuality`, if `userId` is empty, it indicates that the network quality statistics of the local user are returned. Otherwise, the network quality statistics of a remote user are + * returned. 
+ * + * @param localQuality Upstream network quality + * @param remoteQuality Downstream network quality + */ + virtual void onNetworkQuality(TRTCQualityInfo localQuality, TRTCQualityInfo* remoteQuality, uint32_t remoteQualityCount) { + } + + /** + * 4.2 Real-time statistics on technical metrics + * + * This callback is returned every 2 seconds and notifies you of the statistics on technical metrics related to video, audio, and network. The metrics are listed in {@link TRTCStatistics}: + * - Video statistics: video resolution (`resolution`), frame rate (`FPS`), bitrate (`bitrate`), etc. + * - Audio statistics: audio sample rate (`samplerate`), number of audio channels (`channel`), bitrate (`bitrate`), etc. + * - Network statistics: the round trip time (`rtt`) between the SDK and the cloud (SDK -> Cloud -> SDK), package loss rate (`loss`), upstream traffic (`sentBytes`), downstream traffic (`receivedBytes`), etc. + * + * @note If you want to learn about only the current network quality and do not want to spend much time analyzing the statistics returned by this callback, we recommend you use {@link onNetworkQuality}. + * @param statistics Statistics, including local statistics and the statistics of remote users. For details, please see {@link TRTCStatistics}. + */ + virtual void onStatistics(const TRTCStatistics& statistics) { + } + + /** + * 4.3 Callback of network speed test + * + * The callback is triggered by {@link startSpeedTest:}. + * + * @param result Speed test data, including loss rates, rtt and bandwidth rates, please refer to {@link TRTCSpeedTestResult} for details. 
+ */ + virtual void onSpeedTestResult(const TRTCSpeedTestResult& result) { + } + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Callback of connection to the cloud + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Callback of connection to the cloud + /// @{ + + /** + * 5.1 The SDK was disconnected from the cloud + * + * The SDK returns this callback when it is disconnected from the cloud, which may be caused by network unavailability or change of network, for example, when the user walks into an elevator. + * After returning this callback, the SDK will attempt to reconnect to the cloud, and will return the {@link onTryToReconnect} callback. When it is reconnected, it will return the {@link onConnectionRecovery} callback. + * In other words, the SDK proceeds from one event to the next in the following order: + *
+     *         [onConnectionLost] =====> [onTryToReconnect] =====> [onConnectionRecovery]
+     *               /|\                                                     |
+     *                |------------------------------------------------------|
+     * 
+ */ + virtual void onConnectionLost() { + } + + /** + * 5.2 The SDK is reconnecting to the cloud + * + * When the SDK is disconnected from the cloud, it returns the {@link onConnectionLost} callback. It then attempts to reconnect and returns this callback ({@link onTryToReconnect}). After it is reconnected, it returns the {@link + * onConnectionRecovery} callback. + */ + virtual void onTryToReconnect() { + } + + /** + * 5.3 The SDK is reconnected to the cloud + * + * When the SDK is disconnected from the cloud, it returns the {@link onConnectionLost} callback. It then attempts to reconnect and returns the {@link onTryToReconnect} callback. After it is reconnected, it returns this callback ({@link + * onConnectionRecovery}). + */ + virtual void onConnectionRecovery() { + } + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Callback of hardware events + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Callback of hardware events + /// @{ + + /** + * 6.1 The camera is ready + * + * After you call {@link startLocalPreview}, the SDK will try to start the camera and return this callback if the camera is started. + * If it fails to start the camera, it’s probably because the application does not have access to the camera or the camera is being used. + * You can capture the {@link onError} callback to learn about the exception and let users know via UI messages. + */ + virtual void onCameraDidReady() { + } + + /** + * 6.2 The mic is ready + * + * After you call {@link startLocalAudio}, the SDK will try to start the mic and return this callback if the mic is started. + * If it fails to start the mic, it’s probably because the application does not have access to the mic or the mic is being used. + * You can capture the {@link onError} callback to learn about the exception and let users know via UI messages. 
+ */ + virtual void onMicDidReady() { + } + + /** + * 6.4 Volume + * + * The SDK can assess the volume of each channel and return this callback on a regular basis. You can display, for example, a waveform or volume bar on the UI based on the statistics returned. + * You need to first call {@link enableAudioVolumeEvaluation} to enable the feature and set the interval for the callback. + * Note that the SDK returns this callback at the specified interval regardless of whether someone is speaking in the room. When no one is speaking in the room, `userVolumes` is empty, and `totalVolume` is `0`. + * + * @note `userVolumes` is an array. If `userId` is empty, the elements in the array represent the volume of the local user’s audio. Otherwise, they represent the volume of a remote user’s audio. + * + * @param userVolumes An array that represents the volume of all users who are speaking in the room. Value range: 0-100 + * @param totalVolume The total volume of all remote users. Value range: 0-100 + */ + virtual void onUserVoiceVolume(TRTCVolumeInfo* userVolumes, uint32_t userVolumesCount, uint32_t totalVolume) { + } + +/** + * 6.5 The status of a local device changed (for desktop OS only) + * + * The SDK returns this callback when a local device (camera, mic, or speaker) is connected or disconnected. + * + * @param deviceId Device ID + * @param deviceType Device type + * @param state Device status. `0`: connected; `1`: disconnected; `2`: started + */ +#if TARGET_PLATFORM_DESKTOP + virtual void onDeviceChange(const char* deviceId, TRTCDeviceType type, TRTCDeviceState state) { + } +#endif + +/** + * 6.6 The capturing volume of the mic changed + * + * On desktop OS such as macOS and Windows, users can set the capturing volume of the mic in the audio control panel. + * The higher volume a user sets, the higher the volume of raw audio captured by the mic. + * On some keyboards and laptops, users can also mute the mic by pressing a key (whose icon is a crossed out mic). 
+ * + * When users set the mic capturing volume via the UI or a keyboard shortcut, the SDK will return this callback. + * + * @note You need to call {@link enableAudioVolumeEvaluation} and set the callback interval (`interval` > 0) to enable the callback. To disable the callback, set `interval` to `0`. + * + * @param volume System audio capturing volume, which users can set in the audio control panel. Value range: 0-100 + * @param muted Whether the mic is muted. `true`: muted; `false`: unmuted + */ +#if TARGET_PLATFORM_DESKTOP + virtual void onAudioDeviceCaptureVolumeChanged(uint32_t volume, bool muted) { + } +#endif + +/** + * 6.7 The playback volume changed + * + * On desktop OS such as macOS and Windows, users can set the system’s playback volume in the audio control panel. + * On some keyboards and laptops, users can also mute the speaker by pressing a key (whose icon is a crossed out speaker). + * + * When users set the system’s playback volume via the UI or a keyboard shortcut, the SDK will return this callback. + * + * @note You need to call {@link enableAudioVolumeEvaluation} and set the callback interval (`interval` > 0) to enable the callback. To disable the callback, set `interval` to `0`. + * + * @param volume The system playback volume, which users can set in the audio control panel. Value range: 0-100 + * @param muted Whether the speaker is muted. `true`: muted; `false`: unmuted + */ +#if TARGET_PLATFORM_DESKTOP + virtual void onAudioDevicePlayoutVolumeChanged(uint32_t volume, bool muted) { + } +#endif + +/** + * 6.8 Whether system audio capturing is enabled successfully (for macOS only) + * + * On macOS, you can call {@link startSystemAudioLoopback} to install an audio driver and have the SDK capture the audio played back by the system. 
+ * In use cases such as video teaching and music live streaming, the teacher can use this feature to let the SDK capture the sound of the video played by his or her computer, so that students in the room can hear the sound too. + * The SDK returns this callback after trying to enable system audio capturing. To determine whether it is actually enabled, pay attention to the error parameter in the callback. + * + * @param err If it is `ERR_NULL`, system audio capturing is enabled successfully. Otherwise, it is not. + */ +#if TARGET_PLATFORM_MAC + virtual void onSystemAudioLoopbackError(TXLiteAVError errCode) { + } +#endif + +/** + * 6.9 Volume during mic test + * + * When you call {@link startMicDeviceTest} to test the mic, the SDK will keep returning this callback. The `volume` parameter represents the volume of the audio captured by the mic. + * If the value of the `volume` parameter fluctuates, the mic works properly. If it is `0` throughout the test, it indicates that there is a problem with the mic, and users should be prompted to switch to a different mic. + * + * @param volume Captured mic volume. Value range: 0-100 + */ +#if TARGET_PLATFORM_DESKTOP + virtual void onTestMicVolume(uint32_t volume) { + } +#endif + +/** + * 6.10 Volume during speaker test + * + * When you call {@link startSpeakerDeviceTest} to test the speaker, the SDK will keep returning this callback. + * The `volume` parameter in the callback represents the volume of audio sent by the SDK to the speaker for playback. If its value fluctuates but users cannot hear any sound, the speaker is not working properly. + * + * @param volume The volume of audio sent by the SDK to the speaker for playback. 
Value range: 0-100 + */ +#if TARGET_PLATFORM_DESKTOP + virtual void onTestSpeakerVolume(uint32_t volume) { + } +#endif + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Callback of the receipt of a custom message + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Callback of the receipt of a custom message + /// @{ + + /** + * 7.1 Receipt of custom message + * + * When a user in a room uses {@link sendCustomCmdMsg} to send a custom message, other users in the room can receive the message through the `onRecvCustomCmdMsg` callback. + * + * @param userId User ID + * @param cmdID Command ID + * @param seq Message serial number + * @param message Message data + */ + virtual void onRecvCustomCmdMsg(const char* userId, int32_t cmdID, uint32_t seq, const uint8_t* message, uint32_t messageSize) { + } + + /** + * 7.2 Loss of custom message + * + * When you use {@link sendCustomCmdMsg} to send a custom UDP message, even if you enable reliable transfer (by setting `reliable` to `true`), there is still a chance of message loss. Reliable transfer only helps maintain a low probability of + * message loss, which meets the reliability requirements in most cases. If the sender sets `reliable` to `true`, the SDK will use this callback to notify the recipient of the number of custom messages lost during a specified time period (usually + * 5s) in the past. + * + * @note The recipient receives this callback only if the sender sets `reliable` to `true`. 
+ * @param userId User ID + * @param cmdID Command ID + * @param errCode Error code + * @param missed Number of lost messages + */ + virtual void onMissCustomCmdMsg(const char* userId, int32_t cmdID, int32_t errCode, int32_t missed) { + } + + /** + * 7.3 Receipt of SEI message + * + * If a user in the room uses {@link sendSEIMsg} to send an SEI message via video frames, other users in the room can receive the message through the `onRecvSEIMsg` callback. + * + * @param userId User ID + * @param message Data + */ + virtual void onRecvSEIMsg(const char* userId, const uint8_t* message, uint32_t messageSize){}; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // CDN event callback + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name CDN event callback + /// @{ + + /** + * 8.1 Started publishing to Tencent Cloud CSS CDN + * + * When you call {@link startPublishing} to publish streams to Tencent Cloud CSS CDN, the SDK will sync the command to the CVM immediately. + * The SDK will then receive the execution result from the CVM and return the result to you via this callback. + * + * @param err `0`: successful; other values: failed + * @param errMsg Error message + */ + virtual void onStartPublishing(int err, const char* errMsg){}; + + /** + * 8.2 Stopped publishing to Tencent Cloud CSS CDN + * + * When you call {@link stopPublishing} to stop publishing streams to Tencent Cloud CSS CDN, the SDK will sync the command to the CVM immediately. + * The SDK will then receive the execution result from the CVM and return the result to you via this callback. 
+ * + * @param err `0`: successful; other values: failed + * @param errMsg Error message + */ + virtual void onStopPublishing(int err, const char* errMsg){}; + + /** + * 8.3 Started publishing to non-Tencent Cloud’s live streaming CDN + * + * When you call {@link startPublishCDNStream} to start publishing streams to a non-Tencent Cloud’s live streaming CDN, the SDK will sync the command to the CVM immediately. + * The SDK will then receive the execution result from the CVM and return the result to you via this callback. + * + * @note If you receive a callback that the command is executed successfully, it only means that your command was sent to Tencent Cloud’s backend server. If the CDN vendor does not accept your streams, the publishing will still fail. + * @param err `0`: successful; other values: failed + * @param errMsg Error message + */ + virtual void onStartPublishCDNStream(int errCode, const char* errMsg){}; + + /** + * 8.4 Stopped publishing to non-Tencent Cloud’s live streaming CDN + * + * When you call {@link stopPublishCDNStream} to stop publishing to a non-Tencent Cloud’s live streaming CDN, the SDK will sync the command to the CVM immediately. + * The SDK will then receive the execution result from the CVM and return the result to you via this callback. + * + * @param err `0`: successful; other values: failed + * @param errMsg Error message + */ + virtual void onStopPublishCDNStream(int errCode, const char* errMsg){}; + + /** + * 8.5 Set the layout and transcoding parameters for On-Cloud MixTranscoding + * + * When you call {@link setMixTranscodingConfig} to modify the layout and transcoding parameters for On-Cloud MixTranscoding, the SDK will sync the command to the CVM immediately. + * The SDK will then receive the execution result from the CVM and return the result to you via this callback. 
+ * + * @param err `0`: successful; other values: failed + * @param errMsg Error message + */ + virtual void onSetMixTranscodingConfig(int err, const char* errMsg){}; + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Screen sharing event callback + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Screen sharing event callback + /// @{ + + /** + * 9.1 Screen sharing started + * + * The SDK returns this callback when you call {@link startScreenCapture} and other APIs to start screen sharing. + */ + virtual void onScreenCaptureStarted(){}; + + /** + * 9.2 Screen sharing was paused + * + * The SDK returns this callback when you call {@link pauseScreenCapture} to pause screen sharing. + * @param reason Reason. + * - `0`: the user paused screen sharing. + * - `1`: screen sharing was paused because the shared window became invisible(Mac). screen sharing was paused because setting parameters(Windows). + * - `2`: screen sharing was paused because the shared window became minimum(only for Windows). + * - `3`: screen sharing was paused because the shared window became invisible(only for Windows). + */ + virtual void onScreenCapturePaused(int reason){}; + + /** + * 9.3 Screen sharing was resumed + * + * The SDK returns this callback when you call {@link resumeScreenCapture} to resume screen sharing. + * @param reason Reason. + * - `0`: the user resumed screen sharing. + * - `1`: screen sharing was resumed automatically after the shared window became visible again(Mac). screen sharing was resumed automatically after setting parameters(Windows). + * - `2`: screen sharing was resumed automatically after the shared window became minimize recovery(only for Windows). + * - `3`: screen sharing was resumed automatically after the shared window became visible again(only for Windows). 
+ */ + virtual void onScreenCaptureResumed(int reason){}; + + /** + * 9.4 Screen sharing stopped + * + * The SDK returns this callback when you call {@link stopScreenCapture} to stop screen sharing. + * @param reason Reason. `0`: the user stopped screen sharing; `1`: screen sharing stopped because the shared window was closed. + */ + virtual void onScreenCaptureStoped(int reason){}; + +/** + * 9.5 The shared window was covered (for Windows only) + * + * The SDK returns this callback when the shared window is covered and cannot be captured. Upon receiving this callback, you can prompt users via the UI to move and expose the window. + */ +#ifdef _WIN32 + virtual void onScreenCaptureCovered(){}; +#endif + + /// @} + ///////////////////////////////////////////////////////////////////////////////// + // + // Callback of local recording and screenshot events + // + ///////////////////////////////////////////////////////////////////////////////// + /// @name Callback of local recording and screenshot events + /// @{ + + /** + * 10.1 Local recording started + * + * When you call {@link startLocalRecording} to start local recording, the SDK returns this callback to notify you whether recording is started successfully. + * @param errCode Error code. `0`: recording started successfully; `-1`: failed to start recording; `-2`: incorrect file extension + * @param storagePath Storage path of recording file + */ + virtual void onLocalRecordBegin(int errCode, const char* storagePath) { + } + + /** + * 10.2 Local media is being recorded + * + * The SDK returns this callback regularly after local recording is started successfully via the calling of {@link startLocalRecording}. + * You can capture this callback to stay up to date with the status of the recording task. + * You can set the callback interval when calling {@link startLocalRecording}. 
+ * + * @param duration Cumulative duration of recording, in milliseconds + * @param storagePath Storage path of recording file + */ + virtual void onLocalRecording(long duration, const char* storagePath) { + } + + /** + * 10.3 Local recording stopped + * + * When you call {@link stopLocalRecording} to stop local recording, the SDK returns this callback to notify you of the recording result. + * @param errCode Error code. `0`: recording succeeded; `-1`: recording failed; `-2`: recording was ended due to change of resolution or switch between the landscape and portrait mode. + * @param storagePath Storage path of recording file + */ + virtual void onLocalRecordComplete(int errCode, const char* storagePath) { + } + + /** + * 10.4 Finished taking a local screenshot + * + * @param userId User ID. If it is empty, the screenshot is a local image. + * @param type Video stream type + * @param data Screenshot data. If it is `nullptr`, it indicates that the SDK failed to take the screenshot. + * @param length Screenshot data length. In BGRA32 format, length = width * height * 4. + * @param width Screenshot width + * @param height Screenshot height + * @param format Screenshot data format. Only `TRTCVideoPixelFormat_BGRA32` is supported now. + */ + virtual void onSnapshotComplete(const char* userId, TRTCVideoStreamType type, char* data, uint32_t length, uint32_t width, uint32_t height, TRTCVideoPixelFormat format) { + } + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Disused callbacks (please use the new ones) +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Disused callbacks (please use the new ones) +/// @{ + +/** + * An anchor entered the room (disused) + * + * @deprecated This callback is not recommended in the new version. Please use {@link onRemoteUserEnterRoom} instead. 
+ */ +#ifdef _WIN32 + virtual __declspec(deprecated("use onRemoteUserEnterRoom instead")) void onUserEnter(const char* userId) { + } +#endif + +/** + * An anchor left the room (disused) + * + * @deprecated This callback is not recommended in the new version. Please use {@link onRemoteUserLeaveRoom} instead. + */ +#ifdef _WIN32 + virtual __declspec(deprecated("use onRemoteUserLeaveRoom instead")) void onUserExit(const char* userId, int reason) { + } +#endif + +/** + * Audio effects ended (disused) + * + * @deprecated This callback is not recommended in the new version. Please use {@link ITXAudioEffectManager} instead. + * Audio effects and background music can be started using the same API ({@link startPlayMusic}) now instead of separate ones. + */ +#ifdef _WIN32 + virtual __declspec(deprecated("use ITXAudioEffectManager.ITXMusicPlayObserver instead")) void onAudioEffectFinished(int effectId, int code){}; +#endif + +/** + * Started playing background music (disused) + * + * @deprecated This callback is not recommended in the new version. Please use {@link ITXMusicPlayObserver} instead. + * Audio effects and background music can be started using the same API ({@link startPlayMusic}) now instead of separate ones. + */ +#ifdef _WIN32 + virtual __declspec(deprecated("use ITXAudioEffectManager.ITXMusicPlayObserver instead")) void onPlayBGMBegin(TXLiteAVError errCode) { + } +#endif + +/** + * Playback progress of background music (disused) + * + * @deprecated This callback is not recommended in the new version. Please use {@link ITXMusicPlayObserver} instead. + * Audio effects and background music can be started using the same API ({@link startPlayMusic}) now instead of separate ones. 
+ */ +#ifdef _WIN32 + virtual __declspec(deprecated("use ITXAudioEffectManager.ITXMusicPlayObserver instead")) void onPlayBGMProgress(uint32_t progressMS, uint32_t durationMS) { + } +#endif + +/** + * Background music stopped (disused) + * + * @deprecated This callback is not recommended in the new version. Please use {@link ITXMusicPlayObserver} instead. + * Audio effects and background music can be started using the same API ({@link startPlayMusic}) now instead of separate ones. + */ +#ifdef _WIN32 + virtual __declspec(deprecated("use ITXAudioEffectManager.ITXMusicPlayObserver instead")) void onPlayBGMComplete(TXLiteAVError errCode) { + } +#endif + +/** + * Result of server speed testing (disused) + * + * @deprecated This callback is not recommended in the new version. Please use {@link onSpeedTestResult:} instead. + */ +#ifdef _WIN32 + virtual __declspec(deprecated("use onSpeedTestResult instead")) void onSpeedTest(const TRTCSpeedTestResult& currentResult, uint32_t finishedCount, uint32_t totalCount) { + } +#elif defined(__APPLE__) + virtual void onSpeedTest(const TRTCSpeedTestResult& currentResult, uint32_t finishedCount, uint32_t totalCount) { + } + __attribute__((deprecated("use onSpeedTestResult instead"))); +#else + virtual void onSpeedTest(const TRTCSpeedTestResult& currentResult, uint32_t finishedCount, uint32_t totalCount) { + } +#endif + + /// @} +}; // End of interface ITRTCCloudCallback + +///////////////////////////////////////////////////////////////////////////////// +// +// Callback of custom video processing +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Callback of custom video processing +/// @{ + +class ITRTCVideoRenderCallback { + public: + virtual ~ITRTCVideoRenderCallback() { + } + + /** + * Custom video rendering + * + * If you have configured the callback of custom rendering for local or remote video, the SDK will return to you via this callback video frames that are otherwise sent to the 
rendering control, so that you can customize rendering. + * @param frame Video frames to be rendered + * @param userId `userId` of the video source. This parameter can be ignored if the callback is for local video (`setLocalVideoRenderDelegate`). + * @param streamType Stream type. The primary stream (`Main`) is usually used for camera images, and the substream (`Sub`) for screen sharing images. + */ + virtual void onRenderVideoFrame(const char* userId, TRTCVideoStreamType streamType, TRTCVideoFrame* frame) { + } + +}; // End of interface ITRTCVideoRenderCallback + +class ITRTCVideoFrameCallback { + public: + virtual ~ITRTCVideoFrameCallback() { + } + + /** + * Video processing by third-party beauty filters + * + * If you use a third-party beauty filter component, you need to configure this callback in `TRTCCloud` to have the SDK return to you video frames that are otherwise pre-processed by TRTC. + * You can then send the video frames to the third-party beauty filter component for processing. As the data returned can be read and modified, the result of processing can be synced to TRTC for subsequent encoding and publishing. + * + * @param srcFrame Used to carry images captured by TRTC via the camera + * @param dstFrame Used to receive video images processed by third-party beauty filters + * @note Currently, only the OpenGL texture scheme is supported(PC supports TRTCVideoBufferType_Buffer format Only) + * + * Case 1: the beauty filter component generates new textures + * If the beauty filter component you use generates a frame of new texture (for the processed image) during image processing, please set `dstFrame.textureId` to the ID of the new texture in the callback function. 
+ * + * Case 2: you need to provide target textures to the beauty filter component + * If the third-party beauty filter component you use does not generate new textures and you need to manually set an input texture and an output texture for the component, you can consider the following scheme: + * ```ObjectiveC + * uint32_t onProcessVideoFrame(TRTCVideoFrame * _Nonnull)srcFrame dstFrame:(TRTCVideoFrame * _Nonnull)dstFrame{ + * thirdparty_process(srcFrame.textureId, srcFrame.width, srcFrame.height, dstFrame.textureId); + * return 0; + * } + * ``` + * ```java + * int onProcessVideoFrame(TRTCCloudDef.TRTCVideoFrame srcFrame, TRTCCloudDef.TRTCVideoFrame dstFrame) { + * thirdparty_process(srcFrame.texture.textureId, srcFrame.width, srcFrame.height, dstFrame.texture.textureId); + * return 0; + * } + * ``` + */ + virtual int onProcessVideoFrame(TRTCVideoFrame* srcFrame, TRTCVideoFrame* dstFrame) { + return 0; + } + +}; // End of class ITRTCVideoFrameCallback + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Callback of custom audio processing +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Callback of custom audio processing +/// @{ + +class ITRTCAudioFrameCallback { + public: + virtual ~ITRTCAudioFrameCallback() { + } + + /** + * Audio data captured by the local mic and pre-processed by the audio module + * + * After you configure the callback of custom audio processing, the SDK will return via this callback the data captured and pre-processed (ANS, AEC, and AGC) in PCM format. + * - The audio returned is in PCM format and has a fixed frame length (time) of 0.02s. + * - The formula to convert a frame length in seconds to one in bytes is **sample rate * frame length in seconds * number of sound channels * audio bit depth**. 
+ * - Assume that the audio is recorded on a single channel with a sample rate of 48,000 Hz and audio bit depth of 16 bits, which are the default settings of TRTC. The frame length in bytes will be **48000 * 0.02s * 1 * 16 bits = 15360 bits = 1920 + * bytes**. + * + * @param frame Audio frames in PCM format + * @note + * 1. Please avoid time-consuming operations in this callback function. The SDK processes an audio frame every 20 ms, so if your operation takes more than 20 ms, it will cause audio exceptions. + * 2. The audio data returned via this callback can be read and modified, but please keep the duration of your operation short. + * 3. The audio data is returned via this callback after ANS, AEC and AGC, but it **does not include** pre-processing effects like background music, audio effects, or reverb, and therefore has a short delay. + */ + virtual void onCapturedRawAudioFrame(TRTCAudioFrame* frame){}; + + /** + * Audio data captured by the local mic, pre-processed by the audio module, effect-processed and BGM-mixed + * + * After you configure the callback of custom audio processing, the SDK will return via this callback the data captured, pre-processed (ANS, AEC, and AGC), effect-processed and BGM-mixed in PCM format, before it is submitted to the network module + * for encoding. + * - The audio data returned via this callback is in PCM format and has a fixed frame length (time) of 0.02s. + * - The formula to convert a frame length in seconds to one in bytes is **sample rate * frame length in seconds * number of sound channels * audio bit depth**. + * - Assume that the audio is recorded on a single channel with a sample rate of 48,000 Hz and audio bit depth of 16 bits, which are the default settings of TRTC. The frame length in bytes will be **48000 * 0.02s * 1 * 16 bits = 15360 bits = 1920 + * bytes**. + * + * Instructions: + * You could write data to the `TRTCAudioFrame.extraData` filed, in order to achieve the purpose of transmitting signaling. 
+ * Because the data block of the audio frame header cannot be too large, we recommend you limit the size of the signaling data to only a few bytes when using this API. If extra data more than 100 bytes, it won't be sent. + * Other users in the room can receive the message through the `TRTCAudioFrame.extraData` in `onRemoteUserAudioFrame` callback in {@link TRTCAudioFrameDelegate}. + * + * @param frame Audio frames in PCM format + * @note + * 1. Please avoid time-consuming operations in this callback function. The SDK processes an audio frame every 20 ms, so if your operation takes more than 20 ms, it will cause audio exceptions. + * 2. The audio data returned via this callback can be read and modified, but please keep the duration of your operation short. + * 3. Audio data is returned via this callback after ANS, AEC, AGC, effect-processing and BGM-mixing, and therefore the delay is longer than that with {@link onCapturedRawAudioFrame}. + */ + virtual void onLocalProcessedAudioFrame(TRTCAudioFrame* frame){}; + + /** + * Audio data of each remote user before audio mixing + * + * After you configure the callback of custom audio processing, the SDK will return via this callback the raw audio data (PCM format) of each remote user before mixing. + * - The audio data returned via this callback is in PCM format and has a fixed frame length (time) of 0.02s. + * - The formula to convert a frame length in seconds to one in bytes is **sample rate * frame length in seconds * number of sound channels * audio bit depth**. + * - Assume that the audio is recorded on a single channel with a sample rate of 48,000 Hz and audio bit depth of 16 bits, which are the default settings of TRTC. The frame length in bytes will be **48000 * 0.02s * 1 * 16 bits = 15360 bits = 1920 + * bytes**. + * + * @param frame Audio frames in PCM format + * @param userId User ID + * @note The audio data returned via this callback can be read but not modified. 
+ */ + virtual void onPlayAudioFrame(TRTCAudioFrame* frame, const char* userId){}; + + /** + * Data mixed from each channel before being submitted to the system for playback + * + * After you configure the callback of custom audio processing, the SDK will return to you via this callback the data (PCM format) mixed from each channel before it is submitted to the system for playback. + * - The audio data returned via this callback is in PCM format and has a fixed frame length (time) of 0.02s. + * - The formula to convert a frame length in seconds to one in bytes is **sample rate * frame length in seconds * number of sound channels * audio bit depth**. + * - Assume that the audio is recorded on a single channel with a sample rate of 48,000 Hz and audio bit depth of 16 bits, which are the default settings of TRTC. The frame length in bytes will be **48000 * 0.02s * 1 * 16 bits = 15360 bits = 1920 + * bytes**. + * + * @param frame Audio frames in PCM format + * @note + * 1. Please avoid time-consuming operations in this callback function. The SDK processes an audio frame every 20 ms, so if your operation takes more than 20 ms, it will cause audio exceptions. + * 2. The audio data returned via this callback can be read and modified, but please keep the duration of your operation short. + * 3. The audio data returned via this callback is the audio data mixed from each channel before it is played. It does not include the in-ear monitoring data. 
+ */ + virtual void onMixedPlayAudioFrame(TRTCAudioFrame* frame){}; + +}; // End of interface ITRTCAudioFrameCallback + +/// @} +///////////////////////////////////////////////////////////////////////////////// +// +// Other event callbacks +// +///////////////////////////////////////////////////////////////////////////////// +/// @name Other event callbacks +/// @{ + +class ITRTCLogCallback { + public: + virtual ~ITRTCLogCallback() { + } + + /** + * Printing of local log + * + * If you want to capture the local log printing event, you can configure the log callback to have the SDK return to you via this callback all logs that are to be printed. + * @param log Log content + * @param level Log level. For more information, please see `TRTC_LOG_LEVEL`. + * @param module Reserved field, which is not defined at the moment and has a fixed value of `TXLiteAVSDK`. + */ + virtual void onLog(const char* log, TRTCLogLevel level, const char* module) { + } + +}; // End of interface ITRTCLogCallback + +/// @} +} /* namespace liteav*/ +#endif /* __TRTCCLOUDCALLBACK_H__ */ +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TRTCTypeDef.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TRTCTypeDef.h new file mode 100644 index 0000000..7ce6db8 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TRTCTypeDef.h @@ -0,0 +1,1517 @@ +/** + * Module: TRTC key class definition + * Description: definitions of enumerated and constant values such as resolution and quality level + */ +/// @defgroup TRTCCloudDef_cplusplus TRTCCloudDef +/// Tencent Cloud TRTC Key Type Definition +/// @{ +#ifndef __TRTCTYPEDEF_H__ +#define __TRTCTYPEDEF_H__ + +#include "ITXDeviceManager.h" +#include +#include + +#ifdef _WIN32 +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#ifdef TRTC_EXPORTS +#define TRTC_API __declspec(dllexport) +#else +#define TRTC_API __declspec(dllimport) +#endif +#elif __APPLE__ +#include +#define 
TRTC_API __attribute__((visibility("default"))) +#elif __ANDROID__ +#define TRTC_API __attribute__((visibility("default"))) +#else +#define TRTC_API +#endif + +#define TARGET_PLATFORM_DESKTOP ((__APPLE__ && TARGET_OS_MAC && !TARGET_OS_IPHONE) || _WIN32) +#define TARGET_PLATFORM_PHONE (__ANDROID__ || (__APPLE__ && TARGET_OS_IOS)) +#define TARGET_PLATFORM_MAC (__APPLE__ && TARGET_OS_MAC && !TARGET_OS_IPHONE) + +namespace liteav { +/// @{ +#ifndef _WIN32 +struct RECT { + int left = 0; + int top = 0; + int right = 0; + int bottom = 0; +}; +struct SIZE { + long width = 0; + long height = 0; +}; +#endif + +///////////////////////////////////////////////////////////////////////////////// +// +// Rendering control +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * [VIEW] Rendering control that renders the video image + * + * There are many APIs in TRTC that need to manipulate the video image, for which you should specify the video rendering control. The SDK provides rendering controls for different terminal platforms. + * As the all-platform C++ APIs need to use a unified parameter type, you should uniformly convert the rendering controls into pointers in `TXView` type when calling these APIs: + * - iOS: you can use the `UIView` object as the rendering control. When calling the C++ APIs, please pass in the pointer to the `UIView` object (which needs to be forcibly converted to the `void*` type). + * - macOS: you can use the `NSView` object as the rendering control. When calling the C++ APIs, please pass in the pointer to the `NSView` object (which needs to be forcibly converted to the `void*` type). + * - Android: when calling the C++ APIs, please pass in the `jobject` pointer to the `TXCloudVideoView` object (which needs to be forcibly converted to the `void*` type). + * - Windows: you can use the window handle `HWND` as the rendering control. When calling the C++ APIs, you need to forcibly convert the `HWND` to `void*` type. 
+ * + * Code sample 1. Use the all-platform C++ APIs under QT + *
+ * QWidget *videoView;
+ * // The relevant code for setting the videoView is omitted here...
+ * getTRTCShareInstance()->startLocalPreview(reinterpret_cast(videoView->winId()));
+ * 
+ * + * Code sample 2. Call the all-platform C++ APIs through JNI on Android + *
+ * native void nativeStartLocalPreview(String userId, int streamType, TXCloudVideoView view);
+ * //...
+ * Java_com_example_test_MainActivity_nativeStartRemoteView(JNIEnv *env, jobject thiz, jstring user_id, jint stream_type, jobject view) {
+ *     const char *user_id_chars = env->GetStringUTFChars(user_id, nullptr);
+ *     trtc_cloud->startRemoteView(user_id_chars, (trtc::TRTCVideoStreamType)stream_type, view);
+ *     env->ReleaseStringUTFChars(user_id, user_id_chars);
+ * }
+ * 
+ */ +#ifdef _WIN32 +// Windows: HWND +typedef HWND TXView; +#else +// iOS: UIView; Mac OS: NSView; Android: jobject of TXCloudVideoView +typedef void *TXView; +#endif + +///////////////////////////////////////////////////////////////////////////////// +// +// Definitions of video enumerated values +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 1.1 Video resolution + * + * Here, only the landscape resolution (e.g., 640x360) is defined. If the portrait resolution (e.g., 360x640) needs to be used, `Portrait` must be selected for `TRTCVideoResolutionMode`. + */ +enum TRTCVideoResolution { + + /// Aspect ratio: 1:1; resolution: 120x120; recommended bitrate (VideoCall): 80 Kbps; recommended bitrate (LIVE): 120 Kbps. + TRTCVideoResolution_120_120 = 1, + + /// Aspect ratio: 1:1; resolution: 160x160; recommended bitrate (VideoCall): 100 Kbps; recommended bitrate (LIVE): 150 Kbps. + TRTCVideoResolution_160_160 = 3, + + /// Aspect ratio: 1:1; resolution: 270x270; recommended bitrate (VideoCall): 200 Kbps; recommended bitrate (LIVE): 300 Kbps. + TRTCVideoResolution_270_270 = 5, + + /// Aspect ratio: 1:1; resolution: 480x480; recommended bitrate (VideoCall): 350 Kbps; recommended bitrate (LIVE): 500 Kbps. + TRTCVideoResolution_480_480 = 7, + + /// Aspect ratio: 4:3; resolution: 160x120; recommended bitrate (VideoCall): 100 Kbps; recommended bitrate (LIVE): 150 Kbps. + TRTCVideoResolution_160_120 = 50, + + /// Aspect ratio: 4:3; resolution: 240x180; recommended bitrate (VideoCall): 150 Kbps; recommended bitrate (LIVE): 250 Kbps. + TRTCVideoResolution_240_180 = 52, + + /// Aspect ratio: 4:3; resolution: 280x210; recommended bitrate (VideoCall): 200 Kbps; recommended bitrate (LIVE): 300 Kbps. + TRTCVideoResolution_280_210 = 54, + + /// Aspect ratio: 4:3; resolution: 320x240; recommended bitrate (VideoCall): 250 Kbps; recommended bitrate (LIVE): 375 Kbps. 
+ TRTCVideoResolution_320_240 = 56, + + /// Aspect ratio: 4:3; resolution: 400x300; recommended bitrate (VideoCall): 300 Kbps; recommended bitrate (LIVE): 450 Kbps. + TRTCVideoResolution_400_300 = 58, + + /// Aspect ratio: 4:3; resolution: 480x360; recommended bitrate (VideoCall): 400 Kbps; recommended bitrate (LIVE): 600 Kbps. + TRTCVideoResolution_480_360 = 60, + + /// Aspect ratio: 4:3; resolution: 640x480; recommended bitrate (VideoCall): 600 Kbps; recommended bitrate (LIVE): 900 Kbps. + TRTCVideoResolution_640_480 = 62, + + /// Aspect ratio: 4:3; resolution: 960x720; recommended bitrate (VideoCall): 1000 Kbps; recommended bitrate (LIVE): 1500 Kbps. + TRTCVideoResolution_960_720 = 64, + + /// Aspect ratio: 16:9; resolution: 160x90; recommended bitrate (VideoCall): 150 Kbps; recommended bitrate (LIVE): 250 Kbps. + TRTCVideoResolution_160_90 = 100, + + /// Aspect ratio: 16:9; resolution: 256x144; recommended bitrate (VideoCall): 200 Kbps; recommended bitrate (LIVE): 300 Kbps. + TRTCVideoResolution_256_144 = 102, + + /// Aspect ratio: 16:9; resolution: 320x180; recommended bitrate (VideoCall): 250 Kbps; recommended bitrate (LIVE): 400 Kbps. + TRTCVideoResolution_320_180 = 104, + + /// Aspect ratio: 16:9; resolution: 480x270; recommended bitrate (VideoCall): 350 Kbps; recommended bitrate (LIVE): 550 Kbps. + TRTCVideoResolution_480_270 = 106, + + /// Aspect ratio: 16:9; resolution: 640x360; recommended bitrate (VideoCall): 500 Kbps; recommended bitrate (LIVE): 900 Kbps. + TRTCVideoResolution_640_360 = 108, + + /// Aspect ratio: 16:9; resolution: 960x540; recommended bitrate (VideoCall): 850 Kbps; recommended bitrate (LIVE): 1300 Kbps. + TRTCVideoResolution_960_540 = 110, + + /// Aspect ratio: 16:9; resolution: 1280x720; recommended bitrate (VideoCall): 1200 Kbps; recommended bitrate (LIVE): 1800 Kbps. 
+ TRTCVideoResolution_1280_720 = 112, + + /// Aspect ratio: 16:9; resolution: 1920x1080; recommended bitrate (VideoCall): 2000 Kbps; recommended bitrate (LIVE): 3000 Kbps. + TRTCVideoResolution_1920_1080 = 114, + +}; + +/** + * 1.2 Video aspect ratio mode + * + * Only the landscape resolution (e.g., 640x360) is defined in `TRTCVideoResolution`. If the portrait resolution (e.g., 360x640) needs to be used, `Portrait` must be selected for `TRTCVideoResolutionMode`. + */ +enum TRTCVideoResolutionMode { + + /// Landscape resolution, such as TRTCVideoResolution_640_360 + TRTCVideoResolutionModeLandscape = 640x360. + TRTCVideoResolutionModeLandscape = 0, + + /// Portrait resolution, such as TRTCVideoResolution_640_360 + TRTCVideoResolutionModePortrait = 360x640. + TRTCVideoResolutionModePortrait = 1, + +}; + +/** + * 1.3 Video stream type + * + * TRTC provides three different video streams, including: + * - HD big image: it is generally used to transfer video data from the camera. + * - Smooth small image: it has the same content as the big image, but with lower resolution and bitrate and thus lower definition. + * - Substream image: it is generally used for screen sharing. Only one user in the room is allowed to publish the substream video image at any time, while other users must wait for this user to close the substream before they can publish their own + * substream. + * @note The SDK does not support enabling the smooth small image alone, which must be enabled together with the big image. It will automatically set the resolution and bitrate of the small image. + */ +enum TRTCVideoStreamType { + + /// HD big image: it is generally used to transfer video data from the camera. + TRTCVideoStreamTypeBig = 0, + + /// Smooth small image: it has the same content as the big image, but with lower resolution and bitrate and thus lower definition. + TRTCVideoStreamTypeSmall = 1, + + /// Substream image: it is generally used for screen sharing. 
Only one user in the room is allowed to publish the substream video image at any time, while other users must wait for this user to close the substream before they can publish their + /// own substream. + TRTCVideoStreamTypeSub = 2, + +}; + +/** + * 1.4 Video image fill mode + * + * If the aspect ratio of the video display area is not equal to that of the video image, you need to specify the fill mode: + */ +enum TRTCVideoFillMode { + + /// Fill mode: the video image will be centered and scaled to fill the entire display area, where parts that exceed the area will be cropped. The displayed image may be incomplete in this mode. + TRTCVideoFillMode_Fill = 0, + + /// Fit mode: the video image will be scaled based on its long side to fit the display area, where the short side will be filled with black bars. The displayed image is complete in this mode, but there may be black bars. + TRTCVideoFillMode_Fit = 1, + +}; + +/** + * 1.5 Video image rotation direction + * + * TRTC provides rotation angle setting APIs for local and remote images. The following rotation angles are all clockwise. + */ +enum TRTCVideoRotation { + + /// No rotation + TRTCVideoRotation0 = 0, + + /// Clockwise rotation by 90 degrees + TRTCVideoRotation90 = 1, + + /// Clockwise rotation by 180 degrees + TRTCVideoRotation180 = 2, + + /// Clockwise rotation by 270 degrees + TRTCVideoRotation270 = 3, + +}; + +/** + * 1.6 Beauty (skin smoothing) filter algorithm + * + * TRTC has multiple built-in skin smoothing algorithms. You can select the one most suitable for your product. + */ +enum TRTCBeautyStyle { + + /// Smooth style, which uses a more radical algorithm for more obvious effect and is suitable for show live streaming. + TRTCBeautyStyleSmooth = 0, + + /// Natural style, which retains more facial details for more natural effect and is suitable for most live streaming use cases. 
+ TRTCBeautyStyleNature = 1, + +}; + +/** + * 1.7 Video pixel format + * + * TRTC provides custom video capturing and rendering features. + * - For the custom capturing feature, you can use the following enumerated values to describe the pixel format of the video you capture. + * - For the custom rendering feature, you can specify the pixel format of the video you expect the SDK to call back. + */ +enum TRTCVideoPixelFormat { + + /// Undefined format + TRTCVideoPixelFormat_Unknown = 0, + + /// YUV420P (I420) format + TRTCVideoPixelFormat_I420 = 1, + + /// OpenGL 2D texture format + TRTCVideoPixelFormat_Texture_2D = 2, + + /// BGRA32 format + TRTCVideoPixelFormat_BGRA32 = 3, + + /// RGBA format + TRTCVideoPixelFormat_RGBA32 = 5, + +}; + +/** + * 1.8 Video data transfer method + * + * For custom capturing and rendering features, you need to use the following enumerated values to specify the method of transferring video data: + * - Method 1. This method uses memory buffer to transfer video data. It is efficient on iOS but inefficient on Android. It is the only method supported on Windows currently. + * - Method 2. This method uses texture to transfer video data. It is efficient on both iOS and Android but is not supported on Windows. To use this method, you should have a general familiarity with OpenGL programming. + */ +enum TRTCVideoBufferType { + + /// Undefined transfer method + TRTCVideoBufferType_Unknown = 0, + + /// Use memory buffer to transfer video data. iOS: `PixelBuffer`; Android: `Direct Buffer` for JNI layer; Windows: memory data block. + TRTCVideoBufferType_Buffer = 1, + + /// Use texture to transfer video data + TRTCVideoBufferType_Texture = 3, + +}; + +/** + * 1.9 Video mirror type + * + * Video mirroring refers to the left-to-right flipping of the video image, especially for the local camera preview image. After mirroring is enabled, it can bring anchors a familiar "look into the mirror" experience. 
+ */ +enum TRTCVideoMirrorType { + +/// Auto mode: mirror the front camera's image but not the rear camera's image (for mobile devices only). +#if TARGET_PLATFORM_PHONE + TRTCVideoMirrorType_Auto = 0, +#endif + + /// Mirror the images of both the front and rear cameras. + TRTCVideoMirrorType_Enable = 1, + + /// Disable mirroring for both the front and rear cameras. + TRTCVideoMirrorType_Disable = 2, + +}; + +/** + * 1.10 Data source of local video screenshot + * + * The SDK can take screenshots from the following two data sources and save them as local files: + * - Video stream: the SDK screencaptures the native video content from the video stream. The screenshots are not controlled by the display of the rendering control. + * - Rendering layer: the SDK screencaptures the displayed video content from the rendering control, which can achieve the effect of WYSIWYG, but if the display area is too small, the screenshots will also be very small. + */ +enum TRTCSnapshotSourceType { + + /// The SDK screencaptures the native video content from the video stream. The screenshots are not controlled by the display of the rendering control. + TRTCSnapshotSourceTypeStream = 0, + + /// The SDK screencaptures the displayed video content from the rendering control, which can achieve the effect of WYSIWYG, but if the display area is too small, the screenshots will also be very small. + TRTCSnapshotSourceTypeView = 1, + +}; + +///////////////////////////////////////////////////////////////////////////////// +// +// Definitions of network enumerated values +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 2.1 Use cases + * + * TRTC features targeted optimizations for common audio/video application scenarios to meet the differentiated requirements in various verticals. 
The main scenarios can be divided into the following two categories: + * - Live streaming scenario (LIVE): including `LIVE` (audio + video) and `VoiceChatRoom` (pure audio). + * In the live streaming scenario, users are divided into two roles: "anchor" and "audience". A single room can sustain up to 100,000 concurrent online users. This is suitable for live streaming to a large audience. + * - Real-Time scenario (RTC): including `VideoCall` (audio + video) and `AudioCall` (pure audio). + * In the real-time scenario, there is no role difference between users, but a single room can sustain only up to 300 concurrent online users. This is suitable for small-scale real-time communication. + */ +enum TRTCAppScene { + + /// In the video call scenario, 720p and 1080p HD image quality is supported. A single room can sustain up to 300 concurrent online users, and up to 50 of them can speak simultaneously. + /// Use cases: [one-to-one video call], [video conferencing with up to 300 participants], [online medical diagnosis], [small class], [video interview], etc. + TRTCAppSceneVideoCall = 0, + + /// In the interactive video live streaming scenario, mic can be turned on/off smoothly without waiting for switchover, and the anchor latency is as low as less than 300 ms. Live streaming to hundreds of thousands of concurrent users in the + /// audience role is supported with the playback latency down to 1,000 ms. + /// Use cases: [low-latency interactive live streaming], [big class], [anchor competition], [video dating room], [online interactive classroom], [remote training], [large-scale conferencing], etc. + ///@note In this scenario, you must use the `role` field in `TRTCParams` to specify the role of the current user. + TRTCAppSceneLIVE = 1, + + /// Audio call scenario, where the `SPEECH` sound quality is used by default. A single room can sustain up to 300 concurrent online users, and up to 50 of them can speak simultaneously. 
+ /// Use cases: [one-to-one audio call], [audio conferencing with up to 300 participants], [audio chat], [online Werewolf], etc. + TRTCAppSceneAudioCall = 2, + + /// In the interactive audio live streaming scenario, mic can be turned on/off smoothly without waiting for switchover, and the anchor latency is as low as less than 300 ms. Live streaming to hundreds of thousands of concurrent users in the + /// audience role is supported with the playback latency down to 1,000 ms. + /// Use cases: [audio club], [online karaoke room], [music live room], [FM radio], etc. + ///@note In this scenario, you must use the `role` field in `TRTCParams` to specify the role of the current user. + TRTCAppSceneVoiceChatRoom = 3, + +}; + +/** + * 2.2 Role + * + * Role is applicable only to live streaming scenarios (`TRTCAppSceneLIVE` and `TRTCAppSceneVoiceChatRoom`). Users are divided into two roles: + * - Anchor, who can publish their audio/video streams. There is a limit on the number of anchors. Up to 50 anchors are allowed to publish streams at the same time in one room. + * - Audience, who can only listen to or watch audio/video streams of anchors in the room. If they want to publish their streams, they need to switch to the "anchor" role first through {@link switchRole}. One room can sustain up to 100,000 concurrent + * online users in the audience role. + */ +enum TRTCRoleType { + + /// An anchor can publish their audio/video streams. There is a limit on the number of anchors. Up to 50 anchors are allowed to publish streams at the same time in one room. + TRTCRoleAnchor = 20, + + /// Audience can only listen to or watch audio/video streams of anchors in the room. If they want to publish their streams, they need to switch to the "anchor" role first through {@link switchRole}. One room can sustain up to 100,000 concurrent + /// online users in the audience role. 
+ TRTCRoleAudience = 21, + +}; + +/** + * 2.3 QoS control mode (disused) + */ +enum TRTCQosControlMode { + + /// Client-based control, which is for internal debugging of SDK and shall not be used by users. + TRTCQosControlModeClient = 0, + + /// On-cloud control, which is the default and recommended mode. + TRTCQosControlModeServer = 1, + +}; + +/** + * 2.4 Image quality preference + * + * TRTC has two control modes in weak network environments: "ensuring clarity" and "ensuring smoothness". Both modes will give priority to the transfer of audio data. + */ +enum TRTCVideoQosPreference { + + /// Ensuring smoothness: in this mode, when the current network is unable to transfer a clear and smooth video image, the smoothness of the image will be given priority, but there will be blurs. + TRTCVideoQosPreferenceSmooth = 1, + + /// Ensuring clarity (default value): in this mode, when the current network is unable to transfer a clear and smooth video image, the clarity of the image will be given priority, but there will be lags. + TRTCVideoQosPreferenceClear = 2, + +}; + +/** + * 2.5 Network quality + * + * TRTC evaluates the current network quality once every two seconds. The evaluation results are divided into six levels: `Excellent` indicates the best, and `Down` indicates the worst. + */ +enum TRTCQuality { + + /// Undefined + TRTCQuality_Unknown = 0, + + /// The current network is excellent + TRTCQuality_Excellent = 1, + + /// The current network is good + TRTCQuality_Good = 2, + + /// The current network is fair + TRTCQuality_Poor = 3, + + /// The current network is bad + TRTCQuality_Bad = 4, + + /// The current network is very bad + TRTCQuality_Vbad = 5, + + /// The current network cannot meet the minimum requirements of TRTC + TRTCQuality_Down = 6, + +}; + +/** + * 2.6 Audio/Video playback status + * + * This enumerated type is used in the video status changed API {@link onRemoteVideoStatusUpdated} to specify the current video status. 
+ */ +enum TRTCAVStatusType { + + /// Stopped + TRTCAVStatusStopped = 0, + + /// Playing + TRTCAVStatusPlaying = 1, + + /// Loading + TRTCAVStatusLoading = 2, + +}; + +/** + * 2.7 Reasons for playback status changes + * + * This enumerated type is used in the video status changed API {@link onRemoteVideoStatusUpdated} to specify the reason for the current video status change. + */ +enum TRTCAVStatusChangeReason { + + /// Default value + TRTCAVStatusChangeReasonInternal = 0, + + /// The stream enters the "Loading" state due to network congestion + TRTCAVStatusChangeReasonBufferingBegin = 1, + + /// The stream enters the "Playing" state after network recovery + TRTCAVStatusChangeReasonBufferingEnd = 2, + + /// As a start-related API was directly called locally, the stream enters the "Playing" state + TRTCAVStatusChangeReasonLocalStarted = 3, + + /// As a stop-related API was directly called locally, the stream enters the "Stopped" state + TRTCAVStatusChangeReasonLocalStopped = 4, + + /// As the remote user started (or resumed) publishing the video stream, the stream enters the "Loading" or "Playing" state + TRTCAVStatusChangeReasonRemoteStarted = 5, + + /// As the remote user stopped (or paused) publishing the video stream, the stream enters the "Stopped" state + TRTCAVStatusChangeReasonRemoteStopped = 6, + +}; + +///////////////////////////////////////////////////////////////////////////////// +// +// Definitions of audio enumerated values +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 3.2 Sound quality + * + * TRTC provides three well-tuned modes to meet the differentiated requirements for sound quality in various verticals: + * - Speech mode (Speech): it is suitable for application scenarios that focus on human communication. In this mode, the audio transfer is more resistant, and TRTC uses various voice processing technologies to ensure the optimal smoothness even in + * weak network environments. 
+ * - Music mode (Music): it is suitable for scenarios with demanding requirements for music. In this mode, the amount of transferred audio data is very large, and TRTC uses various technologies to ensure that the high-fidelity details of music + * signals can be restored in each frequency band. + * - Default mode (Default): it is between `Speech` and `Music`. In this mode, the reproduction of music is better than that in `Speech` mode, and the amount of transferred data is much lower than that in `Music` mode; therefore, this mode has good + * adaptability to various scenarios. + */ +enum TRTCAudioQuality { + + /// Speech mode: sample rate: 16 kHz; mono channel; bitrate: 16 Kbps. This mode has the best resistance among all modes and is suitable for audio call scenarios, such as online meeting and audio call. + TRTCAudioQualitySpeech = 1, + + /// Default mode: sample rate: 48 kHz; mono channel; bitrate: 50 Kbps. This mode is between the speech mode and the music mode as the default mode in the SDK and is recommended. + TRTCAudioQualityDefault = 2, + + /// Music mode: sample rate: 48 kHz; full-band stereo; bitrate: 128 Kbps. This mode is suitable for scenarios where Hi-Fi music transfer is required, such as online karaoke and music live streaming. + TRTCAudioQualityMusic = 3, + +}; + +/** + * 3.7 Audio frame content format + */ +enum TRTCAudioFrameFormat { + + /// None + TRTCAudioFrameFormatNone = 0, + + /// Audio data in PCM format + TRTCAudioFrameFormatPCM, + +}; + +///////////////////////////////////////////////////////////////////////////////// +// +// Definitions of other enumerated values +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 4.1 Log level + * + * Different log levels indicate different levels of details and number of logs. We recommend you set the log level to `TRTCLogLevelInfo` generally. 
+ */ +enum TRTCLogLevel { + + /// Output logs at all levels + TRTCLogLevelVerbose = 0, + + /// Output logs at the DEBUG, INFO, WARNING, ERROR, and FATAL levels + TRTCLogLevelDebug = 1, + + /// Output logs at the INFO, WARNING, ERROR, and FATAL levels + TRTCLogLevelInfo = 2, + + /// Output logs at the WARNING, ERROR, and FATAL levels + TRTCLogLevelWarn = 3, + + /// Output logs at the ERROR and FATAL levels + TRTCLogLevelError = 4, + + /// Output logs at the FATAL level + TRTCLogLevelFatal = 5, + + /// Do not output any SDK logs + TRTCLogLevelNone = 6, + +}; + +/** + * 4.3 Screen sharing target type (for desktops only) + */ +enum TRTCScreenCaptureSourceType { + + /// Undefined + TRTCScreenCaptureSourceTypeUnknown = -1, + + /// The screen sharing target is the window of an application + TRTCScreenCaptureSourceTypeWindow = 0, + + /// The screen sharing target is the entire screen + TRTCScreenCaptureSourceTypeScreen = 1, + + /// The screen sharing target is a user-defined data source + TRTCScreenCaptureSourceTypeCustom = 2, + +}; + +/** + * 4.4 Layout mode of On-Cloud MixTranscoding + * + * TRTC's On-Cloud MixTranscoding service can mix multiple audio/video streams in the room into one stream. Therefore, you need to specify the layout scheme of the video images. The following layout modes are provided: + */ +enum TRTCTranscodingConfigMode { + + /// Undefined + TRTCTranscodingConfigMode_Unknown = 0, + + /// Manual layout mode + /// In this mode, you need to specify the precise position of each video image. This mode has the highest degree of freedom, but its ease of use is the worst: + ///- You need to enter all the parameters in `TRTCTranscodingConfig`, including the position coordinates of each video image (TRTCMixUser). 
+ ///- You need to listen on the `onUserVideoAvailable()` and `onUserAudioAvailable()` event callbacks in `TRTCCloudDelegate` and constantly adjust the `mixUsers` parameter according to the audio/video status of each user with mic on in the current + /// room. + TRTCTranscodingConfigMode_Manual = 1, + + /// Pure audio mode + /// This mode is suitable for pure audio scenarios such as audio call (AudioCall) and audio chat room (VoiceChatRoom). + ///- You only need to set it once through the `setMixTranscodingConfig()` API after room entry, and then the SDK will automatically mix the audio of all mic-on users in the room into the current user's live stream. + ///- You don't need to set the `mixUsers` parameter in `TRTCTranscodingConfig`; instead, you only need to set the `audioSampleRate`, `audioBitrate` and `audioChannels` parameters. + TRTCTranscodingConfigMode_Template_PureAudio = 2, + + /// Preset layout mode + /// This is the most popular layout mode, because it allows you to set the position of each video image in advance through placeholders, and then the SDK automatically adjusts it dynamically according to the number of video images in the room. + /// In this mode, you still need to set the `mixUsers` parameter, but you can set `userId` as a "placeholder". Placeholder values include: + /// - "$PLACE_HOLDER_REMOTE$": image of remote user. Multiple images can be set. + /// - "$PLACE_HOLDER_LOCAL_MAIN$": local camera image. Only one image can be set. + /// - "$PLACE_HOLDER_LOCAL_SUB$": local screen sharing image. Only one image can be set. + /// In this mode, you don't need to listen on the `onUserVideoAvailable()` and `onUserAudioAvailable()` callbacks in `TRTCCloudDelegate` to make real-time adjustments. + /// Instead, you only need to call `setMixTranscodingConfig()` once after successful room entry. Then, the SDK will automatically populate the placeholders you set with real `userId` values. 
+ TRTCTranscodingConfigMode_Template_PresetLayout = 3, + + /// Screen sharing mode + /// This mode is suitable for screen sharing-based use cases such as online education and supported only by the SDKs for Windows and macOS. + /// In this mode, the SDK will first build a canvas according to the target resolution you set (through the `videoWidth` and `videoHeight` parameters). + ///- Before the teacher enables screen sharing, the SDK will scale up the teacher's camera image and draw it onto the canvas. + ///- After the teacher enables screen sharing, the SDK will draw the video image shared on the screen onto the same canvas. + /// The purpose of this layout mode is to ensure consistency in the output resolution of the MixTranscoding module and avoid problems with blurred screen during course replay and webpage playback (web players don't support adjustable resolution). + /// Meanwhile, the audio of mic-on students will be mixed into the teacher's audio/video stream by default. + ///<br> + /// Video content is primarily the shared screen in teaching mode, and it is a waste of bandwidth to transfer camera image and screen image at the same time. + /// Therefore, the recommended practice is to directly draw the camera image onto the current screen through the `setLocalVideoRenderCallback` API. + /// In this mode, you don't need to set the `mixUsers` parameter in `TRTCTranscodingConfig`, and the SDK will not mix students' images so as not to interfere with the screen sharing effect. + ///<br> + /// You can set width x height in `TRTCTranscodingConfig` to 0 px x 0 px, and the SDK will automatically calculate a suitable resolution based on the aspect ratio of the user's current screen. + ///- If the teacher's current screen width is less than or equal to 1920 px, the SDK will use the actual resolution of the teacher's current screen.
+ ///- If the teacher's current screen width is greater than 1920 px, the SDK will select one of the three resolutions of 1920x1080 (16:9), 1920x1200 (16:10), and 1920x1440 (4:3) according to the current screen aspect ratio. + TRTCTranscodingConfigMode_Template_ScreenSharing = 4, + +}; + +/** + * 4.5 Media recording type + * + * This enumerated type is used in the local media recording API {@link startLocalRecording} to specify whether to record audio/video files or pure audio files. + */ +enum TRTCLocalRecordType { + + /// Record audio only + TRTCLocalRecordType_Audio = 0, + + /// Record video only + TRTCLocalRecordType_Video = 1, + + /// Record both audio and video + TRTCLocalRecordType_Both = 2, + +}; + +/** + * 4.6 Stream mix input type + */ +enum TRTCMixInputType { + + /// Unspecified. The SDK will determine the stream mix input type according to the value of the `pureAudio` parameter + TRTCMixInputTypeUndefined = 0, + + /// Mix both audio and video + TRTCMixInputTypeAudioVideo = 1, + + /// Mix video only + TRTCMixInputTypePureVideo = 2, + + /// Mix audio only + TRTCMixInputTypePureAudio = 3, + +}; + +/** + * 4.7 Device type (for desktop platforms only) + * + * This enumerated value is used to define three types of audio/video devices, namely, camera, mic, and speaker, so that the same device management API can control the three different types of devices. + * Starting from v8.0, TRTC redefines `TXMediaDeviceType` in `TXDeviceManager` to replace `TRTCMediaDeviceType` on legacy versions. + * Only the definition of `TRTCMediaDeviceType` is retained here for compatibility with customer code on legacy versions. 
+ */ +typedef TXMediaDeviceType TRTCDeviceType; +#define TRTCDeviceTypeUnknow TXMediaDeviceTypeUnknown +#define TRTCDeviceTypeMic TXMediaDeviceTypeMic +#define TRTCDeviceTypeSpeaker TXMediaDeviceTypeSpeaker +#define TRTCDeviceTypeCamera TXMediaDeviceTypeCamera + +/** + * 4.8 Watermark image source type + */ +enum TRTCWaterMarkSrcType { + + /// Path of the image file, which can be in BMP, GIF, JPEG, PNG, TIFF, Exif, WMF, or EMF format + TRTCWaterMarkSrcTypeFile = 0, + + /// Memory block in BGRA32 format + TRTCWaterMarkSrcTypeBGRA32 = 1, + + /// Memory block in RGBA32 format + TRTCWaterMarkSrcTypeRGBA32 = 2, + +}; + +/** + * 4.9 Device operation + * + * This enumerated value is used to notify the status change of the local device {@link onDeviceChange}. + */ +typedef TXMediaDeviceState TRTCDeviceState; +#define TRTCDeviceStateAdd TXMediaDeviceStateAdd +#define TRTCDeviceStateRemove TXMediaDeviceStateRemove +#define TRTCDeviceStateActive TXMediaDeviceStateActive + +/** + * 4.11 Audio recording content type + * + * This enumerated type is used in the audio recording API {@link startAudioRecording} to specify the content of the recorded audio. + */ +enum TRTCAudioRecordingContent { + + /// Record both local and remote audio + TRTCAudioRecordingContentAll = 0, + + /// Record local audio only + TRTCAudioRecordingContentLocal = 1, + + /// Record remote audio only + TRTCAudioRecordingContentRemote = 2, + +}; + +///////////////////////////////////////////////////////////////////////////////// +// +// Definitions of core TRTC classes +// +///////////////////////////////////////////////////////////////////////////////// + +/** + * 5.1 Room entry parameters + * + * As the room entry parameters in the TRTC SDK, these parameters must be correctly set so that the user can successfully enter the audio/video room specified by `roomId` or `strRoomId`. + * For historical reasons, TRTC supports two types of room IDs: `roomId` and `strRoomId`. 
+ * Note: do not mix `roomId` and `strRoomId`, because they are not interchangeable. For example, the number `123` and the string `123` are two completely different rooms in TRTC. + */ +struct TRTCParams { + ///**Field description:** application ID, which is required. Tencent Cloud generates bills based on `sdkAppId`. + ///**Recommended value:** the ID can be obtained on the account information page in the [TRTC console](https://console.cloud.tencent.com/rav/) after the corresponding application is created. + uint32_t sdkAppId; + + ///**Field description:** user ID, which is required. It is the `userId` of the local user in UTF-8 encoding and acts as the username. + ///**Recommended value:** if the ID of a user in your account system is "mike", `userId` can be set to "mike". + const char *userId; + + ///**Field description:** user signature, which is required. It is the authentication signature corresponding to the current `userId` and acts as the login password for Tencent Cloud services. + ///**Recommended value:** for the calculation method, please see [UserSig](https://cloud.tencent.com/document/product/647/17275). + const char *userSig; + + ///**Field description:** numeric room ID. Users (userId) in the same room can see one another and make audio/video calls. + ///**Recommended value:** value range: 1–4294967294. + ///**Note:** `roomId` and `strRoomId` are mutually exclusive. If you decide to use `strRoomId`, then `roomId` should be entered as 0. If both are entered, `roomId` will be used. + ///**Note:** do not mix `roomId` and `strRoomId`, because they are not interchangeable. For example, the number `123` and the string `123` are two completely different rooms in TRTC. + uint32_t roomId; + + ///**Field description:** string-type room ID. Users (userId) in the same room can see one another and make audio/video calls. + ///**Note:** `roomId` and `strRoomId` are mutually exclusive. If you decide to use `strRoomId`, then `roomId` should be entered as 0. 
If both are entered, `roomId` will be used. + ///**Note:** do not mix `roomId` and `strRoomId`, because they are not interchangeable. For example, the number `123` and the string `123` are two completely different rooms in TRTC. + ///**Recommended value:** the length limit is 64 bytes. The following 89 characters are supported: + /// - Uppercase and lowercase letters (a–z and A–Z) + /// - Digits (0–9) + /// - Space, "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", and ",". + const char *strRoomId; + + ///**Field description:** role in the live streaming scenario, which is applicable only to the live streaming scenario ({@link TRTCAppSceneLIVE} or {@link TRTCAppSceneVoiceChatRoom}) but doesn't take effect in the call scenario. + ///**Recommended value:** default value: anchor ({@link TRTCRoleAnchor}). + TRTCRoleType role; + + ///**Field description:** specified `streamId` in Tencent Cloud CSS, which is optional. After setting this field, you can play back the user's audio/video stream on Tencent Cloud CSS CDN through a standard pull scheme (FLV or HLS). + ///**Recommended value:** this parameter can contain up to 64 bytes and can be left empty. We recommend you use `sdkappid_roomid_userid_main` as the `streamid`, which is easier to identify and will not cause conflicts in your multiple + /// applications. + ///**Note:** to use Tencent Cloud CSS CDN, you need to enable the auto-relayed live streaming feature on the "Function Configuration" page in the [console](https://console.cloud.tencent.com/trtc/) first. + /// For more information, please see [CDN Relayed Live Streaming](https://cloud.tencent.com/document/product/647/16826). + const char *streamId; + + ///**Field description:** on-cloud recording field, which is optional and used to specify whether to record the user's audio/video stream in the cloud. 
+ /// For more information, please see [On-Cloud Recording and Playback](https://cloud.tencent.com/document/product/647/16823). + ///**Recommended value:** it can contain up to 64 bytes. Letters (a–z and A–Z), digits (0–9), underscores, and hyphens are allowed. + ///<p> + /// Scheme 1. Manual recording + /// 1. Enable on-cloud recording in "Application Management" > "On-cloud Recording Configuration" in the [console](https://console.cloud.tencent.com/trtc). + /// 2. Set "Recording Mode" to "Manual Recording". + /// 3. After manual recording is set, in a TRTC room, only users with the `userDefineRecordId` parameter set will have video recording files in the cloud, while users without this parameter set will not. + /// 4. The recording file will be named in the format of "userDefineRecordId_start time_end time" in the cloud. + ///<p> + /// Scheme 2. Auto-recording + /// 1. You need to enable on-cloud recording in "Application Management" > "On-cloud Recording Configuration" in the [console](https://console.cloud.tencent.com/trtc). + /// 2. Set "Recording Mode" to "Auto-recording". + /// 3. After auto-recording is set, any user who upstreams audio/video in a TRTC room will have a video recording file in the cloud. + /// 4. The file will be named in the format of "userDefineRecordId_start time_end time". If `userDefineRecordId` is not specified, the file will be named in the format of "streamId_start time_end time". + ///<br> + const char *userDefineRecordId; + + ///**Field description:** permission credential used for permission control, which is optional. If you want only users with the specified `userId` values to enter a room, you need to use `privateMapKey` to restrict the permission. + ///**Recommended value:** we recommend you use this parameter only if you have high security requirements. For more information, please see [Enabling Advanced Permission Control](https://cloud.tencent.com/document/product/647/32240).
+ const char *privateMapKey; + + ///**Field description:** business data, which is optional. This field is needed only by some advanced features. + ///**Recommended value:** do not set this field on your own. + const char *businessInfo; + TRTCParams() : sdkAppId(0), roomId(0), strRoomId(nullptr), userId(nullptr), userSig(nullptr), role(TRTCRoleAnchor), privateMapKey(nullptr), businessInfo(nullptr), userDefineRecordId(nullptr), streamId(nullptr) { + } +}; + +/** + * 5.2 Video encoding parameters + * + * These settings determine the quality of image viewed by remote users as well as the image quality of recorded video files in the cloud. + */ +struct TRTCVideoEncParam { + ///**Field description:** video resolution + ///**Recommended value** + /// - For mobile video call, we recommend you select a resolution of 360x640 or below and select `Portrait` (portrait resolution) for `resMode`. + /// - For mobile live streaming, we recommend you select a resolution of 540x960 and select `Portrait` (portrait resolution) for `resMode`. + /// - For desktop platforms (Windows and macOS), we recommend you select a resolution of 640x360 or above and select `Landscape` (landscape resolution) for `resMode`. + ///**Note:** to use a portrait resolution, please specify `resMode` as `Portrait`; for example, when used together with `Portrait`, 640x360 represents 360x640. + TRTCVideoResolution videoResolution; + + ///**Field description:** resolution mode (landscape/portrait) + ///**Recommended value:** for mobile platforms (iOS and Android), `Portrait` is recommended; for desktop platforms (Windows and macOS), `Landscape` is recommended. + ///**Note:** to use a portrait resolution, please specify `resMode` as `Portrait`; for example, when used together with `Portrait`, 640x360 represents 360x640. + TRTCVideoResolutionMode resMode; + + ///**Field description:** video capturing frame rate + ///**Recommended value:** 15 or 20 fps.
If the frame rate is lower than 5 fps, there will be obvious lagging; if lower than 10 fps but higher than 5 fps, there will be slight lagging; if higher than 20 fps, the bandwidth will be wasted (the + /// frame rate of movies is generally 24 fps). + ///**Note:** the front cameras on certain Android phones do not support a capturing frame rate higher than 15 fps. For some Android phones that focus on beautification features, the capturing frame rate of the front cameras may be lower than + /// 10 fps. + uint32_t videoFps; + + ///**Field description:** target video bitrate. The SDK encodes streams at the target video bitrate and will actively reduce the bitrate only in weak network environments. + ///**Recommended value:** please see the optimal bitrate for each specification in `TRTCVideoResolution`. You can also slightly increase the optimal bitrate. + /// For example, `TRTCVideoResolution_1280_720` corresponds to the target bitrate of 1,200 Kbps. You can also set the bitrate to 1,500 Kbps for higher definition. + ///**Note:** you can set the `videoBitrate` and `minVideoBitrate` parameters at the same time to restrict the SDK's adjustment range of the video bitrate: + /// - If you want to "ensure clarity while allowing lag in weak network environments", you can set `minVideoBitrate` to 60% of `videoBitrate`. + /// - If you want to "ensure smoothness while allowing blur in weak network environments", you can set `minVideoBitrate` to a low value, for example, 100 Kbps. + /// - If you set `videoBitrate` and `minVideoBitrate` to the same value, it is equivalent to disabling the adaptive adjustment capability of the SDK for the video bitrate. + uint32_t videoBitrate; + + ///**Field description:** minimum video bitrate. The SDK will reduce the bitrate to as low as the value specified by `minVideoBitrate` to ensure the smoothness only if the network conditions are poor.
+ ///**Note:** default value: 0, indicating that a reasonable value of the lowest bitrate will be automatically calculated by the SDK according to the resolution you specify. + ///**Recommended value:** you can set the `videoBitrate` and `minVideoBitrate` parameters at the same time to restrict the SDK's adjustment range of the video bitrate: + /// - If you want to "ensure clarity while allowing lag in weak network environments", you can set `minVideoBitrate` to 60% of `videoBitrate`. + /// - If you want to "ensure smoothness while allowing blur in weak network environments", you can set `minVideoBitrate` to a low value, for example, 100 Kbps. + /// - If you set `videoBitrate` and `minVideoBitrate` to the same value, it is equivalent to disabling the adaptive adjustment capability of the SDK for the video bitrate. + uint32_t minVideoBitrate; + + ///**Field description:** whether to allow dynamic resolution adjustment. Once enabled, this field will affect on-cloud recording. + ///**Recommended value:** this feature is suitable for scenarios that don't require on-cloud recording. After it is enabled, the SDK will intelligently select a suitable resolution according to the current network conditions to avoid the + /// inefficient encoding mode of "large resolution + small bitrate". + ///**Note:** default value: false. If you need on-cloud recording, please do not enable this feature, because if the video resolution changes, the MP4 file recorded in the cloud cannot be played back normally by common players. + bool enableAdjustRes; + + TRTCVideoEncParam() : videoResolution(TRTCVideoResolution_640_360), resMode(TRTCVideoResolutionModeLandscape), videoFps(15), videoBitrate(550), enableAdjustRes(false), minVideoBitrate(0) { + } +}; + +/** + * 5.3 Network QoS control parameter set + * + * Network QoS control parameter. The settings determine the QoS control policy of the SDK in weak network conditions (e.g., whether to "ensure clarity" or "ensure smoothness"). 
+ */ +struct TRTCNetworkQosParam { + ///**Field description:** whether to ensure smoothness or clarity + ///**Recommended value:** ensuring clarity + ///**Note:** this parameter mainly affects the audio/video performance of TRTC in weak network environments: + /// - Ensuring smoothness: in this mode, when the current network is unable to transfer a clear and smooth video image, the smoothness of the image will be given priority, but there will be blurs. + /// - Ensuring clarity (default value): in this mode, when the current network is unable to transfer a clear and smooth video image, the clarity of the image will be given priority, but there will be lags. + TRTCVideoQosPreference preference; + + ///**Field description:** QoS control mode (disused) + ///**Recommended value:** on-cloud control + ///**Note:** please set the on-cloud control mode (TRTCQosControlModeServer). + TRTCQosControlMode controlMode; + + TRTCNetworkQosParam() : preference(TRTCVideoQosPreferenceClear), controlMode(TRTCQosControlModeServer) { + } +}; + +/** + * 5.4 Rendering parameters of video image + * + * You can use these parameters to control the video image rotation angle, fill mode, and mirror mode. + */ +struct TRTCRenderParams { + ///**Field description:** clockwise image rotation angle + ///**Recommended value:** rotation angles of 90, 180, and 270 degrees are supported. Default value: {@link TRTCVideoRotation0} + TRTCVideoRotation rotation; + + ///**Field description:** image fill mode + ///**Recommended value:** fill (the image may be stretched or cropped) or fit (there may be black bars in unmatched areas).
Default value: {@link TRTCVideoFillMode_Fit} + TRTCVideoFillMode fillMode; + + ///**Field description:** image mirror mode + ///**Recommended value:** default value: {@link TRTCVideoMirrorType_Disable} + TRTCVideoMirrorType mirrorType; + + TRTCRenderParams() : rotation(TRTCVideoRotation0), fillMode(TRTCVideoFillMode_Fit), mirrorType(TRTCVideoMirrorType_Disable) { + } +}; + +/** + * 5.5 Network quality + * + * This indicates the quality of the network. You can use it to display the network quality of each user on the UI. + */ +struct TRTCQualityInfo { + /// User ID + const char *userId; + + /// Network quality + TRTCQuality quality; + + TRTCQualityInfo() : userId(nullptr), quality(TRTCQuality_Unknown) { + } +}; + +/** + * 5.6 Volume + * + * This indicates the audio volume value. You can use it to display the volume of each user in the UI. + */ +struct TRTCVolumeInfo { + ///`userId` of the speaker. An empty value indicates the local user. + const char *userId; + + /// Volume of the speaker. Value range: 0–100. + uint32_t volume; + + TRTCVolumeInfo() : userId(nullptr), volume(0) { + } +}; + +/** + * 5.7 Network speed testing parameters + * + * You can test the network speed through the {@link startSpeedTest:} interface before the user enters the room (this API cannot be called during a call). + */ +struct TRTCSpeedTestParams { + /// Application identification, please refer to the relevant instructions in {@link TRTCParams}. + int sdkAppId; + + /// User identification, please refer to the relevant instructions in {@link TRTCParams}. + const char *userId; + + /// User signature, please refer to the relevant instructions in {@link TRTCParams}. + const char *userSig; + + /// Expected upstream bandwidth (kbps, value range: 10 to 5000, no uplink bandwidth test when it is 0). + int expectedUpBandwidth; + + /// Expected downstream bandwidth (kbps, value range: 10 to 5000, no downlink bandwidth test when it is 0).
+ int expectedDownBandwidth; + + TRTCSpeedTestParams() : sdkAppId(0), userId(nullptr), userSig(nullptr), expectedUpBandwidth(0), expectedDownBandwidth(0) { + } +}; + +/** + * 5.8 Network speed test result + * + * The {@link startSpeedTest:} API can be used to test the network speed before a user enters a room (this API cannot be called during a call). + */ +struct TRTCSpeedTestResult { + /// Whether the network speed test is successful. + bool success; + + /// Error message for network speed test. + const char *errMsg; + + /// Server IP address. + const char *ip; + + /// Network quality, which is tested and calculated based on the internal evaluation algorithm. For more information, please see {@link TRTCQuality} + TRTCQuality quality; + + /// Upstream packet loss rate between 0 and 1.0. For example, 0.3 indicates that 3 data packets may be lost in every 10 packets sent to the server. + float upLostRate; + + /// Downstream packet loss rate between 0 and 1.0. For example, 0.2 indicates that 2 data packets may be lost in every 10 packets received from the server. + float downLostRate; + + /// Delay in milliseconds, which is the round-trip time between the current device and TRTC server. The smaller the value, the better. The normal value range is 10–100 ms. + int rtt; + + /// Upstream bandwidth (in kbps, -1: invalid value). + int availableUpBandwidth; + + /// Downstream bandwidth (in kbps, -1: invalid value). + int availableDownBandwidth; + + TRTCSpeedTestResult() : success(false), errMsg(nullptr), ip(nullptr), quality(TRTCQuality_Unknown), upLostRate(0.0f), downLostRate(0.0f), rtt(0), availableUpBandwidth(0), availableDownBandwidth(0) { + } +}; + +/** + * 5.10 Video frame information + * + * `TRTCVideoFrame` is used to describe the raw data of a frame of the video image, which is the image data before frame encoding or after frame decoding. 
+ */ +struct TRTCVideoFrame { + ///**Field description:** video pixel format + TRTCVideoPixelFormat videoFormat; + + ///**Field description:** video data structure type + TRTCVideoBufferType bufferType; + + ///**Field description:** video data when `bufferType` is {@link TRTCVideoBufferType_Buffer}, which carries the memory data blocks for the C++ layer. + char *data; + + ///**Field description:** video texture ID, i.e., video data when `bufferType` is {@link TRTCVideoBufferType_Texture}, which carries the texture data used for OpenGL rendering. + int textureId; + + ///**Field description:** video data length in bytes. For I420, length = width * height * 3 / 2; for BGRA32, length = width * height * 4. + uint32_t length; + + ///**Field description:** video width + ///**Recommended value:** please enter the width of the video data passed in. + uint32_t width; + + ///**Field description:** video height + ///**Recommended value:** please enter the height of the video data passed in. + uint32_t height; + + ///**Field description:** video frame timestamp in milliseconds + ///**Recommended value:** this parameter can be set to 0 for custom video capturing. In this case, the SDK will automatically set the `timestamp` field. However, please "evenly" set the calling interval of `sendCustomVideoData`. 
+ uint64_t timestamp; + + ///**Field description:** clockwise rotation angle of video pixels + TRTCVideoRotation rotation; + + TRTCVideoFrame() : videoFormat(TRTCVideoPixelFormat_Unknown), bufferType(TRTCVideoBufferType_Unknown), data(nullptr), textureId(-1), length(0), width(640), height(360), timestamp(0), rotation(TRTCVideoRotation0) { + } +}; + +/** + * 5.11 Audio frame data + */ +struct TRTCAudioFrame { + ///**Field description:** audio frame format + TRTCAudioFrameFormat audioFormat; + + ///**Field description:** audio data + char *data; + + ///**Field description:** audio data length + uint32_t length; + + ///**Field description:** sample rate + uint32_t sampleRate; + + ///**Field description:** number of sound channels + uint32_t channel; + + ///**Field description:** timestamp in ms + uint64_t timestamp; + + TRTCAudioFrame() : audioFormat(TRTCAudioFrameFormatNone), data(nullptr), length(0), sampleRate(48000), channel(1), timestamp(0) { + } +}; + +/** + * 5.12 Description information of each video image in On-Cloud MixTranscoding + * + * `TRTCMixUser` is used to specify the location, size, layer, and stream type of each video image in On-Cloud MixTranscoding. + */ +struct TRTCMixUser { + ///**Field description:** user ID + const char *userId; + + ///**Field description:** ID of the room where this audio/video stream is located (an empty value indicates the local room ID) + const char *roomId; + + ///**Field description:** specify the coordinate area of this video image in px + RECT rect; + + ///**Field description:** specify the level of this video image (value range: 1–15; the value must be unique) + int zOrder; + + ///**Field description:** specify whether this video image is the primary stream image ({@link TRTCVideoStreamTypeBig}) or substream image ({@link TRTCVideoStreamTypeSub}). 
+ TRTCVideoStreamType streamType; + + ///**Field description:** specify whether this stream mixes audio only + ///**Recommended value:** default value: false + ///**Note:** this field has been disused. We recommend you use the new field `inputType` introduced in v8.5. + bool pureAudio; + + ///**Field description:** specify the mixed content of this stream (audio only, video only, or audio and video). This field is an upgrade to the `pureAudio` field. + ///**Recommended value:** default value: TRTCMixInputTypeUndefined, which represents the value of `pureAudio`. + /// - If you are using TRTC for the first time and have not set the `pureAudio` field before, you can set this field according to your actual needs. We recommend you not set `pureAudio` again. + /// - If you have already used the `pureAudio` field on a legacy version and want to keep its setting, you can set `inputType` to `TRTCMixInputTypeUndefined`. + TRTCMixInputType inputType; + + TRTCMixUser() : userId(nullptr), roomId(nullptr), rect(), zOrder(0), pureAudio(false), streamType(TRTCVideoStreamTypeBig), inputType(TRTCMixInputTypeUndefined) { + rect.left = 0; + rect.top = 0; + rect.right = 0; + rect.bottom = 0; + } +}; + +/** + * 5.13 Layout and transcoding parameters of On-Cloud MixTranscoding + * + * These parameters are used to specify the layout position information of each video image and the encoding parameters of mixtranscoding during On-Cloud MixTranscoding. + */ +struct TRTCTranscodingConfig { + ///**Field description:** layout mode + ///**Recommended value:** please choose a value according to your business needs. The preset mode has better applicability. + TRTCTranscodingConfigMode mode; + + ///**Field description:** `appId` of Tencent Cloud CSS + ///**Recommended value:** please click **Application Management** > **Application Information** in the [TRTC console](https://console.cloud.tencent.com/trtc) and get the `appId` in **Relayed Live Streaming Info**. 
+ uint32_t appId; + + ///**Field description:** `bizId` of Tencent Cloud CSS + ///**Recommended value:** please click **Application Management** > **Application Information** in the [TRTC console](https://console.cloud.tencent.com/trtc) and get the `bizId` in **Relayed Live Streaming Info**. + uint32_t bizId; + + ///**Field description:** specify the target resolution (width) of On-Cloud MixTranscoding + ///**Recommended value:** 360 px. If you only mix audio streams, please set both `width` and `height` to 0; otherwise, there will be a black background in the live stream after mixtranscoding. + uint32_t videoWidth; + + ///**Field description:** specify the target resolution (height) of On-Cloud MixTranscoding + ///**Recommended value:** 640 px. If you only mix audio streams, please set both `width` and `height` to 0; otherwise, there will be a black background in the live stream after mixtranscoding. + uint32_t videoHeight; + + ///**Field description:** specify the target video bitrate (Kbps) of On-Cloud MixTranscoding + ///**Recommended value:** if you enter 0, TRTC will estimate a reasonable bitrate value based on `videoWidth` and `videoHeight`. You can also refer to the recommended bitrate value in the video resolution enumeration definition (in the comment + /// section). + uint32_t videoBitrate; + + ///**Field description:** specify the target video frame rate (fps) of On-Cloud MixTranscoding + ///**Recommended value:** default value: 15 fps. Value range: (0,30]. + uint32_t videoFramerate; + + ///**Field description:** specify the target video keyframe interval (GOP) of On-Cloud MixTranscoding + ///**Recommended value:** default value: 2 (in seconds). Value range: [1,8]. + uint32_t videoGOP; + + ///**Field description:** specify the background color of the mixed video image. + ///**Recommended value:** default value: 0x000000, which means black and is in the format of hex number; for example: "0x61B9F1" represents the RGB color (97,158,241). 
+ uint32_t backgroundColor; + + ///**Field description:** specify the background image of the mixed video image. + ///**Recommended value:** default value: null, indicating not to set the background image. + ///**Note:** you need to upload the background image by clicking **Add image** in "Application Management" > "Function Configuration" > "Material Management" in the [console](https://console.cloud.tencent.com/trtc) in advance. + /// After the upload is successful, you can get the corresponding "image ID". Then, you need to convert it into a string and set it as `backgroundImage`. + /// For example, if the "image ID" is 63, you can set `backgroundImage = @"63"`; + const char *backgroundImage; + + ///**Field description:** specify the target audio sample rate of On-Cloud MixTranscoding + ///**Recommended value:** default value: 48000 Hz. Valid values: 12000 Hz, 16000 Hz, 22050 Hz, 24000 Hz, 32000 Hz, 44100 Hz, 48000 Hz. + uint32_t audioSampleRate; + + ///**Field description:** specify the target audio bitrate of On-Cloud MixTranscoding + ///**Recommended value:** default value: 64 Kbps. Value range: [32,192]. + uint32_t audioBitrate; + + ///**Field description:** specify the number of sound channels of On-Cloud MixTranscoding + ///**Recommended value:** default value: 1, which means mono channel. Valid values: 1: mono channel; 2: dual channel. + uint32_t audioChannels; + + ///**Field description:** specify the audio encoding type of On-Cloud MixTranscoding + ///**Recommended value:** default value: 0, which means LC-AAC. Valid values: 0: LC-AAC; 1: HE-AAC; 2: HE-AACv2. + ///**Note:** HE-AAC and HE-AACv2 only support [48000, 44100, 32000, 24000, 16000] sample rate. + ///**Note:** HE-AACv2 only support dual channel. + ///**Note:** HE-AAC and HE-AACv2 take effects iff the output streamId is specified. 
+ uint32_t audioCodec; + + ///**Field description:** specify the position, size, layer, and stream type of each video image in On-Cloud MixTranscoding + ///**Recommended value:** this field is an array in `TRTCMixUser` type, where each element represents the information of a video image. + TRTCMixUser *mixUsersArray; + + ///**Field description:** number of elements in the `mixUsersArray` array + uint32_t mixUsersArraySize; + + ///**Field description:** ID of the live stream output to CDN + ///**Recommended value:** default value: null, that is, the audio/video streams in the room will be mixed into the audio/video stream of the caller of this API. + /// - If you don't set this parameter, the SDK will execute the default logic, that is, it will mix the multiple audio/video streams in the room into the audio/video stream of the caller of this API, i.e., A + B => A. + /// - If you set this parameter, the SDK will mix the audio/video streams in the room into the live stream you specify, i.e., A + B => C (C is the `streamId` you specify). + const char *streamId; + + TRTCTranscodingConfig() + : mode(TRTCTranscodingConfigMode_Unknown), + appId(0), + bizId(0), + videoWidth(0), + videoHeight(0), + videoBitrate(0), + videoFramerate(15), + videoGOP(2), + audioSampleRate(48000), + audioBitrate(64), + audioChannels(1), + audioCodec(0), + mixUsersArray(nullptr), + mixUsersArraySize(0), + backgroundColor(0), + backgroundImage(nullptr), + streamId(nullptr) { + } +}; + +/** + * 5.14 Push parameters required to be set when publishing audio/video streams to non-Tencent Cloud CDN + * + * TRTC's backend service supports publishing audio/video streams to third-party live CDN service providers through the standard RTMP protocol. + * If you use the Tencent Cloud CSS CDN service, you don't need to care about this parameter; instead, just use the {@link startPublish} API. 
+ */ +struct TRTCPublishCDNParam { + ///**Field description:** `appId` of Tencent Cloud CSS + ///**Recommended value:** please click **Application Management** > **Application Information** in the [TRTC console](https://console.cloud.tencent.com/trtc) and get the `appId` in **Relayed Live Streaming Info**. + uint32_t appId; + + ///**Field description:** `bizId` of Tencent Cloud CSS + ///**Recommended value:** please click **Application Management** > **Application Information** in the [TRTC console](https://console.cloud.tencent.com/trtc) and get the `bizId` in **Relayed Live Streaming Info**. + uint32_t bizId; + + ///**Field description:** specify the push address (in RTMP format) of this audio/video stream at the third-party live streaming service provider + ///**Recommended value:** the push URL rules vary greatly by service provider. Please enter a valid push URL according to the requirements of the target service provider. TRTC's backend server will push audio/video streams in the standard format + /// to the third-party service provider according to the URL you enter. + ///**Note:** the push URL must be in RTMP format and meet the specifications of your target live streaming service provider; otherwise, the target service provider will reject the push requests from TRTC's backend service. + const char *url; + + ///**Field description:** specify the push address (in RTMP format) of this audio/video stream at the third-party live streaming service provider + ///**Recommended value:** default value: null,that is, the audio/video streams in the room will be pushed to the target service provider of the caller of this API. + const char *streamId; + + TRTCPublishCDNParam() : url(nullptr), streamId(nullptr), appId(0), bizId(0) { + } +}; + +/** + * 5.15 Local audio file recording parameters + * + * This parameter is used to specify the recording parameters in the audio recording API {@link startAudioRecording}. 
+ */ +struct TRTCAudioRecordingParams { + ///**Field description:** storage path of the audio recording file, which is required. + ///**Note:** this path must be accurate to the file name and extension. The extension determines the format of the audio recording file. Currently, supported formats include PCM, WAV, and AAC. + /// For example, if you specify the path as `mypath/record/audio.aac`, it means that you want the SDK to generate an audio recording file in AAC format. + /// Please specify a valid path with read/write permissions; otherwise, the audio recording file cannot be generated. + const char *filePath; + + ///**Field description:** Audio recording content type. + ///**Note:** Record all local and remote audio by default. + TRTCAudioRecordingContent recordingContent; + + TRTCAudioRecordingParams() : filePath(nullptr), recordingContent(TRTCAudioRecordingContentAll) { + } +}; + +/** + * 5.16 Local media file recording parameters + * + * This parameter is used to specify the recording parameters in the local media file recording API {@link startLocalRecording}. + * The `startLocalRecording` API is an enhanced version of the `startAudioRecording` API. The former can record video files, while the latter can only record audio files. + */ +struct TRTCLocalRecordingParams { + ///**Field description:** address of the recording file, which is required. Please ensure that the path is valid with read/write permissions; otherwise, the recording file cannot be generated. + ///**Note:** this path must be accurate to the file name and extension. The extension determines the format of the recording file. Currently, only the MP4 format is supported. + /// For example, if you specify the path as `mypath/record/test.mp4`, it means that you want the SDK to generate a local video file in MP4 format. + /// Please specify a valid path with read/write permissions; otherwise, the recording file cannot be generated. 
+ const char *filePath = ""; + + ///**Field description:** media recording type, which is `TRTCRecordTypeBoth` by default, indicating to record both audio and video. + TRTCLocalRecordType recordType = TRTCLocalRecordType_Both; + + ///**Field description:** `interval` is the update frequency of the recording information in milliseconds. Value range: 1000–10000. Default value: -1, indicating not to call back + int interval = -1; +}; + +/** + * 5.17 Sound effect parameter (disused) + * + * "Sound effects" in TRTC refer to some short audio files (usually only a few seconds), such as "applause" and "laughter". + * This parameter is used to specify the path and number of playback times of a sound effect file (short audio file) in the sound effect playback API {@link TRTCCloud#playAudioEffect} on legacy versions. + * After v7.3, the sound effect API has been replaced by a new {@link TXAudioEffectManager#startPlayMusic} API. + * When you specify the {@link TXAudioMusicParam} parameter of `startPlayMusic`, if `isShortFile` is set to `true`, the file is a "sound effect" file. + */ +struct TRTCAudioEffectParam { + ///**Field description:** sound effect ID + ///**Note:** the SDK supports playing multiple sound effects. IDs are used to distinguish different sound effects and control their start, end, volume, etc. + int effectId; + + ///**Field description:** sound effect file path. Supported file formats include AAC, MP3, and M4A. + const char *path; + + ///**Field description:** number of times the sound effect is looped + ///**Valid values:** 0 or any positive integer. 0 (default) indicates that the sound effect is played once, 1 twice, and so on. + int loopCount; + + ///**Field description:** whether the sound effect is upstreamed + ///**Recommended value:** true: when the sound effect is played back locally, it will be upstreamed to the cloud and can be heard by remote users. false: the sound effect will not be upstreamed to the cloud and can only be heard locally. 
Default + /// value: false + bool publish; + + ///**Field description:** sound effect volume + ///**Recommended value:** value range: 0–100. Default value: 100 + int volume; + + TRTCAudioEffectParam(const int _effectId, const char *_path) : loopCount(0), publish(false), volume(100) { + effectId = _effectId; + path = _path; + } +}; + +/** + * 5.18 Room switch parameter + * + * This parameter is used for the room switch API {@link switchRoom}, which can quickly switch a user from one room to another. + */ +struct TRTCSwitchRoomConfig { + ///**Field description:** numeric room ID, which is optional. Users in the same room can see one another and make audio/video calls. + ///**Recommended value:** value range: 1–4294967294. + ///**Note:** either `roomId` or `strRoomId` must be entered. If both are entered, `roomId` will be used. + uint32_t roomId; + + ///**Field description:** string-type room ID, which is optional. Users in the same room can see one another and make audio/video calls. + ///**Note:** either `roomId` or `strRoomId` must be entered. If both are entered, `roomId` will be used. + const char *strRoomId; + + ///**Field description:** user signature, which is optional. It is the authentication signature corresponding to the current `userId` and acts as the login password. + /// If you don't specify the newly calculated `userSig` during room switch, the SDK will continue to use the `userSig` you specified during room entry (enterRoom). + /// This requires you to ensure that the old `userSig` is still within the validity period allowed by the signature at the moment of room switch; otherwise, room switch will fail. + ///**Recommended value:** for the calculation method, please see [UserSig](https://cloud.tencent.com/document/product/647/17275). + const char *userSig; + + ///**Field description:** permission credential used for permission control, which is optional. 
If you want only users with the specified `userId` values to enter a room, you need to use `privateMapKey` to restrict the permission. + ///**Recommended value:** we recommend you use this parameter only if you have high security requirements. For more information, please see [Enabling Advanced Permission Control](https://cloud.tencent.com/document/product/647/32240). + const char *privateMapKey; + + TRTCSwitchRoomConfig() : roomId(0), strRoomId(nullptr), userSig(nullptr), privateMapKey(nullptr) { + } +}; + +/** + * 5.19 Format parameter of custom audio callback + * + * This parameter is used to set the relevant format (including sample rate and number of channels) of the audio data called back by the SDK in the APIs related to custom audio callback. + */ +struct TRTCAudioFrameCallbackFormat { + ///**Field description:** sample rate + ///**Recommended value:** default value: 48000 Hz. Valid values: 16000, 32000, 44100, 48000. + int sampleRate; + + ///**Field description:** number of sound channels + ///**Recommended value:** default value: 1, which means mono channel. Valid values: 1: mono channel; 2: dual channel. + int channel; + + ///**Field description:** number of sample points + ///**Recommended value:** the value must be an integer multiple of sampleRate/100. + int samplesPerCall; + + TRTCAudioFrameCallbackFormat() : sampleRate(0), channel(0), samplesPerCall(0) { + } +}; + +/** + * 5.21 Screen sharing target information (for desktop systems only) + * + * When users share the screen, they can choose to share the entire desktop or only the window of a certain program. + * `TRTCScreenCaptureSourceInfo` is used to describe the information of the target to be shared, including ID, name, and thumbnail. The fields in this structure are read-only. + */ +// Structure for storing window thumbnails and icons. 
+struct TRTCImageBuffer { + const char *buffer; ///< image content in BGRA format + uint32_t length; ///< buffer size + uint32_t width; ///< image width + uint32_t height; ///< image height + TRTCImageBuffer() : buffer(nullptr), length(0), width(0), height(0){}; +}; + +struct TRTCScreenCaptureSourceInfo { + ///**Field description:** capturing source type (i.e., whether to share the entire screen or a certain window) + TRTCScreenCaptureSourceType type; + + ///**Field description:** capturing source ID. For a window, this field indicates a window ID; for a screen, this field indicates a display ID. + TXView sourceId; + + ///**Field description:** capturing source name (encoded in UTF-8) + const char *sourceName; + + ///**Field description:** thumbnail of the shared window + TRTCImageBuffer thumbBGRA; + + ///**Field description:** icon of the shared window + TRTCImageBuffer iconBGRA; + + ///**Field description:** is minimized window or not + bool isMinimizeWindow; + + ///**Field description:** Whether it is the main display (applicable to the case of multiple displays) + bool isMainScreen; + + TRTCScreenCaptureSourceInfo() : type(TRTCScreenCaptureSourceTypeUnknown), sourceId(nullptr), sourceName(nullptr), isMinimizeWindow(false), isMainScreen(false){}; +}; + +/** + * 5.22 List of sharable screens and windows + * + * This structure is equivalent to `std::vector`, which is used to solve the binary compatibility problem of different versions of STL containers. + */ +class ITRTCScreenCaptureSourceList { + protected: + virtual ~ITRTCScreenCaptureSourceList() { + } + + public: + /** + * Size of this list. + */ + virtual uint32_t getCount() = 0; + /** + * Get element(TRTCScreenCaptureSourceInfo) by index. + */ + virtual TRTCScreenCaptureSourceInfo getSourceInfo(uint32_t index) = 0; + /** + * Don't use delete!!! 
+ */ + virtual void release() = 0; +}; + +/** + * 5.23 Advanced control parameter of screen sharing + * + * This parameter is used in the screen sharing-related API {@link selectScreenCaptureTarget} to set a series of advanced control parameters when specifying the sharing target. + * For example, whether to capture the cursor, whether to capture the subwindow, and whether to draw a frame around the shared target. + */ +struct TRTCScreenCaptureProperty { + ///**Field description:** whether to capture the cursor while capturing the target content. Default value: true. + bool enableCaptureMouse; + + ///**Field description:** whether to highlight the window being shared (i.e., drawing a frame around the shared target). Default value: true. + bool enableHighLight; + + ///**Field description:** whether to enable the high performance mode (which will take effect only during screen sharing). Default value: true. + ///**Note:** the screen capturing performance is the best after this mode is enabled, but the anti-blocking ability will be lost. If you enable `enableHighLight` and `enableHighPerformance` at the same time, remote users will see the highlighted + /// frame. + bool enableHighPerformance; + + ///**Field description:** specify the color of the highlighted frame in RGB format. 0 indicates to use the default color of #8CBF26. + int highLightColor; + + ///**Field description:** specify the width of the highlighted frame. 0 indicates to use the default width of 5 px. The maximum value you can set is 50. + int highLightWidth; + + ///**Field description:** whether to capture the subwindow during window capturing (the subwindow and the captured window need to have an `Owner` or `Popup` attribute). Default value: false. 
+ bool enableCaptureChildWindow; + TRTCScreenCaptureProperty() : enableCaptureMouse(true), enableHighLight(true), enableHighPerformance(true), highLightColor(0), highLightWidth(0), enableCaptureChildWindow(false) { + } +}; + +typedef ITXDeviceCollection ITRTCDeviceCollection; +typedef ITXDeviceInfo ITRTCDeviceInfo; + +/** + * 5.24 parameter of the parallel strategy of remote audio streams + * + * This parameter is used to set the parallel strategy of remote audio streams. + */ +struct TRTCAudioParallelParams { + ///**Field description:** Max number of remote audio streams. Default value: 0 + /// if maxCount > 0 and the number of people in the room is more than `maxCount`,SDK will select `maxCount` of remote audio streams in real time, which can reduce performance cost greatly. + /// if maxCount = 0,SDK won't limit the number of remote audio streams, which may cause performance cost when there are many speakers in one room. + uint32_t maxCount; + + ///**Field description:** Users included that must be able to play. + ///**Note:** A list of user IDs. These users must be able to play and do not participate in smart selection. + /// The number of `incluseUsers` must be less than `maxCount`. Otherwise, the setting of the parallel strategy of remote audio streams is invalid. + ///`incluseUsers` is valid when `maxCount` > 0. When `incluseUsers` takes effect, the max number of remote audio streams is (`maxCount` - the number of valid users in `incluseUsers`). 
+ char **includeUsers; + uint32_t includeUsersCount; + + TRTCAudioParallelParams() : maxCount(0), includeUsers(nullptr), includeUsersCount(0) { + } +}; + +/// @} +} // namespace liteav + +namespace trtc = liteav; + +#ifdef _WIN32 +using namespace liteav; +#endif + +#endif /* __TRTCCLOUDDEF_H__ */ +/// @} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TXLiteAVCode.h b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TXLiteAVCode.h new file mode 100644 index 0000000..d518a35 --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Headers/cpp_interface/TXLiteAVCode.h @@ -0,0 +1,430 @@ +#ifndef __TXLITEAVCODE_H__ +#define __TXLITEAVCODE_H__ + +///////////////////////////////////////////////////////////////////////////////// +// +// 错误码 +// +///////////////////////////////////////////////////////////////////////////////// + +typedef enum TXLiteAVError +{ + ///////////////////////////////////////////////////////////////////////////////// + // + // 基础错误码 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_NULL = 0, ///< 无错误 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 进房(enterRoom)相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##onEnterRoom() 和 TRTCCloudDelegate##OnError() 通知 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_ROOM_ENTER_FAIL = -3301, ///< 进入房间失败,请查看 onError 中的 -3301 对应的 msg 提示确认失败原因 + ERR_ROOM_REQUEST_IP_TIMEOUT = -3307, ///< 请求 IP 和 sig 超时,请检查网络是否正常,或网络防火墙是否放行 UDP。可尝试访问下列 IP:162.14.22.165:8000 162.14.6.105:8000 和域名:default-query.trtc.tencent-cloud.com:8000 + ERR_ROOM_REQUEST_ENTER_ROOM_TIMEOUT = -3308, ///< 请求进房超时,请检查是否断网或者是否开启vpn,您也可以切换4G进行测试确认 + ERR_ENTER_ROOM_PARAM_NULL = -3316, ///< 进房参数为空,请检查: enterRoom:appScene: 接口调用是否传入有效的 param + ERR_SDK_APPID_INVALID = -3317, ///< 进房参数 sdkAppId 错误,请检查 TRTCParams.sdkAppId 是否为空 + ERR_ROOM_ID_INVALID = -3318, ///< 进房参数 roomId 错误,请检查 TRTCParams.roomId 或 
TRTCParams.strRoomId 是否为空,注意 roomId 和 strRoomId 不可混用 + ERR_USER_ID_INVALID = -3319, ///< 进房参数 userId 不正确,请检查 TRTCParams.userId 是否为空 + ERR_USER_SIG_INVALID = -3320, ///< 进房参数 userSig 不正确,请检查 TRTCParams.userSig 是否为空 + ERR_ROOM_REQUEST_ENTER_ROOM_REFUSED = -3340, ///< 进房请求被拒绝,请检查是否连续调用 enterRoom 进入相同 Id 的房间 + ERR_SERVER_INFO_PRIVILEGE_FLAG_ERROR = -100006, ///< 您开启了高级权限控制,但参数 TRTCParams.privateMapKey 校验失败,您可参考 https://cloud.tencent.com/document/product/647/32240 进行检查 + ERR_SERVER_INFO_SERVICE_SUSPENDED = -100013, ///< 服务不可用。请检查:套餐包剩余分钟数是否大于0,腾讯云账号是否欠费。您可参考 https://cloud.tencent.com/document/product/647/50492 进行查看与配置 + ERR_SERVER_INFO_ECDH_GET_TINYID = -100018, ///< UserSig 校验失败,请检查参数 TRTCParams.userSig 是否填写正确,或是否已经过期。您可参考 https://cloud.tencent.com/document/product/647/50686 进行校验 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 退房(exitRoom)相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_ROOM_REQUEST_QUIT_ROOM_TIMEOUT = -3325, ///< 请求退房超时 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 设备(摄像头、麦克风、扬声器)相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 + // 区段:-6000 ~ -6999 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_CAMERA_START_FAIL = -1301, ///< 打开摄像头失败,例如在 Windows 或 Mac 设备,摄像头的配置程序(驱动程序)异常,禁用后重新启用设备,或者重启机器,或者更新配置程序 + ERR_CAMERA_NOT_AUTHORIZED = -1314, ///< 摄像头设备未授权,通常在移动设备出现,可能是权限被用户拒绝了 + ERR_CAMERA_SET_PARAM_FAIL = -1315, ///< 摄像头参数设置出错(参数不支持或其它) + ERR_CAMERA_OCCUPY = -1316, ///< 摄像头正在被占用中,可尝试打开其他摄像头 + ERR_MIC_START_FAIL = -1302, ///< 打开麦克风失败,例如在 Windows 或 Mac 设备,麦克风的配置程序(驱动程序)异常,禁用后重新启用设备,或者重启机器,或者更新配置程序 + ERR_MIC_NOT_AUTHORIZED = -1317, ///< 麦克风设备未授权,通常在移动设备出现,可能是权限被用户拒绝了 + ERR_MIC_SET_PARAM_FAIL = -1318, ///< 麦克风设置参数失败 + ERR_MIC_OCCUPY = -1319, ///< 麦克风正在被占用中,例如移动设备正在通话时,打开麦克风会失败 + ERR_MIC_STOP_FAIL = -1320, ///< 
停止麦克风失败 + ERR_SPEAKER_START_FAIL = -1321, ///< 打开扬声器失败,例如在 Windows 或 Mac 设备,扬声器的配置程序(驱动程序)异常,禁用后重新启用设备,或者重启机器,或者更新配置程序 + ERR_SPEAKER_SET_PARAM_FAIL = -1322, ///< 扬声器设置参数失败 + ERR_SPEAKER_STOP_FAIL = -1323, ///< 停止扬声器失败 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 系统声音采集相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##onSystemAudioLoopbackError() 通知 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_AUDIO_PLUGIN_START_FAIL = -1330, ///< 开启系统声音录制失败,例如音频驱动插件不可用 + ERR_AUDIO_PLUGIN_INSTALL_NOT_AUTHORIZED = -1331, ///< 安装音频驱动插件未授权 + ERR_AUDIO_PLUGIN_INSTALL_FAILED = -1332, ///< 安装音频驱动插件失败 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 屏幕分享相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_SCREEN_CAPTURE_START_FAIL = -1308, ///< 开始录屏失败,如果在移动设备出现,可能是权限被用户拒绝了,如果在 Windows 或 Mac 系统的设备出现,请检查录屏接口的参数是否符合要求 + ERR_SCREEN_CAPTURE_UNSURPORT = -1309, ///< 录屏失败,在 Android 平台,需要5.0以上的系统,在 iOS 平台,需要11.0以上的系统 + ERR_SERVER_CENTER_NO_PRIVILEDGE_PUSH_SUB_VIDEO = -102015, ///< 没有权限上行辅路 + ERR_SERVER_CENTER_ANOTHER_USER_PUSH_SUB_VIDEO = -102016, ///< 其他用户正在上行辅路 + ERR_SCREEN_CAPTURE_STOPPED = -7001, ///< 录屏被系统中止 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 编解码相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##OnError() 通知 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_VIDEO_ENCODE_FAIL = -1303, ///< 视频帧编码失败,例如 iOS 设备切换到其他应用时,硬编码器可能被系统释放,再切换回来时,硬编码器重启前,可能会抛出 + ERR_UNSUPPORTED_RESOLUTION = -1305, ///< 不支持的视频分辨率 + ERR_AUDIO_ENCODE_FAIL = -1304, ///< 音频帧编码失败,例如传入自定义音频数据,SDK 无法处理 + ERR_UNSUPPORTED_SAMPLERATE = -1306, ///< 不支持的音频采样率 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 自定义采集相关错误码 + // NOTE: 通过回调函数 
TRTCCloudDelegate##OnError() 通知 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_PIXEL_FORMAT_UNSUPPORTED = -1327, ///< 设置的 pixel format 不支持 + ERR_BUFFER_TYPE_UNSUPPORTED = -1328, ///< 设置的 buffer type 不支持 + + ///////////////////////////////////////////////////////////////////////////////// + // + // CDN 绑定和混流相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##onStartPublishing() 和 TRTCCloudDelegate##onSetMixTranscodingConfig 通知。 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_PUBLISH_CDN_STREAM_REQUEST_TIME_OUT = -3321, ///< 旁路转推请求超时 + ERR_CLOUD_MIX_TRANSCODING_REQUEST_TIME_OUT = -3322, ///< 云端混流请求超时 + ERR_PUBLISH_CDN_STREAM_SERVER_FAILED = -3323, ///< 旁路转推回包异常 + ERR_CLOUD_MIX_TRANSCODING_SERVER_FAILED = -3324, ///< 云端混流回包异常 + ERR_ROOM_REQUEST_START_PUBLISHING_TIMEOUT = -3333, ///< 开始向腾讯云的直播 CDN 推流信令超时 + ERR_ROOM_REQUEST_START_PUBLISHING_ERROR = -3334, ///< 开始向腾讯云的直播 CDN 推流信令异常 + ERR_ROOM_REQUEST_STOP_PUBLISHING_TIMEOUT = -3335, ///< 停止向腾讯云的直播 CDN 推流信令超时 + ERR_ROOM_REQUEST_STOP_PUBLISHING_ERROR = -3336, ///< 停止向腾讯云的直播 CDN 推流信令异常 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 跨房连麦(ConnectOtherRoom)相关错误码 + // NOTE: 通过回调函数 TRTCCloudDelegate##onConnectOtherRoom() 通知。 + // + ///////////////////////////////////////////////////////////////////////////////// + ERR_ROOM_REQUEST_CONN_ROOM_TIMEOUT = -3326, ///< 请求连麦超时 + ERR_ROOM_REQUEST_DISCONN_ROOM_TIMEOUT = -3327, ///< 请求退出连麦超时 + ERR_ROOM_REQUEST_CONN_ROOM_INVALID_PARAM = -3328, ///< 无效参数 + ERR_CONNECT_OTHER_ROOM_AS_AUDIENCE = -3330, ///< 当前是观众角色,不能请求或断开跨房连麦,需要先 switchRole() 到主播 + ERR_SERVER_CENTER_CONN_ROOM_NOT_SUPPORT = -102031, ///< 不支持跨房间连麦 + ERR_SERVER_CENTER_CONN_ROOM_REACH_MAX_NUM = -102032, ///< 达到跨房间连麦上限 + ERR_SERVER_CENTER_CONN_ROOM_REACH_MAX_RETRY_TIMES = -102033, ///< 跨房间连麦重试次数耗尽 + ERR_SERVER_CENTER_CONN_ROOM_REQ_TIMEOUT = -102034, ///< 跨房间连麦请求超时 + ERR_SERVER_CENTER_CONN_ROOM_REQ = 
-102035, ///< 跨房间连麦请求格式错误 + ERR_SERVER_CENTER_CONN_ROOM_NO_SIG = -102036, ///< 跨房间连麦无签名 + ERR_SERVER_CENTER_CONN_ROOM_DECRYPT_SIG = -102037, ///< 跨房间连麦签名解密失败 + ERR_SERVER_CENTER_CONN_ROOM_NO_KEY = -102038, ///< 未找到跨房间连麦签名解密密钥 + ERR_SERVER_CENTER_CONN_ROOM_PARSE_SIG = -102039, ///< 跨房间连麦签名解析错误 + ERR_SERVER_CENTER_CONN_ROOM_INVALID_SIG_TIME = -102040, ///< 跨房间连麦签名时间戳错误 + ERR_SERVER_CENTER_CONN_ROOM_SIG_GROUPID = -102041, ///< 跨房间连麦签名不匹配 + ERR_SERVER_CENTER_CONN_ROOM_NOT_CONNED = -102042, ///< 本房间无连麦 + ERR_SERVER_CENTER_CONN_ROOM_USER_NOT_CONNED = -102043, ///< 本用户未发起连麦 + ERR_SERVER_CENTER_CONN_ROOM_FAILED = -102044, ///< 跨房间连麦失败 + ERR_SERVER_CENTER_CONN_ROOM_CANCEL_FAILED = -102045, ///< 取消跨房间连麦失败 + ERR_SERVER_CENTER_CONN_ROOM_CONNED_ROOM_NOT_EXIST = -102046, ///< 被连麦房间不存在 + ERR_SERVER_CENTER_CONN_ROOM_CONNED_REACH_MAX_ROOM = -102047, ///< 被连麦房间达到连麦上限 + ERR_SERVER_CENTER_CONN_ROOM_CONNED_USER_NOT_EXIST = -102048, ///< 被连麦用户不存在 + ERR_SERVER_CENTER_CONN_ROOM_CONNED_USER_DELETED = -102049, ///< 被连麦用户已被删除 + ERR_SERVER_CENTER_CONN_ROOM_CONNED_USER_FULL = -102050, ///< 被连麦用户达到资源上限 + ERR_SERVER_CENTER_CONN_ROOM_INVALID_SEQ = -102051, ///< 连麦请求序号错乱 + + ///////////////////////////////////////////////////////////////////////////////// + // + // 客户无需关心的内部错误码 + // + ///////////////////////////////////////////////////////////////////////////////// + + // - Remove From Head + ERR_RTMP_PUSH_NET_DISCONNECT = -1307, ///< 直播,推流出现网络断开,且经过多次重试无法恢复 + ERR_RTMP_PUSH_INVALID_ADDRESS = -1313, ///< 直播,推流地址非法,例如不是 RTMP 协议的地址 + ERR_RTMP_PUSH_NET_ALLADDRESS_FAIL = -1324, ///< 直播,连接推流服务器失败(若支持智能选路,IP 全部失败) + ERR_RTMP_PUSH_NO_NETWORK = -1325, ///< 直播,网络不可用,请确认 Wi-Fi、移动数据或者有线网络是否正常 + ERR_RTMP_PUSH_SERVER_REFUSE = -1326, ///< 直播,服务器拒绝连接请求,可能是该推流地址已经被占用,或者 TXSecret 校验失败,或者是过期了,或者是欠费了 + + ERR_PLAY_LIVE_STREAM_NET_DISCONNECT = -2301, ///< 直播,网络断连,且经多次重连抢救无效,可以放弃治疗,更多重试请自行重启播放 + ERR_GET_RTMP_ACC_URL_FAIL = -2302, ///< 直播,获取加速拉流的地址失败 + ERR_FILE_NOT_FOUND = -2303, ///< 播放的文件不存在 + ERR_HEVC_DECODE_FAIL = 
-2304, ///< H265 解码失败 + ERR_VOD_DECRYPT_FAIL = -2305, ///< 点播,音视频流解密失败 + ERR_GET_VODFILE_MEDIAINFO_FAIL = -2306, ///< 点播,获取点播文件信息失败 + ERR_PLAY_LIVE_STREAM_SWITCH_FAIL = -2307, ///< 直播,切流失败(切流可以播放不同画面大小的视频) + ERR_PLAY_LIVE_STREAM_SERVER_REFUSE = -2308, ///< 直播,服务器拒绝连接请求 + ERR_RTMP_ACC_FETCH_STREAM_FAIL = -2309, ///< 直播,RTMPACC 低延时拉流失败,且经过多次重试无法恢复 + ERR_HEVC_ENCODE_FAIL = -2310, ///< 265编码失败 + ERR_HEVC_ENCODE_NOT_SUPPORT = -2311, ///< 265编码判断不支持 + ERR_HEVC_SOFTDECODER_START_FAIL = -2312, ///< 265软解启动失败 + + ERR_ROOM_HEARTBEAT_FAIL = -3302, ///< 心跳失败,客户端定时向服务器发送数据包,告诉服务器自己活着,这个错误通常是发包超时 + ERR_ROOM_REQUEST_IP_FAIL = -3303, ///< 拉取接口机服务器地址失败 + ERR_ROOM_CONNECT_FAIL = -3304, ///< 连接接口机服务器失败 + ERR_ROOM_REQUEST_AVSEAT_FAIL = -3305, ///< 请求视频位失败 + ERR_ROOM_REQUEST_TOKEN_HTTPS_TIMEOUT = -3306, ///< 请求 token HTTPS 超时,请检查网络是否正常,或网络防火墙是否放行 HTTPS 访问 official.opensso.tencent-cloud.com:443 + ERR_ROOM_REQUEST_VIDEO_FLAG_TIMEOUT = -3309, ///< 请求视频位超时 + ERR_ROOM_REQUEST_VIDEO_DATA_ROOM_TIMEOUT = -3310, ///< 请求视频数据超时 + ERR_ROOM_REQUEST_CHANGE_ABILITY_TIMEOUT = -3311, ///< 请求修改视频能力项超时 + ERR_ROOM_REQUEST_STATUS_REPORT_TIMEOUT = -3312, ///< 请求状态上报超时 + ERR_ROOM_REQUEST_CLOSE_VIDEO_TIMEOUT = -3313, ///< 请求关闭视频超时 + ERR_ROOM_REQUEST_SET_RECEIVE_TIMEOUT = -3314, ///< 请求接收视频项超时 + ERR_ROOM_REQUEST_TOKEN_INVALID_PARAMETER = -3315, ///< 请求 token 无效参数,请检查 TRTCParams.userSig 是否填写正确 + ERR_ROOM_REQUEST_EXIT_ROOM_WHEN_ENTERING_ROOM = -3341, ///< 进房尚未成功时,收到了退房请求 + + ERR_ROOM_REQUEST_AES_TOKEN_RETURN_ERROR = -3329, ///< 请求 AES TOKEN 时,server 返回的内容是空的 + ERR_ACCIP_LIST_EMPTY = -3331, ///< 请求接口机 IP 返回的列表为空的 + ERR_ROOM_REQUEST_SEND_JSON_CMD_TIMEOUT = -3332, ///< 请求发送Json 信令超时 + + // Info 服务器(查询接口机 IP), 服务器错误码,数值范围[-100000, -110000] + ERR_SERVER_INFO_UNPACKING_ERROR = -100000, ///< server 解包错误,可能请求数据被篡改 + ERR_SERVER_INFO_TOKEN_ERROR = -100001, ///< TOKEN 错误 + ERR_SERVER_INFO_ALLOCATE_ACCESS_FAILED = -100002, ///< 分配接口机错误 + ERR_SERVER_INFO_GENERATE_SIGN_FAILED = -100003, ///< 生成签名错误 + 
ERR_SERVER_INFO_TOKEN_TIMEOUT = -100004, ///< HTTPS token 超时 + ERR_SERVER_INFO_INVALID_COMMAND = -100005, ///< 无效的命令字 + ERR_SERVER_INFO_GENERATE_KEN_ERROR = -100007, ///< HTTPS 请求时,生成加密 key 错误 + ERR_SERVER_INFO_GENERATE_TOKEN_ERROR = -100008, ///< HTTPS 请求时,生成 token 错误 + ERR_SERVER_INFO_DATABASE = -100009, ///< 数据库查询失败(房间相关存储信息) + ERR_SERVER_INFO_BAD_ROOMID = -100010, ///< 房间号错误 + ERR_SERVER_INFO_BAD_SCENE_OR_ROLE = -100011, ///< 场景或角色错误 + ERR_SERVER_INFO_ROOMID_EXCHANGE_FAILED = -100012, ///< 房间号转换出错 + ERR_SERVER_INFO_STRGROUP_HAS_INVALID_CHARS = -100014, ///< 房间号非法 + ERR_SERVER_INFO_LACK_SDKAPPID = -100015, ///< 非法SDKAppid + ERR_SERVER_INFO_INVALID = -100016, ///< 无效请求, 分配接口机失败 + ERR_SERVER_INFO_ECDH_GET_KEY = -100017, ///< 生成公钥失败 + + // Access 接口机 + ERR_SERVER_ACC_TOKEN_TIMEOUT = -101000, ///< token 过期 + ERR_SERVER_ACC_SIGN_ERROR = -101001, ///< 签名错误 + ERR_SERVER_ACC_SIGN_TIMEOUT = -101002, ///< 签名超时 + ERR_SERVER_ACC_ROOM_NOT_EXIST = -101003, ///< 房间不存在 + ERR_SERVER_ACC_ROOMID = -101004, ///< 后台房间标识 roomId 错误 + ERR_SERVER_ACC_LOCATIONID = -101005, ///< 后台用户位置标识 locationId 错误 + ERR_SERVER_ACC_TOKEN_EORROR = -101006, ///< token里面的tinyid和进房信令tinyid不同 或是 进房信令没有token + + // Center 服务器(信令和流控处理等任务) + ERR_SERVER_CENTER_SYSTEM_ERROR = -102000, ///< 后台错误 + + ERR_SERVER_CENTER_INVALID_ROOMID = -102001, ///< 无效的房间 Id + ERR_SERVER_CENTER_CREATE_ROOM_FAILED = -102002, ///< 创建房间失败 + ERR_SERVER_CENTER_SIGN_ERROR = -102003, ///< 签名错误 + ERR_SERVER_CENTER_SIGN_TIMEOUT = -102004, ///< 签名过期 + ERR_SERVER_CENTER_ROOM_NOT_EXIST = -102005, ///< 房间不存在 + ERR_SERVER_CENTER_ADD_USER_FAILED = -102006, ///< 房间添加用户失败 + ERR_SERVER_CENTER_FIND_USER_FAILED = -102007, ///< 查找用户失败 + ERR_SERVER_CENTER_SWITCH_TERMINATION_FREQUENTLY = -102008, ///< 频繁切换终端 + ERR_SERVER_CENTER_LOCATION_NOT_EXIST = -102009, ///< locationid 错误 + ERR_SERVER_CENTER_NO_PRIVILEDGE_CREATE_ROOM = -102010, ///< 没有权限创建房间 + ERR_SERVER_CENTER_NO_PRIVILEDGE_ENTER_ROOM = -102011, ///< 没有权限进入房间 + 
ERR_SERVER_CENTER_INVALID_PARAMETER_SUB_VIDEO = -102012, ///< 辅路抢视频位、申请辅路请求类型参数错误 + ERR_SERVER_CENTER_NO_PRIVILEDGE_PUSH_VIDEO = -102013, ///< 没有权限上视频 + ERR_SERVER_CENTER_ROUTE_TABLE_ERROR = -102014, ///< 没有空闲路由表 + ERR_SERVER_CENTER_NOT_PUSH_SUB_VIDEO = -102017, ///< 当前用户没有上行辅路 + ERR_SERVER_CENTER_USER_WAS_DELETED = -102018, ///< 用户被删除状态 + ERR_SERVER_CENTER_NO_PRIVILEDGE_REQUEST_VIDEO = -102019, ///< 没有权限请求视频 + ERR_SERVER_CENTER_INVALID_PARAMETER = -102023, ///< 进房参数 bussInfo 错误 + ERR_SERVER_CENTER_I_FRAME_UNKNOW_TYPE = -102024, ///< 请求 I 帧未知 opType + ERR_SERVER_CENTER_I_FRAME_INVALID_PACKET = -102025, ///< 请求 I 帧包格式错误 + ERR_SERVER_CENTER_I_FRAME_DEST_USER_NOT_EXIST = -102026, ///< 请求 I 帧目标用户不存在 + ERR_SERVER_CENTER_I_FRAME_ROOM_TOO_BIG = -102027, ///< 请求 I 帧房间用户太多 + ERR_SERVER_CENTER_I_FRAME_RPS_INVALID_PARAMETER = -102028, ///< 请求 I 帧参数错误 + ERR_SERVER_CENTER_INVALID_ROOM_ID = -102029, ///< 房间号非法 + ERR_SERVER_CENTER_ROOM_ID_TOO_LONG = -102030, ///< 房间号超过限制 + ERR_SERVER_CENTER_ROOM_FULL = -102052, ///< 房间满员 + ERR_SERVER_CENTER_DECODE_JSON_FAIL = -102053, ///< JSON 串解析失败 + ERR_SERVER_CENTER_UNKNOWN_SUB_CMD = -102054, ///< 未定义命令字 + ERR_SERVER_CENTER_INVALID_ROLE = -102055, ///< 未定义角色 + ERR_SERVER_CENTER_REACH_PROXY_MAX = -102056, ///< 代理机超出限制 + ERR_SERVER_CENTER_RECORDID_STORE = -102057, ///< 无法保存用户自定义 recordId + ERR_SERVER_CENTER_PB_SERIALIZE = -102058, ///< Protobuf 序列化错误 + + ERR_SERVER_SSO_SIG_EXPIRED = -70001, ///< sig 过期,请尝试重新生成。如果是刚生成,就过期,请检查有效期填写的是否过小,或者填的 0 + ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_1 = -70003, ///< sig 校验失败,请确认下 sig 内容是否被截断,如缓冲区长度不够导致的内容截断 + ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_2 = -70004, ///< sig 校验失败,请确认下 sig 内容是否被截断,如缓冲区长度不够导致的内容截断 + ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_3 = -70005, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 + ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_4 = -70006, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 + ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_5 = -70007, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 + ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_6 = 
-70008, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 + ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_7 = -70009, ///< 用业务公钥验证 sig 失败,请确认生成的 usersig 使用的私钥和 sdkAppId 是否对应 + ERR_SERVER_SSO_SIG_VERIFICATION_FAILED_8 = -70010, ///< sig 校验失败,可用工具自行验证生成的 sig 是否正确 + ERR_SERVER_SSO_SIG_VERIFICATION_ID_NOT_MATCH = -70013, ///< sig 中 identifier 与请求时的 identifier 不匹配,请检查登录时填写的 identifier 与 sig 中的是否一致 + ERR_SERVER_SSO_APPID_NOT_MATCH = -70014, ///< sig 中 sdkAppId 与请求时的 sdkAppId 不匹配,请检查登录时填写的 sdkAppId 与 sig 中的是否一致 + ERR_SERVER_SSO_VERIFICATION_EXPIRED = -70017, ///< 内部第三方票据验证超时,请重试,如多次重试不成功,请@TLS 帐号支持,QQ 3268519604 + ERR_SERVER_SSO_VERIFICATION_FAILED = -70018, ///< 内部第三方票据验证超时,请重试,如多次重试不成功,请@TLS 帐号支持,QQ 3268519604 + + ERR_SERVER_SSO_APPID_NOT_FOUND = -70020, ///< sdkAppId 未找到,请确认是否已经在腾讯云上配置 + ERR_SERVER_SSO_ACCOUNT_IN_BLACKLIST = -70051, ///< 帐号已被拉入黑名单,请联系 TLS 帐号支持 QQ 3268519604 + ERR_SERVER_SSO_SIG_INVALID = -70052, ///< usersig 已经失效,请重新生成,再次尝试 + ERR_SERVER_SSO_LIMITED_BY_SECURITY = -70114, ///< 安全原因被限制 + ERR_SERVER_SSO_INVALID_LOGIN_STATUS = -70221, ///< 登录状态无效,请使用 usersig 重新鉴权 + ERR_SERVER_SSO_APPID_ERROR = -70252, ///< sdkAppId 填写错误 + ERR_SERVER_SSO_TICKET_VERIFICATION_FAILED = -70346, ///< 票据校验失败,请检查各项参数是否正确 + ERR_SERVER_SSO_TICKET_EXPIRED = -70347, ///< 票据因过期原因校验失败 + ERR_SERVER_SSO_ACCOUNT_EXCEED_PURCHASES = -70398, ///< 创建账号数量超过已购买预付费数量限制 + ERR_SERVER_SSO_INTERNAL_ERROR = -70500, ///< 服务器内部错误,请重试 + + //秒级监控上报错误码 + ERR_REQUEST_QUERY_CONFIG_TIMEOUT = -4001, ///< 请求通用配置超时 + ERR_CUSTOM_STREAM_INVALID = -4002, ///< 自定义流id错误 + ERR_USER_DEFINE_RECORD_ID_INVALID = -4003, ///< userDefineRecordId错误 + ERR_MIX_PARAM_INVALID = -4004, ///< 混流参数校验失败 + ERR_REQUEST_ACC_BY_HOST_IP = -4005, ///< 通过域名进行0x1请求 + // - /Remove From Head +} TXLiteAVError; + +///////////////////////////////////////////////////////////////////////////////// +// +// 警告码 +// +//> 不需要特别关注,但您可以根据其中某些感兴趣的警告码,对当前用户进行相应的提示 +// +///////////////////////////////////////////////////////////////////////////////// + +typedef enum 
TXLiteAVWarning +{ + WARNING_HW_ENCODER_START_FAIL = 1103, ///< 硬编码启动出现问题,自动切换到软编码 + WARNING_CURRENT_ENCODE_TYPE_CHANGED = 1104, ///< 当前编码格式, 通过key 为type获取,值为1时是265编码,值为0时是264编码 + WARNING_VIDEO_ENCODER_SW_TO_HW = 1107, ///< 当前 CPU 使用率太高,无法满足软件编码需求,自动切换到硬件编码 + WARNING_INSUFFICIENT_CAPTURE_FPS = 1108, ///< 摄像头采集帧率不足,部分自带美颜算法的 Android 手机上会出现 + WARNING_SW_ENCODER_START_FAIL = 1109, ///< 软编码启动失败 + WARNING_REDUCE_CAPTURE_RESOLUTION = 1110, ///< 摄像头采集分辨率被降低,以满足当前帧率和性能最优解。 + WARNING_CAMERA_DEVICE_EMPTY = 1111, ///< 没有检测到可用的摄像头设备 + WARNING_CAMERA_NOT_AUTHORIZED = 1112, ///< 用户未授权当前应用使用摄像头 + WARNING_MICROPHONE_DEVICE_EMPTY = 1201, ///< 没有检测到可用的麦克风设备 + WARNING_SPEAKER_DEVICE_EMPTY = 1202, ///< 没有检测到可用的扬声器设备 + WARNING_MICROPHONE_NOT_AUTHORIZED = 1203, ///< 用户未授权当前应用使用麦克风 + WARNING_MICROPHONE_DEVICE_ABNORMAL = 1204, ///< 音频采集设备不可用(例如被占用或者PC判定无效设备) + WARNING_SPEAKER_DEVICE_ABNORMAL = 1205, ///< 音频播放设备不可用(例如被占用或者PC判定无效设备) + WARNING_SCREEN_CAPTURE_NOT_AUTHORIZED = 1206, ///< 用户未授权当前应用使用屏幕录制 + WARNING_VIDEO_FRAME_DECODE_FAIL = 2101, ///< 当前视频帧解码失败 + WARNING_AUDIO_FRAME_DECODE_FAIL = 2102, ///< 当前音频帧解码失败 + WARNING_VIDEO_PLAY_LAG = 2105, ///< 当前视频播放出现卡顿 + WARNING_HW_DECODER_START_FAIL = 2106, ///< 硬解启动失败,采用软解码 + WARNING_VIDEO_DECODER_HW_TO_SW = 2108, ///< 当前流硬解第一个 I 帧失败,SDK 自动切软解 + WARNING_SW_DECODER_START_FAIL = 2109, ///< 软解码器启动失败 + WARNING_VIDEO_RENDER_FAIL = 2110, ///< 视频渲染失败 + WARNING_START_CAPTURE_IGNORED = 4000, ///< 已经在采集,启动采集被忽略 + WARNING_AUDIO_RECORDING_WRITE_FAIL = 7001, ///< 音频录制写入文件失败 + WARNING_ROOM_DISCONNECT = 5101, ///< 网络断开连接 + WARNING_IGNORE_UPSTREAM_FOR_AUDIENCE = 6001, ///< 当前是观众角色,忽略上行音视频数据 + WARNING_MICROPHONE_HOWLING_DETECTED = 7002, ///< 录制音频时监测到啸叫。请调节两台客户端之间的距离或降低播放音量,检测到啸叫后,5s后会再次进行重新检测 + + // - Remove From Head + WARNING_NET_BUSY = 1101, ///< 网络状况不佳:上行带宽太小,上传数据受阻 + WARNING_RTMP_SERVER_RECONNECT = 1102, ///< 直播,网络断连, 已启动自动重连(自动重连连续失败超过三次会放弃) + WARNING_LIVE_STREAM_SERVER_RECONNECT = 2103, ///< 直播,网络断连, 已启动自动重连(自动重连连续失败超过三次会放弃) + WARNING_RECV_DATA_LAG = 2104, 
///< 网络来包不稳:可能是下行带宽不足,或由于主播端出流不均匀 + WARNING_RTMP_DNS_FAIL = 3001, ///< 直播,DNS 解析失败 + WARNING_RTMP_SEVER_CONN_FAIL = 3002, ///< 直播,服务器连接失败 + WARNING_RTMP_SHAKE_FAIL = 3003, ///< 直播,与 RTMP 服务器握手失败 + WARNING_RTMP_SERVER_BREAK_CONNECT = 3004, ///< 直播,服务器主动断开 + WARNING_RTMP_READ_WRITE_FAIL = 3005, ///< 直播,RTMP 读/写失败,将会断开连接 + WARNING_RTMP_WRITE_FAIL = 3006, ///< 直播,RTMP 写失败(SDK 内部错误码,不会对外抛出) + WARNING_RTMP_READ_FAIL = 3007, ///< 直播,RTMP 读失败(SDK 内部错误码,不会对外抛出) + WARNING_RTMP_NO_DATA = 3008, ///< 直播,超过30s 没有数据发送,主动断开连接 + WARNING_PLAY_LIVE_STREAM_INFO_CONNECT_FAIL = 3009, ///< 直播,connect 服务器调用失败(SDK 内部错误码,不会对外抛出) + WARNING_NO_STEAM_SOURCE_FAIL = 3010, ///< 直播,连接失败,该流地址无视频(SDK 内部错误码,不会对外抛出) + WARNING_ROOM_RECONNECT = 5102, ///< 网络断连,已启动自动重连 + WARNING_ROOM_NET_BUSY = 5103, ///< 网络状况不佳:上行带宽太小,上传数据受阻 + // - /Remove From Head +} TXLiteAVWarning; + +// - Remove From Head +///////////////////////////////////////////////////////////////////////////////// +// +// (三)事件列表 +// +///////////////////////////////////////////////////////////////////////////////// + +typedef enum TXLiteAVEvent +{ + EVT_RTMP_PUSH_CONNECT_SUCC = 1001, ///< 直播,已经连接 RTMP 推流服务器 + EVT_RTMP_PUSH_BEGIN = 1002, ///< 直播,已经与 RTMP 服务器握手完毕,开始推流 + EVT_CAMERA_START_SUCC = 1003, ///< 打开摄像头成功 + EVT_SCREEN_CAPTURE_SUCC = 1004, ///< 录屏启动成功 + EVT_UP_CHANGE_RESOLUTION = 1005, ///< 上行动态调整分辨率 + EVT_UP_CHANGE_BITRATE = 1006, ///< 码率动态调整 + EVT_FIRST_FRAME_AVAILABLE = 1007, ///< 首帧画面采集完成 + EVT_START_VIDEO_ENCODER = 1008, ///< 编码器启动成功 + EVT_SNAPSHOT_COMPLETE = 1022, ///< 一帧截图完成 + EVT_CAMERA_REMOVED = 1023, ///< 摄像头设备已被移出(Windows 和 Mac 版 SDK 使用) + EVT_CAMERA_AVAILABLE = 1024, ///< 摄像头设备重新可用(Windows 和 Mac 版 SDK 使用) + EVT_CAMERA_CLOSE = 1025, ///< 关闭摄像头完成(Windows 和 Mac 版 SDK 使用) + EVT_RTMP_PUSH_PUBLISH_START = 1026, ///< 直播,与 RTMP 服务器连接后,收到 NetStream.Publish.Start 消息,表明流发布成功(SDK 内部事件,不会对外抛出) + EVT_HW_ENCODER_START_SUCC = 1027, ///< 硬编码器启动成功 + EVT_SW_ENCODER_START_SUCC = 1028, ///< 软编码器启动成功 + EVT_LOCAL_RECORD_RESULT = 1029, ///< 本地录制结果 + 
EVT_LOCAL_RECORD_PROGRESS = 1030, ///< 本地录制状态通知 + + EVT_PLAY_LIVE_STREAM_CONNECT_SUCC = 2001, ///< 直播,已经连接 RTMP 拉流服务器 + EVT_PLAY_LIVE_STREAM_BEGIN = 2002, ///< 直播,已经与 RTMP 服务器握手完毕,开始拉流 + EVT_RENDER_FIRST_I_FRAME = 2003, ///< 渲染首个视频数据包(IDR) + EVT_VIDEO_PLAY_BEGIN = 2004, ///< 视频播放开始 + EVT_VIDEO_PLAY_PROGRESS = 2005, ///< 视频播放进度 + EVT_VIDEO_PLAY_END = 2006, ///< 视频播放结束 + EVT_VIDEO_PLAY_LOADING = 2007, ///< 视频播放 loading + EVT_START_VIDEO_DECODER = 2008, ///< 解码器启动 + EVT_DOWN_CHANGE_RESOLUTION = 2009, ///< 下行视频分辨率改变 + EVT_GET_VODFILE_MEDIAINFO_SUCC = 2010, ///< 点播,获取点播文件信息成功 + EVT_VIDEO_CHANGE_ROTATION = 2011, ///< 视频旋转角度发生改变 + EVT_PLAY_GET_MESSAGE = 2012, ///< 消息事件 + EVT_VOD_PLAY_PREPARED = 2013, ///< 点播,视频加载完毕 + EVT_VOD_PLAY_LOADING_END = 2014, ///< 点播,loading 结束 + EVT_PLAY_LIVE_STREAM_SWITCH_SUCC = 2015, ///< 直播,切流成功(切流可以播放不同画面大小的视频) + EVT_VOD_PLAY_TCP_CONNECT_SUCC = 2016, ///< 点播,TCP 连接成功(SDK 内部事件,不会对外抛出) + EVT_VOD_PLAY_FIRST_VIDEO_PACKET = 2017, ///< 点播,收到首帧数据(SDK 内部事件,不会对外抛出) + EVT_VOD_PLAY_DNS_RESOLVED = 2018, ///< 点播,DNS 解析完成(SDK 内部事件,不会对外抛出) + EVT_VOD_PLAY_SEEK_COMPLETE = 2019, ///< 点播,视频播放 Seek 完成(SDK 内部事件,不会对外抛出) + EVT_VIDEO_DECODER_CACHE_TOO_MANY_FRAMES = 2020, ///< 视频解码器缓存帧数过多,超过40帧(SDK 内部事件,不会对外抛出) + EVT_HW_DECODER_START_SUCC = 2021, ///< 硬解码器启动成功(SDK 内部事件,不会对外抛出) + EVT_SW_DECODER_START_SUCC = 2022, ///< 软解码器启动成功(SDK 内部事件,不会对外抛出) + EVT_AUDIO_JITTER_STATE_FIRST_LOADING = 2023, ///< 音频首次加载(SDK 内部事件,不会对外抛出) + EVT_AUDIO_JITTER_STATE_LOADING = 2024, ///< 音频正在加载(SDK 内部事件,不会对外抛出) + EVT_AUDIO_JITTER_STATE_PLAYING = 2025, ///< 音频正在播放(SDK 内部事件,不会对外抛出) + EVT_AUDIO_JITTER_STATE_FIRST_PLAY = 2026, ///< 音频首次播放(SDK 内部事件,不会对外抛出) + EVT_MIC_START_SUCC = 2027, ///< 麦克风启动成功 + EVT_PLAY_GET_METADATA = 2028, ///< 视频流MetaData事件 + EVT_MIC_RELEASE_SUCC = 2029, ///< 释放麦克风占用 + EVT_AUDIO_DEVICE_ROUTE_CHANGED = 2030, ///< 音频设备的route发生改变,即当前的输入输出设备发生改变,比如耳机被拔出 + EVT_PLAY_GET_FLVSESSIONKEY = 2031, ///< TXLivePlayer 接收到http响应头中的 flvSessionKey 信息 + EVT_AUDIO_SESSION_INTERRUPT = 2032, ///< 
Audio Session Interrupt事件 + + + EVT_ROOM_ENTER = 1018, ///< 进入房间成功 + EVT_ROOM_EXIT = 1019, ///< 退出房间 + EVT_ROOM_USERLIST = 1020, ///< 下发房间成员列表(不包括自己) + EVT_ROOM_NEED_REENTER = 1021, ///< WiFi 切换到4G 会触发断线重连,此时需要重新进入房间(拉取最优的服务器地址) + EVT_ROOM_ENTER_FAILED = 1022, ///< 自己进入房间失败 + EVT_ROOM_USER_ENTER = 1031, ///< 进房通知 + EVT_ROOM_USER_EXIT = 1032, ///< 退房通知 + EVT_ROOM_USER_VIDEO_STATE = 1033, ///< 视频状态位变化通知 + EVT_ROOM_USER_AUDIO_STATE = 1034, ///< 音频状态位变化通知 + + EVT_ROOM_REQUEST_IP_SUCC = 8001, ///< 拉取接口机服务器地址成功 + EVT_ROOM_CONNECT_SUCC = 8002, ///< 连接接口机服务器成功 + EVT_ROOM_REQUEST_AVSEAT_SUCC = 8003, ///< 请求视频位成功 +} TXLiteAVEvent; +// - /Remove From Head + +#endif /* __TXLITEAVCODE_H__ */ diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Info.plist b/src/ios/TXLiteAVSDK_TRTC.framework/Info.plist new file mode 100644 index 0000000..a1fb51f Binary files /dev/null and b/src/ios/TXLiteAVSDK_TRTC.framework/Info.plist differ diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/Modules/module.modulemap b/src/ios/TXLiteAVSDK_TRTC.framework/Modules/module.modulemap new file mode 100644 index 0000000..c5fe93e --- /dev/null +++ b/src/ios/TXLiteAVSDK_TRTC.framework/Modules/module.modulemap @@ -0,0 +1,13 @@ +framework module TXLiteAVSDK_TRTC { + umbrella header "TXLiteAVSDK.h" + exclude header "TXLiteAVEncodedDataProcessingListener.h" + exclude header "TXLiteAVBuffer.h" + exclude header "cpp_interface/ITRTCCloud.h" + exclude header "cpp_interface/ITRTCStatistics.h" + exclude header "cpp_interface/ITXAudioEffectManager.h" + exclude header "cpp_interface/ITXDeviceManager.h" + exclude header "cpp_interface/TRTCCloudCallback.h" + exclude header "cpp_interface/TRTCTypeDef.h" + exclude header "cpp_interface/TXLiteAVCode.h" + export * +} diff --git a/src/ios/TXLiteAVSDK_TRTC.framework/TXLiteAVSDK_TRTC b/src/ios/TXLiteAVSDK_TRTC.framework/TXLiteAVSDK_TRTC new file mode 100644 index 0000000..c53ca3d Binary files /dev/null and b/src/ios/TXLiteAVSDK_TRTC.framework/TXLiteAVSDK_TRTC differ diff --git 
a/src/ios/Toast/UIView+Toast.h b/src/ios/Toast/UIView+Toast.h new file mode 100644 index 0000000..e2b258b --- /dev/null +++ b/src/ios/Toast/UIView+Toast.h @@ -0,0 +1,446 @@ +// +// UIView+Toast.h +// Toast +// +// Copyright (c) 2011-2017 Charles Scalesse. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +#import + +extern const NSString * CSToastPositionTop; +extern const NSString * CSToastPositionCenter; +extern const NSString * CSToastPositionBottom; + +@class CSToastStyle; + +/** + Toast is an Objective-C category that adds toast notifications to the UIView + object class. It is intended to be simple, lightweight, and easy to use. Most + toast notifications can be triggered with a single line of code. + + The `makeToast:` methods create a new view and then display it as toast. + + The `showToast:` methods display any view as toast. 
+ + */ +@interface UIView (Toast) + +/** + Creates and presents a new toast view with a message and displays it with the + default duration and position. Styled using the shared style. + + @param message The message to be displayed + */ +- (void)makeToast:(NSString *)message; + +/** + Creates and presents a new toast view with a message. Duration and position + can be set explicitly. Styled using the shared style. + + @param message The message to be displayed + @param duration The toast duration + @param position The toast's center point. Can be one of the predefined CSToastPosition + constants or a `CGPoint` wrapped in an `NSValue` object. + */ +- (void)makeToast:(NSString *)message + duration:(NSTimeInterval)duration + position:(id)position; + +/** + Creates and presents a new toast view with a message. Duration, position, and + style can be set explicitly. + + @param message The message to be displayed + @param duration The toast duration + @param position The toast's center point. Can be one of the predefined CSToastPosition + constants or a `CGPoint` wrapped in an `NSValue` object. + @param style The style. The shared style will be used when nil + */ +- (void)makeToast:(NSString *)message + duration:(NSTimeInterval)duration + position:(id)position + style:(CSToastStyle *)style; + +/** + Creates and presents a new toast view with a message, title, and image. Duration, + position, and style can be set explicitly. The completion block executes when the + toast view completes. `didTap` will be `YES` if the toast view was dismissed from + a tap. + + @param message The message to be displayed + @param duration The toast duration + @param position The toast's center point. Can be one of the predefined CSToastPosition + constants or a `CGPoint` wrapped in an `NSValue` object. + @param title The title + @param image The image + @param style The style. 
The shared style will be used when nil + @param completion The completion block, executed after the toast view disappears. + didTap will be `YES` if the toast view was dismissed from a tap. + */ +- (void)makeToast:(NSString *)message + duration:(NSTimeInterval)duration + position:(id)position + title:(NSString *)title + image:(UIImage *)image + style:(CSToastStyle *)style + completion:(void(^)(BOOL didTap))completion; + +/** + Creates a new toast view with any combination of message, title, and image. + The look and feel is configured via the style. Unlike the `makeToast:` methods, + this method does not present the toast view automatically. One of the showToast: + methods must be used to present the resulting view. + + @warning if message, title, and image are all nil, this method will return nil. + + @param message The message to be displayed + @param title The title + @param image The image + @param style The style. The shared style will be used when nil + @return The newly created toast view + */ +- (UIView *)toastViewForMessage:(NSString *)message + title:(NSString *)title + image:(UIImage *)image + style:(CSToastStyle *)style; + +/** + Hides the active toast. If there are multiple toasts active in a view, this method + hides the oldest toast (the first of the toasts to have been presented). + + @see `hideAllToasts` to remove all active toasts from a view. + + @warning This method has no effect on activity toasts. Use `hideToastActivity` to + hide activity toasts. + */ +- (void)hideToast; + +/** + Hides an active toast. + + @param toast The active toast view to dismiss. Any toast that is currently being displayed + on the screen is considered active. + + @warning this does not clear a toast view that is currently waiting in the queue. + */ +- (void)hideToast:(UIView *)toast; + +/** + Hides all active toast views and clears the queue. + */ +- (void)hideAllToasts; + +/** + Hides all active toast views, with options to hide activity and clear the queue. 
+ + @param includeActivity If `true`, toast activity will also be hidden. Default is `false`. + @param clearQueue If `true`, removes all toast views from the queue. Default is `true`. + */ +- (void)hideAllToasts:(BOOL)includeActivity clearQueue:(BOOL)clearQueue; + +/** + Removes all toast views from the queue. This has no effect on toast views that are + active. Use `hideAllToasts` to hide the active toasts views and clear the queue. + */ +- (void)clearToastQueue; + +/** + Creates and displays a new toast activity indicator view at a specified position. + + @warning Only one toast activity indicator view can be presented per superview. Subsequent + calls to `makeToastActivity:` will be ignored until hideToastActivity is called. + + @warning `makeToastActivity:` works independently of the showToast: methods. Toast activity + views can be presented and dismissed while toast views are being displayed. `makeToastActivity:` + has no effect on the queueing behavior of the showToast: methods. + + @param position The toast's center point. Can be one of the predefined CSToastPosition + constants or a `CGPoint` wrapped in an `NSValue` object. + */ +- (void)makeToastActivity:(id)position; + +/** + Dismisses the active toast activity indicator view. + */ +- (void)hideToastActivity; + +/** + Displays any view as toast using the default duration and position. + + @param toast The view to be displayed as toast + */ +- (void)showToast:(UIView *)toast; + +/** + Displays any view as toast at a provided position and duration. The completion block + executes when the toast view completes. `didTap` will be `YES` if the toast view was + dismissed from a tap. + + @param toast The view to be displayed as toast + @param duration The notification duration + @param position The toast's center point. Can be one of the predefined CSToastPosition + constants or a `CGPoint` wrapped in an `NSValue` object. + @param completion The completion block, executed after the toast view disappears. 
+ didTap will be `YES` if the toast view was dismissed from a tap. + */ +- (void)showToast:(UIView *)toast + duration:(NSTimeInterval)duration + position:(id)position + completion:(void(^)(BOOL didTap))completion; + +@end + +/** + `CSToastStyle` instances define the look and feel for toast views created via the + `makeToast:` methods as well for toast views created directly with + `toastViewForMessage:title:image:style:`. + + @warning `CSToastStyle` offers relatively simple styling options for the default + toast view. If you require a toast view with more complex UI, it probably makes more + sense to create your own custom UIView subclass and present it with the `showToast:` + methods. + */ +@interface CSToastStyle : NSObject + +/** + The background color. Default is `[UIColor blackColor]` at 80% opacity. + */ +@property (strong, nonatomic) UIColor *backgroundColor; + +/** + The title color. Default is `[UIColor whiteColor]`. + */ +@property (strong, nonatomic) UIColor *titleColor; + +/** + The message color. Default is `[UIColor whiteColor]`. + */ +@property (strong, nonatomic) UIColor *messageColor; + +/** + A percentage value from 0.0 to 1.0, representing the maximum width of the toast + view relative to it's superview. Default is 0.8 (80% of the superview's width). + */ +@property (assign, nonatomic) CGFloat maxWidthPercentage; + +/** + A percentage value from 0.0 to 1.0, representing the maximum height of the toast + view relative to it's superview. Default is 0.8 (80% of the superview's height). + */ +@property (assign, nonatomic) CGFloat maxHeightPercentage; + +/** + The spacing from the horizontal edge of the toast view to the content. When an image + is present, this is also used as the padding between the image and the text. + Default is 10.0. + */ +@property (assign, nonatomic) CGFloat horizontalPadding; + +/** + The spacing from the vertical edge of the toast view to the content. 
When a title + is present, this is also used as the padding between the title and the message. + Default is 10.0. + */ +@property (assign, nonatomic) CGFloat verticalPadding; + +/** + The corner radius. Default is 10.0. + */ +@property (assign, nonatomic) CGFloat cornerRadius; + +/** + The title font. Default is `[UIFont boldSystemFontOfSize:16.0]`. + */ +@property (strong, nonatomic) UIFont *titleFont; + +/** + The message font. Default is `[UIFont systemFontOfSize:16.0]`. + */ +@property (strong, nonatomic) UIFont *messageFont; + +/** + The title text alignment. Default is `NSTextAlignmentLeft`. + */ +@property (assign, nonatomic) NSTextAlignment titleAlignment; + +/** + The message text alignment. Default is `NSTextAlignmentLeft`. + */ +@property (assign, nonatomic) NSTextAlignment messageAlignment; + +/** + The maximum number of lines for the title. The default is 0 (no limit). + */ +@property (assign, nonatomic) NSInteger titleNumberOfLines; + +/** + The maximum number of lines for the message. The default is 0 (no limit). + */ +@property (assign, nonatomic) NSInteger messageNumberOfLines; + +/** + Enable or disable a shadow on the toast view. Default is `NO`. + */ +@property (assign, nonatomic) BOOL displayShadow; + +/** + The shadow color. Default is `[UIColor blackColor]`. + */ +@property (strong, nonatomic) UIColor *shadowColor; + +/** + A value from 0.0 to 1.0, representing the opacity of the shadow. + Default is 0.8 (80% opacity). + */ +@property (assign, nonatomic) CGFloat shadowOpacity; + +/** + The shadow radius. Default is 6.0. + */ +@property (assign, nonatomic) CGFloat shadowRadius; + +/** + The shadow offset. The default is `CGSizeMake(4.0, 4.0)`. + */ +@property (assign, nonatomic) CGSize shadowOffset; + +/** + The image size. The default is `CGSizeMake(80.0, 80.0)`. + */ +@property (assign, nonatomic) CGSize imageSize; + +/** + The size of the toast activity view when `makeToastActivity:` is called. + Default is `CGSizeMake(100.0, 100.0)`. 
+ */ +@property (assign, nonatomic) CGSize activitySize; + +/** + The fade in/out animation duration. Default is 0.2. + */ +@property (assign, nonatomic) NSTimeInterval fadeDuration; + +/** + Creates a new instance of `CSToastStyle` with all the default values set. + */ +- (instancetype)initWithDefaultStyle NS_DESIGNATED_INITIALIZER; + +/** + @warning Only the designated initializer should be used to create + an instance of `CSToastStyle`. + */ +- (instancetype)init NS_UNAVAILABLE; + +@end + +/** + `CSToastManager` provides general configuration options for all toast + notifications. Backed by a singleton instance. + */ +@interface CSToastManager : NSObject + +/** + Sets the shared style on the singleton. The shared style is used whenever + a `makeToast:` method (or `toastViewForMessage:title:image:style:`) is called + with with a nil style. By default, this is set to `CSToastStyle`'s default + style. + + @param sharedStyle the shared style + */ ++ (void)setSharedStyle:(CSToastStyle *)sharedStyle; + +/** + Gets the shared style from the singlton. By default, this is + `CSToastStyle`'s default style. + + @return the shared style + */ ++ (CSToastStyle *)sharedStyle; + +/** + Enables or disables tap to dismiss on toast views. Default is `YES`. + + @param tapToDismissEnabled YES or NO + */ ++ (void)setTapToDismissEnabled:(BOOL)tapToDismissEnabled; + +/** + Returns `YES` if tap to dismiss is enabled, otherwise `NO`. + Default is `YES`. + + @return BOOL YES or NO + */ ++ (BOOL)isTapToDismissEnabled; + +/** + Enables or disables queueing behavior for toast views. When `YES`, + toast views will appear one after the other. When `NO`, multiple Toast + views will appear at the same time (potentially overlapping depending + on their positions). This has no effect on the toast activity view, + which operates independently of normal toast views. Default is `NO`. 
+ + @param queueEnabled YES or NO + */ ++ (void)setQueueEnabled:(BOOL)queueEnabled; + +/** + Returns `YES` if the queue is enabled, otherwise `NO`. + Default is `NO`. + + @return BOOL + */ ++ (BOOL)isQueueEnabled; + +/** + Sets the default duration. Used for the `makeToast:` and + `showToast:` methods that don't require an explicit duration. + Default is 3.0. + + @param duration The toast duration + */ ++ (void)setDefaultDuration:(NSTimeInterval)duration; + +/** + Returns the default duration. Default is 3.0. + + @return duration The toast duration +*/ ++ (NSTimeInterval)defaultDuration; + +/** + Sets the default position. Used for the `makeToast:` and + `showToast:` methods that don't require an explicit position. + Default is `CSToastPositionBottom`. + + @param position The default center point. Can be one of the predefined + CSToastPosition constants or a `CGPoint` wrapped in an `NSValue` object. + */ ++ (void)setDefaultPosition:(id)position; + +/** + Returns the default toast position. Default is `CSToastPositionBottom`. + + @return position The default center point. Will be one of the predefined + CSToastPosition constants or a `CGPoint` wrapped in an `NSValue` object. + */ ++ (id)defaultPosition; + +@end diff --git a/src/ios/Toast/UIView+Toast.m b/src/ios/Toast/UIView+Toast.m new file mode 100644 index 0000000..4e5131a --- /dev/null +++ b/src/ios/Toast/UIView+Toast.m @@ -0,0 +1,586 @@ +// +// UIView+Toast.m +// Toast +// +// Copyright (c) 2011-2017 Charles Scalesse. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +#import "UIView+Toast.h" +#import +#import + +// Positions +NSString * CSToastPositionTop = @"CSToastPositionTop"; +NSString * CSToastPositionCenter = @"CSToastPositionCenter"; +NSString * CSToastPositionBottom = @"CSToastPositionBottom"; + +// Keys for values associated with toast views +static const NSString * CSToastTimerKey = @"CSToastTimerKey"; +static const NSString * CSToastDurationKey = @"CSToastDurationKey"; +static const NSString * CSToastPositionKey = @"CSToastPositionKey"; +static const NSString * CSToastCompletionKey = @"CSToastCompletionKey"; + +// Keys for values associated with self +static const NSString * CSToastActiveKey = @"CSToastActiveKey"; +static const NSString * CSToastActivityViewKey = @"CSToastActivityViewKey"; +static const NSString * CSToastQueueKey = @"CSToastQueueKey"; + +@interface UIView (ToastPrivate) + +/** + These private methods are being prefixed with "cs_" to reduce the likelihood of non-obvious + naming conflicts with other UIView methods. + + @discussion Should the public API also use the cs_ prefix? Technically it should, but it + results in code that is less legible. The current public method names seem unlikely to cause + conflicts so I think we should favor the cleaner API for now. 
+ */ +- (void)cs_showToast:(UIView *)toast duration:(NSTimeInterval)duration position:(id)position; +- (void)cs_hideToast:(UIView *)toast; +- (void)cs_hideToast:(UIView *)toast fromTap:(BOOL)fromTap; +- (void)cs_toastTimerDidFinish:(NSTimer *)timer; +- (void)cs_handleToastTapped:(UITapGestureRecognizer *)recognizer; +- (CGPoint)cs_centerPointForPosition:(id)position withToast:(UIView *)toast; +- (NSMutableArray *)cs_toastQueue; + +@end + +@implementation UIView (Toast) + +#pragma mark - Make Toast Methods + +- (void)makeToast:(NSString *)message { + [self makeToast:message duration:[CSToastManager defaultDuration] position:[CSToastManager defaultPosition] style:nil]; +} + +- (void)makeToast:(NSString *)message duration:(NSTimeInterval)duration position:(id)position { + [self makeToast:message duration:duration position:position style:nil]; +} + +- (void)makeToast:(NSString *)message duration:(NSTimeInterval)duration position:(id)position style:(CSToastStyle *)style { + UIView *toast = [self toastViewForMessage:message title:nil image:nil style:style]; + [self showToast:toast duration:duration position:position completion:nil]; +} + +- (void)makeToast:(NSString *)message duration:(NSTimeInterval)duration position:(id)position title:(NSString *)title image:(UIImage *)image style:(CSToastStyle *)style completion:(void(^)(BOOL didTap))completion { + UIView *toast = [self toastViewForMessage:message title:title image:image style:style]; + [self showToast:toast duration:duration position:position completion:completion]; +} + +#pragma mark - Show Toast Methods + +- (void)showToast:(UIView *)toast { + [self showToast:toast duration:[CSToastManager defaultDuration] position:[CSToastManager defaultPosition] completion:nil]; +} + +- (void)showToast:(UIView *)toast duration:(NSTimeInterval)duration position:(id)position completion:(void(^)(BOOL didTap))completion { + // sanity + if (toast == nil) return; + + // store the completion block on the toast view + 
objc_setAssociatedObject(toast, &CSToastCompletionKey, completion, OBJC_ASSOCIATION_RETAIN_NONATOMIC); + + if ([CSToastManager isQueueEnabled] && [self.cs_activeToasts count] > 0) { + // we're about to queue this toast view so we need to store the duration and position as well + objc_setAssociatedObject(toast, &CSToastDurationKey, @(duration), OBJC_ASSOCIATION_RETAIN_NONATOMIC); + objc_setAssociatedObject(toast, &CSToastPositionKey, position, OBJC_ASSOCIATION_RETAIN_NONATOMIC); + + // enqueue + [self.cs_toastQueue addObject:toast]; + } else { + // present + [self cs_showToast:toast duration:duration position:position]; + } +} + +#pragma mark - Hide Toast Methods + +- (void)hideToast { + [self hideToast:[[self cs_activeToasts] firstObject]]; +} + +- (void)hideToast:(UIView *)toast { + // sanity + if (!toast || ![[self cs_activeToasts] containsObject:toast]) return; + + [self cs_hideToast:toast]; +} + +- (void)hideAllToasts { + [self hideAllToasts:NO clearQueue:YES]; +} + +- (void)hideAllToasts:(BOOL)includeActivity clearQueue:(BOOL)clearQueue { + if (clearQueue) { + [self clearToastQueue]; + } + + for (UIView *toast in [self cs_activeToasts]) { + [self hideToast:toast]; + } + + if (includeActivity) { + [self hideToastActivity]; + } +} + +- (void)clearToastQueue { + [[self cs_toastQueue] removeAllObjects]; +} + +#pragma mark - Private Show/Hide Methods + +- (void)cs_showToast:(UIView *)toast duration:(NSTimeInterval)duration position:(id)position { + toast.center = [self cs_centerPointForPosition:position withToast:toast]; + toast.alpha = 0.0; + + if ([CSToastManager isTapToDismissEnabled]) { + UITapGestureRecognizer *recognizer = [[UITapGestureRecognizer alloc] initWithTarget:self action:@selector(cs_handleToastTapped:)]; + [toast addGestureRecognizer:recognizer]; + toast.userInteractionEnabled = YES; + toast.exclusiveTouch = YES; + } + + [[self cs_activeToasts] addObject:toast]; + + [self addSubview:toast]; + + [UIView animateWithDuration:[[CSToastManager 
sharedStyle] fadeDuration] + delay:0.0 + options:(UIViewAnimationOptionCurveEaseOut | UIViewAnimationOptionAllowUserInteraction) + animations:^{ + toast.alpha = 1.0; + } completion:^(BOOL finished) { + NSTimer *timer = [NSTimer timerWithTimeInterval:duration target:self selector:@selector(cs_toastTimerDidFinish:) userInfo:toast repeats:NO]; + [[NSRunLoop mainRunLoop] addTimer:timer forMode:NSRunLoopCommonModes]; + objc_setAssociatedObject(toast, &CSToastTimerKey, timer, OBJC_ASSOCIATION_RETAIN_NONATOMIC); + }]; +} + +- (void)cs_hideToast:(UIView *)toast { + [self cs_hideToast:toast fromTap:NO]; +} + +- (void)cs_hideToast:(UIView *)toast fromTap:(BOOL)fromTap { + NSTimer *timer = (NSTimer *)objc_getAssociatedObject(toast, &CSToastTimerKey); + [timer invalidate]; + + [UIView animateWithDuration:[[CSToastManager sharedStyle] fadeDuration] + delay:0.0 + options:(UIViewAnimationOptionCurveEaseIn | UIViewAnimationOptionBeginFromCurrentState) + animations:^{ + toast.alpha = 0.0; + } completion:^(BOOL finished) { + [toast removeFromSuperview]; + + // remove + [[self cs_activeToasts] removeObject:toast]; + + // execute the completion block, if necessary + void (^completion)(BOOL didTap) = objc_getAssociatedObject(toast, &CSToastCompletionKey); + if (completion) { + completion(fromTap); + } + + if ([self.cs_toastQueue count] > 0) { + // dequeue + UIView *nextToast = [[self cs_toastQueue] firstObject]; + [[self cs_toastQueue] removeObjectAtIndex:0]; + + // present the next toast + NSTimeInterval duration = [objc_getAssociatedObject(nextToast, &CSToastDurationKey) doubleValue]; + id position = objc_getAssociatedObject(nextToast, &CSToastPositionKey); + [self cs_showToast:nextToast duration:duration position:position]; + } + }]; +} + +#pragma mark - View Construction + +- (UIView *)toastViewForMessage:(NSString *)message title:(NSString *)title image:(UIImage *)image style:(CSToastStyle *)style { + // sanity + if (message == nil && title == nil && image == nil) return nil; + + 
// default to the shared style + if (style == nil) { + style = [CSToastManager sharedStyle]; + } + + // dynamically build a toast view with any combination of message, title, & image + UILabel *messageLabel = nil; + UILabel *titleLabel = nil; + UIImageView *imageView = nil; + + UIView *wrapperView = [[UIView alloc] init]; + wrapperView.autoresizingMask = (UIViewAutoresizingFlexibleLeftMargin | UIViewAutoresizingFlexibleRightMargin | UIViewAutoresizingFlexibleTopMargin | UIViewAutoresizingFlexibleBottomMargin); + wrapperView.layer.cornerRadius = style.cornerRadius; + + if (style.displayShadow) { + wrapperView.layer.shadowColor = style.shadowColor.CGColor; + wrapperView.layer.shadowOpacity = style.shadowOpacity; + wrapperView.layer.shadowRadius = style.shadowRadius; + wrapperView.layer.shadowOffset = style.shadowOffset; + } + + wrapperView.backgroundColor = style.backgroundColor; + + if(image != nil) { + imageView = [[UIImageView alloc] initWithImage:image]; + imageView.contentMode = UIViewContentModeScaleAspectFit; + imageView.frame = CGRectMake(style.horizontalPadding, style.verticalPadding, style.imageSize.width, style.imageSize.height); + } + + CGRect imageRect = CGRectZero; + + if(imageView != nil) { + imageRect.origin.x = style.horizontalPadding; + imageRect.origin.y = style.verticalPadding; + imageRect.size.width = imageView.bounds.size.width; + imageRect.size.height = imageView.bounds.size.height; + } + + if (title != nil) { + titleLabel = [[UILabel alloc] init]; + titleLabel.numberOfLines = style.titleNumberOfLines; + titleLabel.font = style.titleFont; + titleLabel.textAlignment = style.titleAlignment; + titleLabel.lineBreakMode = NSLineBreakByTruncatingTail; + titleLabel.textColor = style.titleColor; + titleLabel.backgroundColor = [UIColor clearColor]; + titleLabel.alpha = 1.0; + titleLabel.text = title; + + // size the title label according to the length of the text + CGSize maxSizeTitle = CGSizeMake((self.bounds.size.width * style.maxWidthPercentage) - 
imageRect.size.width, self.bounds.size.height * style.maxHeightPercentage); + CGSize expectedSizeTitle = [titleLabel sizeThatFits:maxSizeTitle]; + // UILabel can return a size larger than the max size when the number of lines is 1 + expectedSizeTitle = CGSizeMake(MIN(maxSizeTitle.width, expectedSizeTitle.width), MIN(maxSizeTitle.height, expectedSizeTitle.height)); + titleLabel.frame = CGRectMake(0.0, 0.0, expectedSizeTitle.width, expectedSizeTitle.height); + } + + if (message != nil) { + messageLabel = [[UILabel alloc] init]; + messageLabel.numberOfLines = style.messageNumberOfLines; + messageLabel.font = style.messageFont; + messageLabel.textAlignment = style.messageAlignment; + messageLabel.lineBreakMode = NSLineBreakByTruncatingTail; + messageLabel.textColor = style.messageColor; + messageLabel.backgroundColor = [UIColor clearColor]; + messageLabel.alpha = 1.0; + messageLabel.text = message; + + CGSize maxSizeMessage = CGSizeMake((self.bounds.size.width * style.maxWidthPercentage) - imageRect.size.width, self.bounds.size.height * style.maxHeightPercentage); + CGSize expectedSizeMessage = [messageLabel sizeThatFits:maxSizeMessage]; + // UILabel can return a size larger than the max size when the number of lines is 1 + expectedSizeMessage = CGSizeMake(MIN(maxSizeMessage.width, expectedSizeMessage.width), MIN(maxSizeMessage.height, expectedSizeMessage.height)); + messageLabel.frame = CGRectMake(0.0, 0.0, expectedSizeMessage.width, expectedSizeMessage.height); + } + + CGRect titleRect = CGRectZero; + + if(titleLabel != nil) { + titleRect.origin.x = imageRect.origin.x + imageRect.size.width + style.horizontalPadding; + titleRect.origin.y = style.verticalPadding; + titleRect.size.width = titleLabel.bounds.size.width; + titleRect.size.height = titleLabel.bounds.size.height; + } + + CGRect messageRect = CGRectZero; + + if(messageLabel != nil) { + messageRect.origin.x = imageRect.origin.x + imageRect.size.width + style.horizontalPadding; + messageRect.origin.y = 
titleRect.origin.y + titleRect.size.height + style.verticalPadding; + messageRect.size.width = messageLabel.bounds.size.width; + messageRect.size.height = messageLabel.bounds.size.height; + } + + CGFloat longerWidth = MAX(titleRect.size.width, messageRect.size.width); + CGFloat longerX = MAX(titleRect.origin.x, messageRect.origin.x); + + // Wrapper width uses the longerWidth or the image width, whatever is larger. Same logic applies to the wrapper height. + CGFloat wrapperWidth = MAX((imageRect.size.width + (style.horizontalPadding * 2.0)), (longerX + longerWidth + style.horizontalPadding)); + CGFloat wrapperHeight = MAX((messageRect.origin.y + messageRect.size.height + style.verticalPadding), (imageRect.size.height + (style.verticalPadding * 2.0))); + + wrapperView.frame = CGRectMake(0.0, 0.0, wrapperWidth, wrapperHeight); + + if(titleLabel != nil) { + titleLabel.frame = titleRect; + [wrapperView addSubview:titleLabel]; + } + + if(messageLabel != nil) { + messageLabel.frame = messageRect; + [wrapperView addSubview:messageLabel]; + } + + if(imageView != nil) { + [wrapperView addSubview:imageView]; + } + + return wrapperView; +} + +#pragma mark - Storage + +- (NSMutableArray *)cs_activeToasts { + NSMutableArray *cs_activeToasts = objc_getAssociatedObject(self, &CSToastActiveKey); + if (cs_activeToasts == nil) { + cs_activeToasts = [[NSMutableArray alloc] init]; + objc_setAssociatedObject(self, &CSToastActiveKey, cs_activeToasts, OBJC_ASSOCIATION_RETAIN_NONATOMIC); + } + return cs_activeToasts; +} + +- (NSMutableArray *)cs_toastQueue { + NSMutableArray *cs_toastQueue = objc_getAssociatedObject(self, &CSToastQueueKey); + if (cs_toastQueue == nil) { + cs_toastQueue = [[NSMutableArray alloc] init]; + objc_setAssociatedObject(self, &CSToastQueueKey, cs_toastQueue, OBJC_ASSOCIATION_RETAIN_NONATOMIC); + } + return cs_toastQueue; +} + +#pragma mark - Events + +- (void)cs_toastTimerDidFinish:(NSTimer *)timer { + [self cs_hideToast:(UIView *)timer.userInfo]; +} + +- 
(void)cs_handleToastTapped:(UITapGestureRecognizer *)recognizer { + UIView *toast = recognizer.view; + NSTimer *timer = (NSTimer *)objc_getAssociatedObject(toast, &CSToastTimerKey); + [timer invalidate]; + + [self cs_hideToast:toast fromTap:YES]; +} + +#pragma mark - Activity Methods + +- (void)makeToastActivity:(id)position { + // sanity + UIView *existingActivityView = (UIView *)objc_getAssociatedObject(self, &CSToastActivityViewKey); + if (existingActivityView != nil) return; + + CSToastStyle *style = [CSToastManager sharedStyle]; + + UIView *activityView = [[UIView alloc] initWithFrame:CGRectMake(0.0, 0.0, style.activitySize.width, style.activitySize.height)]; + activityView.center = [self cs_centerPointForPosition:position withToast:activityView]; + activityView.backgroundColor = style.backgroundColor; + activityView.alpha = 0.0; + activityView.autoresizingMask = (UIViewAutoresizingFlexibleLeftMargin | UIViewAutoresizingFlexibleRightMargin | UIViewAutoresizingFlexibleTopMargin | UIViewAutoresizingFlexibleBottomMargin); + activityView.layer.cornerRadius = style.cornerRadius; + + if (style.displayShadow) { + activityView.layer.shadowColor = style.shadowColor.CGColor; + activityView.layer.shadowOpacity = style.shadowOpacity; + activityView.layer.shadowRadius = style.shadowRadius; + activityView.layer.shadowOffset = style.shadowOffset; + } + + UIActivityIndicatorView *activityIndicatorView = [[UIActivityIndicatorView alloc] initWithActivityIndicatorStyle:UIActivityIndicatorViewStyleWhiteLarge]; + activityIndicatorView.center = CGPointMake(activityView.bounds.size.width / 2, activityView.bounds.size.height / 2); + [activityView addSubview:activityIndicatorView]; + [activityIndicatorView startAnimating]; + + // associate the activity view with self + objc_setAssociatedObject (self, &CSToastActivityViewKey, activityView, OBJC_ASSOCIATION_RETAIN_NONATOMIC); + + [self addSubview:activityView]; + + [UIView animateWithDuration:style.fadeDuration + delay:0.0 + 
options:UIViewAnimationOptionCurveEaseOut + animations:^{ + activityView.alpha = 1.0; + } completion:nil]; +} + +- (void)hideToastActivity { + UIView *existingActivityView = (UIView *)objc_getAssociatedObject(self, &CSToastActivityViewKey); + if (existingActivityView != nil) { + [UIView animateWithDuration:[[CSToastManager sharedStyle] fadeDuration] + delay:0.0 + options:(UIViewAnimationOptionCurveEaseIn | UIViewAnimationOptionBeginFromCurrentState) + animations:^{ + existingActivityView.alpha = 0.0; + } completion:^(BOOL finished) { + [existingActivityView removeFromSuperview]; + objc_setAssociatedObject (self, &CSToastActivityViewKey, nil, OBJC_ASSOCIATION_RETAIN_NONATOMIC); + }]; + } +} + +#pragma mark - Helpers + +- (CGPoint)cs_centerPointForPosition:(id)point withToast:(UIView *)toast { + CSToastStyle *style = [CSToastManager sharedStyle]; + + UIEdgeInsets safeInsets = UIEdgeInsetsZero; + if (@available(iOS 11.0, *)) { + safeInsets = self.safeAreaInsets; + } + + CGFloat topPadding = style.verticalPadding + safeInsets.top; + CGFloat bottomPadding = style.verticalPadding + safeInsets.bottom; + + if([point isKindOfClass:[NSString class]]) { + if([point caseInsensitiveCompare:CSToastPositionTop] == NSOrderedSame) { + return CGPointMake(self.bounds.size.width / 2.0, (toast.frame.size.height / 2.0) + topPadding); + } else if([point caseInsensitiveCompare:CSToastPositionCenter] == NSOrderedSame) { + return CGPointMake(self.bounds.size.width / 2.0, self.bounds.size.height / 2.0); + } + } else if ([point isKindOfClass:[NSValue class]]) { + return [point CGPointValue]; + } + + // default to bottom + return CGPointMake(self.bounds.size.width / 2.0, (self.bounds.size.height - (toast.frame.size.height / 2.0)) - bottomPadding); +} + +@end + +@implementation CSToastStyle + +#pragma mark - Constructors + +- (instancetype)initWithDefaultStyle { + self = [super init]; + if (self) { + self.backgroundColor = [[UIColor blackColor] colorWithAlphaComponent:0.8]; + self.titleColor = 
[UIColor whiteColor]; + self.messageColor = [UIColor whiteColor]; + self.maxWidthPercentage = 0.8; + self.maxHeightPercentage = 0.8; + self.horizontalPadding = 10.0; + self.verticalPadding = 10.0; + self.cornerRadius = 10.0; + self.titleFont = [UIFont boldSystemFontOfSize:16.0]; + self.messageFont = [UIFont systemFontOfSize:16.0]; + self.titleAlignment = NSTextAlignmentLeft; + self.messageAlignment = NSTextAlignmentLeft; + self.titleNumberOfLines = 0; + self.messageNumberOfLines = 0; + self.displayShadow = NO; + self.shadowOpacity = 0.8; + self.shadowRadius = 6.0; + self.shadowOffset = CGSizeMake(4.0, 4.0); + self.imageSize = CGSizeMake(80.0, 80.0); + self.activitySize = CGSizeMake(100.0, 100.0); + self.fadeDuration = 0.2; + } + return self; +} + +- (void)setMaxWidthPercentage:(CGFloat)maxWidthPercentage { + _maxWidthPercentage = MAX(MIN(maxWidthPercentage, 1.0), 0.0); +} + +- (void)setMaxHeightPercentage:(CGFloat)maxHeightPercentage { + _maxHeightPercentage = MAX(MIN(maxHeightPercentage, 1.0), 0.0); +} + +- (instancetype)init NS_UNAVAILABLE { + return nil; +} + +@end + +@interface CSToastManager () + +@property (strong, nonatomic) CSToastStyle *sharedStyle; +@property (assign, nonatomic, getter=isTapToDismissEnabled) BOOL tapToDismissEnabled; +@property (assign, nonatomic, getter=isQueueEnabled) BOOL queueEnabled; +@property (assign, nonatomic) NSTimeInterval defaultDuration; +@property (strong, nonatomic) id defaultPosition; + +@end + +@implementation CSToastManager + +#pragma mark - Constructors + ++ (instancetype)sharedManager { + static CSToastManager *_sharedManager = nil; + static dispatch_once_t oncePredicate; + dispatch_once(&oncePredicate, ^{ + _sharedManager = [[self alloc] init]; + }); + + return _sharedManager; +} + +- (instancetype)init { + self = [super init]; + if (self) { + self.sharedStyle = [[CSToastStyle alloc] initWithDefaultStyle]; + self.tapToDismissEnabled = YES; + self.queueEnabled = NO; + self.defaultDuration = 3.0; + self.defaultPosition 
= CSToastPositionBottom; + } + return self; +} + +#pragma mark - Singleton Methods + ++ (void)setSharedStyle:(CSToastStyle *)sharedStyle { + [[self sharedManager] setSharedStyle:sharedStyle]; +} + ++ (CSToastStyle *)sharedStyle { + return [[self sharedManager] sharedStyle]; +} + ++ (void)setTapToDismissEnabled:(BOOL)tapToDismissEnabled { + [[self sharedManager] setTapToDismissEnabled:tapToDismissEnabled]; +} + ++ (BOOL)isTapToDismissEnabled { + return [[self sharedManager] isTapToDismissEnabled]; +} + ++ (void)setQueueEnabled:(BOOL)queueEnabled { + [[self sharedManager] setQueueEnabled:queueEnabled]; +} + ++ (BOOL)isQueueEnabled { + return [[self sharedManager] isQueueEnabled]; +} + ++ (void)setDefaultDuration:(NSTimeInterval)duration { + [[self sharedManager] setDefaultDuration:duration]; +} + ++ (NSTimeInterval)defaultDuration { + return [[self sharedManager] defaultDuration]; +} + ++ (void)setDefaultPosition:(id)position { + if ([position isKindOfClass:[NSString class]] || [position isKindOfClass:[NSValue class]]) { + [[self sharedManager] setDefaultPosition:position]; + } +} + ++ (id)defaultPosition { + return [[self sharedManager] defaultPosition]; +} + +@end diff --git a/src/ios/Trtc/TCLiveConfigDefine.h b/src/ios/Trtc/TCLiveConfigDefine.h deleted file mode 100644 index 0b5a783..0000000 --- a/src/ios/Trtc/TCLiveConfigDefine.h +++ /dev/null @@ -1,15 +0,0 @@ -// -// TCLiveConfigDefine.h -// TRTC -// -// Created by xiaowei li on 2018/6/22. -// Copyright © 2018年 Tencent. All rights reserved. 
-// - -#ifndef TCLiveConfigDefine_h -#define TCLiveConfigDefine_h - -#define Login_Info_Url @"https://xzb.qcloud.com/webrtc/weapp/webrtc_room/get_login_info" //业务后台登录信息拉取,可替换为自己的业务后台 -#define AuthBuffer_Info_Url @"https://xzb.qcloud.com/webrtc/weapp/webrtc_room/get_privatemapkey" -#define Default_Role @"ed640" //用户角色配置画面参数,可在控制台进行配置 https://cloud.tencent.com/document/product/647/17308 -#endif /* TCLiveConfigDefine_h */ diff --git a/src/ios/Trtc/TCLiveJoinRoomViewController.h b/src/ios/Trtc/TCLiveJoinRoomViewController.h deleted file mode 100644 index b5a1a98..0000000 --- a/src/ios/Trtc/TCLiveJoinRoomViewController.h +++ /dev/null @@ -1,17 +0,0 @@ -// -// ViewController.h -// TRTC -// -// Created by Tencent on 2018/5/31. -// Copyright © 2018年 Tencent. All rights reserved. -// - -#import - -@interface TCLiveJoinRoomViewController : UIViewController - -@property (nonatomic) NSString *defaultRoomId; - -@end - - diff --git a/src/ios/Trtc/TCLiveJoinRoomViewController.m b/src/ios/Trtc/TCLiveJoinRoomViewController.m deleted file mode 100644 index d5ec8ee..0000000 --- a/src/ios/Trtc/TCLiveJoinRoomViewController.m +++ /dev/null @@ -1,112 +0,0 @@ -// -// ViewController.m -// TRTC -// -// Created by Tencent on 2018/5/31. -// Copyright © 2018年 Tencent. All rights reserved. -// - -#import "TCLiveJoinRoomViewController.h" -#import "UIColorEX.h" -#import "TCLiveRoomViewController.h" -#import "UIToastView.h" -#import "TCLiveConfigDefine.h" - -@interface TCLiveJoinRoomViewController () -@property(nonatomic,strong)UITextField *inputTextField; -@property(nonatomic,strong)UIButton *joinRoomBtn; -@property(nonatomic,strong) UIView *botoomLine; -@property(nonatomic,strong) UIImageView *bgImageView; -@end - -@implementation TCLiveJoinRoomViewController - -- (void)viewDidLoad { - [super viewDidLoad]; - // Do any additional setup after loading the view. 
- self.title = @"视频通话"; - - [self.navigationController.navigationBar setTranslucent:NO]; - - [self.view addSubview:self.bgImageView]; - - [self.view addSubview:self.inputTextField]; - - [self.view addSubview:self.joinRoomBtn]; - -} --(void)viewWillAppear:(BOOL)animated{ - if (self.defaultRoomId) { - self.inputTextField.text = self.defaultRoomId; - } - [super viewWillAppear:animated]; - [self.navigationController.navigationBar setTitleTextAttributes:@{NSForegroundColorAttributeName:[UIColor blackColor]}]; -} --(UIImageView *)bgImageView{ - if (!_bgImageView) { - _bgImageView = [[UIImageView alloc] initWithFrame:self.view.bounds]; - [_bgImageView setImage:[UIImage imageNamed:@"bg.png"]]; - _bgImageView.userInteractionEnabled = YES; - } - return _bgImageView; -} - -- (UIButton *)joinRoomBtn{ - if (!_joinRoomBtn) { - _joinRoomBtn = [[UIButton alloc] initWithFrame:CGRectMake(20, _inputTextField.frame.size.height + _inputTextField.frame.origin.y + 50, self.view.frame.size.width - 40, 50)]; - _joinRoomBtn.layer.cornerRadius = 25; - [_joinRoomBtn setTitle:@"加入房间" forState:UIControlStateNormal]; - _joinRoomBtn.backgroundColor = [UIColor colorWithRGBHex:0x1472fc]; - [_joinRoomBtn addTarget:self action:@selector(joinRoomBtnClick:) forControlEvents:UIControlEventTouchUpInside]; - [_joinRoomBtn setTitleColor:[UIColor whiteColor] forState:UIControlStateNormal]; - } - return _joinRoomBtn; -} -- (UITextField *)inputTextField{ - if (!_inputTextField) { - _inputTextField = [[UITextField alloc] initWithFrame:CGRectMake(5, 20 , self.view.frame.size.width-10, 40)]; - _inputTextField.delegate = self; - _inputTextField.backgroundColor= [UIColor clearColor]; - NSMutableAttributedString *str = [[NSMutableAttributedString alloc] initWithString:@"请输入房间号码" attributes:@{NSForegroundColorAttributeName:[UIColor grayColor]}]; - _inputTextField.attributedPlaceholder = str; - _inputTextField.textColor = [UIColor grayColor]; - _inputTextField.returnKeyType = UIReturnKeyDone; - 
_inputTextField.keyboardType = UIKeyboardTypeNumberPad; - - _botoomLine = [[UIView alloc] initWithFrame:CGRectMake(0, 40-1, _inputTextField.frame.size.width, 1)]; - _botoomLine.backgroundColor = [UIColor colorWithRGBHex:0x1472fc]; - [_inputTextField addSubview:_botoomLine]; - } - return _inputTextField; -} - -- (void)joinRoomBtnClick:(UIButton *)sender{ - if (self.inputTextField.text.length > 0) { - TCLiveRoomViewController *vc = [[TCLiveRoomViewController alloc] initWithRoomID:self.inputTextField.text role:Default_Role]; - [self.navigationController pushViewController:vc animated:YES]; - } - else{ - [[UIToastView getInstance] showToastWithMessage:@"请输入房间号" toastMode:UIToastShowMode_fail]; - } -} - -// 自动跳转 -//- (void)autoNav:(NSString *)roomId { -// TCLiveRoomViewController *vc = [[TCLiveRoomViewController alloc] initWithRoomID:roomId role:Default_Role]; -// [self.navigationController pushViewController:vc animated:YES]; -//} - - -#pragma mark - UITextFieldDelegate -- (BOOL)textFieldShouldReturn:(UITextField *)textField{ - [textField resignFirstResponder]; - return YES; -} - -- (BOOL)textField:(UITextField *)textField shouldChangeCharactersInRange:(NSRange)range replacementString:(NSString *)string -{ - NSCharacterSet *cs = [[NSCharacterSet characterSetWithCharactersInString:@"0123456789"] invertedSet]; - NSString *filtered = [[string componentsSeparatedByCharactersInSet:cs] componentsJoinedByString:@""]; - return [string isEqualToString:filtered]; -} -@end diff --git a/src/ios/Trtc/TCLiveRequestManager.h b/src/ios/Trtc/TCLiveRequestManager.h deleted file mode 100644 index 47ebda8..0000000 --- a/src/ios/Trtc/TCLiveRequestManager.h +++ /dev/null @@ -1,21 +0,0 @@ -// -// TCLiveRequestManager.h -// TRTC -// -// Created by Tencent on 2018/5/31. -// Copyright © 2018年 Tencent. All rights reserved. 
-// - -#import - -typedef void (^LiveLoginInfoBlock)(int code); -typedef void (^LiveAuthBufferBlock)(NSDictionary *info); -@interface TCLiveRequestManager : NSObject -@property(nonatomic,assign)int sdkAppID; //app标识,可在实时音视频控制台(https://console.cloud.tencent.com/rav)创建自己的应用生成 -@property(nonatomic,assign)int accountType; //登录实时音视频应用的帐号类型,在控制台创建应用后分配 -@property(nonatomic,strong)NSString *userID; //用户id标识(可由业务后台自己管理) -@property(nonatomic,strong)NSString *userSig; //用于用户鉴权,生成方法https://cloud.tencent.com/document/product/647/17275 (可由业务后台自己管理) -+ (TCLiveRequestManager *)getInstance; -- (void)requestLoginInfo:(LiveLoginInfoBlock)block; -- (void)reqGetAuthBufferInfoWithParams:(NSDictionary *)params block:(LiveAuthBufferBlock)block; -@end diff --git a/src/ios/Trtc/TCLiveRequestManager.m b/src/ios/Trtc/TCLiveRequestManager.m deleted file mode 100644 index 51b7489..0000000 --- a/src/ios/Trtc/TCLiveRequestManager.m +++ /dev/null @@ -1,98 +0,0 @@ -// -// TCLiveRequestManager.m -// TRTC -// -// Created by Tencent on 2018/5/31. -// Copyright © 2018年 Tencent. All rights reserved. 
-// - -#import "TCLiveRequestManager.h" -#import "UIToastView.h" -#import "TCLiveConfigDefine.h" -@implementation TCLiveRequestManager -+ (TCLiveRequestManager *)getInstance{ - static TCLiveRequestManager *singleTon = nil; - static dispatch_once_t onceToken; - dispatch_once(&onceToken, ^{ - singleTon = [TCLiveRequestManager new]; - }); - return singleTon; -} - -- (void)requestLoginInfo:(LiveLoginInfoBlock)block{ - NSString *user = [[NSUserDefaults standardUserDefaults] objectForKey:@"TCLIVE_USER"]; - if (user.length == 0) { - user = @""; - } - NSDictionary *params = @{@"userID":user}; - NSMutableURLRequest *request = [self getSendPostRequest:Login_Info_Url body:params];//加备注 - - NSURLSessionConfiguration *sessionConfig = [NSURLSessionConfiguration defaultSessionConfiguration]; - [sessionConfig setTimeoutIntervalForRequest:30]; - - __weak TCLiveRequestManager *weakself = self; - NSURLSession *session = [NSURLSession sessionWithConfiguration:sessionConfig]; - NSURLSessionDataTask *task = [session dataTaskWithRequest:request completionHandler:^(NSData * _Nullable data, NSURLResponse * _Nullable response, NSError * _Nullable error) { - if (error|| data == nil) { - block(-1); - [[UIToastView getInstance] showToastWithMessage:@"登录请求失败" toastMode:UIToastShowMode_fail]; - } - else{ - //无error data解不出 - NSDictionary *info = [NSJSONSerialization JSONObjectWithData:data options:0 error:nil]; - if (info) { - weakself.sdkAppID = [info[@"sdkAppID"] intValue]; - weakself.accountType = [info[@"accountType"]intValue]; - weakself.userID = info[@"userID"]; - weakself.userSig = info[@"userSig"]; - [[NSUserDefaults standardUserDefaults] setObject:info[@"userID"] forKey:@"TCLIVE_USER"]; - block(0); - } - else{ - block(-1); - [[UIToastView getInstance] showToastWithMessage:@"登录信息解包失败" toastMode:UIToastShowMode_fail]; - } - } - }]; - [task resume]; -} - --(void)reqGetAuthBufferInfoWithParams:(NSDictionary *)params block:(LiveAuthBufferBlock)block{ - NSMutableURLRequest *request = [self 
getSendPostRequest:AuthBuffer_Info_Url body:params]; - - NSURLSessionConfiguration *sessionConfig = [NSURLSessionConfiguration defaultSessionConfiguration]; - [sessionConfig setTimeoutIntervalForRequest:30]; - - NSURLSession *session = [NSURLSession sessionWithConfiguration:sessionConfig]; - NSURLSessionDataTask *task = [session dataTaskWithRequest:request completionHandler:^(NSData * _Nullable data, NSURLResponse * _Nullable response, NSError * _Nullable error) { - if (error|| data == nil) { - [[UIToastView getInstance] showToastWithMessage:@"获取authBuffer请求失败" toastMode:UIToastShowMode_fail]; - } - else{ - //无error data解不出 - NSDictionary *info = [NSJSONSerialization JSONObjectWithData:data options:0 error:nil]; - if (info) { - block(info); - } - else{ - [[UIToastView getInstance] showToastWithMessage:@"获取authBuffer解包失败" toastMode:UIToastShowMode_fail]; - } - } - }]; - [task resume]; -} - -- (NSMutableURLRequest *)getSendPostRequest:(NSString *)url body:(NSDictionary *)body{ - - NSData *dataBody = [NSJSONSerialization dataWithJSONObject:body options:NSJSONWritingPrettyPrinted error:nil]; - NSMutableURLRequest *request = [NSMutableURLRequest requestWithURL:[NSURL URLWithString:url] cachePolicy:NSURLRequestReloadIgnoringLocalAndRemoteCacheData timeoutInterval:10]; - [request setValue:[NSString stringWithFormat:@"%ld", (long) [dataBody length]] forHTTPHeaderField:@"Content-Length"]; - [request setHTTPMethod:@"POST"]; - [request setValue:@"application/json; charset=UTF-8" forHTTPHeaderField:@"Content-Type"]; - [request setValue:@"gzip" forHTTPHeaderField:@"Accept-Encoding"]; - [request setHTTPBody:dataBody]; - - return request; -} -@end - diff --git a/src/ios/Trtc/TCLiveRoomVC/TCLiveChatTableView.h b/src/ios/Trtc/TCLiveRoomVC/TCLiveChatTableView.h deleted file mode 100644 index 383bdb1..0000000 --- a/src/ios/Trtc/TCLiveRoomVC/TCLiveChatTableView.h +++ /dev/null @@ -1,17 +0,0 @@ -// -// TCLiveChatTableView.h -// TRTC -// -// Created by Tencent on 2018/6/3. 
-// Copyright © 2018年 Tencent. All rights reserved. -// - -#import -#import - -@interface TCLiveChatTableView : UITableView -//添加信息到聊天列表 -- (void)addChatMessage:(NSArray *)msgList withContentColor:(UIColor *)contentColor nickColor:(UIColor *)nickColor; -//发送信息 -- (void)sendMessage:(NSString *)message; -@end diff --git a/src/ios/Trtc/TCLiveRoomVC/TCLiveChatTableView.m b/src/ios/Trtc/TCLiveRoomVC/TCLiveChatTableView.m deleted file mode 100644 index eb15ce5..0000000 --- a/src/ios/Trtc/TCLiveRoomVC/TCLiveChatTableView.m +++ /dev/null @@ -1,177 +0,0 @@ -// -// TCLiveChatTableView.m -// TRTC -// -// Created by Tencent on 2018/6/3. -// Copyright © 2018年 Tencent. All rights reserved. -// - -#import -#import -#import -#import "TCLiveChatTableView.h" -#import "TCLiveChatTableViewCell.h" -#import "UIColorEX.h" -#import "UIToastView.h" -#import "TCLiveRequestManager.h" - -@interface TCLiveChatTableView () -@property(nonatomic,strong)NSMutableArray *chatMessageList; -@property(nonatomic,strong)UIColor *contentColor; -@property(nonatomic,strong)UIColor *nickColor; -@end - -@implementation TCLiveChatTableView - -- (instancetype)initWithFrame:(CGRect)frame style:(UITableViewStyle)style{ - if (self = [super initWithFrame:frame style:style]) { - _chatMessageList = [[NSMutableArray alloc] initWithCapacity:1]; - self.delegate = self; - self.dataSource = self; - self.separatorStyle = UITableViewCellSeparatorStyleNone; - self.transform = CGAffineTransformMakeScale(1, -1); - self.layer.cornerRadius = 4; - self.layer.masksToBounds = YES; - self.scrollEnabled = NO; - //设置消息监听 - [[[ILiveSDK getInstance] getTIMManager] setMessageListener:self]; - } - return self; -} - -//接收消息 --(void)onNewMessage:(NSArray *)msgs{ - [self addChatMessage:msgs withContentColor:nil nickColor:nil]; -} -//消息处理 -- (void)addChatMessage:(NSArray *)msgList withContentColor:(UIColor *)contentColor nickColor:(UIColor *)nickColor{ - self.contentColor = contentColor; - self.nickColor = nickColor; - for (id item in 
msgList) { - [self.chatMessageList insertObject:item atIndex:0]; - } - //过滤非文本消息 - NSMutableArray *tempArr = [NSMutableArray array]; - for (int i = 0; i < msgList.count;i++) { - TIMMessage *msg = msgList[i]; - if (![self isTextMsg:msg]) { - [tempArr addObject:msg]; - } - } - [self.chatMessageList removeObjectsInArray:tempArr]; - - dispatch_async(dispatch_get_main_queue(), ^{ - [self reloadData]; - }); -} -- (BOOL)isTextMsg:(TIMMessage *)msg { - TIMOfflinePushInfo *info = msg.getOfflinePushInfo; - if ([info.ext hasPrefix:@"TEXT"]) { - return YES; - } - int count = [msg elemCount]; - for(int i = 0; i < count; i++) { - TIMElem *elem = [msg getElem:i]; - if ([elem isKindOfClass:[TIMCustomElem class]]){ - if ([((TIMCustomElem*)elem).ext hasPrefix:@"TEXT"]) { - return YES; - } - } - else if ([elem isKindOfClass:[TIMTextElem class]]){ - return YES; - } - } - return NO; -} -//发送消息 -- (void)sendMessage:(NSString *)message{ - //消息组装 - TIMMessage *msge = [[TIMMessage alloc] init]; - TIMCustomElem *textElem = [[TIMCustomElem alloc] init]; - textElem.data = [message dataUsingEncoding:NSUTF8StringEncoding]; - NSDictionary *descDic = @{@"nickName":[TCLiveRequestManager getInstance].userID}; - NSString *desc = [[NSString alloc] initWithData:[NSJSONSerialization dataWithJSONObject:descDic options:NSJSONReadingAllowFragments error:nil] encoding:NSUTF8StringEncoding]; - textElem.desc = desc; - textElem.ext = @"TEXT"; - [msge addElem:textElem]; - //调用发送接口 - [[ILiveRoomManager getInstance] sendGroupMessage:msge succ:^{ - NSLog(@"send message succ"); - } failed:^(NSString *module, int errId, NSString *errMsg) { - NSLog(@"send message fail"); - [[UIToastView getInstance] showToastWithMessage:@"发送消息失败" toastMode:UIToastShowMode_fail]; - }]; - [self addChatMessage:@[msge] withContentColor:nil nickColor:nil]; -} -#pragma mark - UITableViewDelegate - --(CGFloat)tableView:(UITableView *)tableView heightForRowAtIndexPath:(NSIndexPath *)indexPath{ - TCLiveChatTableViewCell *cell = [self 
createChatTableViewCell:tableView withIndexPath:indexPath]; - return cell.cellHeight; -} -#pragma mark - UITableViewDataSource - --(NSInteger)tableView:(UITableView *)tableView numberOfRowsInSection:(NSInteger)section{ - return self.chatMessageList.count; -} - --(UITableViewCell *)tableView:(UITableView *)tableView cellForRowAtIndexPath:(NSIndexPath *)indexPath{ - TCLiveChatTableViewCell *cell = [self createChatTableViewCell:tableView withIndexPath:indexPath]; - return cell; -} -//创建消息cell -- (TCLiveChatTableViewCell *)createChatTableViewCell:(UITableView *)tableView withIndexPath:(NSIndexPath *)indexPath{ - TCLiveChatTableViewCell *cell = [tableView dequeueReusableCellWithIdentifier:@"ChatTableviewCell"]; - if (!cell) { - cell = [[TCLiveChatTableViewCell alloc] initWithStyle:UITableViewCellStyleDefault reuseIdentifier:@"ChatTableviewCell"]; - cell.selectionStyle = UITableViewCellSelectionStyleNone; - cell.backgroundColor = [UIColor clearColor]; - cell.contentView.transform = CGAffineTransformMakeScale (1,-1); - } - TIMMessage *msg = self.chatMessageList[indexPath.row]; - int count = [msg elemCount]; - for(int i = 0; i < count; i++) { - TIMElem *elem = [msg getElem:i]; - //收到消息展示 - NSMutableAttributedString *msgInfo = [[NSMutableAttributedString alloc] initWithString:@""]; - if([elem isKindOfClass:[TIMTextElem class]]){ - msgInfo = [self getContentWithNick:msg.sender andContentTex:((TIMTextElem *)elem).text]; - [cell setModel:msgInfo]; - break; - } - else if ([elem isKindOfClass:[TIMCustomElem class]]){ - NSString *nick = msg.sender; - NSString *dataStr = [[NSString alloc] initWithData:((TIMCustomElem *)elem).data encoding:NSUTF8StringEncoding]; - NSDictionary *descDic = [NSJSONSerialization JSONObjectWithData:[((TIMCustomElem *)elem).desc dataUsingEncoding:NSUTF8StringEncoding] options:NSJSONReadingAllowFragments error:nil]; - NSString *nickNmae = descDic[@"nickName"]; - if (nickNmae.length > 0) { - nick = nickNmae; - } - msgInfo = [self getContentWithNick:nick 
andContentTex:dataStr]; - [cell setModel:msgInfo]; - break; - } - } - return cell; -} -- (NSMutableAttributedString *)getContentWithNick:(NSString *)nick andContentTex:(NSString *)contentText{ - NSString *content = [NSString stringWithFormat:@"%@:%@",nick, contentText]; - NSMutableAttributedString *msgInfo = [[NSMutableAttributedString alloc] initWithString:content]; - UIColor *contentColor = [UIColor whiteColor]; - UIColor *nickColor = [UIColor colorWithRGBHex:0xFF4081]; - if (self.contentColor) { - contentColor = self.contentColor; - } - if(self.nickColor){ - nickColor = self.nickColor; - } - [msgInfo addAttribute:NSForegroundColorAttributeName value:contentColor range:[content rangeOfString:contentText]]; - [msgInfo addAttribute:NSForegroundColorAttributeName value:nickColor range:[content rangeOfString:nick]]; - - return msgInfo; -} -//点击事件透传 --(UIView *)hitTest:(CGPoint)point withEvent:(UIEvent *)event{ - return nil; -} -@end diff --git a/src/ios/Trtc/TCLiveRoomVC/TCLiveChatTableViewCell.h b/src/ios/Trtc/TCLiveRoomVC/TCLiveChatTableViewCell.h deleted file mode 100644 index 8140059..0000000 --- a/src/ios/Trtc/TCLiveRoomVC/TCLiveChatTableViewCell.h +++ /dev/null @@ -1,14 +0,0 @@ -// -// TCLiveChatTableViewCell.h -// TRTC -// -// Created by Tencent on 2018/6/8. -// Copyright © 2018年 Tencent. All rights reserved. -// - -#import - -@interface TCLiveChatTableViewCell : UITableViewCell -@property(nonatomic,assign)CGFloat cellHeight; --(void)setModel:(NSMutableAttributedString *)model; -@end diff --git a/src/ios/Trtc/TCLiveRoomVC/TCLiveChatTableViewCell.m b/src/ios/Trtc/TCLiveRoomVC/TCLiveChatTableViewCell.m deleted file mode 100644 index 79bebde..0000000 --- a/src/ios/Trtc/TCLiveRoomVC/TCLiveChatTableViewCell.m +++ /dev/null @@ -1,59 +0,0 @@ -// -// TCLiveChatTableViewCell.m -// TRTC -// -// Created by Tencent on 2018/6/8. -// Copyright © 2018年 Tencent. All rights reserved. 
-// - - -#import "TCLiveChatTableViewCell.h" - -@interface TCLiveChatTableViewCell () -@property(nonatomic,strong) UILabel *contentLabel; -@property(nonatomic,strong) UIView *backMaskView; -@end - -@implementation TCLiveChatTableViewCell - -- (instancetype)initWithStyle:(UITableViewCellStyle)style reuseIdentifier:(NSString *)reuseIdentifier{ - if (self = [super initWithStyle:style reuseIdentifier:reuseIdentifier]) { - - [self.contentView addSubview:self.backMaskView]; - [_backMaskView addSubview:self.contentLabel]; - } - return self; -} - --(UILabel *)contentLabel{ - if (!_contentLabel) { - _contentLabel = [[UILabel alloc] initWithFrame:CGRectZero]; - _contentLabel.backgroundColor = [UIColor clearColor]; - _contentLabel.textAlignment = NSTextAlignmentLeft; - _contentLabel.font = [UIFont systemFontOfSize:13]; - _contentLabel.textColor = [UIColor whiteColor]; - _contentLabel.numberOfLines = 0; - } - return _contentLabel; -} - --(UIView *)backMaskView{ - if (!_backMaskView) { - _backMaskView = [[UIView alloc] initWithFrame:CGRectZero]; - _backMaskView.backgroundColor = [[UIColor blackColor] colorWithAlphaComponent:0.5]; - _backMaskView.layer.cornerRadius = 10; - _backMaskView.layer.masksToBounds = YES; - } - return _backMaskView; -} - --(void)setModel:(NSMutableAttributedString *)model{ - _contentLabel.attributedText = model; - CGSize size = [_contentLabel sizeThatFits:CGSizeMake(230, 10000)]; - - _contentLabel.frame = CGRectMake(5, 5, size.width, size.height ); - _backMaskView.frame = CGRectMake(5, 5, size.width + 10, size.height + 10); - self.cellHeight = size.height+20; -} - -@end diff --git a/src/ios/Trtc/TCLiveRoomVC/TCLiveRoomViewController.h b/src/ios/Trtc/TCLiveRoomVC/TCLiveRoomViewController.h deleted file mode 100644 index 4e33f53..0000000 --- a/src/ios/Trtc/TCLiveRoomVC/TCLiveRoomViewController.h +++ /dev/null @@ -1,15 +0,0 @@ -// -// TCLiveRoomViewController.h -// TRTC -// -// Created by Tencent on 2018/5/31. -// Copyright © 2018年 Tencent. 
All rights reserved. -// - -#import -#import "TCLiveVideoLayoutView.h" - -@interface TCLiveRoomViewController : UIViewController -//传入roomid(房间号)进入指定房间 设置role配置画面参数 --(instancetype)initWithRoomID:(NSString *)roomid role:(NSString *)role; -@end diff --git a/src/ios/Trtc/TCLiveRoomVC/TCLiveRoomViewController.m b/src/ios/Trtc/TCLiveRoomVC/TCLiveRoomViewController.m deleted file mode 100644 index ccd07de..0000000 --- a/src/ios/Trtc/TCLiveRoomVC/TCLiveRoomViewController.m +++ /dev/null @@ -1,275 +0,0 @@ -// -// TCLiveRoomViewController.m -// TRTC -// -// Created by Tencent on 2018/5/31. -// Copyright © 2018年 Tencent. All rights reserved. -// - -#import "TCLiveRoomViewController.h" -#import -#import -#import -#import -#import -#import -#import -#import "TCLiveRequestManager.h" -#import "TCLiveVideoControlBar.h" -#import "TCLiveChatTableView.h" -#import "UIToastView.h" - -#define LIVE_VIEW_HEIGHT 370 -#define LIVE_CONTROL_BAR_HEIGHT 70 -#define LIVE_INPUTTEXTFIELD_HEIGHT 40 - -@interface TCLiveRoomViewController () -@property(nonatomic,strong) TCLiveVideoLayoutView *videoLayoutView; -@property(nonatomic,strong) TCLiveVideoControlBar *controlBar; -@property(nonatomic,strong) TCLiveChatTableView *chatTableView; -@property(nonatomic,strong) UITextField *inputTextField; -@property(nonatomic,strong) NSString *roomID; -@property(nonatomic,strong) UIImageView *bgImageView; -@property(nonatomic,strong) NSTimer *logTimer; -@property(nonatomic,strong) NSTimer *heartBeatTimer; -@property(nonatomic,assign) CGRect origInputTextFieldFrame; -@property(nonatomic,assign) CGRect origChatTableViewFrame; -@property(nonatomic,strong) NSString *role; -@end - -@implementation TCLiveRoomViewController - --(instancetype)initWithRoomID:(NSString *)roomid role:(NSString *)role{ - if (self = [super init]) { - self.roomID = roomid; - self.role = role; - } - return self; -} - -- (void)viewDidLoad { - [super viewDidLoad]; - // Do any additional setup after loading the view. 
- self.title = self.roomID; - [self.navigationController.navigationBar setTitleTextAttributes:@{NSForegroundColorAttributeName:[UIColor whiteColor]}]; - - [self enterRoom]; - - [self customLeftButton]; - - [self.view addSubview:self.bgImageView]; - - [_bgImageView addSubview:self.videoLayoutView]; - - [_bgImageView addSubview:self.chatTableView]; - - [_bgImageView addSubview:self.controlBar]; - - [_bgImageView addSubview:self.inputTextField]; - - //监听键盘变化 - [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(keyboardWillChangeFrame:) name:UIKeyboardWillChangeFrameNotification object:nil]; -} --(void)viewWillAppear:(BOOL)animated{ - [super viewWillAppear:animated]; - //导航栏透明化 - [self setNavigationBarTransparent]; - [UIApplication sharedApplication].statusBarStyle = UIStatusBarStyleLightContent; -} - -- (void)viewWillDisappear:(BOOL)animated{ - [super viewWillDisappear:animated]; - [self.navigationController.navigationBar setTranslucent:NO]; - [UIApplication sharedApplication].statusBarStyle = UIStatusBarStyleDefault; -} -- (void)setNavigationBarTransparent{ - [self.navigationController.navigationBar setTranslucent:YES]; - [self.navigationController.navigationBar setBackgroundImage:[[UIImage alloc]init] forBarMetrics:UIBarMetricsDefault]; - [self.navigationController.navigationBar setShadowImage:[[UIImage alloc]init]]; - [self.navigationController.navigationBar setBackgroundColor:[UIColor clearColor]]; -} - -- (void)enterRoom{ - [[TCLiveRequestManager getInstance] reqGetAuthBufferInfoWithParams:@{@"roomID":self.roomID,@"userID":[TCLiveRequestManager getInstance].userID} block:^(NSDictionary *info) { - ILiveRoomOption *option = [ILiveRoomOption defaultHostLiveOption]; - option.imOption.imSupport = YES; - option.memberStatusListener = self.videoLayoutView; - option.roomDisconnectListener = self; - option.controlRole = self.role; - option.avOption.privateMapKey = [info[@"privateMapKey"] dataUsingEncoding:NSUTF8StringEncoding]; - - 
[[ILiveRoomManager getInstance] createRoom:[self.roomID intValue] option:option succ:^{ - NSLog(@"-----> create room succ"); - [[UIToastView getInstance] showToastWithMessage:@"创建房间成功" toastMode:UIToastShowMode_Succ]; - [self.controlBar enableBeauty:YES];//进入房间默认开启美颜 - } failed:^(NSString *module, int errId, NSString *errMsg) { - if(errId == 10021){ - //表示房间已存在直接加入房间 - [[ILiveRoomManager getInstance] joinRoom:[self.roomID intValue] option:option succ:^{ - NSLog(@"-----> join room succ"); - [[UIToastView getInstance] showToastWithMessage:@"加入房间成功" toastMode:UIToastShowMode_Succ]; - [self.controlBar enableBeauty:YES];//进入房间默认开启美颜 - } failed:^(NSString *module, int errId, NSString *errMsg) { - NSLog(@"-----> join room fail,%@ %d %@",module, errId, errMsg); - [[UIToastView getInstance] showToastWithMessage:errMsg toastMode:UIToastShowMode_fail]; - }]; - } - else{ - NSLog(@"-----> create room fail,%@ %d %@",module, errId, errMsg); - [[UIToastView getInstance] showToastWithMessage:errMsg toastMode:UIToastShowMode_fail]; - } - }]; - }]; - -} - -- (void)customLeftButton{ - UIButton *backBtn = [UIButton buttonWithType:UIButtonTypeCustom]; - backBtn.frame = CGRectMake(0, 0, 44, 44); - [backBtn setImage:[UIImage imageNamed:@"ui_title_arrow_left.png"] forState:UIControlStateNormal]; - [backBtn addTarget:self action:@selector(backBtnClicked:) forControlEvents:UIControlEventTouchUpInside]; - backBtn.frame = CGRectMake(0, 0, 44, 44); - [backBtn setTitleColor:[UIColor blackColor] forState:UIControlStateNormal]; - backBtn.imageEdgeInsets = UIEdgeInsetsMake(10, 0, 10, 20); - UIBarButtonItem *item = [[UIBarButtonItem alloc]initWithCustomView:backBtn]; - self.navigationItem.leftBarButtonItem = item; -} - -- (UIImageView *)bgImageView{ - if (!_bgImageView) { - _bgImageView = [[UIImageView alloc] initWithFrame:self.view.bounds]; - [_bgImageView setImage:[UIImage imageNamed:@"bg.png"]]; - _bgImageView.userInteractionEnabled = YES; - } - return _bgImageView; -} - -//视频区域 
--(TCLiveVideoLayoutView *)videoLayoutView{ - if (!_videoLayoutView) { - _videoLayoutView = [[TCLiveVideoLayoutView alloc] initWithFrame:CGRectMake(0, 0, self.view.frame.size.width, self.view.frame.size.height)]; - } - return _videoLayoutView; -} -//控制bar -- (TCLiveVideoControlBar *)controlBar{ - if (!_controlBar) { - _controlBar = [[TCLiveVideoControlBar alloc] initWithFrame:CGRectMake(0, self.view.frame.size.height - LIVE_CONTROL_BAR_HEIGHT, self.view.frame.size.width, LIVE_CONTROL_BAR_HEIGHT)]; - _controlBar.delegate = self; - } - return _controlBar; -} -//消息列表 -- (UITableView *)chatTableView{ - if (!_chatTableView) { - _chatTableView = [[TCLiveChatTableView alloc] initWithFrame:CGRectMake(0, self.view.frame.size.height - LIVE_CONTROL_BAR_HEIGHT - 400, 250, 400) style:UITableViewStylePlain]; - _chatTableView.backgroundColor = [UIColor clearColor]; - self.origChatTableViewFrame = self.chatTableView.frame; - } - return _chatTableView; -} -//输入框 -- (UITextField *)inputTextField{ - if (!_inputTextField) { - _inputTextField = [[UITextField alloc] initWithFrame:CGRectMake(0, self.view.frame.size.height , self.view.frame.size.width, LIVE_INPUTTEXTFIELD_HEIGHT)]; - _inputTextField.delegate = self; - _inputTextField.backgroundColor= [[UIColor whiteColor] colorWithAlphaComponent:0.9]; - _inputTextField.placeholder = @"请输入内容"; - _inputTextField.returnKeyType = UIReturnKeySend; - self.origInputTextFieldFrame = self.inputTextField.frame; - } - return _inputTextField; -} -//关闭界面退出房间 -- (void)backBtnClicked:(UIButton *)sender{ - [self.navigationController popViewControllerAnimated:YES]; - [[ILiveRoomManager getInstance] quitRoom:^{ - NSLog(@"-----> quit room succ"); - [[UIToastView getInstance] showToastWithMessage:@"退出房间成功" toastMode:UIToastShowMode_Succ]; - } failed:^(NSString *module, int errId, NSString *errMsg) { - NSLog(@"-----> quit room fail,%@ %d %@",module, errId, errMsg); - [[UIToastView getInstance] showToastWithMessage:@"退出房间失败" toastMode:UIToastShowMode_fail]; - 
}]; - [_logTimer invalidate]; - _logTimer = nil; - -} -#pragma mark - ILiveRoomDisconnectListener -- (BOOL)onRoomDisconnect:(int)reason;{ - [self backBtnClicked:nil]; - return YES; -} -#pragma mark - UITextFieldDelegate - -- (BOOL)textFieldShouldReturn:(UITextField *)textField;{ - [textField resignFirstResponder]; - NSString *text = textField.text; - if (text.length > 0) { - [self.chatTableView sendMessage:text]; - textField.text = nil; - } - else{ - return NO; - } - return YES; -} -#pragma mark - TCLiveVideoControlBarDelegate --(void)logBtnClick:(UIButton *)sender{ - if (sender.selected) { - if (!_logTimer) { - [self logUpdate]; - _logTimer = [NSTimer scheduledTimerWithTimeInterval:1.0 target:self selector:@selector(logUpdate) userInfo:nil repeats:YES]; - } - } - else{ - [_logTimer invalidate]; - _logTimer = nil; - [_videoLayoutView closeLogView]; - } -} -- (void)chatBtnClick:(UIButton *)sender{ - [self.inputTextField becomeFirstResponder]; -} -- (void)logUpdate{ - QAVContext *avContext = [[ILiveSDK getInstance] getAVContext]; - NSString *qualityStr = [avContext.room getQualityTips]; - [_videoLayoutView showLogView:qualityStr]; -} - -- (void)beautyBtnClick:(UIButton *)sender{ - if (sender.selected) { - [self.videoLayoutView setBeautyLevel:9]; - } - else{ - [self.videoLayoutView setBeautyLevel:0]; - } -} -#pragma mark - 键盘事件 -- (void)keybaordAnimationWithDuration:(CGFloat)duration keyboardOriginY:(CGFloat)keyboardOriginY{ - - __block TCLiveRoomViewController *blockSelf = self; - //作为视图的键盘,弹出动画也是UIViewAnimationOptionCurveEaseIn的方式 - [UIView animateWithDuration:duration delay:0 options:UIViewAnimationOptionCurveEaseIn animations:^{ - //text field - if(keyboardOriginY == blockSelf.view.frame.size.height){ - blockSelf.inputTextField.frame = blockSelf.origInputTextFieldFrame; - blockSelf.chatTableView.frame = blockSelf.origChatTableViewFrame; - } - else{ - blockSelf.inputTextField.frame = CGRectMake(blockSelf.inputTextField.frame.origin.x,keyboardOriginY - 
blockSelf.inputTextField.frame.size.height, blockSelf.inputTextField.frame.size.width, blockSelf.inputTextField.frame.size.height); - blockSelf.chatTableView.frame = CGRectMake(blockSelf.chatTableView.frame.origin.x,keyboardOriginY - blockSelf.chatTableView.frame.size.height - LIVE_INPUTTEXTFIELD_HEIGHT, blockSelf.chatTableView.frame.size.width, blockSelf.chatTableView.frame.size.height); - } - - } completion:nil]; -} -- (void)keyboardWillChangeFrame:(NSNotification *)notify{ - NSDictionary * info = notify.userInfo; - //动画时间 - CGFloat animationDuration = [info[UIKeyboardAnimationDurationUserInfoKey] floatValue]; - //键盘目标位置 - CGRect keyboardAimFrame = [info[UIKeyboardFrameEndUserInfoKey] CGRectValue]; - if ([self.inputTextField isFirstResponder]) { - [self keybaordAnimationWithDuration:animationDuration keyboardOriginY:keyboardAimFrame.origin.y]; - } -} -@end diff --git a/src/ios/Trtc/TCLiveRoomVC/TCLiveUITools/UIColorEX.h b/src/ios/Trtc/TCLiveRoomVC/TCLiveUITools/UIColorEX.h deleted file mode 100644 index b3a8650..0000000 --- a/src/ios/Trtc/TCLiveRoomVC/TCLiveUITools/UIColorEX.h +++ /dev/null @@ -1,13 +0,0 @@ -// -// UIColorEX.h -// TRTC -// -// Created by Tencent on 2018/6/5. -// Copyright © 2018年 Tencent. All rights reserved. -// - -#import - -@interface UIColor (EX) -+ (UIColor *)colorWithRGBHex: (unsigned int)hex; -@end diff --git a/src/ios/Trtc/TCLiveRoomVC/TCLiveUITools/UIColorEX.m b/src/ios/Trtc/TCLiveRoomVC/TCLiveUITools/UIColorEX.m deleted file mode 100644 index 458f5fc..0000000 --- a/src/ios/Trtc/TCLiveRoomVC/TCLiveUITools/UIColorEX.m +++ /dev/null @@ -1,25 +0,0 @@ -// -// UIColorEX.m -// TRTC -// -// Created by Tencent on 2018/6/5. -// Copyright © 2018年 Tencent. All rights reserved. 
-// - -#import "UIColorEX.h" - -@implementation UIColor (EX) - -+ (UIColor *)colorWithRGBHex: (unsigned int)hex -{ - int r = (hex >> 16) & 0xFF; - int g = (hex >> 8) & 0xFF; - int b = (hex) & 0xFF; - - return [UIColor colorWithRed:r / 255.0f - green:g / 255.0f - blue:b / 255.0f - alpha:1.0f]; -} - -@end diff --git a/src/ios/Trtc/TCLiveRoomVC/TCLiveUITools/UIToastView.h b/src/ios/Trtc/TCLiveRoomVC/TCLiveUITools/UIToastView.h deleted file mode 100644 index 4103160..0000000 --- a/src/ios/Trtc/TCLiveRoomVC/TCLiveUITools/UIToastView.h +++ /dev/null @@ -1,20 +0,0 @@ -// -// UIToastView.h -// TRTC -// -// Created by Tencent on 2018/6/9. -// Copyright © 2018年 Tencent. All rights reserved. -// - -#import - -typedef enum { - UIToastShowMode_Default = 1, - UIToastShowMode_Succ, - UIToastShowMode_fail, -}UIToastShowMode; - -@interface UIToastView : UIView -+ (UIToastView *)getInstance; -- (void)showToastWithMessage:(NSString *)text toastMode:(UIToastShowMode )mode; -@end diff --git a/src/ios/Trtc/TCLiveRoomVC/TCLiveUITools/UIToastView.m b/src/ios/Trtc/TCLiveRoomVC/TCLiveUITools/UIToastView.m deleted file mode 100644 index e5d631b..0000000 --- a/src/ios/Trtc/TCLiveRoomVC/TCLiveUITools/UIToastView.m +++ /dev/null @@ -1,96 +0,0 @@ -// -// UIToastView.m -// TRTC -// -// Created by Tencent on 2018/6/9. -// Copyright © 2018年 Tencent. All rights reserved. 
-// - -#import "UIToastView.h" - -#define UITOAST_HEIGHT 30 -#define UITOAST_IMAGE_HEIGHT 15 -@interface UIToastView () -@property(nonatomic,strong)UILabel *toastLabel; -@property(nonatomic,strong)UIImageView *toastImageView; -@end - -@implementation UIToastView - -+ (UIToastView *)getInstance{ - static UIToastView *singleTon = nil; - static dispatch_once_t onceToken; - dispatch_once(&onceToken, ^{ - singleTon = [UIToastView new]; - }); - return singleTon; -} - --(instancetype)initWithFrame:(CGRect)frame{ - if (self = [super initWithFrame:frame]) { - self.backgroundColor = [[UIColor blackColor] colorWithAlphaComponent:0.5]; - self.layer.cornerRadius = 4; - self.layer.masksToBounds = YES; - - [self addSubview:self.toastImageView]; - [self addSubview:self.toastLabel]; - } - return self; -} -- (UILabel *)toastLabel{ - if (!_toastLabel) { - _toastLabel = [[UILabel alloc] initWithFrame:CGRectZero]; - _toastLabel.numberOfLines = 1; - _toastLabel.textAlignment = NSTextAlignmentCenter; - _toastLabel.textColor = [UIColor whiteColor]; - _toastLabel.font = [UIFont systemFontOfSize:16]; - } - return _toastLabel; -} - --(UIImageView *)toastImageView{ - if (!_toastImageView) { - _toastImageView = [[UIImageView alloc] initWithFrame:CGRectZero]; - - } - return _toastImageView; -} - -- (void)showToastWithMessage:(NSString *)text toastMode:(UIToastShowMode )mode{ - dispatch_async(dispatch_get_main_queue(), ^{ - UIToastView *view = [UIToastView new]; - CGRect rect = [text boundingRectWithSize:CGSizeMake([UIScreen mainScreen].bounds.size.width - 80, 20) options:NSStringDrawingUsesLineFragmentOrigin attributes:@{NSFontAttributeName:[UIFont systemFontOfSize:16]} context:nil]; - view.frame = CGRectMake(0, 0, rect.size.width + 10 + 20, UITOAST_HEIGHT); - view.toastImageView.frame = CGRectMake(5, 0, UITOAST_IMAGE_HEIGHT, UITOAST_IMAGE_HEIGHT); - view.toastImageView.center = CGPointMake(view.toastImageView.center.x, view.frame.size.height/2); - view.toastLabel.frame = CGRectMake(25, 0, 
rect.size.width, UITOAST_HEIGHT); - view.toastLabel.text = text; - if (UIToastShowMode_Succ == mode){ - view.toastImageView.image = [UIImage imageNamed:@"ic_toast_success@2x"]; - } - else if(UIToastShowMode_fail == mode){ - view.toastImageView.image = [UIImage imageNamed:@"icon_sign@2x"]; - } - else{ - view.toastImageView.frame = CGRectZero; - view.frame = CGRectMake(0, 0, rect.size.width + 10 , UITOAST_HEIGHT); - view.toastLabel.frame = CGRectMake(5, 0, rect.size.width, UITOAST_HEIGHT); - } - view.center = [[UIApplication sharedApplication] keyWindow].center; - [[[UIApplication sharedApplication] keyWindow] addSubview:view]; - [UIView animateWithDuration:0.5 animations:^{ - view.alpha = 1; - } completion:^(BOOL finished) { - dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)(3 * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{ - [UIView animateWithDuration:0.5 animations:^{ - view.alpha = 0; - } completion:^(BOOL finished) { - [view removeFromSuperview]; - }]; - }); - }]; - }); - -} - -@end diff --git a/src/ios/Trtc/TCLiveRoomVC/TCLiveVideoControlBar.h b/src/ios/Trtc/TCLiveRoomVC/TCLiveVideoControlBar.h deleted file mode 100644 index 93f1a42..0000000 --- a/src/ios/Trtc/TCLiveRoomVC/TCLiveVideoControlBar.h +++ /dev/null @@ -1,34 +0,0 @@ -// -// TCLiveVideoControlBar.h -// TRTC -// -// Created by Tencent on 2018/6/3. -// Copyright © 2018年 Tencent. All rights reserved. 
-// - -#import - -@protocol TCLiveVideoControlBarDelegate -@optional -//切换摄像头 -- (void)switchCameraBtnClick:(UIButton *)sender; -//开关美颜 -- (void)beautyBtnClick:(UIButton *)sender; -//开关麦克风 -- (void)voiceBtnClick:(UIButton *)sender; -//展示日志 -- (void)logBtnClick:(UIButton *)sender; -//反馈 -- (void)feedBackBtnClick:(UIButton *)sender; -//切换配置 -- (void)changeRoleBtnClick:(UIButton *)sender; -//聊天 -- (void)chatBtnClick:(UIButton *)sender; -@end - -@interface TCLiveVideoControlBar : UIView -@property(nonatomic,weak)id delegate; -@property(nonatomic,strong) UIButton *logBtn; -- (void)enableLog:(BOOL)endable; -- (void)enableBeauty:(BOOL)enable; -@end diff --git a/src/ios/Trtc/TCLiveRoomVC/TCLiveVideoControlBar.m b/src/ios/Trtc/TCLiveRoomVC/TCLiveVideoControlBar.m deleted file mode 100644 index 0eb7c7d..0000000 --- a/src/ios/Trtc/TCLiveRoomVC/TCLiveVideoControlBar.m +++ /dev/null @@ -1,298 +0,0 @@ -// -// TCLiveVideoControlBar.m -// TRTC -// -// Created by Tencent on 2018/6/3. -// Copyright © 2018年 Tencent. All rights reserved. 
-// - - - -#import "TCLiveVideoControlBar.h" -#import -#import -#import -#import "TCLiveRequestManager.h" -#import "UIToastView.h" -#import "TCLiveRoomViewController.h" - -#define CONTROLBAR_BUTTON_WIDTH 50 - -@interface TCLiveVideoControlBar () -@property(nonatomic,strong) UIButton *chatBtn; -@property(nonatomic,strong) UIButton *switchCamera; -@property(nonatomic,strong) UIButton *beautyBtn; -@property(nonatomic,strong) UIButton *voiceBtn; -@property(nonatomic,strong) UIButton *changeRoleBtn; -@property(nonatomic,strong) UIButton *feedBackBtn; -@end - -@implementation TCLiveVideoControlBar - --(instancetype)initWithFrame:(CGRect)frame -{ - if (self = [super initWithFrame:frame]) { -// [self addSubview:self.chatBtn]; -// [self addSubview:self.beautyBtn]; - [self addSubview:self.voiceBtn]; -// [self addSubview:self.logBtn]; - [self addSubview:self.switchCamera]; -// [self addSubview:self.feedBackBtn]; -// [self addSubview:self.changeRoleBtn]; - self.backgroundColor = [UIColor clearColor]; - } - return self; -} - --(UIButton *)chatBtn{ - if(!_chatBtn){ - _chatBtn = [self createCustomControlBtn:@"聊天" withImage:[UIImage imageNamed:@"chat.png"] selectedImage:nil]; - [_chatBtn addTarget:self action:@selector(chatBtnClick:) forControlEvents:UIControlEventTouchUpInside]; - } - _chatBtn.frame = CGRectMake(self.frame.size.width / 2 - CONTROLBAR_BUTTON_WIDTH * 3.5, 0, CONTROLBAR_BUTTON_WIDTH, self.frame.size.height); - return _chatBtn; -} --(UIButton *)switchCamera{ - if (!_switchCamera) { - _switchCamera = [self createCustomControlBtn:@"翻转" withImage:[UIImage imageNamed:@"camera.png"] selectedImage:[UIImage imageNamed:@"camera-gray.png"]]; - [_switchCamera addTarget:self action:@selector(switchCameraClick:) forControlEvents:UIControlEventTouchUpInside]; - } -// _switchCamera.frame = CGRectMake(self.frame.size.width/2 - CONTROLBAR_BUTTON_WIDTH * 2.5,0,CONTROLBAR_BUTTON_WIDTH,self.frame.size.height); - _switchCamera.frame = CGRectMake(self.frame.size.width/2 - 
CONTROLBAR_BUTTON_WIDTH * 1.5,0,CONTROLBAR_BUTTON_WIDTH,self.frame.size.height); - return _switchCamera; -} - -- (UIButton *)beautyBtn{ - if (!_beautyBtn) { - _beautyBtn = [self createCustomControlBtn:@"美颜" withImage:[UIImage imageNamed:@"beauty.png"] selectedImage:[UIImage imageNamed:@"beauty-dis.png"]]; - [_beautyBtn addTarget:self action:@selector(beautyBtnClick:) forControlEvents:UIControlEventTouchUpInside]; - } - _beautyBtn.frame = CGRectMake(self.frame.size.width/2 - CONTROLBAR_BUTTON_WIDTH *1.5, 0, CONTROLBAR_BUTTON_WIDTH, self.frame.size.height); - return _beautyBtn; -} - -- (UIButton *)voiceBtn{ - if (!_voiceBtn) { - _voiceBtn = [self createCustomControlBtn:@"声音" withImage:[UIImage imageNamed:@"mic-dis.png"] selectedImage:[UIImage imageNamed:@"mic.png"]]; - [_voiceBtn addTarget:self action:@selector(voiceBtnClick:) forControlEvents:UIControlEventTouchUpInside]; - } -// _voiceBtn.frame = CGRectMake(self.frame.size.width/2 - CONTROLBAR_BUTTON_WIDTH * 0.5, 0, CONTROLBAR_BUTTON_WIDTH, self.frame.size.height); - _voiceBtn.frame = CGRectMake(self.frame.size.width/2 + CONTROLBAR_BUTTON_WIDTH * 0.5, 0, CONTROLBAR_BUTTON_WIDTH, self.frame.size.height); - return _voiceBtn; -} - --(UIButton *)changeRoleBtn{ - if (!_changeRoleBtn) { - _changeRoleBtn = [self createCustomControlBtn:@"配置" withImage:[UIImage imageNamed:@"role.png"] selectedImage:[UIImage imageNamed:@"role.png"]]; - [_changeRoleBtn addTarget:self action:@selector(changeRoleBtnClick:) forControlEvents:UIControlEventTouchUpInside]; - } - _changeRoleBtn.frame = CGRectMake(self.frame.size.width/2 + CONTROLBAR_BUTTON_WIDTH * 0.5, 0, CONTROLBAR_BUTTON_WIDTH, self.frame.size.height); - return _changeRoleBtn; -} - -- (UIButton *)feedBackBtn{ - if (!_feedBackBtn) { - _feedBackBtn = [self createCustomControlBtn:@"反馈" withImage:[UIImage imageNamed:@"feedback.png"] selectedImage:[UIImage imageNamed:@"feedback.png"]]; - [_feedBackBtn addTarget:self action:@selector(feedBackBtnClick:) 
forControlEvents:UIControlEventTouchUpInside]; - } - _feedBackBtn.frame = CGRectMake(self.frame.size.width/2 + CONTROLBAR_BUTTON_WIDTH * 1.5, 0, CONTROLBAR_BUTTON_WIDTH, self.frame.size.height); - return _feedBackBtn; -} - -- (UIButton *)logBtn{ - if (!_logBtn) { - _logBtn = [self createCustomControlBtn:@"信息" withImage:[UIImage imageNamed:@"log.png"] selectedImage:[UIImage imageNamed:@"log2.png"]]; - [_logBtn addTarget:self action:@selector(logBtnClick:) forControlEvents:UIControlEventTouchUpInside]; - } - _logBtn.frame = CGRectMake(self.frame.size.width/2 + CONTROLBAR_BUTTON_WIDTH * 2.5, 0, CONTROLBAR_BUTTON_WIDTH, self.frame.size.height); - return _logBtn; -} - -- (UIButton *)createCustomControlBtn:(NSString *)wording withImage:(UIImage *)image selectedImage:(UIImage *)highlightImage{ - UIButton *customButton = [[UIButton alloc] initWithFrame:CGRectZero]; - [customButton setTitle:wording forState:UIControlStateNormal]; - customButton.titleLabel.font = [UIFont systemFontOfSize:11]; - [customButton setTitleColor:[UIColor whiteColor] forState:UIControlStateNormal]; - [customButton setImage:image forState:UIControlStateNormal]; - [customButton setImage:highlightImage forState:UIControlStateSelected]; - CGFloat imageHeight = image.size.height; - CGFloat imageWidth = image.size.width; - [customButton setTitleEdgeInsets:UIEdgeInsetsMake(15, -60, -15, 0)]; - [customButton setImageEdgeInsets:UIEdgeInsetsMake(10, 10, self.frame.size.height - (CONTROLBAR_BUTTON_WIDTH - 20)/imageWidth *imageHeight -10, 10)]; - return customButton; -} -#pragma mark - Handle Event -//聊天按钮 -- (void)chatBtnClick:(UIButton *)sender{ - if (!sender.selected) { - sender.selected = YES; - } - else{ - sender.selected = NO; - } - if ([_delegate respondsToSelector:@selector(chatBtnClick:)]) { - [_delegate chatBtnClick:sender]; - } -} -//切换摄像头 -- (void)switchCameraClick:(UIButton *)sender{ - ILiveFrameDispatcher *frameDispatcher = [[ILiveRoomManager getInstance] getFrameDispatcher]; - ILiveRenderView 
*renderView = [frameDispatcher getRenderView:[TCLiveRequestManager getInstance].userID srcType:QAVVIDEO_SRC_TYPE_CAMERA]; - if (!sender.selected) { - sender.selected = YES; - renderView.isMirror = NO; - } - else{ - sender.selected = NO; - renderView.isMirror = YES; - } - [[ILiveRoomManager getInstance] switchCamera:^{ - NSLog(@"switch camera succ"); - } failed:^(NSString *module, int errId, NSString *errMsg) { - NSLog(@"switch camera fail"); - [[UIToastView getInstance] showToastWithMessage:@"切换摄像头失败" toastMode:UIToastShowMode_fail]; - }]; - if ([_delegate respondsToSelector:@selector(switchCameraClick:)]) { - [_delegate switchCameraBtnClick:sender]; - } -} -//美颜 -- (void)beautyBtnClick:(UIButton *)sender{ - if (!sender.selected) { - sender.selected = YES; - } - else{ - sender.selected = NO; - } - if ([_delegate respondsToSelector:@selector(beautyBtnClick:)]) { - [_delegate beautyBtnClick:sender]; - } -} -- (void)enableBeauty:(BOOL)enable{ - if (enable) { - _beautyBtn.selected = YES; - } - else{ - _beautyBtn.selected = NO; - } - if ([_delegate respondsToSelector:@selector(beautyBtnClick:)]) { - [_delegate beautyBtnClick:_beautyBtn]; - } -} -//语音开关 -- (void)voiceBtnClick:(UIButton *)sender{ - if (!sender.selected) { - sender.selected = YES; - } - else{ - sender.selected = NO; - } - [[ILiveRoomManager getInstance] enableMic:!sender.selected succ:^{ - NSLog(@"enable mic succ"); - } failed:^(NSString *module, int errId, NSString *errMsg) { - NSLog(@"enable mic fail"); - [[UIToastView getInstance] showToastWithMessage:@"关麦失败" toastMode:UIToastShowMode_fail]; - }]; - if ([_delegate respondsToSelector:@selector(voiceBtnClick:)]) { - [_delegate voiceBtnClick:sender]; - } - -} -//配置角色 -- (void)changeRoleBtnClick:(UIButton *)sender{ - if (!sender.selected) { - sender.selected = YES; - } - else{ - sender.selected = NO; - } - if ([_delegate respondsToSelector:@selector(changeRoleBtnClick:)]) { - [_delegate changeRoleBtnClick:sender]; - } - if ([_delegate 
isKindOfClass:[UIViewController class]]) { - [self showChangeRoleMenuOnVC:(UIViewController *)_delegate]; - } -} -//日志 -- (void)logBtnClick:(UIButton *)sender{ - if (!sender.selected) { - sender.selected = YES; - } - else{ - sender.selected = NO; - } - if ([_delegate respondsToSelector:@selector(logBtnClick:)]) { - [_delegate logBtnClick:sender]; - } -} -//问题反馈 -- (void)feedBackBtnClick:(UIButton *)sender{ - if (!sender.selected) { - sender.selected = YES; - } - else{ - sender.selected = NO; - } - if ([_delegate respondsToSelector:@selector(feedBackBtnClick:)]) { - [_delegate feedBackBtnClick:sender]; - } - if ([_delegate isKindOfClass:[UIViewController class]]) { - UIAlertController *alert = [UIAlertController alertControllerWithTitle:@"若您在接入过程中有疑问可直接反馈给我们" message:@"邮箱联系地址:trtcfb@qq.com" preferredStyle:UIAlertControllerStyleAlert]; - [alert addAction:[UIAlertAction actionWithTitle:@"确认" style:UIAlertActionStyleDefault handler:nil]]; - [((UIViewController *)self.delegate) presentViewController:alert animated:YES completion:nil]; - } -} -//log展示开关 -- (void)enableLog:(BOOL)endable{ - self.logBtn.selected = endable; - if ([_delegate respondsToSelector:@selector(logBtnClick:)]) { - [_delegate logBtnClick:self.logBtn]; - } -} -//弹出配置菜单 --(void)showChangeRoleMenuOnVC:(UIViewController *)vc{ - UIAlertController *alertController = [UIAlertController alertControllerWithTitle:@"请选择配置分辨率" message:nil preferredStyle:UIAlertControllerStyleAlert]; - [alertController addAction:[UIAlertAction actionWithTitle:@"1280x720 1000-1800kbps 20fps" style:UIAlertActionStyleDefault handler:^(UIAlertAction * _Nonnull action) { - [[ILiveRoomManager getInstance] changeRole:@"ed1280" succ:^{ - NSLog(@"更换ed1280 succ"); - [self enableLog:YES]; - } failed:^(NSString *module, int errId, NSString *errMsg) { - NSLog(@"更换ed1280 fail"); - }]; - }]]; - [alertController addAction:[UIAlertAction actionWithTitle:@"960x540 500-800kbps 15fps" style:UIAlertActionStyleDefault handler:^(UIAlertAction * _Nonnull 
action) { - [[ILiveRoomManager getInstance] changeRole:@"ed960" succ:^{ - NSLog(@"更换ed960 succ"); - [self enableLog:YES]; - } failed:^(NSString *module, int errId, NSString *errMsg) { - NSLog(@"更换ed960 fail"); - }]; - }]]; - [alertController addAction:[UIAlertAction actionWithTitle:@"640x480 400-800kbps 15fps" style:UIAlertActionStyleDefault handler:^(UIAlertAction * _Nonnull action) { - [[ILiveRoomManager getInstance] changeRole:@"ed640" succ:^{ - NSLog(@"更换ed640 succ"); - [self enableLog:YES]; - } failed:^(NSString *module, int errId, NSString *errMsg) { - NSLog(@"更换ed640 fail"); - }]; - }]]; - [alertController addAction:[UIAlertAction actionWithTitle:@"480x360 300-600kbps 15fps" style:UIAlertActionStyleDefault handler:^(UIAlertAction * _Nonnull action) { - [[ILiveRoomManager getInstance] changeRole:@"ed480" succ:^{ - NSLog(@"更换ed480 succ"); - [self enableLog:YES]; - } failed:^(NSString *module, int errId, NSString *errMsg) { - NSLog(@"更换ed480 fail"); - }]; - }]]; - [alertController addAction:[UIAlertAction actionWithTitle:@"320x240 200-400kbps 15fps" style:UIAlertActionStyleDefault handler:^(UIAlertAction * _Nonnull action) { - [[ILiveRoomManager getInstance] changeRole:@"ed320" succ:^{ - NSLog(@"更换ed320 succ"); - [self enableLog:YES]; - } failed:^(NSString *module, int errId, NSString *errMsg) { - NSLog(@"更换ed320 fail"); - }]; - }]]; - [alertController addAction:[UIAlertAction actionWithTitle:@"取消" style:UIAlertActionStyleCancel handler:nil]]; - [vc presentViewController:alertController animated:YES completion:nil]; -} -@end diff --git a/src/ios/Trtc/TCLiveRoomVC/TCLiveVideoLayoutView.h b/src/ios/Trtc/TCLiveRoomVC/TCLiveVideoLayoutView.h deleted file mode 100644 index 1898d10..0000000 --- a/src/ios/Trtc/TCLiveRoomVC/TCLiveVideoLayoutView.h +++ /dev/null @@ -1,42 +0,0 @@ -// -// TCLiveVideoLayoutView.h -// TRTC -// -// Created by Tencent on 2018/6/3. -// Copyright © 2018年 Tencent. All rights reserved. 
-// - -#import -#import - -@class TCLiveVideoElementView; - -typedef enum { - TCLiveRoomVideoLayoutStyle_1v3, - TCLiveRoomVideoLayoutStyle_4_geizi, -}TCLiveRoomVideoLayoutStyle; - -@protocol TCLiveVideoElementViewDelegate -- (void)tapHandle:(TCLiveVideoElementView *)view; -@end - -@interface TCLiveVideoLayoutView : UIView -//画面布局样式 -@property(nonatomic,assign) TCLiveRoomVideoLayoutStyle layoutStyle; -//显示日志信息 -- (void)showLogView:(NSString *)qualityParams; -//关闭日志信息 -- (void)closeLogView; -//设置美颜 -- (void)setBeautyLevel:(NSInteger)level; -@end - -@interface TCLiveVideoElementView: UIView -//展示userID -@property(nonatomic,strong)UILabel *userIdLabel; -//展示视频分辨率、帧率等信息 -@property(nonatomic,strong)UILabel *videoInfoLable; -@property(nonatomic,weak)TCLiveVideoLayoutView *delegate; -//可拖动开关 -- (void)ennableDraggable:(BOOL)draggable; -@end diff --git a/src/ios/Trtc/TCLiveRoomVC/TCLiveVideoLayoutView.m b/src/ios/Trtc/TCLiveRoomVC/TCLiveVideoLayoutView.m deleted file mode 100644 index b13299e..0000000 --- a/src/ios/Trtc/TCLiveRoomVC/TCLiveVideoLayoutView.m +++ /dev/null @@ -1,403 +0,0 @@ -// -// TCLiveVideoLayoutView.m -// TRTC -// -// Created by Tencent on 2018/6/3. -// Copyright © 2018年 Tencent. All rights reserved. 
-// - -#import "TCLiveVideoLayoutView.h" -#import -#import -#import -#import "TCLiveRequestManager.h" -#import "UIColorEX.h" -#import "TXCVideoPreprocessor.h" -#define LIVE_VIDEO_NUM 4 - -@interface TCLiveVideoLayoutView () -@property(nonatomic,strong)NSMutableArray *liveVideos; -@property(nonatomic,strong)NSMutableArray *liveRnederView; -@property(nonatomic,strong)UITextView *logView; -@property(nonatomic,assign) BOOL isShowLogInfo; -@property(nonatomic,assign) NSDate *startTime; -@property (nonatomic, strong) TXCVideoPreprocessor *preProcessor; -@property (nonatomic, assign) Byte *processorBytes; -@end - -@implementation TCLiveVideoLayoutView - --(instancetype)initWithFrame:(CGRect)frame{ - if (self = [super initWithFrame:frame]) { - self.backgroundColor = [UIColor clearColor]; - self.layoutStyle = TCLiveRoomVideoLayoutStyle_1v3; - [self initialVideoViews]; - [[TILLiveManager getInstance] setAVListener:self]; - [[ILiveRoomManager getInstance] setLocalVideoDelegate:self]; - [[ILiveRoomManager getInstance] setRemoteVideoDelegate:self]; - [[ILiveRoomManager getInstance] setScreenVideoDelegate:self]; - //美颜处理器 - self.preProcessor = [[TXCVideoPreprocessor alloc] init]; - [self.preProcessor setDelegate:self]; - //记录进入时间 - self.startTime = [NSDate date]; - } - return self; -} -//初始化每路视频view -- (void)initialVideoViews{ - _liveVideos = [NSMutableArray array]; - //自定义视窗数组 - NSArray *frames = [self customLayoutFrames]; - for (int i = 0; i < frames.count; i++) { - TCLiveVideoElementView *view = [[TCLiveVideoElementView alloc] initWithFrame:CGRectZero]; - view.delegate = self; - [self addSubview:view]; - [_liveVideos addObject:view]; - } -} --(UITextView *)logView{ - if(!_logView){ - _logView = [[UITextView alloc] initWithFrame:CGRectMake(0, self.frame.size.height/5, self.frame.size.width/3*2, self.frame.size.height/2)]; - _logView.textColor = [UIColor colorWithRGBHex:0xFF4081]; - _logView.font = [UIFont systemFontOfSize:14]; - _logView.backgroundColor = [UIColor clearColor]; 
- _logView.editable = NO; - } - return _logView; -} - -//自定义布局(开发者可自定义每个视窗的frame来实现,视窗个数即为frames数组个数) -- (NSArray *)customLayoutFrames{ - NSMutableArray *frames = [NSMutableArray array]; - if (TCLiveRoomVideoLayoutStyle_1v3 == _layoutStyle) { - [frames removeAllObjects]; - int smallViewWidth = ((self.frame.size.height - 20 - 150 - 10 *2)/3)*(3.0/4.0); - int smallViewHeight = (self.frame.size.height - 20 - 150 - 10 *2)/3; - CGRect frame1 = self.bounds; - CGRect frame2 = CGRectMake(self.bounds.size.width - 10 - smallViewWidth, 84 , smallViewWidth, smallViewHeight); - CGRect frame3 = CGRectMake(self.bounds.size.width - 10 - smallViewWidth, frame2.origin.y + frame2.size.height + 10, smallViewWidth, smallViewHeight); - CGRect frame4 = CGRectMake(self.bounds.size.width - 10 - smallViewWidth, frame3.origin.y + frame3.size.height + 10, smallViewWidth, smallViewHeight); - - [frames addObject:[NSValue valueWithCGRect:frame1]]; - [frames addObject:[NSValue valueWithCGRect:frame2]]; - [frames addObject:[NSValue valueWithCGRect:frame3]]; - [frames addObject:[NSValue valueWithCGRect:frame4]]; - } - else if (TCLiveRoomVideoLayoutStyle_4_geizi == _layoutStyle) { - [frames removeAllObjects]; - CGRect frame1 = CGRectMake(20, 50, (self.frame.size.width -40)/2, (self.frame.size.height - 100)/2); - CGRect frame2 = CGRectMake(20 + (self.frame.size.width -40)/2 +1, 50, (self.frame.size.width -40)/2, (self.frame.size.height - 100)/2); - CGRect frame3 = CGRectMake(20, 50 + (self.frame.size.height - 100)/2 +1, (self.frame.size.width -40)/2, (self.frame.size.height - 100)/2); - CGRect frame4 = CGRectMake(20 + (self.frame.size.width -40)/2 +1, 50 + (self.frame.size.height - 100)/2 +1,(self.frame.size.width -40)/2, (self.frame.size.height - 100)/2); - [frames addObject:[NSValue valueWithCGRect:frame1]]; - [frames addObject:[NSValue valueWithCGRect:frame2]]; - [frames addObject:[NSValue valueWithCGRect:frame3]]; - [frames addObject:[NSValue valueWithCGRect:frame4]]; - } - return frames; -} - 
-//添加视频画面 --(void)addLiveRenderView:(ILiveRenderView *)renderView{ - NSArray *frames = [self customLayoutFrames]; - for (int i = 0; i < _liveVideos.count;i++) { - TCLiveVideoElementView *videoView = _liveVideos[i]; - videoView.frame = [frames[i] CGRectValue]; - if (![self getLiveRenderViewOnElementView:videoView]) { - renderView.frame = videoView.bounds; - [videoView insertSubview:renderView atIndex:0]; - break; - } - } -} -//移除视频画面 -- (void)removeLiverRenderVeiw:(ILiveRenderView *)renderView{ - if ([[renderView superview] isKindOfClass:[TCLiveVideoElementView class]]) { - TCLiveVideoElementView *videoView = (TCLiveVideoElementView *)[renderView superview]; - videoView.frame = CGRectZero; - [renderView removeFromSuperview]; - } -} - -//画面切换 -- (void)tapHandle:(TCLiveVideoElementView *)view{ - NSUInteger index = [_liveVideos indexOfObject:view]; - TCLiveVideoElementView *bigView = _liveVideos[0]; - ILiveRenderView *bigRenderView = [self getLiveRenderViewOnElementView:bigView]; - ILiveRenderView *renderView = [self getLiveRenderViewOnElementView:view]; - if (index > 0) { - [UIView animateWithDuration:0.5 animations:^{ - bigView.frame = view.frame; - view.frame = [[self customLayoutFrames][0] CGRectValue]; - bigRenderView.frame = bigView.bounds; - renderView.frame = view.bounds; - [self.liveVideos exchangeObjectAtIndex:0 withObjectAtIndex:index]; - [self exchangeSubviewAtIndex:0 withSubviewAtIndex:index]; - } completion:^(BOOL finished) { - - }]; - - } - -} -//获取指定视窗的渲染view -- (ILiveRenderView *)getLiveRenderViewOnElementView:(TCLiveVideoElementView *)elementView{ - ILiveRenderView *renderView = nil; - for (id view in [elementView subviews]) { - if ([view isKindOfClass:[ILiveRenderView class]]) { - renderView = view; - } - } - return renderView; -} --(void)setBeautyLevel:(NSInteger)level{ - [self.preProcessor setBeautyLevel:level]; -} -#pragma mark - ILiveMemStatusListener -//房间成员状态变化事件 -- (BOOL)onEndpointsUpdateInfo:(QAVUpdateEvent)event updateList:(NSArray 
*)endpoints{ - if (endpoints.count <= 0) { - return NO; - } - for (QAVEndpoint *endoption in endpoints) { - switch (event) { - case QAV_EVENT_ID_ENDPOINT_HAS_CAMERA_VIDEO: - { - ILiveFrameDispatcher *frameDispatcher = [[ILiveRoomManager getInstance] getFrameDispatcher]; - ILiveRenderView *renderView = [frameDispatcher addRenderAt:CGRectZero forIdentifier:endoption.identifier srcType:QAVVIDEO_SRC_TYPE_CAMERA]; - renderView.isRotate = NO; - renderView.autoRotate = NO; - renderView.isMirror = YES; - renderView.identifier = endoption.identifier; - renderView.diffDirectionRenderMode = ILIVERENDERMODE_SCALEASPECTFILL; - if ([[TCLiveRequestManager getInstance].userID isEqualToString:endoption.identifier]) { - renderView.rotateAngle = ILIVEROTATION_90; - } - [self addLiveRenderView:renderView]; - } - break; - case QAV_EVENT_ID_ENDPOINT_NO_CAMERA_VIDEO: - { - ILiveFrameDispatcher *frameDispatcher = [[ILiveRoomManager getInstance] getFrameDispatcher]; - ILiveRenderView *renderView = [frameDispatcher removeRenderViewFor:endoption.identifier srcType:QAVVIDEO_SRC_TYPE_CAMERA]; - [self removeLiverRenderVeiw:renderView]; - } - break; - - default: - break; - } - } - return YES; -} -/***本地画面帧数据回调***/ -#pragma mark - QAVLocalVideoDelegate -- (void)OnLocalVideoPreview:(QAVVideoFrame *)frameData{ - [self showElementVideoInfoWithVideoFrame:frameData]; -} - -- (void)OnLocalVideoPreProcess:(QAVVideoFrame *)frameData{ - - - [self.preProcessor setOutputSize:CGSizeMake(frameData.frameDesc.width, frameData.frameDesc.height)]; - [self.preProcessor setCropRect:CGRectMake(0, 0,frameData.frameDesc.width, frameData.frameDesc.height)]; - [self.preProcessor processFrame:frameData.data width:frameData.frameDesc.width height:frameData.frameDesc.height orientation:TXE_ROTATION_0 inputFormat:TXE_FRAME_FORMAT_NV12 outputFormat:TXE_FRAME_FORMAT_NV12]; - //将处理完的数据拷贝到原来的地址空间,如果是同步处理,此时会先执行didProcessFrame回调 - if(self.processorBytes){ - memcpy(frameData.data, self.processorBytes, frameData.frameDesc.width * 
frameData.frameDesc.height * 3 / 2); - } -} - -- (void)OnLocalVideoRawSampleBuf:(CMSampleBufferRef)buf result:(CMSampleBufferRef *)ret{ - -} - -/***远端视频帧数据回调***/ -#pragma mark - QAVRemoteVideoDelegate -- (void)OnVideoPreview:(QAVVideoFrame *)frameData{ - [self showElementVideoInfoWithVideoFrame:frameData]; -} -/***远端屏幕分享帧数据回调***/ -#pragma mark - ILiveScreenVideoDelegate - --(void)onScreenVideoPreview:(QAVVideoFrame *)frameData{ - [self showElementVideoInfoWithVideoFrame:frameData]; -} -/***首帧回调***/ -#pragma mark - ILVLiveAVListener -- (void)onFirstFrameRecved:(int)width height:(int)height identifier:(NSString *)identifier srcType:(avVideoSrcType)srcType;{ - -} -/***美颜相关处理***/ -#pragma mark - TXIVideoPreprocessorDelegate -- (void)didProcessFrame:(Byte *)bytes width:(NSInteger)width height:(NSInteger)height format:(TXEFrameFormat)format timeStamp:(UInt64)timeStamp -{ - self.processorBytes = bytes; -} -#pragma mark - LOG相关 -- (void)showLogView:(NSString *)qualityParams{ - NSString *role = [[[[qualityParams componentsSeparatedByString:@"ControlRole="] lastObject] componentsSeparatedByString:@","] firstObject]; - - self.logView.text = [NSString stringWithFormat:@"发送速率:%ldkbps 丢包率:%.1f%%\n接收速率:%ldkbps 丢包率:%.1f%%\n应用CPU:%.1f%% 系统CPU:%.1f%%\n角色:%@\nSDKAPPID:%d\nSDKVersion:%@",(long)[[ILiveRoomManager getInstance] getQualityData].sendRate,[[ILiveRoomManager getInstance] getQualityData].sendLossRate/100.0,(long)[[ILiveRoomManager getInstance] getQualityData].recvRate,[[ILiveRoomManager getInstance] getQualityData].recvLossRate/100.0,[[ILiveRoomManager getInstance] getQualityData].appCPURate/100.0,[[ILiveRoomManager getInstance] getQualityData].sysCPURate/100.0,role,[TCLiveRequestManager getInstance].sdkAppID,[[ILiveSDK getInstance] getVersion]]; - if(![_logView superview]){ - [self addSubview:_logView]; - } - self.isShowLogInfo = YES; -} - -- (void)showElementVideoInfoWithVideoFrame:(QAVVideoFrame *)frame{ - if(!self.isShowLogInfo){ - return; - } - NSString *userId = 
frame.identifier; - NSString *fps = @""; - if (userId.length == 0){ - userId = [TCLiveRequestManager getInstance].userID; - } - else{ - NSString *qualityParams = [[[ILiveSDK getInstance] getAVContext].room getQualityTips]; - NSString *decode = [[qualityParams componentsSeparatedByString:@"音频部分:========"] firstObject]; - NSString *itemDecode = [[[[decode componentsSeparatedByString:[NSString stringWithFormat:@"成员:%@",userId]] lastObject] componentsSeparatedByString:@"接收参数"] firstObject]; - fps = [[[[itemDecode componentsSeparatedByString:@"FPS="] lastObject] componentsSeparatedByString:@","] firstObject]; - } - int width = frame.frameDesc.width; - int height = frame.frameDesc.height; - for (int i = 0; i < _liveVideos.count;i++) { - TCLiveVideoElementView *videoView = _liveVideos[i]; - ILiveRenderView *renderView = [self getLiveRenderViewOnElementView:videoView]; - if ([renderView.identifier isEqualToString:userId]) { - if ([userId isEqualToString:[TCLiveRequestManager getInstance].userID]) { - videoView.videoInfoLable.text = [NSString stringWithFormat:@"%dx%d",width,height]; - } - else{ - videoView.videoInfoLable.text = [NSString stringWithFormat:@"%dx%d fps:%d",width,height,[fps intValue]/10]; - } - videoView.userIdLabel.text = [NSString stringWithFormat:@"%@",userId]; - - } - } -} - -- (void)closeLogView{ - [_logView removeFromSuperview]; - _logView = nil; - for (int i = 0; i < _liveVideos.count;i++) { - TCLiveVideoElementView *videoView = _liveVideos[i]; - [videoView.userIdLabel removeFromSuperview]; - videoView.userIdLabel = nil; - [videoView.videoInfoLable removeFromSuperview]; - videoView.videoInfoLable = nil; - } - self.isShowLogInfo = NO; -} -@end - -@interface TCLiveVideoElementView() -@property(nonatomic,strong) UIPanGestureRecognizer *panGesture; -@end - -@implementation TCLiveVideoElementView - --(instancetype)initWithFrame:(CGRect)frame{ - if (self = [super initWithFrame:frame]) { - [self ennableDraggable:YES]; - UITapGestureRecognizer *tap = 
[[UITapGestureRecognizer alloc] initWithTarget:self action:@selector(tapGestuer:)]; - [self addGestureRecognizer:tap]; - } - return self; -} -- (UILabel *)userIdLabel{ - if (!_userIdLabel) { - _userIdLabel = [[UILabel alloc] initWithFrame:CGRectMake(0, 12, self.bounds.size.width, 8)]; - _userIdLabel.textColor = [UIColor colorWithRGBHex:0xFF4081]; - _userIdLabel.textAlignment = NSTextAlignmentLeft; - _userIdLabel.font = [UIFont systemFontOfSize:9]; - [self addSubview:_userIdLabel]; - } - return _userIdLabel; -} --(UILabel *)videoInfoLable{ - if (!_videoInfoLable) { - _videoInfoLable = [[UILabel alloc] initWithFrame:CGRectMake(0, 0 , self.bounds.size.width, 12)]; - _videoInfoLable.textColor = [UIColor colorWithRGBHex:0xFF4081]; - _videoInfoLable.textAlignment = NSTextAlignmentLeft; - _videoInfoLable.font = [UIFont systemFontOfSize:13]; - [self addSubview:_videoInfoLable]; - } - return _videoInfoLable; -} -- (void)layoutSubviews{ - if (self.bounds.size.width == [UIScreen mainScreen].bounds.size.width) { - _videoInfoLable.frame = CGRectMake(0, 84 , self.bounds.size.width, 12); - _userIdLabel.frame = CGRectMake(0, 12 + 84, self.bounds.size.width, 8); - } - else{ - _videoInfoLable.frame = CGRectMake(0, 0 , self.bounds.size.width, 12); - _userIdLabel.frame = CGRectMake(0, 12, self.bounds.size.width, 8); - } -} -//可拖动能力 --(void)ennableDraggable:(BOOL)draggable { - [self setUserInteractionEnabled:YES]; - [self removeConstraints:self.constraints]; - for (NSLayoutConstraint *constraint in self.superview.constraints) { - if ([constraint.firstItem isEqual:self]) { - [self.superview removeConstraint:constraint]; - } - } - [self setTranslatesAutoresizingMaskIntoConstraints:YES]; - if (draggable) { - if (!_panGesture) { - _panGesture = [[UIPanGestureRecognizer alloc] initWithTarget:self action:@selector(pan:)]; - _panGesture.delegate = self; - [self addGestureRecognizer:_panGesture]; - } - }else{ - if (_panGesture) { - _panGesture = nil; - [self 
removeGestureRecognizer:_panGesture]; - } - } -} -- (void)tapGestuer:(UITapGestureRecognizer *)gesture{ - if ([_delegate respondsToSelector:@selector(tapHandle:)]) { - [_delegate tapHandle:self]; - } -} -- (void)pan:(UIPanGestureRecognizer *)panGestureRecognizer { - switch (panGestureRecognizer.state) { - case UIGestureRecognizerStateBegan: { - [self dragging:panGestureRecognizer]; - } - break; - case UIGestureRecognizerStateChanged: { - [self dragging:panGestureRecognizer]; - } - break; - default: - break; - } -} - --(void)dragging:(UIPanGestureRecognizer *)panGestureRecognizer { - UIView *view = panGestureRecognizer.view; - CGPoint translation = [panGestureRecognizer translationInView:view.superview]; - CGPoint center = CGPointMake(view.center.x + translation.x, view.center.y + translation.y); - //不能拖过边界 - CGSize size = view.frame.size; - CGSize superSize = view.superview.frame.size; - CGFloat width = size.width; - CGFloat height = size.height; - CGFloat superWidth = superSize.width; - CGFloat superHeight = superSize.height; - center.x = (center.xsuperWidth)?superWidth-width/2:center.x; - center.y = (center.ysuperHeight)?superHeight-height/2:center.y; - - [view setCenter:center]; - [panGestureRecognizer setTranslation:CGPointZero inView:view.superview]; -} -@end - diff --git a/src/ios/TrtcPlugin.h b/src/ios/TrtcPlugin.h index 1da8479..0c246e1 100644 --- a/src/ios/TrtcPlugin.h +++ b/src/ios/TrtcPlugin.h @@ -1,13 +1,15 @@ // // TrtcPlugin.h // -// Created by 布丁丸子酱 on 2018/12/26. // #import @interface TrtcPlugin : CDVPlugin +{ +} -- (void) showCreatePage:(CDVInvokedUrlCommand*)command; +- (void)joinChannel:(CDVInvokedUrlCommand *)command; +- (void)userInfoChange:(CDVInvokedUrlCommand *)command; @end diff --git a/src/ios/TrtcPlugin.m b/src/ios/TrtcPlugin.m index 0bfa416..28a11fd 100644 --- a/src/ios/TrtcPlugin.m +++ b/src/ios/TrtcPlugin.m @@ -1,16 +1,21 @@ // // TrtcPlugin.m // -// Created by 布丁丸子酱 on 2018/12/26. 
// #import "TrtcPlugin.h" -#import "TCLiveRequestManager.h" -#import -#import "TCLiveJoinRoomViewController.h" -#import -#import "UIToastView.h" -#import + +#import "VideoCallingViewController.h" +// #import "TCLiveRequestManager.h" +// #import +// #import "TCLiveJoinRoomViewController.h" +// #import +// #import "UIToastView.h" +// #import +#import + +#import "Events.h"; +#import "CordovaEventKit.h"; @interface TrtcPlugin() {} @@ -18,32 +23,56 @@ @implementation TrtcPlugin -- (void) showCreatePage:(CDVInvokedUrlCommand*)command { - NSLog(@"showCreatePage"); - [[TCLiveRequestManager getInstance] requestLoginInfo:^(int code) { - if (code == 0) { - dispatch_async(dispatch_get_main_queue(), ^{ - int retCode = [[ILiveSDK getInstance] initSdk:[TCLiveRequestManager getInstance].sdkAppID accountType:[TCLiveRequestManager getInstance].accountType]; - NSLog(@"initSdk success %d",retCode); - if (retCode == 0) { - NSLog(@"userId & sig:"); - NSLog(@"%@", [TCLiveRequestManager getInstance].userID); - NSLog(@"%@", [TCLiveRequestManager getInstance].userSig); - [[ILiveLoginManager getInstance] iLiveLogin:[TCLiveRequestManager getInstance].userID sig:[TCLiveRequestManager getInstance].userSig succ:^{ - NSLog(@"-----> login succ"); - [[UIToastView getInstance] showToastWithMessage:@"登录成功" toastMode:UIToastShowMode_Succ]; - } failed:^(NSString *module, int errId, NSString *errMsg) { - NSLog(@"-----> login fail,%@ %d %@",module, errId, errMsg); - [[UIToastView getInstance] showToastWithMessage:@"登录失败" toastMode:UIToastShowMode_fail]; - }]; - } - }); + + +-(void)joinChannel:(CDVInvokedUrlCommand*)command { + NSDictionary* params = [command.arguments objectAtIndex:0]; + NSLog(@"TRTC - joinChannel::%@",params); + CDVPluginResult* pluginResult = nil; + VideoCallingViewController *vc = [[VideoCallingViewController alloc] + initWithRoomId:[params[@"ROOM_ID"] intValue] + userId:params[@"USER_ID"] + appId:[params[@"SDK_APP_ID"] intValue] + userSig:params[@"USER_SIG"]]; + 
dispatch_async(dispatch_get_main_queue(), ^{ + if (@available(iOS 13.0, *)){ + vc.modalPresentationStyle = UIModalPresentationFullScreen; } - }]; - TCLiveJoinRoomViewController *vc = [TCLiveJoinRoomViewController new]; -// vc.defaultRoomId = self.defaultRoomId; - UINavigationController *nav = [[UINavigationController alloc] initWithRootViewController:vc]; - [self.viewController presentViewController:nav animated:YES completion:nil]; + [self.viewController presentViewController:vc animated:YES completion:nil]; + }); +} + + +-(void)userInfoChange:(CDVInvokedUrlCommand*)command { + NSDictionary* params = [command.arguments objectAtIndex:0]; + NSLog(@"TRTC - userInfoChange::%@",params); + [Events fireEvent:@"userinfo.update" extra:params]; +} + + + +#ifdef __CORDOVA_4_0_0 + +- (void)pluginInitialize { + NSLog(@"TRTC - pluginInitialize "); + [self initPlugin]; +} + +#else + +- (CDVPlugin*)initWithWebView:(WKWebView*)theWebView{ + NSLog(@"TRTC - initWithWebView "); + if (self=[super initWithWebView:theWebView]) { + } + [self initPlugin]; + return self; +} + +#endif + +-(void)initPlugin{ + NSLog(@"TRTC - initPlugin "); + [CordovaEventKit init:self]; } @end diff --git a/src/ios/TrtcUserInfo.m b/src/ios/TrtcUserInfo.m new file mode 100644 index 0000000..f5879cb --- /dev/null +++ b/src/ios/TrtcUserInfo.m @@ -0,0 +1,77 @@ +// +// TrtcUserInfo.m +// shuto-cne +// +// Created by 范大德 on 2022/3/21. 
+// + +#import +@interface TrtcUserInfo : NSObject +{ + NSString* personid; + NSString* displayName; + BOOL local; +} + + + +// 自定义类的初始化方法 + +// 过去类型instancetype 是 id 类型 + +- (instancetype)initWithPersonid:(NSString *)personid; + +- (NSString*) personid; + +@end +@implementation TrtcUserInfo + +- (instancetype)initWithPersonid:(NSString *)userId +{ + personid = userId; + return self; // 返回类本身不能忘 +} + +- (NSString*) personid{ + return personid; +} + + +-(NSString *)displayName{ + return displayName; +} + +-(BOOL)isShareUser{ + return personid != nil && [personid hasPrefix:@"share_"]; +} + +-(BOOL)isLocalUser{ + return local; +} + +-(void)setLocal:(BOOL) value{ + local = value; +} +-(void)setDisplayName:(NSString *)value{ + displayName = value; +} +- (NSUInteger)hash +{ + return [personid hash]; +} + +- (BOOL)isEqual: (id)obj +{ + if( obj == nil){ + return NO; + } + if([obj isKindOfClass: [TrtcUserInfo class]]){ + TrtcUserInfo *user = (TrtcUserInfo*)obj; + return personid == nil ? [user personid] == nil : [personid isEqual: [user personid]]; + } else if([obj isKindOfClass: [NSString class]]){ + return personid == nil ? obj == nil : [personid isEqual: obj]; + } else { + return NO; + } +} +@end diff --git a/src/ios/UserUpdateListener.m b/src/ios/UserUpdateListener.m new file mode 100644 index 0000000..a5502d4 --- /dev/null +++ b/src/ios/UserUpdateListener.m @@ -0,0 +1,35 @@ +// +// Listener.m +// shuto-cne +// +// Created by 范大德 on 2022/3/29. 
+// + +#import +#import "Listener.h" +#import "TrtcUserInfo.m" +#import "VideoCallingViewController.h" +@interface UserUpdateListener : Listener +{} +@end +@implementation UserUpdateListener + +- (instancetype)init { + return self; +} + + +-(void)on: (NSDictionary*)extra{ + NSLog(@"TRTC - userinfo.update -- userID:%@,displayname:%@",extra[@"userId"],extra[@"displayName"]); + TrtcUserInfo *user = [[TrtcUserInfo alloc] initWithPersonid:extra[@"userId"]]; + NSInteger index = [[[VideoCallingViewController viewController] remoteUidSet] indexOfObject: user]; + NSLog(@"TRTC - userinfo.update -- userId:%@,index:%ld",extra[@"userId"],index); + if (index != NSNotFound) { return; } + TrtcUserInfo *obj = [[VideoCallingViewController viewController] remoteUidSet][index]; + [obj setDisplayName: extra[@"displayName"]]; + dispatch_async(dispatch_get_main_queue(), ^{ + [[VideoCallingViewController viewController] refreshRemoteVideoViews]; + }); +} + +@end diff --git a/src/ios/VideoCallingViewController.h b/src/ios/VideoCallingViewController.h new file mode 100755 index 0000000..2859204 --- /dev/null +++ b/src/ios/VideoCallingViewController.h @@ -0,0 +1,23 @@ +// +// VideoCallingViewController.h +// TRTC-API-Example-OC +// +// Created by bluedang on 2021/4/12. 
+// + +#import +//#import "TrtcOptions.h" + +#import + +NS_ASSUME_NONNULL_BEGIN + +//MARK: 视频通话示例 - 通话界面 +@interface VideoCallingViewController : UIViewController +- (instancetype)initWithRoomId:(UInt32)roomId userId:(NSString *)userId appId:(UInt32 *)appId userSig:(NSString *) userSig; +- (NSMutableOrderedSet *)remoteUidSet; ++(VideoCallingViewController*)viewController; +- (void)refreshRemoteVideoViews; +@end + +NS_ASSUME_NONNULL_END diff --git a/src/ios/VideoCallingViewController.m b/src/ios/VideoCallingViewController.m new file mode 100755 index 0000000..967036b --- /dev/null +++ b/src/ios/VideoCallingViewController.m @@ -0,0 +1,341 @@ +// +// VideoCallingViewController.m +// TRTC-API-Example-OC +// +// Created by bluedang on 2021/4/12. +// + +/* +实时视频通话功能 + TRTC APP 实时视频通话功能 + 本文件展示如何集成实时视频通话功能 + 1、切换摄像头 API:[[_trtcCloud getDeviceManager] switchCamera:_isFrontCamera]; + 2、打开关闭摄像头 API: [self.trtcCloud startLocalPreview:_isFrontCamera view:_localVideoView]; + [self.trtcCloud stopLocalPreview]; + 3、切换听筒与扬声器 API:[[_trtcCloud getDeviceManager] setAudioRoute:TXAudioRouteEarpiece]; + [[_trtcCloud getDeviceManager] setAudioRoute:TXAudioRouteSpeakerphone]; + 4、静音当前设备,其他人将无法听到该设备的声音 API: [_trtcCloud muteLocalAudio:YES]; + 参考文档:https://cloud.tencent.com/document/product/647/42044 + */ + +/* +Real-Time Audio Call + TRTC Audio Call + This document shows how to integrate the real-time audio call feature. + 1. Switch between the speaker and receiver: [[_trtcCloud getDeviceManager] setAudioRoute:TXAudioRouteSpeakerphone] + 2. Mute the device so that others won’t hear the audio of the device: [_trtcCloud muteLocalAudio:YES] + 3. 
Display other network and volume information: delegate -> onNetworkQuality, onUserVoiceVolume + Documentation: https://cloud.tencent.com/document/product/647/42046 +*/ + +#import "VideoCallingViewController.h" + +#import "TrtcUserInfo.m" +#import "CordovaEventKit.h" +#import "Toast/UIView+Toast.h" +#import "Events.h" +#import "UserUpdateListener.m" + + +static const NSInteger maxRemoteUserNum = 7; + +@interface VideoCallingViewController () + +@property (weak, nonatomic) IBOutlet UIButton *backButton; +@property (weak, nonatomic) IBOutlet UIButton *subVisibleButton; +@property (weak, nonatomic) IBOutlet UIButton *viewRotateButton; +@property (strong, nonatomic) IBOutletCollection(UIView) NSArray *remoteViewArr; +@property (strong, nonatomic) IBOutletCollection(UILabel) NSArray *displayLabelArr; +@property (weak, nonatomic) IBOutlet UILabel *displayLabel; + +@property (assign, nonatomic) UInt32 roomId; +@property (strong, nonatomic) NSString* userId; +@property(nonatomic, assign) UInt32 sdkAppId; +@property (strong, nonatomic) NSString* userSig; +@property (strong, nonatomic) TRTCCloud *trtcCloud; +@property (strong, nonatomic) NSMutableOrderedSet *remoteUidSet; +@property (strong, nonatomic) NSMutableOrderedSet *viewUserSet; +@property (strong, nonatomic) TrtcUserInfo *sharedUser; + +@property (assign, nonatomic) BOOL isFrontCamera; +@property (assign, nonatomic) NSInteger rotation; +@end + +@implementation VideoCallingViewController + + static VideoCallingViewController* _self; + ++(VideoCallingViewController*)viewController{ + return _self; +} +- (TRTCCloud*)trtcCloud { + if (!_trtcCloud) { + _trtcCloud = [TRTCCloud sharedInstance]; + } + return _trtcCloud; +} + +- (NSMutableOrderedSet *)remoteUidSet { + if (!_remoteUidSet) { + _remoteUidSet = [[NSMutableOrderedSet alloc] initWithCapacity:maxRemoteUserNum]; + } + return _remoteUidSet; +} + +- (NSMutableOrderedSet *)viewUserSet { + if (!_viewUserSet) { + _viewUserSet = [[NSMutableOrderedSet alloc] 
initWithCapacity:maxRemoteUserNum]; + } + return _viewUserSet; +} + +- (instancetype)initWithRoomId:(UInt32)roomId userId:(NSString *)userId appId:(UInt32)appId userSig:(NSString *)userSig{ + NSLog(@"TRTC - initWithRoomId:::::"); + self = [super initWithNibName:NSStringFromClass([self class]) bundle:nil]; + if (self) { + _roomId = roomId; + _userId = userId; + _sdkAppId = appId; + _userSig = userSig; + } + NSLog(@"TRTC - roomid:%d,userID:%@,sdkAppid:%d,userSig:%@",_roomId,_userId,_sdkAppId,_userSig); + _self = self; + return self; +} + +- (void)viewDidLoad { + [super viewDidLoad]; + + self.isFrontCamera = NO; + self.trtcCloud.delegate = self; + + [self setupDefaultUIConfig]; + [self setupTRTCCloud]; + + [self.view sendSubviewToBack:self.view]; +} + +- (void)setupDefaultUIConfig { + _displayLabel.text = @"我"; + _displayLabel.hidden = NO; + _rotation = 0; + [Events addListener:@"userinfo.update" listener: [[UserUpdateListener alloc] init] ]; +} + + +- (void)setupTRTCCloud { + [[self remoteUidSet] removeAllObjects]; + + [self.trtcCloud startLocalPreview:_isFrontCamera view:self.view]; + TRTCParams *params = [TRTCParams new]; + params.sdkAppId = _sdkAppId; + params.roomId = _roomId; + params.userId = _userId; + params.role = TRTCRoleAnchor; + params.userSig = _userSig; + + [self.trtcCloud enterRoom:params appScene:TRTCAppSceneVideoCall]; + + TRTCVideoEncParam *encParams = [TRTCVideoEncParam new]; + encParams.videoResolution = TRTCVideoResolution_640_360; + encParams.videoBitrate = 550; + encParams.videoFps = 15; + + [self.trtcCloud setVideoEncoderParam:encParams]; + [self.trtcCloud startLocalAudio:TRTCAudioQualityMusic]; + +} + +- (void)dealloc { + [self.trtcCloud exitRoom]; + [TRTCCloud destroySharedIntance]; +} + +#pragma mark - IBActions + +- (IBAction)onSubVisibleChange:(UIButton*)sender { + NSLog(@"TRTC - onSubVisibleChange:::::"); + sender.selected = !sender.selected; + [self refreshRemoteVideoViews]; +} + +- (IBAction)onVideoRotate:(UIButton*)sender { + NSLog(@"TRTC - 
onRouteChange:::::"); + TRTCRenderParams *params = [[TRTCRenderParams alloc] init]; + + params.fillMode = TRTCVideoFillMode_Fit; + _rotation += 90; + _rotation = _rotation > 270 ? 0 : _rotation; + params.rotation = _rotation; + [_trtcCloud setRemoteRenderParams:[[self remoteUidSet][0] personid] streamType:TRTCVideoStreamTypeBig params:params]; + +} + +- (IBAction)onBackClick:(UIButton*)sender { + NSLog(@"TRTC - onBackClick:::::"); + [self.trtcCloud exitRoom]; + [TRTCCloud destroySharedIntance]; + [self dismissViewControllerAnimated:YES completion:nil]; +} + +- (IBAction)onSwitchCameraClick:(UIButton*)sender { + NSLog(@"TRTC - onSwitchCameraClick:::::"); + _isFrontCamera = !_isFrontCamera; + [[_trtcCloud getDeviceManager] switchCamera:_isFrontCamera]; +} + + +- (IBAction)onMicCaptureClick:(UIButton*)sender { + NSLog(@"TRTC - onMicCaptureClick:::::"); + sender.selected = !sender.selected; + if ([sender isSelected]) { + [_trtcCloud muteLocalAudio:YES]; + } else { + [_trtcCloud muteLocalAudio:NO]; + } +} + +- (IBAction)onSwitchSpeakerClick:(UIButton*)sender { + NSLog(@"TRTC - onSwitchSpeakerClick:::::"); + sender.selected = !sender.selected; + if ([sender isSelected]) { + [[_trtcCloud getDeviceManager] setAudioRoute:TXAudioRouteEarpiece]; + } else { + [[_trtcCloud getDeviceManager] setAudioRoute:TXAudioRouteSpeakerphone]; + } +} + +#pragma mark - TRTCCloud Delegate + +- (void)onEnterRoom:(NSInteger) result{ + NSLog(@"TRTC - onEnterRoom, %ld",result); + [_trtcCloud startLocalPreview:_isFrontCamera view:self.view]; + // 添加本地用户到用户列表 + TrtcUserInfo *info = [[TrtcUserInfo alloc] initWithPersonid:_userId]; + [info setLocal:YES]; + NSInteger index = [[self remoteUidSet] indexOfObject:info]; + if (index != NSNotFound) { + return; + } else { + [[self remoteUidSet] addObject:info]; + } + + NSDictionary *event = [[NSDictionary alloc]initWithObjectsAndKeys: + [[NSString alloc] initWithFormat:@"%d",_roomId],@"room",_userId,@"userId", nil]; + + [CordovaEventKit 
fireEvent:@"onLayoutChangeMessage" obj:event]; + + +} + +- (void)onUserVideoAvailable:(NSString *)userId available:(BOOL)available { + NSLog(@"TRTC - onUserVideoAvailable, user:%@,available%@",userId, available ? @"YES":@"NO"); + // 添加远端用户到用户列表 + TrtcUserInfo *info = [[TrtcUserInfo alloc] initWithPersonid:userId]; + NSInteger index = [[self remoteUidSet] indexOfObject:info]; + if (available) { + if (index != NSNotFound) { return; } + if([info isShareUser]){ + _sharedUser = info; + [[self remoteUidSet] setObject:info atIndex:0]; + [_trtcCloud stopLocalPreview]; + } else{ + [[self remoteUidSet] addObject:info]; + } + } else { + if (index == NSNotFound) { return; } + if([info isShareUser]){ + _sharedUser = nil; + [_trtcCloud stopLocalPreview]; + } + [_trtcCloud stopRemoteView:userId streamType:TRTCVideoStreamTypeSmall]; + [[self remoteUidSet] removeObject:info]; + } + [self refreshRemoteVideoViews]; + NSDictionary *event = [[NSDictionary alloc]initWithObjectsAndKeys: + [[NSString alloc] initWithFormat:@"%d",_roomId],@"room",userId,@"userId",@(available),@"available", nil]; + [CordovaEventKit fireEvent:@"onUserVideoAvailable" obj:event]; +} + +- (void)refreshRemoteVideoViews { + NSLog(@"TRTC - refreshRemoteVideoViews"); +// NSInteger index = 0; +// for (NSString* userId in [self remoteUidSet]) {0 +// if (index >= maxRemoteUserNum) { return; } +// [_remoteViewArr[index] setHidden:NO]; +// [_trtcCloud startRemoteView:userId streamType:TRTCVideoStreamTypeSmall +// view:_remoteViewArr[index++]]; +// } + + _subVisibleButton.hidden = _sharedUser != nil; + _viewRotateButton.hidden = _sharedUser == nil; + for (int i = 0; i <= _remoteViewArr.count; i++) { + TrtcUserInfo* nUser = [self remoteUidSet].count > i ? [self remoteUidSet][i] : nil; + TrtcUserInfo* oUser = [self viewUserSet].count > i ? [self viewUserSet][i] : nil; + UIView* view = i == 0 ? self.view : _remoteViewArr[i-1]; + UILabel* label = i ==0 ? 
_displayLabel : _displayLabelArr[i-1]; + if((_sharedUser != nil || [_subVisibleButton isSelected]) && i!=0){ + [view setHidden:YES]; + [label setHidden:YES]; + }else{ + if(nUser != nil){ + if([nUser isEqual:oUser]){ + if([nUser displayName] == nil || [nUser displayName].length == 0) { + [label setText:[NSString stringWithFormat:@"%@%@",[nUser personid], [nUser isShareUser] ? @"的屏幕分享" : @""]]; + } else { + [label setText:[NSString stringWithFormat:@"%@%@",[nUser displayName], [nUser isShareUser] ? @"的屏幕分享" : @""]]; + } + continue; + } + TRTCRenderParams *params = [[TRTCRenderParams alloc] init]; + params.rotation = 0; + params.fillMode = [nUser isShareUser] ? TRTCVideoFillMode_Fit : TRTCVideoFillMode_Fill; + if([nUser isLocalUser]){ + [_trtcCloud startLocalPreview:_isFrontCamera view:view]; + [_trtcCloud setLocalRenderParams:params]; + + [label setText:@"我"]; + } else { + [_trtcCloud setRemoteRenderParams:[nUser personid] streamType:i == 0 ? TRTCVideoStreamTypeBig : TRTCVideoStreamTypeSmall params:params]; + [_trtcCloud startRemoteView:[nUser personid] streamType:i == 0 ? TRTCVideoStreamTypeBig : TRTCVideoStreamTypeSmall view:view]; + + if([nUser displayName] == nil || [nUser displayName].length == 0) { + [label setText:[NSString stringWithFormat:@"%@%@",[nUser personid], [nUser isShareUser] ? @"的屏幕分享" : @""]]; + } else { + [label setText:[NSString stringWithFormat:@"%@%@",[nUser displayName], [nUser isShareUser] ? 
@"的屏幕分享" : @""]]; + } + } + [label setHidden:NO]; + [view setHidden:NO]; + } else { + [view setHidden:YES]; + [label setHidden:YES]; + } + } + if( [self viewUserSet].count > i ){ + [[self viewUserSet] setObject:nUser atIndex:i]; + } + + } +} + +- (void) onError:(TXLiteAVError)errCode errMsg:(NSString *)errMsg extInfo:(NSDictionary *)extInfo{ + NSLog(@"TRTC - onError::sdk callback onError code: %d,msg: %@,extInfo: %@",errCode,errMsg,extInfo); + dispatch_async(dispatch_get_main_queue(), ^{ + [self.view makeToast: [[NSString alloc] initWithFormat:@"%@[%d]",errMsg,errCode]]; + }); +// [self.trtcCloud exitRoom]; +// [TRTCCloud destroySharedIntance]; +// [self dismissViewControllerAnimated:YES completion:nil]; +} + +- (void) onExitRoom:(NSInteger)reason{ + if (reason == 2) { + dispatch_async(dispatch_get_main_queue(), ^{ + [self.view makeToast: @"远程协助已结束"]; + }); + } + [self dismissViewControllerAnimated:YES completion:nil]; +} + +@end diff --git a/src/ios/VideoCallingViewController.xib b/src/ios/VideoCallingViewController.xib new file mode 100755 index 0000000..02ff771 --- /dev/null +++ b/src/ios/VideoCallingViewController.xib @@ -0,0 +1,199 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/ios/libs/.gitkeep b/src/ios/libs/.gitkeep deleted file mode 100644 index f2b04b1..0000000 --- a/src/ios/libs/.gitkeep +++ /dev/null @@ -1,4 +0,0 @@ -# AVSDK -# BeautySDK -# ILiveSDK -# IMSDK