/aosp_15_r20/external/armnn/python/pyarmnn/examples/tests/

  test_mfcc.py
    41: def audio_data(test_data_folder, file, audio_cap_params):  [function]
    49: def test_audio_file(audio_data, test_data_folder, file, audio_cap_params):  [argument]
    114: def test_mfcc_compute_first_frame(audio_data, mfcc_test_params, test_out, file, audio_cap_params):  [argument]
    175: def test_feat_extraction_full_sized_input(audio_data, …  [argument]
/aosp_15_r20/external/armnn/python/pyarmnn/examples/common/

  mfcc.py
    78: def spectrum_calc(self, audio_data):  [argument]
    86: def mfcc_compute(self, audio_data):  [argument]
    201: def _get_features(self, features, mfcc_instance, audio_data):  [argument]
    214: def extract_features(self, audio_data):  [argument]

  utils.py
    45: def prepare_input_data(audio_data, input_data_type, input_quant_scale, input_quant_offset, mfcc_pre…  [argument]
/aosp_15_r20/external/webrtc/audio/

  audio_state_unittest.cc
    146: std::vector<int16_t> audio_data(samples_per_channel * num_channels, 0);  [local, in Create10msTestData()]
    223: auto audio_data = Create10msTestData(kSampleRate, kNumChannels);  [local, in TEST_P()]
    282: auto audio_data = Create10msTestData(kSampleRate, kNumChannels);  [local, in TEST_P()]
    322: auto audio_data = Create10msTestData(kSampleRate, kNumChannels);  [local, in TEST_P()]

  audio_transport_impl.cc
    109: const void* audio_data,  [in RecordedDataIsAvailable()]
    128: const void* audio_data,  [in RecordedDataIsAvailable()]
    244: void* audio_data,  [in PullRenderData()]
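The audio_state_unittest.cc matches above build a zero-initialized, interleaved int16 buffer and return it from Create10msTestData(). A minimal sketch of that pattern, assuming interleaved PCM16; the 440 Hz sine fill is illustrative, not the actual helper's contents:

#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch: build a 10 ms interleaved int16 buffer, as in the
// audio_state_unittest.cc matches above. The sine fill is illustrative.
std::vector<int16_t> Create10msTestData(int sample_rate_hz,
                                        size_t num_channels) {
  const size_t samples_per_channel =
      static_cast<size_t>(sample_rate_hz / 100);  // 10 ms of audio
  std::vector<int16_t> audio_data(samples_per_channel * num_channels, 0);
  constexpr double kPi = 3.14159265358979323846;
  for (size_t i = 0; i < samples_per_channel; ++i) {
    const double phase = 2.0 * kPi * 440.0 * static_cast<double>(i) / sample_rate_hz;
    const int16_t sample = static_cast<int16_t>(32000.0 * std::sin(phase));
    for (size_t ch = 0; ch < num_channels; ++ch) {
      audio_data[i * num_channels + ch] = sample;  // interleaved layout
    }
  }
  return audio_data;
}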
/aosp_15_r20/packages/modules/Bluetooth/floss/pandora/floss/

  audio_utils.py
    147: def generate_playback_file(audio_data):  [argument]
    171: def generate_playback_file_from_binary_data(audio_data):  [argument]
/aosp_15_r20/external/webrtc/api/

  media_stream_interface.h
    201: virtual void OnData(const void* audio_data,  [in OnData()]
    213: virtual void OnData(const void* audio_data,  [in OnData()]
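The two OnData() matches above are overloads of a virtual sink method in media_stream_interface.h. Only the audio_data parameter is confirmed by this listing; the remaining parameters in the sketch below follow WebRTC's AudioTrackSinkInterface and are an assumption to check against the header. The PeakSink class is purely illustrative:

#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Minimal stand-in for the sink interface whose OnData() overloads are
// matched above; parameters after audio_data are assumed, not confirmed.
class AudioSink {
 public:
  virtual ~AudioSink() = default;
  virtual void OnData(const void* audio_data, int bits_per_sample,
                      int sample_rate, size_t number_of_channels,
                      size_t number_of_frames) = 0;
};

// Illustrative sink: tracks the peak absolute sample of 16-bit PCM input.
class PeakSink : public AudioSink {
 public:
  void OnData(const void* audio_data, int bits_per_sample, int /*sample_rate*/,
              size_t number_of_channels, size_t number_of_frames) override {
    if (bits_per_sample != 16) return;  // this sketch handles PCM16 only
    const int16_t* samples = static_cast<const int16_t*>(audio_data);
    for (size_t i = 0; i < number_of_frames * number_of_channels; ++i) {
      const int magnitude = std::abs(static_cast<int>(samples[i]));
      if (magnitude > peak_) peak_ = magnitude;
    }
  }
  int peak() const { return peak_; }

 private:
  int peak_ = 0;
};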
/aosp_15_r20/external/webrtc/sdk/android/src/jni/audio_device/

  aaudio_wrapper.cc
    103: void* audio_data,  [in DataCallback()]
    270: void AAudioWrapper::ClearInputStream(void* audio_data, int32_t num_frames) {  [in ClearInputStream()]

  aaudio_recorder.cc
    160: void* audio_data,  [in OnDataCallback()]

  aaudio_player.cc
    167: aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,  [in OnDataCallback()]
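The OnDataCallback() matches above (and in the modules/audio_device/android/ copies below) adapt the NDK AAudio data callback, which hands the application a raw audio_data buffer on every burst. A minimal sketch of that callback shape, assuming interleaved 16-bit PCM; the silence fill is illustrative:

#include <aaudio/AAudio.h>
#include <cstdint>
#include <cstring>

// Sketch of an AAudio data callback: fill audio_data for playback (or read
// it for capture) and tell AAudio whether to keep the stream running.
aaudio_data_callback_result_t DataCallback(AAudioStream* stream,
                                           void* /*user_data*/,
                                           void* audio_data,
                                           int32_t num_frames) {
  const int32_t channels = AAudioStream_getChannelCount(stream);
  // Play back silence: zero num_frames of interleaved 16-bit PCM.
  std::memset(audio_data, 0,
              static_cast<size_t>(num_frames) * channels * sizeof(int16_t));
  return AAUDIO_CALLBACK_RESULT_CONTINUE;  // keep the stream running
}

Such a callback is registered on the stream builder with AAudioStreamBuilder_setDataCallback() before the stream is opened.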
/aosp_15_r20/external/webrtc/modules/audio_device/android/

  aaudio_wrapper.cc
    102: void* audio_data,  [in DataCallback()]
    270: void AAudioWrapper::ClearInputStream(void* audio_data, int32_t num_frames) {  [in ClearInputStream()]

  aaudio_player.cc
    151: aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,  [in OnDataCallback()]

  aaudio_recorder.cc
    150: void* audio_data,  [in OnDataCallback()]
/aosp_15_r20/external/webrtc/modules/audio_processing/agc2/

  clipping_predictor_unittest.cc
    62: std::vector<float> audio_data(num_channels * kSamplesPerChannel, 0.0f);  [local, in AnalyzeNonZeroCrestFactorAudio()]
    121: std::vector<float> audio_data(num_channels * kSamplesPerChannel, 0.f);  [local, in AnalyzeZeroCrestFactorAudio()]
/aosp_15_r20/external/tensorflow/tensorflow/lite/experimental/microfrontend/lib/

  frontend_memmap_main.c
    32: int16_t* audio_data = malloc(audio_file_size * sizeof(int16_t));  [local, in main()]

  frontend_main.c
    42: int16_t* audio_data = malloc(audio_file_size * sizeof(int16_t));  [local, in main()]
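Both main() functions above size a heap buffer from the input file length and read raw 16-bit samples into it. A sketch of that pattern with error checks added, written as C++ for consistency with the other sketches here; the file name input.pcm is hypothetical:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Sketch: size audio_data from the file length, then read raw PCM16 into it.
int main() {
  std::FILE* f = std::fopen("input.pcm", "rb");  // hypothetical input file
  if (f == nullptr) return 1;
  std::fseek(f, 0, SEEK_END);
  const long audio_file_size =
      std::ftell(f) / static_cast<long>(sizeof(int16_t));  // sample count
  std::fseek(f, 0, SEEK_SET);
  int16_t* audio_data =
      static_cast<int16_t*>(std::malloc(audio_file_size * sizeof(int16_t)));
  if (audio_data == nullptr) { std::fclose(f); return 1; }
  const size_t samples_read =
      std::fread(audio_data, sizeof(int16_t), audio_file_size, f);
  std::fclose(f);
  // ... run the frontend over samples_read samples of audio_data here ...
  (void)samples_read;
  std::free(audio_data);
  return 0;
}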
/aosp_15_r20/external/armnn/python/pyarmnn/examples/keyword_spotting/

  run_audio_classification.py
    70: def recognise_speech(audio_data, network, preprocessor, threshold):  [argument]
/aosp_15_r20/external/autotest/server/cros/bluetooth/

  bluetooth_device.py
    1129: def start_capturing_audio_subprocess(self, audio_data, recording_device):  [argument]
    1152: def start_playing_audio_subprocess(self, audio_data, pin_device=None):  [argument]
    1175: def play_audio(self, audio_data):  [argument]
/aosp_15_r20/cts/apps/CtsVerifier/jni/megaaudio/recorder/

  NativeAudioSink.cpp
    50: jfloatArray audio_data, jint num_frames, jint num_chans) {  [in Java_org_hyphonate_megaaudio_recorder_NativeAudioSink_pushN()]
/aosp_15_r20/external/armnn/python/pyarmnn/examples/speech_recognition/

  wav2letter_mfcc.py
    20: def spectrum_calc(self, audio_data):  [argument]
/aosp_15_r20/cts/apps/CtsVerifier/jni/megaaudio/player/

  NativeAudioSource.cpp
    64: jfloatArray audio_data, jint num_frames, jint num_chans) {  [in Java_org_hyphonate_megaaudio_player_NativeAudioSource_pullN()]
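The pushN()/pullN() entries above move audio between Java float[] arrays and native code. A sketch of the pull side in plain JNI; the Java class name and the 440 Hz fill are illustrative, and release mode 0 (copy back, then unpin) is the standard JNI convention:

#include <jni.h>
#include <cmath>

// Sketch: pin the Java float[] (audio_data), write num_frames * num_chans
// interleaved samples, then copy the data back to the Java side.
extern "C" JNIEXPORT void JNICALL
Java_com_example_NativeAudioSource_pullN(JNIEnv* env, jobject /*thiz*/,
                                         jfloatArray audio_data,
                                         jint num_frames, jint num_chans) {
  jfloat* buffer = env->GetFloatArrayElements(audio_data, nullptr);
  if (buffer == nullptr) return;  // pinning failed
  const float kTwoPi = 6.2831853f;
  for (jint i = 0; i < num_frames; ++i) {
    const float sample = std::sin(kTwoPi * 440.0f * i / 48000.0f);
    for (jint ch = 0; ch < num_chans; ++ch) {
      buffer[i * num_chans + ch] = sample;  // interleaved layout
    }
  }
  env->ReleaseFloatArrayElements(audio_data, buffer, 0);  // copy back, unpin
}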
/aosp_15_r20/external/googleapis/google/assistant/embedded/v1alpha1/

  embedded_assistant.proto
    172: bytes audio_data = 1;  [field]
/aosp_15_r20/external/tensorflow/tensorflow/lite/experimental/microfrontend/ops/

  audio_microfrontend_op.cc
    206: auto audio_data =  [local, in Compute()]
/aosp_15_r20/external/webrtc/modules/audio_device/win/

  core_audio_output_win.cc
    318: uint8_t* audio_data;  [local, in OnDataCallback()]

  core_audio_input_win.cc
    297: uint8_t* audio_data;  [local, in OnDataCallback()]
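In both files above, audio_data is a raw pointer lent by WASAPI inside OnDataCallback(). On the render side the pattern is GetBuffer() followed by ReleaseBuffer(); a trimmed sketch with device setup omitted and error handling reduced to bools:

#include <windows.h>
#include <audioclient.h>
#include <cstdint>

// Sketch of the render-side step where audio_data is obtained: WASAPI lends
// a raw buffer via GetBuffer(), the caller fills it (or marks it silent),
// and ReleaseBuffer() hands it back to the audio engine.
bool RenderSilence(IAudioRenderClient* render_client, uint32_t num_frames) {
  uint8_t* audio_data = nullptr;
  if (FAILED(render_client->GetBuffer(
          num_frames, reinterpret_cast<BYTE**>(&audio_data)))) {
    return false;
  }
  // AUDCLNT_BUFFERFLAGS_SILENT tells WASAPI to treat the buffer as silence,
  // so nothing needs to be written into audio_data.
  return SUCCEEDED(
      render_client->ReleaseBuffer(num_frames, AUDCLNT_BUFFERFLAGS_SILENT));
}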