/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_DELEGATES_GPU_CL_PROGRAM_CACHE_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_CL_PROGRAM_CACHE_H_

#include <cstdint>
#include <string>
#include <vector>

#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_context.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_device.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_kernel.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_program.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"

namespace tflite {
namespace gpu {
namespace cl {

class ProgramCache {
 public:
  ProgramCache() = default;

  // Move-only.
  ProgramCache(ProgramCache&& program_cache);
  ProgramCache& operator=(ProgramCache&& program_cache);
  ProgramCache(const ProgramCache&) = delete;
  ProgramCache& operator=(const ProgramCache&) = delete;

  // Compiles `code` into a program (or reuses a previously cached one) and
  // returns the kernel named `function_name`. If `kernel_fingerprint` is
  // non-null, it receives the fingerprint identifying the cached program.
  absl::Status GetOrCreateCLKernel(
      const std::string& code, const std::string& function_name,
      const std::vector<CompilerOptions>& compiler_options,
      const CLContext& context, const CLDevice& device, CLKernel* result,
      uint64_t* kernel_fingerprint = nullptr);

  absl::Status GetOrCreateCLKernel(const std::string& code,
                                   const std::string& function_name,
                                   const CLContext& context,
                                   const CLDevice& device, CLKernel* result,
                                   uint64_t* kernel_fingerprint = nullptr);

  // Returns a kernel from the program previously cached under `fingerprint`.
  absl::Status GetKernel(uint64_t fingerprint,
                         const std::string& function_name,
                         CLKernel* result) const;

  // Adds/retrieves a compiled program binary keyed by `fingerprint`.
  absl::Status AddProgramBinary(const CLContext& context,
                                const CLDevice& device, uint64_t fingerprint,
                                absl::Span<const uint8_t> binary);
  absl::Status GetProgramBinary(uint64_t fingerprint,
                                std::vector<uint8_t>* program_binary) const;

  // Loads/stores the whole cache as a serialized blob of program binaries.
  absl::Status AddSerializedCache(const CLContext& context,
                                  const CLDevice& device,
                                  absl::Span<const uint8_t> serialized_cache);
  absl::Status GetSerializedCache(const CLDevice& device,
                                  std::vector<uint8_t>* serialized_cache) const;

 private:
  struct ProgramDescriptor {
    ProgramDescriptor() = default;
    ProgramDescriptor(const std::string& code,
                      const std::string& compiler_options);
    explicit ProgramDescriptor(uint64_t fingerprint);

    uint64_t fingerprint;
  };
  struct ProgramDescriptorHasher {
    std::size_t operator()(const ProgramDescriptor& k) const {
      return std::hash<uint64_t>()(k.fingerprint);
    }
  };
  struct ProgramDescriptorEqual {
    bool operator()(const ProgramDescriptor& a,
                    const ProgramDescriptor& b) const {
      return a.fingerprint == b.fingerprint;
    }
  };

  absl::flat_hash_map<ProgramDescriptor, CLProgram, ProgramDescriptorHasher,
                      ProgramDescriptorEqual>
      programs_;
};

}  // namespace cl
}  // namespace gpu
}  // namespace tflite
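
// Illustrative usage sketch (not part of the declarations above): compile a
// kernel through the cache, look it up again by fingerprint, and serialize
// the cache for a later run. The environment accessors `env.context()` /
// `env.device()`, the constant `kKernelSource`, and the function name
// "main_function" are hypothetical placeholders, and RETURN_IF_ERROR is
// assumed to be the status-propagation macro used elsewhere in this delegate.
//
//   tflite::gpu::cl::ProgramCache cache;
//   tflite::gpu::cl::CLKernel kernel;
//   uint64_t fingerprint = 0;
//   RETURN_IF_ERROR(cache.GetOrCreateCLKernel(kKernelSource, "main_function",
//                                             env.context(), env.device(),
//                                             &kernel, &fingerprint));
//
//   // Later, the same program can be retrieved by its fingerprint without
//   // recompiling the source.
//   RETURN_IF_ERROR(cache.GetKernel(fingerprint, "main_function", &kernel));
//
//   // The accumulated program binaries can be serialized and restored on a
//   // subsequent run to skip compilation.
//   std::vector<uint8_t> serialized;
//   RETURN_IF_ERROR(cache.GetSerializedCache(env.device(), &serialized));
//   // ... persist `serialized`, then later:
//   RETURN_IF_ERROR(cache.AddSerializedCache(env.context(), env.device(),
//                                            absl::MakeConstSpan(serialized)));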

#endif  // TENSORFLOW_LITE_DELEGATES_GPU_CL_PROGRAM_CACHE_H_