/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_INFEED_THUNK_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_INFEED_THUNK_H_

#include <vector>

#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"

namespace xla {
namespace gpu {

// A thunk that infeeds data. The data must already be resident on the
// device; this thunk performs an intra-device copy from that location to
// the buffer allocated for the infeed op.
class InfeedThunk : public Thunk {
 public:
  // Constructs an InfeedThunk that copies data from the on-device infeed
  // queue into the buffers in the given shape tree.
  InfeedThunk(ThunkInfo thunk_info, std::vector<ShapedSlice> dest_slices);

  InfeedThunk(const InfeedThunk&) = delete;
  InfeedThunk& operator=(const InfeedThunk&) = delete;

  Status ExecuteOnStream(const ExecuteParams& params) override;

 private:
  const std::vector<ShapedSlice> dest_slices_;
};

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_INFEED_THUNK_H_
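
// Illustrative usage sketch (comment only, not compiled): how an IR emitter
// might build an InfeedThunk for a single destination buffer. The names
// `thunk_info`, `slice`, and `shape` stand in for values produced elsewhere
// in the compiler, and the sketch assumes ShapedSlice is an aggregate of a
// buffer slice and its shape; treat those details as assumptions rather than
// guarantees of this header.
//
//   std::vector<ShapedSlice> dest_slices;
//   dest_slices.push_back(ShapedSlice{slice, shape});
//   auto thunk = absl::make_unique<InfeedThunk>(thunk_info,
//                                               std::move(dest_slices));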