/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/xla/cpu_function_runtime.h"

#include <algorithm>
#include <cstdint>
#include <iterator>
#include <vector>

#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/platform/test.h"

namespace tensorflow {
namespace {

using ::xla::cpu_function_runtime::BufferInfo;

TEST(XlaCompiledCpuFunctionTest, AlignmentValue) {
  // We've chosen 64-byte alignment for the tfcompile runtime to mimic the
  // regular tensorflow allocator, which was chosen to play nicely with Eigen.
  // The tfcompile runtime also has a requirement that comes from the
  // XLA-generated code, on the relation:
  //   buffer_size >= 16 ? 2 * sizeof(void*) : 8
  // So any value that we choose must abide by that constraint as well.
  EXPECT_EQ(xla::cpu_function_runtime::Align(), Allocator::kAllocatorAlignment);
  EXPECT_LE(xla::cpu_function_runtime::MinAlign(),
            Allocator::kAllocatorAlignment);
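  // Illustrative sanity check, assuming the relation above: the XLA-generated
  // code never requires more than 2 * sizeof(void*) bytes of alignment, so
  // the chosen alignment must be at least that large.
  EXPECT_GE(xla::cpu_function_runtime::Align(), 2 * sizeof(void*));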
}

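// Builds a BufferInfo for each entry of `sizes`: a size of -1 marks a slot
// that needs no allocation (modeled here with a small dummy on-stack buffer),
// and any other size becomes a temp buffer of that many bytes.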
std::vector<BufferInfo> SizesToBufferInfos(const intptr_t* sizes, size_t n) {
  std::vector<BufferInfo> buffer_infos;
  std::transform(sizes, sizes + n, std::back_inserter(buffer_infos),
                 [&](intptr_t size) {
                   if (size == -1) {
                     // Use a dummy on-stack buffer allocation to indicate that
                     // the current slot does not need an allocation.
                     int64_t on_stack_buffer_size = 4;
                     return BufferInfo::MakeOnStackBuffer(on_stack_buffer_size);
                   }
                   return BufferInfo::MakeTempBuffer(size);
                 });
  return buffer_infos;
}

// Simple wrappers to make writing tests more ergonomic.

size_t AlignedBufferBytesFromSizes(const intptr_t* sizes, size_t n) {
  std::vector<BufferInfo> buffer_infos = SizesToBufferInfos(sizes, n);
  return AlignedBufferBytes(buffer_infos.data(), n,
                            /*allocate_entry_params=*/false);
}

void* MallocContiguousBuffersFromSizes(const intptr_t* sizes, size_t n,
                                       void** bufs, bool annotate_initialized) {
  std::vector<BufferInfo> buffer_infos = SizesToBufferInfos(sizes, n);
  return MallocContiguousBuffers(buffer_infos.data(), n,
                                 /*allocate_entry_params=*/false, bufs,
                                 annotate_initialized);
}

TEST(XlaCompiledCpuFunctionTest, AlignedBufferBytes) {
  EXPECT_EQ(AlignedBufferBytesFromSizes(nullptr, 0), 0);

  static constexpr intptr_t sizesA[1] = {-1};
  EXPECT_EQ(AlignedBufferBytesFromSizes(sizesA, 1), 0);

  static constexpr intptr_t sizesB[1] = {3};
  EXPECT_EQ(AlignedBufferBytesFromSizes(sizesB, 1), 64);

  static constexpr intptr_t sizesC[1] = {32};
  EXPECT_EQ(AlignedBufferBytesFromSizes(sizesC, 1), 64);

  static constexpr intptr_t sizesD[7] = {1, -1, 32, -1, 64, 2, 3};
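  // Each allocated buffer rounds up to the 64-byte alignment, so the five
  // non-negative sizes contribute 5 * 64 = 320 bytes, while the -1 (on-stack)
  // slots contribute nothing.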
  EXPECT_EQ(AlignedBufferBytesFromSizes(sizesD, 7), 320);
}

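// Returns `base` advanced by `delta` bytes; used below to check the expected
// offset of each buffer within the contiguous allocation.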
void* add_ptr(void* base, uintptr_t delta) {
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(base) + delta);
}

// To test MallocContiguousBuffers and FreeContiguous, we just check for
// expected nullptrs, and write to each byte of allocated memory. We rely on
// the leak checker to tell us if there's an inconsistency between malloc and
// free. We also check the contiguous property.
TEST(XlaCompiledCpuFunctionTest, MallocFreeContiguousBuffers) {
  // Test empty sizes.
  void* base = MallocContiguousBuffersFromSizes(nullptr, 0, nullptr, false);
  EXPECT_EQ(base, nullptr);
  xla::cpu_function_runtime::FreeContiguous(base);

  // Test non-empty sizes with 0 sum.
  static constexpr intptr_t sizesA[1] = {-1};
  void* bufA[1];
  base = MallocContiguousBuffersFromSizes(sizesA, 1, bufA, false);
  EXPECT_EQ(base, nullptr);
  EXPECT_EQ(bufA[0], nullptr);
  xla::cpu_function_runtime::FreeContiguous(base);

  // Test non-empty sizes with non-0 sum.
  static constexpr intptr_t sizesB[1] = {3};
  void* bufB[1];
  base = MallocContiguousBuffersFromSizes(sizesB, 1, bufB, false);
  EXPECT_NE(base, nullptr);
  EXPECT_EQ(bufB[0], add_ptr(base, 0));
  char* bufB0_bytes = static_cast<char*>(bufB[0]);
  bufB0_bytes[0] = 'A';
  bufB0_bytes[1] = 'B';
  bufB0_bytes[2] = 'C';
  xla::cpu_function_runtime::FreeContiguous(base);

  // Test non-empty sizes with non-0 sum, and annotate_initialized.
  static constexpr intptr_t sizesC[1] = {3};
  void* bufC[1];
  base = MallocContiguousBuffersFromSizes(sizesC, 1, bufC, true);
  EXPECT_NE(base, nullptr);
  EXPECT_EQ(bufC[0], add_ptr(base, 0));
  char* bufC0_bytes = static_cast<char*>(bufC[0]);
  bufC0_bytes[0] = 'A';
  bufC0_bytes[1] = 'B';
  bufC0_bytes[2] = 'C';
  xla::cpu_function_runtime::FreeContiguous(base);

  // Test mixed sizes.
  static constexpr intptr_t sizesD[7] = {1, -1, 32, -1, 64, 2, 3};
  void* bufD[7];
  base = MallocContiguousBuffersFromSizes(sizesD, 7, bufD, false);
  EXPECT_NE(base, nullptr);
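  // With 64-byte alignment, consecutive allocated buffers start at 64-byte
  // offsets within the contiguous block, and the -1 slots come back as
  // nullptr.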
  EXPECT_EQ(bufD[0], add_ptr(base, 0));
  EXPECT_EQ(bufD[1], nullptr);
  EXPECT_EQ(bufD[2], add_ptr(base, 64));
  EXPECT_EQ(bufD[3], nullptr);
  EXPECT_EQ(bufD[4], add_ptr(base, 128));
  EXPECT_EQ(bufD[5], add_ptr(base, 192));
  EXPECT_EQ(bufD[6], add_ptr(base, 256));
  for (int i = 0; i < 7; ++i) {
    const intptr_t size = sizesD[i];
    if (size != -1) {
      char* bufD_bytes = static_cast<char*>(bufD[i]);
      for (size_t j = 0; j < size; ++j) {
        bufD_bytes[j] = 'A' + j;
      }
    }
  }
  xla::cpu_function_runtime::FreeContiguous(base);
}

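// Checks that encoding a BufferInfo and reconstructing it from that encoding
// yields a value equal to the original.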
void CheckRoundTripIsOk(const BufferInfo& buffer_info) {
  BufferInfo round_trip(buffer_info.Encode());
  ASSERT_EQ(round_trip, buffer_info);
}

TEST(XlaCompiledCpuFunctionTest, BufferInfoTest) {
  CheckRoundTripIsOk(BufferInfo::MakeTempBuffer(0));
  CheckRoundTripIsOk(BufferInfo::MakeTempBuffer(4));
  CheckRoundTripIsOk(BufferInfo::MakeOnStackBuffer(0));
  CheckRoundTripIsOk(BufferInfo::MakeOnStackBuffer(4));
  CheckRoundTripIsOk(BufferInfo::MakeConstant(0));
  CheckRoundTripIsOk(BufferInfo::MakeConstant(4));
  CheckRoundTripIsOk(
      BufferInfo::MakeEntryParameter(/*size=*/0, /*param_number=*/4));
  CheckRoundTripIsOk(
      BufferInfo::MakeEntryParameter(/*size=*/4, /*param_number=*/0));
}

}  // namespace
}  // namespace tensorflow