//
// Copyright (c) 2017 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "common.h"

const char *byte_manipulation_kernels[] = {
  // Each device writes its id into the bytes that it "owns"; ownership is round robin (global_id % num_id).
  // num_id is the number of SVM devices in the system plus one (for the host code).
  // id is the index (id) of the device that this kernel is executing on.
  // For example, with 2 SVM devices and the host, the buffer should look like this after each device and the host write their ids:
  // 0, 1, 2, 0, 1, 2, 0, 1, 2...
  "__kernel void write_owned_locations(__global char* a, uint num_id, uint id)\n"
  "{\n"
  "    size_t i = get_global_id(0);\n"
  "    int owner = i % num_id;\n"
  "    if(id == owner)\n"
  "        a[i] = id;\n"  // modify the location only if it belongs to this device: write the device id
  "}\n"

  // Verify that a device can see the byte-sized updates from the other devices: sum up the ids and compare against the expected value.
  // Note: this must be called with a reduced NDRange so that the neighbor accesses don't go past the end of the buffer.
  // For example, if there are two SVM devices and the host (3 writers total) the buffer should look like this:
  // 0,1,2,0,1,2...
  // and the expected sum at each point is 0+1+2 = 3, i.e. num_devices*(num_devices-1)/2, the Gauss sum of 0..num_devices-1.
  "__kernel void sum_neighbor_locations(__global char* a, uint num_devices, volatile __global uint* error_count)\n"
  "{\n"
  "    size_t i = get_global_id(0);\n"
  "    uint expected_sum = (num_devices * (num_devices - 1))/2;\n"
  "    uint sum = 0;\n"
  "    for(uint j=0; j<num_devices; j++) {\n"
  "        sum += a[i + j];\n" // add my neighbors to the right
  "    }\n"
  "    if(sum != expected_sum)\n"
  "        atomic_inc(error_count);\n"
  "}\n"
};
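
// For reference: the pattern the kernels and the host produce together can be
// modeled on the host as below. This is only an illustrative sketch (the
// helper model_expected_pattern is not part of the test); it shows that any
// num_id consecutive bytes hold each writer id exactly once, so each window
// sums to 0 + 1 + ... + (num_id - 1) = num_id * (num_id - 1) / 2.
static inline void model_expected_pattern(cl_char *buf, size_t n, cl_uint num_id)
{
  for(size_t i=0; i < n; i++)
    buf[i] = (cl_char)(i % num_id); // the owner of location i writes its own id
}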


int test_svm_byte_granularity(cl_device_id deviceID, cl_context c, cl_command_queue queue, int num_elements)
{
  clContextWrapper context;
  clProgramWrapper program;
  clKernelWrapper k1, k2;
  clCommandQueueWrapper queues[MAXQ];

  cl_uint     num_devices = 0;
  cl_int      err = CL_SUCCESS;

  err = create_cl_objects(deviceID, &byte_manipulation_kernels[0], &context, &program, &queues[0], &num_devices, CL_DEVICE_SVM_FINE_GRAIN_BUFFER);
  if(err == 1) return 0; // no devices capable of the requested SVM level, so don't execute, but count the test as passing.
  if(err < 0) return -1; // fail the test.

  cl_uint num_devices_plus_host = num_devices + 1;

  k1 = clCreateKernel(program, "write_owned_locations", &err);
  test_error(err, "clCreateKernel failed");
  k2 = clCreateKernel(program, "sum_neighbor_locations", &err);
  test_error(err, "clCreateKernel failed");

  cl_char *pA = (cl_char*) clSVMAlloc(context, CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER, sizeof(cl_char) * num_elements, 0);
  if(pA == NULL) {
    log_error("clSVMAlloc failed\n");
    return -1;
  }

  cl_uint **error_counts = (cl_uint**) malloc(sizeof(void*) * num_devices);

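  // Fine-grained SVM allocations are directly accessible from the host, so the
  // error counters and pA below are initialized with plain stores, with no
  // clEnqueueSVMMap/clEnqueueSVMUnmap round trip.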
  for(cl_uint i=0; i < num_devices; i++) {
    error_counts[i] = (cl_uint*) clSVMAlloc(context, CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER, sizeof(cl_uint), 0);
    *error_counts[i] = 0;
  }
  for(int i=0; i < num_elements; i++) pA[i] = -1;

  err |= clSetKernelArgSVMPointer(k1, 0, pA);
  err |= clSetKernelArg(k1, 1, sizeof(cl_uint), &num_devices_plus_host);
  test_error(err, "clSetKernelArg failed");

  // get all the devices going simultaneously
  size_t element_num = num_elements;
  for(cl_uint d=0; d < num_devices; d++)  // device ids start at 0; the host takes the id after the last device.
  {
    err = clSetKernelArg(k1, 2, sizeof(cl_uint), &d);
    test_error(err, "clSetKernelArg failed");
    err = clEnqueueNDRangeKernel(queues[d], k1, 1, NULL, &element_num, NULL, 0, NULL, NULL);
    test_error(err, "clEnqueueNDRangeKernel failed");
  }

  for(cl_uint d=0; d < num_devices; d++) clFlush(queues[d]);

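  // The flush above submits the kernels so the devices run while the host
  // writes the bytes it owns below. Every writer touches a disjoint set of
  // bytes, and fine-grained SVM requires that concurrent byte-sized writes do
  // not corrupt neighboring bytes, which is exactly what this test exercises.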
  cl_uint host_id = num_devices;  // host code will take the id above the devices.
  for(int i = (int)num_devices; i < num_elements; i += num_devices_plus_host) pA[i] = host_id;
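  // At this point every location has a writer: e.g. with 2 devices the host id
  // is 2 and the host wrote indices 2, 5, 8, ... (every i where
  // i % num_devices_plus_host == host_id), so once the kernels complete the
  // buffer reads 0, 1, 2, 0, 1, 2, ...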

  for(cl_uint id = 0; id < num_devices; id++) clFinish(queues[id]);

  // now check that each device can see the byte writes made by the other devices and the host.

  err |= clSetKernelArgSVMPointer(k2, 0, pA);
  err |= clSetKernelArg(k2, 1, sizeof(cl_uint), &num_devices_plus_host);
  test_error(err, "clSetKernelArg failed");

  // Reduce the NDRange so k2 doesn't read past the end of the buffer: each
  // work-item i reads a[i] .. a[i + num_devices_plus_host - 1], so the highest
  // index touched is (adjusted_num_elements - 1) + num_devices = num_elements - 1.
  size_t adjusted_num_elements = num_elements - num_devices;
  for(cl_uint id = 0; id < num_devices; id++)
  {
    err = clSetKernelArgSVMPointer(k2, 2, error_counts[id]);
    test_error(err, "clSetKernelArg failed");

    err = clEnqueueNDRangeKernel(queues[id], k2, 1, NULL, &adjusted_num_elements, NULL, 0, NULL, NULL);
    test_error(err, "clEnqueueNDRangeKernel failed");
  }

  for(cl_uint id = 0; id < num_devices; id++) clFinish(queues[id]);

  bool failed = false;

  // see if any of the devices found errors
  for(cl_uint i=0; i < num_devices; i++) {
    if(*error_counts[i] > 0)
      failed = true;
  }
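  // All writer ids 0..num_devices appear exactly once in every
  // num_devices_plus_host consecutive bytes, so each window sums to the Gauss
  // sum below; e.g. 0+1+2 = (3*2)/2 = 3 for two devices plus the host.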
  cl_uint expected = (num_devices_plus_host * (num_devices_plus_host - 1))/2;
  // check that the host can see the byte writes made by the devices.
  for(int i = 0; i < num_elements - (int)num_devices_plus_host; i++)
  {
    int sum = 0;
    for(cl_uint j=0; j < num_devices_plus_host; j++) sum += pA[i+j];
    if((cl_uint)sum != expected)
      failed = true;
  }

  clSVMFree(context, pA);
  for(cl_uint i=0; i < num_devices; i++) clSVMFree(context, error_counts[i]);
  free(error_counts); // also release the host-side array holding the SVM pointers

  if(failed)
    return -1;
  return 0;
}