// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/pad.h>

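// Pads each of rows rows along the channel axis: writes pre_padding bytes of
// fill_pattern, copies channels bytes from the input row, then writes
// post_padding bytes of fill_pattern. XNN_OOB_READS marks that the kernel may
// read, but never store, past the end of the input row.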
void xnn_xx_pad_ukernel__neon(
    size_t rows,
    size_t channels,
    size_t pre_padding,
    size_t post_padding,
    const void* input,
    size_t input_stride,
    void* output,
    size_t output_stride,
    uint32_t fill_pattern) XNN_OOB_READS
{
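  // Strides are in bytes; the increments are what remains of each stride
  // after one row has been consumed (input) or produced (output).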
  const size_t input_increment = input_stride - channels;
  const size_t output_increment = output_stride - (pre_padding + channels + post_padding);

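  // Broadcast the 32-bit fill pattern across a 128-bit register so that
  // stores of any width write the same repeating byte pattern.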
  const uint8x16_t vfill_pattern = vreinterpretq_u8_u32(vdupq_n_u32(fill_pattern));
  do {
    // Pre-pad input channels.
    size_t l = pre_padding;
    if XNN_LIKELY(l != 0) {
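      // Fill 16 bytes per iteration, then handle the 8/4/2/1-byte remainder
      // by testing the individual bits of l.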
      for (; l >= 16 * sizeof(uint8_t); l -= 16 * sizeof(uint8_t)) {
        vst1q_u8(output, vfill_pattern); output = (uint8_t*) output + 16;
      }
      if (l & (8 * sizeof(uint8_t))) {
        vst1_u8(output, vget_low_u8(vfill_pattern)); output = (uint8_t*) output + 8;
      }
      if (l & (4 * sizeof(uint8_t))) {
        vst1q_lane_u32(output, vreinterpretq_u32_u8(vfill_pattern), 0); output = (uint8_t*) output + 4;
      }
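      // Rotate the low pattern bytes as they are consumed, so the trailing
      // 1-byte store continues the pattern where the 2-byte store left off.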
      uint8x8_t vfill_subpattern = vget_low_u8(vfill_pattern);
      if (l & (2 * sizeof(uint8_t))) {
        vst1_lane_u16(output, vreinterpret_u16_u8(vfill_subpattern), 0); output = (uint8_t*) output + 2;
        vfill_subpattern = vext_u8(vfill_subpattern, vfill_subpattern, 2);
      }
      if (l & (1 * sizeof(uint8_t))) {
        vst1_lane_u8(output, vfill_subpattern, 0); output = (uint8_t*) output + 1;
      }
    }

    // Copy input channels.
    size_t c = channels;
    for (; c >= 16 * sizeof(uint8_t); c -= 16 * sizeof(uint8_t)) {
      const uint8x16_t vdata = vld1q_u8(input); input = (const uint8_t*) input + 16;
      vst1q_u8(output, vdata); output = (uint8_t*) output + 16;
    }
    if XNN_UNLIKELY(c != 0) {
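      // Load a full 16-byte vector even though only c < 16 bytes remain; the
      // excess is the intentional out-of-bounds read declared by
      // XNN_OOB_READS. Only c bytes are stored below.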
      uint8x16_t vdata = vld1q_u8(input); input = (const void*) ((uintptr_t) input + c);

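      // Store the remainder from widest to narrowest, advancing past the
      // already-stored bytes with vget_high_u8/vext_u8.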
      uint8x8_t vsubdata = vget_low_u8(vdata);
      if (c & (8 * sizeof(uint8_t))) {
        vst1_u8(output, vsubdata); output = (uint8_t*) output + 8;
        vsubdata = vget_high_u8(vdata);
      }
      if (c & (4 * sizeof(uint8_t))) {
        vst1_lane_u32(output, vreinterpret_u32_u8(vsubdata), 0); output = (uint8_t*) output + 4;
        vsubdata = vext_u8(vsubdata, vsubdata, 4);
      }
      if (c & (2 * sizeof(uint8_t))) {
        vst1_lane_u16(output, vreinterpret_u16_u8(vsubdata), 0); output = (uint8_t*) output + 2;
        vsubdata = vext_u8(vsubdata, vsubdata, 2);
      }
      if (c & (1 * sizeof(uint8_t))) {
        vst1_lane_u8(output, vsubdata, 0); output = (uint8_t*) output + 1;
      }
    }

    // Post-pad input channels (same store sequence as the pre-padding above).
    size_t r = post_padding;
    if XNN_LIKELY(r != 0) {
      for (; r >= 16 * sizeof(uint8_t); r -= 16 * sizeof(uint8_t)) {
        vst1q_u8(output, vfill_pattern); output = (uint8_t*) output + 16;
      }
      if (r & (8 * sizeof(uint8_t))) {
        vst1_u8(output, vget_low_u8(vfill_pattern)); output = (uint8_t*) output + 8;
      }
      if (r & (4 * sizeof(uint8_t))) {
        vst1q_lane_u32(output, vreinterpretq_u32_u8(vfill_pattern), 0); output = (uint8_t*) output + 4;
      }
      uint8x8_t vfill_subpattern = vget_low_u8(vfill_pattern);
      if (r & (2 * sizeof(uint8_t))) {
        vst1_lane_u16(output, vreinterpret_u16_u8(vfill_subpattern), 0); output = (uint8_t*) output + 2;
        vfill_subpattern = vext_u8(vfill_subpattern, vfill_subpattern, 2);
      }
      if (r & (1 * sizeof(uint8_t))) {
        vst1_lane_u8(output, vfill_subpattern, 0); output = (uint8_t*) output + 1;
      }
    }

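    // Advance both pointers to the start of the next row.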
    input = (const void*) ((uintptr_t) input + input_increment);
    output = (void*) ((uintptr_t) output + output_increment);
  } while (--rows != 0);
}
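
// Illustrative call, not part of the kernel: pads two 5-byte rows with
// 3 bytes of zeros on the left and 2 on the right, producing two 10-byte
// rows. Assumes tightly packed rows; note the input buffer should have
// readable slack past its end, since the kernel may over-read the remainder.
//
//   uint8_t in[2 * 5];                 // source rows
//   uint8_t out[2 * (3 + 5 + 2)];      // padded destination rows
//   xnn_xx_pad_ukernel__neon(
//       /*rows=*/2, /*channels=*/5, /*pre_padding=*/3, /*post_padding=*/2,
//       in, /*input_stride=*/5, out, /*output_stride=*/10,
//       /*fill_pattern=*/0);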