1// This file is generated from a similarly-named Perl script in the BoringSSL
2// source tree. Do not edit by hand.
3
4#include <openssl/asm_base.h>
5
6#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
7.syntax	unified
8
9.arch	armv7-a
10.fpu	neon
11
12#if defined(__thumb2__)
13.thumb
14#else
15.code	32
16#endif
17
18.text
19
20.type	_vpaes_consts,%object
21.align	7	@ totally strategic alignment
22_vpaes_consts:
23.Lk_mc_forward:@ mc_forward
24.quad	0x0407060500030201, 0x0C0F0E0D080B0A09
25.quad	0x080B0A0904070605, 0x000302010C0F0E0D
26.quad	0x0C0F0E0D080B0A09, 0x0407060500030201
27.quad	0x000302010C0F0E0D, 0x080B0A0904070605
28.Lk_mc_backward:@ mc_backward
29.quad	0x0605040702010003, 0x0E0D0C0F0A09080B
30.quad	0x020100030E0D0C0F, 0x0A09080B06050407
31.quad	0x0E0D0C0F0A09080B, 0x0605040702010003
32.quad	0x0A09080B06050407, 0x020100030E0D0C0F
33.Lk_sr:@ sr
34.quad	0x0706050403020100, 0x0F0E0D0C0B0A0908
35.quad	0x030E09040F0A0500, 0x0B06010C07020D08
36.quad	0x0F060D040B020900, 0x070E050C030A0108
37.quad	0x0B0E0104070A0D00, 0x0306090C0F020508
38
39@
40@ "Hot" constants
41@
42.Lk_inv:@ inv, inva
43.quad	0x0E05060F0D080180, 0x040703090A0B0C02
44.quad	0x01040A060F0B0780, 0x030D0E0C02050809
45.Lk_ipt:@ input transform (lo, hi)
46.quad	0xC2B2E8985A2A7000, 0xCABAE09052227808
47.quad	0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
48.Lk_sbo:@ sbou, sbot
49.quad	0xD0D26D176FBDC700, 0x15AABF7AC502A878
50.quad	0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
51.Lk_sb1:@ sb1u, sb1t
52.quad	0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
53.quad	0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
54.Lk_sb2:@ sb2u, sb2t
55.quad	0x69EB88400AE12900, 0xC2A163C8AB82234A
56.quad	0xE27A93C60B712400, 0x5EB7E955BC982FCD
57
58.byte	86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,55,32,78,69,79,78,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
59.align	2
60.size	_vpaes_consts,.-_vpaes_consts
61.align	6
62@@
63@@  _aes_preheat
64@@
65@@  Fills q9-q15 as specified below.
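@@      q9      = 0x0F..0F nibble mask (.Lk_s0F)
@@      q10-q11 = .Lk_inv
@@      q12-q13 = .Lk_sb1
@@      q14-q15 = .Lk_sb2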
66@@
67.type	_vpaes_preheat,%function
68.align	4
69_vpaes_preheat:
70	adr	r10, .Lk_inv
71	vmov.i8	q9, #0x0f		@ .Lk_s0F
72	vld1.64	{q10,q11}, [r10]!	@ .Lk_inv
73	add	r10, r10, #64		@ Skip .Lk_ipt, .Lk_sbo
74	vld1.64	{q12,q13}, [r10]!	@ .Lk_sb1
75	vld1.64	{q14,q15}, [r10]	@ .Lk_sb2
76	bx	lr
77
78@@
79@@  _aes_encrypt_core
80@@
81@@  AES-encrypt q0.
82@@
83@@  Inputs:
84@@     q0 = input
85@@     q9-q15 as in _vpaes_preheat
86@@    [r2] = scheduled keys
87@@
88@@  Output in q0
89@@  Clobbers  q1-q5, r8-r11
90@@  Preserves q6-q8 so you get some local vectors
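@@
@@  Each lookup below uses the vector-permute idiom: split the state bytes
@@  into nibbles (vand with q9, vshr.u8 #4) and index 16-byte tables with
@@  vtbl.8, the NEON counterpart of SSSE3 pshufb. Roughly, per byte b:
@@      t = tbl_lo[b & 0x0f] ^ tbl_hi[b >> 4]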
91@@
92@@
93.type	_vpaes_encrypt_core,%function
94.align	4
95_vpaes_encrypt_core:
96	mov	r9, r2
97	ldr	r8, [r2,#240]		@ pull rounds
98	adr	r11, .Lk_ipt
99	@ vmovdqa	.Lk_ipt(%rip),	%xmm2	# iptlo
100	@ vmovdqa	.Lk_ipt+16(%rip), %xmm3	# ipthi
101	vld1.64	{q2, q3}, [r11]
102	adr	r11, .Lk_mc_forward+16
103	vld1.64	{q5}, [r9]!		@ vmovdqu	(%r9),	%xmm5		# round0 key
104	vand	q1, q0, q9		@ vpand	%xmm9,	%xmm0,	%xmm1
105	vshr.u8	q0, q0, #4		@ vpsrlb	$4,	%xmm0,	%xmm0
106	vtbl.8	d2, {q2}, d2	@ vpshufb	%xmm1,	%xmm2,	%xmm1
107	vtbl.8	d3, {q2}, d3
108	vtbl.8	d4, {q3}, d0	@ vpshufb	%xmm0,	%xmm3,	%xmm2
109	vtbl.8	d5, {q3}, d1
110	veor	q0, q1, q5		@ vpxor	%xmm5,	%xmm1,	%xmm0
111	veor	q0, q0, q2		@ vpxor	%xmm2,	%xmm0,	%xmm0
112
113	@ .Lenc_entry ends with a bne instruction which is normally paired with
114	@ subs in .Lenc_loop.
115	tst	r8, r8
116	b	.Lenc_entry
117
118.align	4
119.Lenc_loop:
120	@ middle of middle round
121	add	r10, r11, #0x40
122	vtbl.8	d8, {q13}, d4	@ vpshufb	%xmm2,	%xmm13,	%xmm4	# 4 = sb1u
123	vtbl.8	d9, {q13}, d5
124	vld1.64	{q1}, [r11]!		@ vmovdqa	-0x40(%r11,%r10), %xmm1	# .Lk_mc_forward[]
125	vtbl.8	d0, {q12}, d6	@ vpshufb	%xmm3,	%xmm12,	%xmm0	# 0 = sb1t
126	vtbl.8	d1, {q12}, d7
127	veor	q4, q4, q5		@ vpxor		%xmm5,	%xmm4,	%xmm4	# 4 = sb1u + k
128	vtbl.8	d10, {q15}, d4	@ vpshufb	%xmm2,	%xmm15,	%xmm5	# 4 = sb2u
129	vtbl.8	d11, {q15}, d5
130	veor	q0, q0, q4		@ vpxor		%xmm4,	%xmm0,	%xmm0	# 0 = A
131	vtbl.8	d4, {q14}, d6	@ vpshufb	%xmm3,	%xmm14,	%xmm2	# 2 = sb2t
132	vtbl.8	d5, {q14}, d7
133	vld1.64	{q4}, [r10]		@ vmovdqa	(%r11,%r10), %xmm4	# .Lk_mc_backward[]
134	vtbl.8	d6, {q0}, d2	@ vpshufb	%xmm1,	%xmm0,	%xmm3	# 0 = B
135	vtbl.8	d7, {q0}, d3
136	veor	q2, q2, q5		@ vpxor		%xmm5,	%xmm2,	%xmm2	# 2 = 2A
137	@ Write to q5 instead of q0, so the table and destination registers do
138	@ not overlap.
139	vtbl.8	d10, {q0}, d8	@ vpshufb	%xmm4,	%xmm0,	%xmm0	# 3 = D
140	vtbl.8	d11, {q0}, d9
141	veor	q3, q3, q2		@ vpxor		%xmm2,	%xmm3,	%xmm3	# 0 = 2A+B
142	vtbl.8	d8, {q3}, d2	@ vpshufb	%xmm1,	%xmm3,	%xmm4	# 0 = 2B+C
143	vtbl.8	d9, {q3}, d3
144	@ Here we restore the original q0/q5 usage.
145	veor	q0, q5, q3		@ vpxor		%xmm3,	%xmm0,	%xmm0	# 3 = 2A+B+D
146	and	r11, r11, #~(1<<6)	@ and		$0x30,	%r11		# ... mod 4
147	veor	q0, q0, q4		@ vpxor		%xmm4,	%xmm0, %xmm0	# 0 = 2A+3B+C+D
148	subs	r8, r8, #1		@ nr--
149
150.Lenc_entry:
151	@ top of round
152	vand	q1, q0, q9		@ vpand		%xmm0,	%xmm9,	%xmm1   # 0 = k
153	vshr.u8	q0, q0, #4		@ vpsrlb	$4,	%xmm0,	%xmm0	# 1 = i
154	vtbl.8	d10, {q11}, d2	@ vpshufb	%xmm1,	%xmm11,	%xmm5	# 2 = a/k
155	vtbl.8	d11, {q11}, d3
156	veor	q1, q1, q0		@ vpxor		%xmm0,	%xmm1,	%xmm1	# 0 = j
157	vtbl.8	d6, {q10}, d0	@ vpshufb	%xmm0, 	%xmm10,	%xmm3  	# 3 = 1/i
158	vtbl.8	d7, {q10}, d1
159	vtbl.8	d8, {q10}, d2	@ vpshufb	%xmm1, 	%xmm10,	%xmm4  	# 4 = 1/j
160	vtbl.8	d9, {q10}, d3
161	veor	q3, q3, q5		@ vpxor		%xmm5,	%xmm3,	%xmm3	# 3 = iak = 1/i + a/k
162	veor	q4, q4, q5		@ vpxor		%xmm5,	%xmm4,	%xmm4  	# 4 = jak = 1/j + a/k
163	vtbl.8	d4, {q10}, d6	@ vpshufb	%xmm3,	%xmm10,	%xmm2  	# 2 = 1/iak
164	vtbl.8	d5, {q10}, d7
165	vtbl.8	d6, {q10}, d8	@ vpshufb	%xmm4,	%xmm10,	%xmm3	# 3 = 1/jak
166	vtbl.8	d7, {q10}, d9
167	veor	q2, q2, q1		@ vpxor		%xmm1,	%xmm2,	%xmm2  	# 2 = io
168	veor	q3, q3, q0		@ vpxor		%xmm0,	%xmm3,	%xmm3	# 3 = jo
169	vld1.64	{q5}, [r9]!		@ vmovdqu	(%r9),	%xmm5
170	bne	.Lenc_loop
171
172	@ middle of last round
173	add	r10, r11, #0x80
174
175	adr	r11, .Lk_sbo
176	@ Read to q1 instead of q4, so the vtbl.8 instruction below does not
177	@ overlap table and destination registers.
178	vld1.64	{q1}, [r11]!		@ vmovdqa	-0x60(%r10), %xmm4	# 3 : sbou
179	vld1.64	{q0}, [r11]		@ vmovdqa	-0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
180	vtbl.8	d8, {q1}, d4	@ vpshufb	%xmm2,	%xmm4,	%xmm4	# 4 = sbou
181	vtbl.8	d9, {q1}, d5
182	vld1.64	{q1}, [r10]		@ vmovdqa	0x40(%r11,%r10), %xmm1	# .Lk_sr[]
183	@ Write to q2 instead of q0 below, to avoid overlapping table and
184	@ destination registers.
185	vtbl.8	d4, {q0}, d6	@ vpshufb	%xmm3,	%xmm0,	%xmm0	# 0 = sb1t
186	vtbl.8	d5, {q0}, d7
187	veor	q4, q4, q5		@ vpxor	%xmm5,	%xmm4,	%xmm4	# 4 = sb1u + k
188	veor	q2, q2, q4		@ vpxor	%xmm4,	%xmm0,	%xmm0	# 0 = A
189	@ Here we restore the original q0/q2 usage.
190	vtbl.8	d0, {q2}, d2	@ vpshufb	%xmm1,	%xmm0,	%xmm0
191	vtbl.8	d1, {q2}, d3
192	bx	lr
193.size	_vpaes_encrypt_core,.-_vpaes_encrypt_core
194
195.globl	vpaes_encrypt
196.hidden	vpaes_encrypt
197.type	vpaes_encrypt,%function
198.align	4
199vpaes_encrypt:
200	@ _vpaes_encrypt_core uses r8-r11. Round up to r7-r11 to maintain stack
201	@ alignment.
202	stmdb	sp!, {r7,r8,r9,r10,r11,lr}
203	@ _vpaes_encrypt_core uses q4-q5 (d8-d11), which are callee-saved.
204	vstmdb	sp!, {d8,d9,d10,d11}
205
206	vld1.64	{q0}, [r0]
207	bl	_vpaes_preheat
208	bl	_vpaes_encrypt_core
209	vst1.64	{q0}, [r1]
210
211	vldmia	sp!, {d8,d9,d10,d11}
212	ldmia	sp!, {r7,r8,r9,r10,r11, pc}	@ return
213.size	vpaes_encrypt,.-vpaes_encrypt
214
215@
216@  Decryption stuff
217@
218.type	_vpaes_decrypt_consts,%object
219.align	4
220_vpaes_decrypt_consts:
221.Lk_dipt:@ decryption input transform
222.quad	0x0F505B040B545F00, 0x154A411E114E451A
223.quad	0x86E383E660056500, 0x12771772F491F194
224.Lk_dsbo:@ decryption sbox final output
225.quad	0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
226.quad	0x12D7560F93441D00, 0xCA4B8159D8C58E9C
227.Lk_dsb9:@ decryption sbox output *9*u, *9*t
228.quad	0x851C03539A86D600, 0xCAD51F504F994CC9
229.quad	0xC03B1789ECD74900, 0x725E2C9EB2FBA565
230.Lk_dsbd:@ decryption sbox output *D*u, *D*t
231.quad	0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
232.quad	0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
233.Lk_dsbb:@ decryption sbox output *B*u, *B*t
234.quad	0xD022649296B44200, 0x602646F6B0F2D404
235.quad	0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
236.Lk_dsbe:@ decryption sbox output *E*u, *E*t
237.quad	0x46F2929626D4D000, 0x2242600464B4F6B0
238.quad	0x0C55A6CDFFAAC100, 0x9467F36B98593E32
239.size	_vpaes_decrypt_consts,.-_vpaes_decrypt_consts
240
241@@
242@@  Decryption core
243@@
244@@  Same API as encryption core, except it clobbers q12-q15 rather than using
245@@  the values from _vpaes_preheat. q9-q11 must still be set from
246@@  _vpaes_preheat.
247@@
248.type	_vpaes_decrypt_core,%function
249.align	4
250_vpaes_decrypt_core:
251	mov	r9, r2
252	ldr	r8, [r2,#240]		@ pull rounds
253
254	@ This function performs shuffles with various constants. The x86_64
255	@ version loads them on-demand into %xmm0-%xmm5. This does not work well
256	@ for ARMv7 because those registers are shuffle destinations. The ARMv8
257	@ version preloads those constants into registers, but ARMv7 has half
258	@ the registers to work with. Instead, we load them on-demand into
259	@ q12-q15, registers normally used for preloaded constants. This is fine
260	@ because decryption doesn't use those constants. The values are
261	@ constant, so this does not interfere with potential 2x optimizations.
262	adr	r7, .Lk_dipt
263
264	vld1.64	{q12,q13}, [r7]		@ vmovdqa	.Lk_dipt(%rip), %xmm2	# iptlo
265	lsl	r11, r8, #4		@ mov		%rax,	%r11;	shl	$4, %r11
266	eor	r11, r11, #0x30		@ xor		$0x30,	%r11
267	adr	r10, .Lk_sr
268	and	r11, r11, #0x30		@ and		$0x30,	%r11
269	add	r11, r11, r10
270	adr	r10, .Lk_mc_forward+48
271
272	vld1.64	{q4}, [r9]!		@ vmovdqu	(%r9),	%xmm4		# round0 key
273	vand	q1, q0, q9		@ vpand		%xmm9,	%xmm0,	%xmm1
274	vshr.u8	q0, q0, #4		@ vpsrlb	$4,	%xmm0,	%xmm0
275	vtbl.8	d4, {q12}, d2	@ vpshufb	%xmm1,	%xmm2,	%xmm2
276	vtbl.8	d5, {q12}, d3
277	vld1.64	{q5}, [r10]		@ vmovdqa	.Lk_mc_forward+48(%rip), %xmm5
278					@ vmovdqa	.Lk_dipt+16(%rip), %xmm1 # ipthi
279	vtbl.8	d0, {q13}, d0	@ vpshufb	%xmm0,	%xmm1,	%xmm0
280	vtbl.8	d1, {q13}, d1
281	veor	q2, q2, q4		@ vpxor		%xmm4,	%xmm2,	%xmm2
282	veor	q0, q0, q2		@ vpxor		%xmm2,	%xmm0,	%xmm0
283
284	@ .Ldec_entry ends with a bne instruction which is normally paired with
285	@ subs in .Ldec_loop.
286	tst	r8, r8
287	b	.Ldec_entry
288
289.align	4
290.Ldec_loop:
291@
292@  Inverse mix columns
293@
294
295	@ We load .Lk_dsb* into q12-q15 on-demand. See the comment at the top of
296	@ the function.
297	adr	r10, .Lk_dsb9
298	vld1.64	{q12,q13}, [r10]!	@ vmovdqa	-0x20(%r10),%xmm4		# 4 : sb9u
299					@ vmovdqa	-0x10(%r10),%xmm1		# 0 : sb9t
300	@ Load sbd* ahead of time.
301	vld1.64	{q14,q15}, [r10]!	@ vmovdqa	0x00(%r10),%xmm4		# 4 : sbdu
302					@ vmovdqa	0x10(%r10),%xmm1		# 0 : sbdt
303	vtbl.8	d8, {q12}, d4	@ vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sb9u
304	vtbl.8	d9, {q12}, d5
305	vtbl.8	d2, {q13}, d6	@ vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sb9t
306	vtbl.8	d3, {q13}, d7
307	veor	q0, q4, q0		@ vpxor		%xmm4,	%xmm0,	%xmm0
308
309	veor	q0, q0, q1		@ vpxor		%xmm1,	%xmm0,	%xmm0		# 0 = ch
310
311	@ Load sbb* ahead of time.
312	vld1.64	{q12,q13}, [r10]!	@ vmovdqa	0x20(%r10),%xmm4		# 4 : sbbu
313					@ vmovdqa	0x30(%r10),%xmm1		# 0 : sbbt
314
315	vtbl.8	d8, {q14}, d4	@ vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbdu
316	vtbl.8	d9, {q14}, d5
317	@ Write to q1 instead of q0, so the table and destination registers do
318	@ not overlap.
319	vtbl.8	d2, {q0}, d10	@ vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
320	vtbl.8	d3, {q0}, d11
321	@ Here we restore the original q0/q1 usage. This instruction is
322	@ reordered from the ARMv8 version so we do not clobber the vtbl.8
323	@ below.
324	veor	q0, q1, q4		@ vpxor		%xmm4,	%xmm0,	%xmm0		# 4 = ch
325	vtbl.8	d2, {q15}, d6	@ vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbdt
326	vtbl.8	d3, {q15}, d7
327					@ vmovdqa	0x20(%r10),	%xmm4		# 4 : sbbu
328	veor	q0, q0, q1		@ vpxor		%xmm1,	%xmm0,	%xmm0		# 0 = ch
329					@ vmovdqa	0x30(%r10),	%xmm1		# 0 : sbbt
330
331	@ Load sbe* ahead of time.
332	vld1.64	{q14,q15}, [r10]!	@ vmovdqa	0x40(%r10),%xmm4		# 4 : sbeu
333					@ vmovdqa	0x50(%r10),%xmm1		# 0 : sbet
334
335	vtbl.8	d8, {q12}, d4	@ vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbbu
336	vtbl.8	d9, {q12}, d5
337	@ Write to q1 instead of q0, so the table and destination registers do
338	@ not overlap.
339	vtbl.8	d2, {q0}, d10	@ vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
340	vtbl.8	d3, {q0}, d11
341	@ Here we restore the original q0/q1 usage. This instruction is
342	@ reordered from the ARMv8 version so we do not clobber the vtbl.8
343	@ below.
344	veor	q0, q1, q4		@ vpxor		%xmm4,	%xmm0,	%xmm0		# 4 = ch
345	vtbl.8	d2, {q13}, d6	@ vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbbt
346	vtbl.8	d3, {q13}, d7
347	veor	q0, q0, q1		@ vpxor		%xmm1,	%xmm0,	%xmm0		# 0 = ch
348
349	vtbl.8	d8, {q14}, d4	@ vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbeu
350	vtbl.8	d9, {q14}, d5
351	@ Write to q1 instead of q0, so the table and destination registers do
352	@ not overlap.
353	vtbl.8	d2, {q0}, d10	@ vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
354	vtbl.8	d3, {q0}, d11
355	@ Here we restore the original q0/q1 usage. This instruction is
356	@ reordered from the ARMv8 version so we do not clobber the vtbl.8
357	@ below.
358	veor	q0, q1, q4		@ vpxor		%xmm4,	%xmm0,	%xmm0		# 4 = ch
359	vtbl.8	d2, {q15}, d6	@ vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbet
360	vtbl.8	d3, {q15}, d7
361	vext.8	q5, q5, q5, #12		@ vpalignr 	$12,	%xmm5,	%xmm5,	%xmm5
362	veor	q0, q0, q1		@ vpxor		%xmm1,	%xmm0,	%xmm0		# 0 = ch
363	subs	r8, r8, #1		@ sub		$1,%rax			# nr--
364
365.Ldec_entry:
366	@ top of round
367	vand	q1, q0, q9		@ vpand		%xmm9,	%xmm0,	%xmm1	# 0 = k
368	vshr.u8	q0, q0, #4		@ vpsrlb	$4,	%xmm0,	%xmm0	# 1 = i
369	vtbl.8	d4, {q11}, d2	@ vpshufb	%xmm1,	%xmm11,	%xmm2	# 2 = a/k
370	vtbl.8	d5, {q11}, d3
371	veor	q1, q1, q0		@ vpxor		%xmm0,	%xmm1,	%xmm1	# 0 = j
372	vtbl.8	d6, {q10}, d0	@ vpshufb	%xmm0, 	%xmm10,	%xmm3	# 3 = 1/i
373	vtbl.8	d7, {q10}, d1
374	vtbl.8	d8, {q10}, d2	@ vpshufb	%xmm1,	%xmm10,	%xmm4	# 4 = 1/j
375	vtbl.8	d9, {q10}, d3
376	veor	q3, q3, q2		@ vpxor		%xmm2,	%xmm3,	%xmm3	# 3 = iak = 1/i + a/k
377	veor	q4, q4, q2		@ vpxor		%xmm2, 	%xmm4,	%xmm4	# 4 = jak = 1/j + a/k
378	vtbl.8	d4, {q10}, d6	@ vpshufb	%xmm3,	%xmm10,	%xmm2	# 2 = 1/iak
379	vtbl.8	d5, {q10}, d7
380	vtbl.8	d6, {q10}, d8	@ vpshufb	%xmm4,  %xmm10,	%xmm3	# 3 = 1/jak
381	vtbl.8	d7, {q10}, d9
382	veor	q2, q2, q1		@ vpxor		%xmm1,	%xmm2,	%xmm2	# 2 = io
383	veor	q3, q3, q0		@ vpxor		%xmm0,  %xmm3,	%xmm3	# 3 = jo
384	vld1.64	{q0}, [r9]!		@ vmovdqu	(%r9),	%xmm0
385	bne	.Ldec_loop
386
387	@ middle of last round
388
389	adr	r10, .Lk_dsbo
390
391	@ Write to q1 rather than q4 to avoid overlapping table and destination.
392	vld1.64	{q1}, [r10]!		@ vmovdqa	0x60(%r10),	%xmm4	# 3 : sbou
393	vtbl.8	d8, {q1}, d4	@ vpshufb	%xmm2,	%xmm4,	%xmm4	# 4 = sbou
394	vtbl.8	d9, {q1}, d5
395	@ Write to q2 rather than q1 to avoid overlapping table and destination.
396	vld1.64	{q2}, [r10]		@ vmovdqa	0x70(%r10),	%xmm1	# 0 : sbot
397	vtbl.8	d2, {q2}, d6	@ vpshufb	%xmm3,	%xmm1,	%xmm1	# 0 = sb1t
398	vtbl.8	d3, {q2}, d7
399	vld1.64	{q2}, [r11]		@ vmovdqa	-0x160(%r11),	%xmm2	# .Lk_sr-.Lk_dsbd=-0x160
400	veor	q4, q4, q0		@ vpxor		%xmm0,	%xmm4,	%xmm4	# 4 = sb1u + k
401	@ Write to q1 rather than q0 so the table and destination registers
402	@ below do not overlap.
403	veor	q1, q1, q4		@ vpxor		%xmm4,	%xmm1,	%xmm0	# 0 = A
404	vtbl.8	d0, {q1}, d4	@ vpshufb	%xmm2,	%xmm0,	%xmm0
405	vtbl.8	d1, {q1}, d5
406	bx	lr
407.size	_vpaes_decrypt_core,.-_vpaes_decrypt_core
408
409.globl	vpaes_decrypt
410.hidden	vpaes_decrypt
411.type	vpaes_decrypt,%function
412.align	4
413vpaes_decrypt:
414	@ _vpaes_decrypt_core uses r7-r11.
415	stmdb	sp!, {r7,r8,r9,r10,r11,lr}
416	@ _vpaes_decrypt_core uses q4-q5 (d8-d11), which are callee-saved.
417	vstmdb	sp!, {d8,d9,d10,d11}
418
419	vld1.64	{q0}, [r0]
420	bl	_vpaes_preheat
421	bl	_vpaes_decrypt_core
422	vst1.64	{q0}, [r1]
423
424	vldmia	sp!, {d8,d9,d10,d11}
425	ldmia	sp!, {r7,r8,r9,r10,r11, pc}	@ return
426.size	vpaes_decrypt,.-vpaes_decrypt
427@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
428@@                                                    @@
429@@                  AES key schedule                  @@
430@@                                                    @@
431@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
432
433@ This function diverges from both x86_64 and aarch64 in which constants are
434@ pinned. x86_64 has a common preheat function for all operations. aarch64
435@ separates them because it has enough registers to pin nearly all constants.
436@ armv7 does not have enough registers, but needing explicit loads and stores
437@ also complicates using x86_64's register allocation directly.
438@
439@ We pin some constants for convenience and leave q14 and q15 free to load
440@ others on demand.
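@
@ Concretely, _vpaes_key_preheat pins q8 = .Lk_rcon, q9 = 0x0F (.Lk_s0F),
@ q10-q11 = .Lk_inv, and q12 = 0x5B (.Lk_s63).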
441
442@
443@  Key schedule constants
444@
445.type	_vpaes_key_consts,%object
446.align	4
447_vpaes_key_consts:
448.Lk_dksd:@ decryption key schedule: invskew x*D
449.quad	0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
450.quad	0x41C277F4B5368300, 0x5FDC69EAAB289D1E
451.Lk_dksb:@ decryption key schedule: invskew x*B
452.quad	0x9A4FCA1F8550D500, 0x03D653861CC94C99
453.quad	0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
454.Lk_dkse:@ decryption key schedule: invskew x*E + 0x63
455.quad	0xD5031CCA1FC9D600, 0x53859A4C994F5086
456.quad	0xA23196054FDC7BE8, 0xCD5EF96A20B31487
457.Lk_dks9:@ decryption key schedule: invskew x*9
458.quad	0xB6116FC87ED9A700, 0x4AED933482255BFC
459.quad	0x4576516227143300, 0x8BB89FACE9DAFDCE
460
461.Lk_rcon:@ rcon
462.quad	0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
463
464.Lk_opt:@ output transform
465.quad	0xFF9F4929D6B66000, 0xF7974121DEBE6808
466.quad	0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
467.Lk_deskew:@ deskew tables: inverts the sbox's "skew"
468.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
469.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
470.size	_vpaes_key_consts,.-_vpaes_key_consts
471
472.type	_vpaes_key_preheat,%function
473.align	4
474_vpaes_key_preheat:
475	adr	r11, .Lk_rcon
476	vmov.i8	q12, #0x5b			@ .Lk_s63
477	adr	r10, .Lk_inv			@ Must be aligned to 8 mod 16.
478	vmov.i8	q9, #0x0f			@ .Lk_s0F
479	vld1.64	{q10,q11}, [r10]		@ .Lk_inv
480	vld1.64	{q8}, [r11]			@ .Lk_rcon
481	bx	lr
482.size	_vpaes_key_preheat,.-_vpaes_key_preheat
483
484.type	_vpaes_schedule_core,%function
485.align	4
486_vpaes_schedule_core:
487	@ We only need to save lr, but ARM requires an 8-byte stack alignment,
488	@ so save an extra register.
489	stmdb	sp!, {r3,lr}
490
491	bl	_vpaes_key_preheat	@ load the tables
492
493	adr	r11, .Lk_ipt		@ Must be aligned to 8 mod 16.
494	vld1.64	{q0}, [r0]!		@ vmovdqu	(%rdi),	%xmm0		# load key (unaligned)
495
496	@ input transform
497	@ Use q4 here rather than q3 so .Lschedule_am_decrypting does not
498	@ overlap table and destination.
499	vmov	q4, q0			@ vmovdqa	%xmm0,	%xmm3
500	bl	_vpaes_schedule_transform
501	adr	r10, .Lk_sr		@ Must be aligned to 8 mod 16.
502	vmov	q7, q0			@ vmovdqa	%xmm0,	%xmm7
503
504	add	r8, r8, r10
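	@ r8 now selects one of the four 16-byte ShiftRows rows of .Lk_sr.
	@ _vpaes_schedule_mangle advances it by 48 and clears bit 6 (-16 mod
	@ 64), so successive keys use successive rows.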
505	tst	r3, r3
506	bne	.Lschedule_am_decrypting
507
508	@ encrypting, output zeroth round key after transform
509	vst1.64	{q0}, [r2]		@ vmovdqu	%xmm0,	(%rdx)
510	b	.Lschedule_go
511
512.Lschedule_am_decrypting:
513	@ decrypting, output zeroth round key after shiftrows
514	vld1.64	{q1}, [r8]		@ vmovdqa	(%r8,%r10),	%xmm1
515	vtbl.8	d6, {q4}, d2	@ vpshufb  	%xmm1,	%xmm3,	%xmm3
516	vtbl.8	d7, {q4}, d3
517	vst1.64	{q3}, [r2]		@ vmovdqu	%xmm3,	(%rdx)
518	eor	r8, r8, #0x30		@ xor	$0x30, %r8
519
520.Lschedule_go:
521	cmp	r1, #192		@ cmp	$192,	%esi
522	bhi	.Lschedule_256
523	beq	.Lschedule_192
524	@ 128: fall through
525
526@@
527@@  .schedule_128
528@@
529@@  128-bit specific part of key schedule.
530@@
531@@  This schedule is really simple, because all its parts
532@@  are accomplished by the subroutines.
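@@
@@  (The zeroth key is written before .Lschedule_go; the loop below then
@@  writes one key per round, the tenth via .Lschedule_mangle_last, for the
@@  11 round keys of AES-128.)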
533@@
534.Lschedule_128:
535	mov	r0, #10		@ mov	$10, %esi
536
537.Loop_schedule_128:
538	bl	_vpaes_schedule_round
539	subs	r0, r0, #1		@ dec	%esi
540	beq	.Lschedule_mangle_last
541	bl	_vpaes_schedule_mangle	@ write output
542	b	.Loop_schedule_128
543
544@@
545@@  .aes_schedule_192
546@@
547@@  192-bit specific part of key schedule.
548@@
549@@  The main body of this schedule is the same as the 128-bit
550@@  schedule, but with more smearing.  The long, high side is
551@@  stored in q7 as before, and the short, low side is in
552@@  the high bits of q6.
553@@
554@@  This schedule is somewhat nastier, however, because each
555@@  round produces 192 bits of key material, or 1.5 round keys.
556@@  Therefore, on each cycle we do 2 rounds and produce 3 round
557@@  keys.
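@@
@@  (Counting writes: the zeroth key, three keys from each of the first
@@  three cycles, two from the fourth, and one from .Lschedule_mangle_last
@@  give the 13 round keys of AES-192.)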
558@@
559.align	4
560.Lschedule_192:
561	sub	r0, r0, #8
562	vld1.64	{q0}, [r0]			@ vmovdqu	8(%rdi),%xmm0		# load key part 2 (very unaligned)
563	bl	_vpaes_schedule_transform	@ input transform
564	vmov	q6, q0				@ vmovdqa	%xmm0,	%xmm6		# save short part
565	vmov.i8	d12, #0			@ vpxor	%xmm4,	%xmm4, %xmm4	# clear 4
566						@ vmovhlps	%xmm4,	%xmm6,	%xmm6		# clobber low side with zeros
567	mov	r0, #4			@ mov	$4,	%esi
568
569.Loop_schedule_192:
570	bl	_vpaes_schedule_round
571	vext.8	q0, q6, q0, #8			@ vpalignr	$8,%xmm6,%xmm0,%xmm0
572	bl	_vpaes_schedule_mangle		@ save key n
573	bl	_vpaes_schedule_192_smear
574	bl	_vpaes_schedule_mangle		@ save key n+1
575	bl	_vpaes_schedule_round
576	subs	r0, r0, #1			@ dec	%esi
577	beq	.Lschedule_mangle_last
578	bl	_vpaes_schedule_mangle		@ save key n+2
579	bl	_vpaes_schedule_192_smear
580	b	.Loop_schedule_192
581
582@@
583@@  .aes_schedule_256
584@@
585@@  256-bit specific part of key schedule.
586@@
587@@  The structure here is very similar to the 128-bit
588@@  schedule, but with an additional "low side" in
589@@  q6.  The low side's rounds are the same as the
590@@  high side's, except no rcon and no rotation.
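@@
@@  (Counting writes: the zeroth key, two keys from each of the first six
@@  cycles, one from the seventh, and one from .Lschedule_mangle_last give
@@  the 15 round keys of AES-256.)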
591@@
592.align	4
593.Lschedule_256:
594	vld1.64	{q0}, [r0]			@ vmovdqu	16(%rdi),%xmm0		# load key part 2 (unaligned)
595	bl	_vpaes_schedule_transform	@ input transform
596	mov	r0, #7			@ mov	$7, %esi
597
598.Loop_schedule_256:
599	bl	_vpaes_schedule_mangle		@ output low result
600	vmov	q6, q0				@ vmovdqa	%xmm0,	%xmm6		# save cur_lo in xmm6
601
602	@ high round
603	bl	_vpaes_schedule_round
604	subs	r0, r0, #1			@ dec	%esi
605	beq	.Lschedule_mangle_last
606	bl	_vpaes_schedule_mangle
607
608	@ low round. swap xmm7 and xmm6
609	vdup.32	q0, d1[1]		@ vpshufd	$0xFF,	%xmm0,	%xmm0
610	vmov.i8	q4, #0
611	vmov	q5, q7			@ vmovdqa	%xmm7,	%xmm5
612	vmov	q7, q6			@ vmovdqa	%xmm6,	%xmm7
613	bl	_vpaes_schedule_low_round
614	vmov	q7, q5			@ vmovdqa	%xmm5,	%xmm7
615
616	b	.Loop_schedule_256
617
618@@
619@@  .aes_schedule_mangle_last
620@@
621@@  Mangler for last round of key schedule
622@@  Mangles q0
623@@    when encrypting, outputs out(q0) ^ 63
624@@    when decrypting, outputs unskew(q0)
625@@
626@@  Always called right before return... jumps to cleanup and exits
627@@
628.align	4
629.Lschedule_mangle_last:
630	@ schedule last round key from xmm0
631	adr	r11, .Lk_deskew			@ lea	.Lk_deskew(%rip),%r11	# prepare to deskew
632	tst	r3, r3
633	bne	.Lschedule_mangle_last_dec
634
635	@ encrypting
636	vld1.64	{q1}, [r8]		@ vmovdqa	(%r8,%r10),%xmm1
637	adr	r11, .Lk_opt		@ lea		.Lk_opt(%rip),	%r11		# prepare to output transform
638	add	r2, r2, #32		@ add		$32,	%rdx
639	vmov	q2, q0
640	vtbl.8	d0, {q2}, d2	@ vpshufb	%xmm1,	%xmm0,	%xmm0		# output permute
641	vtbl.8	d1, {q2}, d3
642
643.Lschedule_mangle_last_dec:
644	sub	r2, r2, #16			@ add	$-16,	%rdx
645	veor	q0, q0, q12			@ vpxor	.Lk_s63(%rip),	%xmm0,	%xmm0
646	bl	_vpaes_schedule_transform	@ output transform
647	vst1.64	{q0}, [r2]			@ vmovdqu	%xmm0,	(%rdx)		# save last key
648
649	@ cleanup
650	veor	q0, q0, q0		@ vpxor	%xmm0,	%xmm0,	%xmm0
651	veor	q1, q1, q1		@ vpxor	%xmm1,	%xmm1,	%xmm1
652	veor	q2, q2, q2		@ vpxor	%xmm2,	%xmm2,	%xmm2
653	veor	q3, q3, q3		@ vpxor	%xmm3,	%xmm3,	%xmm3
654	veor	q4, q4, q4		@ vpxor	%xmm4,	%xmm4,	%xmm4
655	veor	q5, q5, q5		@ vpxor	%xmm5,	%xmm5,	%xmm5
656	veor	q6, q6, q6		@ vpxor	%xmm6,	%xmm6,	%xmm6
657	veor	q7, q7, q7		@ vpxor	%xmm7,	%xmm7,	%xmm7
658	ldmia	sp!, {r3,pc}		@ return
659.size	_vpaes_schedule_core,.-_vpaes_schedule_core
660
661@@
662@@  .aes_schedule_192_smear
663@@
664@@  Smear the short, low side in the 192-bit key schedule.
665@@
666@@  Inputs:
667@@    q7: high side, b  a  x  y
668@@    q6:  low side, d  c  0  0
669@@
670@@  Outputs:
671@@    q6: b+c+d  b+c  0  0
672@@    q0: b+c+d  b+c  b  a
673@@
674.type	_vpaes_schedule_192_smear,%function
675.align	4
676_vpaes_schedule_192_smear:
677	vmov.i8	q1, #0
678	vdup.32	q0, d15[1]
679	vshl.i64	q1, q6, #32		@ vpshufd	$0x80,	%xmm6,	%xmm1	# d c 0 0 -> c 0 0 0
680	vmov	d0, d15		@ vpshufd	$0xFE,	%xmm7,	%xmm0	# b a _ _ -> b b b a
681	veor	q6, q6, q1		@ vpxor	%xmm1,	%xmm6,	%xmm6	# -> c+d c 0 0
682	veor	q1, q1, q1		@ vpxor	%xmm1,	%xmm1,	%xmm1
683	veor	q6, q6, q0		@ vpxor	%xmm0,	%xmm6,	%xmm6	# -> b+c+d b+c b a
684	vmov	q0, q6			@ vmovdqa	%xmm6,	%xmm0
685	vmov	d12, d2		@ vmovhlps	%xmm1,	%xmm6,	%xmm6	# clobber low side with zeros
686	bx	lr
687.size	_vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
688
689@@
690@@  .aes_schedule_round
691@@
692@@  Runs one main round of the key schedule on q0, q7
693@@
694@@  Specifically, runs subbytes on the high dword of q0
695@@  then rotates it by one byte and xors into the low dword of
696@@  q7.
697@@
698@@  Adds rcon from low byte of q8, then rotates q8 for
699@@  next rcon.
700@@
701@@  Smears the dwords of q7 by xoring the low into the
702@@  second low, result into third, result into highest.
703@@
704@@  Returns results in q7 = q0.
705@@  Clobbers q1-q4, r11.
706@@
707.type	_vpaes_schedule_round,%function
708.align	4
709_vpaes_schedule_round:
710	@ extract rcon from xmm8
711	vmov.i8	q4, #0				@ vpxor		%xmm4,	%xmm4,	%xmm4
712	vext.8	q1, q8, q4, #15		@ vpalignr	$15,	%xmm8,	%xmm4,	%xmm1
713	vext.8	q8, q8, q8, #15	@ vpalignr	$15,	%xmm8,	%xmm8,	%xmm8
714	veor	q7, q7, q1			@ vpxor		%xmm1,	%xmm7,	%xmm7
715
716	@ rotate
717	vdup.32	q0, d1[1]			@ vpshufd	$0xFF,	%xmm0,	%xmm0
718	vext.8	q0, q0, q0, #1			@ vpalignr	$1,	%xmm0,	%xmm0,	%xmm0
719
720	@ fall through...
721
722	@ low round: same as high round, but no rotation and no rcon.
723_vpaes_schedule_low_round:
724	@ The x86_64 version pins .Lk_sb1 in %xmm13 and .Lk_sb1+16 in %xmm12.
725	@ We pin other values in _vpaes_key_preheat, so load them now.
726	adr	r11, .Lk_sb1
727	vld1.64	{q14,q15}, [r11]
728
729	@ smear xmm7
730	vext.8	q1, q4, q7, #12			@ vpslldq	$4,	%xmm7,	%xmm1
731	veor	q7, q7, q1			@ vpxor	%xmm1,	%xmm7,	%xmm7
732	vext.8	q4, q4, q7, #8			@ vpslldq	$8,	%xmm7,	%xmm4
733
734	@ subbytes
735	vand	q1, q0, q9			@ vpand		%xmm9,	%xmm0,	%xmm1		# 0 = k
736	vshr.u8	q0, q0, #4			@ vpsrlb	$4,	%xmm0,	%xmm0		# 1 = i
737	veor	q7, q7, q4			@ vpxor		%xmm4,	%xmm7,	%xmm7
738	vtbl.8	d4, {q11}, d2		@ vpshufb	%xmm1,	%xmm11,	%xmm2		# 2 = a/k
739	vtbl.8	d5, {q11}, d3
740	veor	q1, q1, q0			@ vpxor		%xmm0,	%xmm1,	%xmm1		# 0 = j
741	vtbl.8	d6, {q10}, d0		@ vpshufb	%xmm0, 	%xmm10,	%xmm3		# 3 = 1/i
742	vtbl.8	d7, {q10}, d1
743	veor	q3, q3, q2			@ vpxor		%xmm2,	%xmm3,	%xmm3		# 3 = iak = 1/i + a/k
744	vtbl.8	d8, {q10}, d2		@ vpshufb	%xmm1,	%xmm10,	%xmm4		# 4 = 1/j
745	vtbl.8	d9, {q10}, d3
746	veor	q7, q7, q12			@ vpxor		.Lk_s63(%rip),	%xmm7,	%xmm7
747	vtbl.8	d6, {q10}, d6		@ vpshufb	%xmm3,	%xmm10,	%xmm3		# 2 = 1/iak
748	vtbl.8	d7, {q10}, d7
749	veor	q4, q4, q2			@ vpxor		%xmm2,	%xmm4,	%xmm4		# 4 = jak = 1/j + a/k
750	vtbl.8	d4, {q10}, d8		@ vpshufb	%xmm4,	%xmm10,	%xmm2		# 3 = 1/jak
751	vtbl.8	d5, {q10}, d9
752	veor	q3, q3, q1			@ vpxor		%xmm1,	%xmm3,	%xmm3		# 2 = io
753	veor	q2, q2, q0			@ vpxor		%xmm0,	%xmm2,	%xmm2		# 3 = jo
754	vtbl.8	d8, {q15}, d6		@ vpshufb	%xmm3,	%xmm13,	%xmm4		# 4 = sbou
755	vtbl.8	d9, {q15}, d7
756	vtbl.8	d2, {q14}, d4		@ vpshufb	%xmm2,	%xmm12,	%xmm1		# 0 = sb1t
757	vtbl.8	d3, {q14}, d5
758	veor	q1, q1, q4			@ vpxor		%xmm4,	%xmm1,	%xmm1		# 0 = sbox output
759
760	@ add in smeared stuff
761	veor	q0, q1, q7			@ vpxor	%xmm7,	%xmm1,	%xmm0
762	veor	q7, q1, q7			@ vmovdqa	%xmm0,	%xmm7
763	bx	lr
764.size	_vpaes_schedule_round,.-_vpaes_schedule_round
765
766@@
767@@  .aes_schedule_transform
768@@
769@@  Linear-transform q0 according to tables at [r11]
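@@  i.e. per byte b:  out = lo_tbl[b & 0x0f] ^ hi_tbl[b >> 4], with lo_tbl
@@  at [r11] and hi_tbl at [r11]+16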
770@@
771@@  Requires that q9 = 0x0F0F... as in preheat
772@@  Output in q0
773@@  Clobbers q1, q2, q14, q15
774@@
775.type	_vpaes_schedule_transform,%function
776.align	4
777_vpaes_schedule_transform:
778	vld1.64	{q14,q15}, [r11]	@ vmovdqa	(%r11),	%xmm2 	# lo
779					@ vmovdqa	16(%r11),	%xmm1 # hi
780	vand	q1, q0, q9		@ vpand	%xmm9,	%xmm0,	%xmm1
781	vshr.u8	q0, q0, #4		@ vpsrlb	$4,	%xmm0,	%xmm0
782	vtbl.8	d4, {q14}, d2	@ vpshufb	%xmm1,	%xmm2,	%xmm2
783	vtbl.8	d5, {q14}, d3
784	vtbl.8	d0, {q15}, d0	@ vpshufb	%xmm0,	%xmm1,	%xmm0
785	vtbl.8	d1, {q15}, d1
786	veor	q0, q0, q2		@ vpxor	%xmm2,	%xmm0,	%xmm0
787	bx	lr
788.size	_vpaes_schedule_transform,.-_vpaes_schedule_transform
789
790@@
791@@  .aes_schedule_mangle
792@@
793@@  Mangles q0 from (basis-transformed) standard version
794@@  to our version.
795@@
796@@  On encrypt,
797@@    xor with 0x63
798@@    multiply by circulant 0,1,1,1
799@@    apply shiftrows transform
800@@
801@@  On decrypt,
802@@    xor with 0x63
803@@    multiply by "inverse mixcolumns" circulant E,B,D,9
804@@    deskew
805@@    apply shiftrows transform
806@@
807@@
808@@  Writes out to [r2], and increments or decrements it
809@@  Keeps track of round number mod 4 in r8
810@@  Preserves q0
811@@  Clobbers q1-q5
812@@
813.type	_vpaes_schedule_mangle,%function
814.align	4
815_vpaes_schedule_mangle:
816	tst	r3, r3
817	vmov	q4, q0			@ vmovdqa	%xmm0,	%xmm4	# save xmm0 for later
818	adr	r11, .Lk_mc_forward	@ Must be aligned to 8 mod 16.
819	vld1.64	{q5}, [r11]		@ vmovdqa	.Lk_mc_forward(%rip),%xmm5
820	bne	.Lschedule_mangle_dec
821
822	@ encrypting
823	@ Write to q2 so we do not overlap table and destination below.
824	veor	q2, q0, q12		@ vpxor		.Lk_s63(%rip),	%xmm0,	%xmm4
825	add	r2, r2, #16		@ add		$16,	%rdx
826	vtbl.8	d8, {q2}, d10	@ vpshufb	%xmm5,	%xmm4,	%xmm4
827	vtbl.8	d9, {q2}, d11
828	vtbl.8	d2, {q4}, d10	@ vpshufb	%xmm5,	%xmm4,	%xmm1
829	vtbl.8	d3, {q4}, d11
830	vtbl.8	d6, {q1}, d10	@ vpshufb	%xmm5,	%xmm1,	%xmm3
831	vtbl.8	d7, {q1}, d11
832	veor	q4, q4, q1		@ vpxor		%xmm1,	%xmm4,	%xmm4
833	vld1.64	{q1}, [r8]		@ vmovdqa	(%r8,%r10),	%xmm1
834	veor	q3, q3, q4		@ vpxor		%xmm4,	%xmm3,	%xmm3
835
836	b	.Lschedule_mangle_both
837.align	4
838.Lschedule_mangle_dec:
839	@ inverse mix columns
840	adr	r11, .Lk_dksd 		@ lea		.Lk_dksd(%rip),%r11
841	vshr.u8	q1, q4, #4		@ vpsrlb	$4,	%xmm4,	%xmm1	# 1 = hi
842	vand	q4, q4, q9		@ vpand		%xmm9,	%xmm4,	%xmm4	# 4 = lo
843
844	vld1.64	{q14,q15}, [r11]! 	@ vmovdqa	0x00(%r11),	%xmm2
845					@ vmovdqa	0x10(%r11),	%xmm3
846	vtbl.8	d4, {q14}, d8	@ vpshufb	%xmm4,	%xmm2,	%xmm2
847	vtbl.8	d5, {q14}, d9
848	vtbl.8	d6, {q15}, d2	@ vpshufb	%xmm1,	%xmm3,	%xmm3
849	vtbl.8	d7, {q15}, d3
850	@ Load .Lk_dksb ahead of time.
851	vld1.64	{q14,q15}, [r11]! 	@ vmovdqa	0x20(%r11),	%xmm2
852					@ vmovdqa	0x30(%r11),	%xmm3
853	@ Write to q13 so we do not overlap table and destination.
854	veor	q13, q3, q2		@ vpxor		%xmm2,	%xmm3,	%xmm3
855	vtbl.8	d6, {q13}, d10	@ vpshufb	%xmm5,	%xmm3,	%xmm3
856	vtbl.8	d7, {q13}, d11
857
858	vtbl.8	d4, {q14}, d8	@ vpshufb	%xmm4,	%xmm2,	%xmm2
859	vtbl.8	d5, {q14}, d9
860	veor	q2, q2, q3		@ vpxor		%xmm3,	%xmm2,	%xmm2
861	vtbl.8	d6, {q15}, d2	@ vpshufb	%xmm1,	%xmm3,	%xmm3
862	vtbl.8	d7, {q15}, d3
863	@ Load .Lk_dkse ahead of time.
864	vld1.64	{q14,q15}, [r11]! 	@ vmovdqa	0x40(%r11),	%xmm2
865					@ vmovdqa	0x50(%r11),	%xmm3
866	@ Write to q13 so we do not overlap table and destination.
867	veor	q13, q3, q2		@ vpxor		%xmm2,	%xmm3,	%xmm3
868	vtbl.8	d6, {q13}, d10	@ vpshufb	%xmm5,	%xmm3,	%xmm3
869	vtbl.8	d7, {q13}, d11
870
871	vtbl.8	d4, {q14}, d8	@ vpshufb	%xmm4,	%xmm2,	%xmm2
872	vtbl.8	d5, {q14}, d9
873	veor	q2, q2, q3		@ vpxor		%xmm3,	%xmm2,	%xmm2
874	vtbl.8	d6, {q15}, d2	@ vpshufb	%xmm1,	%xmm3,	%xmm3
875	vtbl.8	d7, {q15}, d3
876	@ Load .Lk_dks9 ahead of time.
877	vld1.64	{q14,q15}, [r11]! 	@ vmovdqa	0x60(%r11),	%xmm2
878					@ vmovdqa	0x70(%r11),	%xmm4
879	@ Write to q13 so we do not overlap table and destination.
880	veor	q13, q3, q2		@ vpxor		%xmm2,	%xmm3,	%xmm3
881
882	vtbl.8	d4, {q14}, d8	@ vpshufb	%xmm4,	%xmm2,	%xmm2
883	vtbl.8	d5, {q14}, d9
884	vtbl.8	d6, {q13}, d10	@ vpshufb	%xmm5,	%xmm3,	%xmm3
885	vtbl.8	d7, {q13}, d11
886	vtbl.8	d8, {q15}, d2	@ vpshufb	%xmm1,	%xmm4,	%xmm4
887	vtbl.8	d9, {q15}, d3
888	vld1.64	{q1}, [r8]		@ vmovdqa	(%r8,%r10),	%xmm1
889	veor	q2, q2, q3		@ vpxor	%xmm3,	%xmm2,	%xmm2
890	veor	q3, q4, q2		@ vpxor	%xmm2,	%xmm4,	%xmm3
891
892	sub	r2, r2, #16		@ add	$-16,	%rdx
893
894.Lschedule_mangle_both:
895	@ Write to q2 so table and destination do not overlap.
896	vtbl.8	d4, {q3}, d2	@ vpshufb	%xmm1,	%xmm3,	%xmm3
897	vtbl.8	d5, {q3}, d3
898	add	r8, r8, #64-16		@ add	$-16,	%r8
899	and	r8, r8, #~(1<<6)	@ and	$0x30,	%r8
900	vst1.64	{q2}, [r2]		@ vmovdqu	%xmm3,	(%rdx)
901	bx	lr
902.size	_vpaes_schedule_mangle,.-_vpaes_schedule_mangle
903
904.globl	vpaes_set_encrypt_key
905.hidden	vpaes_set_encrypt_key
906.type	vpaes_set_encrypt_key,%function
907.align	4
908vpaes_set_encrypt_key:
909	stmdb	sp!, {r7,r8,r9,r10,r11, lr}
910	vstmdb	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
911
912	lsr	r9, r1, #5		@ shr	$5,%eax
913	add	r9, r9, #5		@ $5,%eax
914	str	r9, [r2,#240]		@ mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
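	@ For 128/192/256-bit keys this stores 9/11/13: vpaes keeps one fewer
	@ than the conventional round count; the *_key_to_bsaes converters
	@ below add the one back.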
915
916	mov	r3, #0		@ mov	$0,%ecx
917	mov	r8, #0x30		@ mov	$0x30,%r8d
918	bl	_vpaes_schedule_core
919	eor	r0, r0, r0
920
921	vldmia	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
922	ldmia	sp!, {r7,r8,r9,r10,r11, pc}	@ return
923.size	vpaes_set_encrypt_key,.-vpaes_set_encrypt_key
924
925.globl	vpaes_set_decrypt_key
926.hidden	vpaes_set_decrypt_key
927.type	vpaes_set_decrypt_key,%function
928.align	4
929vpaes_set_decrypt_key:
930	stmdb	sp!, {r7,r8,r9,r10,r11, lr}
931	vstmdb	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
932
933	lsr	r9, r1, #5		@ shr	$5,%eax
934	add	r9, r9, #5		@ $5,%eax
935	str	r9, [r2,#240]		@ mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
936	lsl	r9, r9, #4		@ shl	$4,%eax
937	add	r2, r2, #16		@ lea	16(%rdx,%rax),%rdx
938	add	r2, r2, r9
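	@ r2 now points at the final round key (e.g. offset 160 for a 128-bit
	@ key); the decryption schedule is written back-to-front from here via
	@ the r2 decrements in _vpaes_schedule_mangle and
	@ .Lschedule_mangle_last_dec.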
939
940	mov	r3, #1		@ mov	$1,%ecx
941	lsr	r8, r1, #1		@ shr	$1,%r8d
942	and	r8, r8, #32		@ and	$32,%r8d
943	eor	r8, r8, #32		@ xor	$32,%r8d	# nbits==192?0:32
944	bl	_vpaes_schedule_core
945
946	vldmia	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
947	ldmia	sp!, {r7,r8,r9,r10,r11, pc}	@ return
948.size	vpaes_set_decrypt_key,.-vpaes_set_decrypt_key
949
950@ Additional constants for converting to bsaes.
951.type	_vpaes_convert_consts,%object
952.align	4
953_vpaes_convert_consts:
954@ .Lk_opt_then_skew applies skew(opt(x)) XOR 0x63, where skew is the linear
955@ transform in the AES S-box. 0x63 is incorporated into the low half of the
956@ table. This was computed with the following script:
957@
958@   def u64s_to_u128(x, y):
959@       return x | (y << 64)
960@   def u128_to_u64s(w):
961@       return w & ((1<<64)-1), w >> 64
962@   def get_byte(w, i):
963@       return (w >> (i*8)) & 0xff
964@   def apply_table(table, b):
965@       lo = b & 0xf
966@       hi = b >> 4
967@       return get_byte(table[0], lo) ^ get_byte(table[1], hi)
968@   def opt(b):
969@       table = [
970@           u64s_to_u128(0xFF9F4929D6B66000, 0xF7974121DEBE6808),
971@           u64s_to_u128(0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0),
972@       ]
973@       return apply_table(table, b)
974@   def rot_byte(b, n):
975@       return 0xff & ((b << n) | (b >> (8-n)))
976@   def skew(x):
977@       return (x ^ rot_byte(x, 1) ^ rot_byte(x, 2) ^ rot_byte(x, 3) ^
978@               rot_byte(x, 4))
979@   table = [0, 0]
980@   for i in range(16):
981@       table[0] |= (skew(opt(i)) ^ 0x63) << (i*8)
982@       table[1] |= skew(opt(i<<4)) << (i*8)
983@   print("	.quad	0x%016x, 0x%016x" % u128_to_u64s(table[0]))
984@   print("	.quad	0x%016x, 0x%016x" % u128_to_u64s(table[1]))
985.Lk_opt_then_skew:
986.quad	0x9cb8436798bc4763, 0x6440bb9f6044bf9b
987.quad	0x1f30062936192f00, 0xb49bad829db284ab
988
989@ .Lk_decrypt_transform is a permutation which performs an 8-bit left-rotation
990@ followed by a byte-swap on each 32-bit word of a vector. E.g., 0x11223344
991@ becomes 0x22334411 and then 0x11443322.
992.Lk_decrypt_transform:
993.quad	0x0704050603000102, 0x0f0c0d0e0b08090a
994.size	_vpaes_convert_consts,.-_vpaes_convert_consts
995
996@ void vpaes_encrypt_key_to_bsaes(AES_KEY *bsaes, const AES_KEY *vpaes);
997.globl	vpaes_encrypt_key_to_bsaes
998.hidden	vpaes_encrypt_key_to_bsaes
999.type	vpaes_encrypt_key_to_bsaes,%function
1000.align	4
1001vpaes_encrypt_key_to_bsaes:
1002	stmdb	sp!, {r11, lr}
1003
1004	@ See _vpaes_schedule_core for the key schedule logic. In particular,
1005	@ _vpaes_schedule_transform(.Lk_ipt) (section 2.2 of the paper),
1006	@ _vpaes_schedule_mangle (section 4.3), and .Lschedule_mangle_last
1007	@ contain the transformations not in the bsaes representation. This
1008	@ function inverts those transforms.
1009	@
1010	@ Note also that bsaes-armv7.pl expects aes-armv4.pl's key
1011	@ representation, which does not match the other aes_nohw_*
1012	@ implementations. The ARM aes_nohw_* stores each 32-bit word
1013	@ byteswapped, as a convenience for (unsupported) big-endian ARM, at the
1014	@ cost of extra REV and VREV32 operations in little-endian ARM.
1015
1016	vmov.i8	q9, #0x0f		@ Required by _vpaes_schedule_transform
1017	adr	r2, .Lk_mc_forward	@ Must be aligned to 8 mod 16.
1018	add	r3, r2, #0x90		@ .Lk_sr+0x10-.Lk_mc_forward = 0x90 (Apple's toolchain doesn't support the expression)
1019
1020	vld1.64	{q12}, [r2]
1021	vmov.i8	q10, #0x5b		@ .Lk_s63 from vpaes-x86_64
1022	adr	r11, .Lk_opt		@ Must be aligned to 8 mod 16.
1023	vmov.i8	q11, #0x63		@ .Lk_s63 without .Lk_ipt applied
1024
1025	@ vpaes stores one fewer round count than bsaes, but the number of keys
1026	@ is the same.
1027	ldr	r2, [r1,#240]
1028	add	r2, r2, #1
1029	str	r2, [r0,#240]
1030
1031	@ The first key is transformed with _vpaes_schedule_transform(.Lk_ipt).
1032	@ Invert this with .Lk_opt.
1033	vld1.64	{q0}, [r1]!
1034	bl	_vpaes_schedule_transform
1035	vrev32.8	q0, q0
1036	vst1.64	{q0}, [r0]!
1037
1038	@ The middle keys have _vpaes_schedule_transform(.Lk_ipt) applied,
1039	@ followed by _vpaes_schedule_mangle. _vpaes_schedule_mangle XORs 0x63,
1040	@ multiplies by the circulant 0,1,1,1, then applies ShiftRows.
1041.Loop_enc_key_to_bsaes:
1042	vld1.64	{q0}, [r1]!
1043
1044	@ Invert the ShiftRows step (see .Lschedule_mangle_both). Note we cycle
1045	@ r3 in the opposite direction and start at .Lk_sr+0x10 instead of 0x30.
1046	@ We use r3 rather than r8 to avoid a callee-saved register.
1047	vld1.64	{q1}, [r3]
1048	vtbl.8	d4, {q0}, d2
1049	vtbl.8	d5, {q0}, d3
1050	add	r3, r3, #16
1051	and	r3, r3, #~(1<<6)
1052	vmov	q0, q2
1053
1054	@ Handle the last key differently.
1055	subs	r2, r2, #1
1056	beq	.Loop_enc_key_to_bsaes_last
1057
1058	@ Multiply by the circulant. This is its own inverse.
1059	vtbl.8	d2, {q0}, d24
1060	vtbl.8	d3, {q0}, d25
1061	vmov	q0, q1
1062	vtbl.8	d4, {q1}, d24
1063	vtbl.8	d5, {q1}, d25
1064	veor	q0, q0, q2
1065	vtbl.8	d2, {q2}, d24
1066	vtbl.8	d3, {q2}, d25
1067	veor	q0, q0, q1
1068
1069	@ XOR and finish.
1070	veor	q0, q0, q10
1071	bl	_vpaes_schedule_transform
1072	vrev32.8	q0, q0
1073	vst1.64	{q0}, [r0]!
1074	b	.Loop_enc_key_to_bsaes
1075
1076.Loop_enc_key_to_bsaes_last:
1077	@ The final key does not have a basis transform (note
1078	@ .Lschedule_mangle_last inverts the original transform). It only XORs
1079	@ 0x63 and applies ShiftRows. The latter was already inverted in the
1080	@ loop. Note that, because we act on the original representation, we use
1081	@ q11, not q10.
1082	veor	q0, q0, q11
1083	vrev32.8	q0, q0
1084	vst1.64	{q0}, [r0]
1085
1086	@ Wipe registers which contained key material.
1087	veor	q0, q0, q0
1088	veor	q1, q1, q1
1089	veor	q2, q2, q2
1090
1091	ldmia	sp!, {r11, pc}	@ return
1092.size	vpaes_encrypt_key_to_bsaes,.-vpaes_encrypt_key_to_bsaes
1093
1094@ void vpaes_decrypt_key_to_bsaes(AES_KEY *vpaes, const AES_KEY *bsaes);
1095.globl	vpaes_decrypt_key_to_bsaes
1096.hidden	vpaes_decrypt_key_to_bsaes
1097.type	vpaes_decrypt_key_to_bsaes,%function
1098.align	4
1099vpaes_decrypt_key_to_bsaes:
1100	stmdb	sp!, {r11, lr}
1101
1102	@ See _vpaes_schedule_core for the key schedule logic. Note vpaes
1103	@ computes the decryption key schedule in reverse. Additionally,
1104	@ aes-x86_64.pl shares some transformations, so we must only partially
1105	@ invert vpaes's transformations. In general, vpaes computes in a
1106	@ different basis (.Lk_ipt and .Lk_opt) and applies the inverses of
1107	@ MixColumns, ShiftRows, and the affine part of the AES S-box (which is
1108	@ split into a linear skew and XOR of 0x63). We undo all but MixColumns.
1109	@
1110	@ Note also that bsaes-armv7.pl expects aes-armv4.pl's key
1111	@ representation, which does not match the other aes_nohw_*
1112	@ implementations. The ARM aes_nohw_* stores each 32-bit word
1113	@ byteswapped, as a convenience for (unsupported) big-endian ARM, at the
1114	@ cost of extra REV and VREV32 operations in little-endian ARM.
1115
1116	adr	r2, .Lk_decrypt_transform
1117	adr	r3, .Lk_sr+0x30
1118	adr	r11, .Lk_opt_then_skew	@ Input to _vpaes_schedule_transform.
1119	vld1.64	{q12}, [r2]	@ Reuse q12 from encryption.
1120	vmov.i8	q9, #0x0f		@ Required by _vpaes_schedule_transform
1121
1122	@ vpaes stores one fewer round count than bsaes, but the number of keys
1123	@ is the same.
1124	ldr	r2, [r1,#240]
1125	add	r2, r2, #1
1126	str	r2, [r0,#240]
1127
1128	@ Undo the basis change and reapply the S-box affine transform. See
1129	@ .Lschedule_mangle_last.
1130	vld1.64	{q0}, [r1]!
1131	bl	_vpaes_schedule_transform
1132	vrev32.8	q0, q0
1133	vst1.64	{q0}, [r0]!
1134
1135	@ See _vpaes_schedule_mangle for the transform on the middle keys. Note
1136	@ it simultaneously inverts MixColumns and the S-box affine transform.
1137	@ See .Lk_dksd through .Lk_dks9.
1138.Loop_dec_key_to_bsaes:
1139	vld1.64	{q0}, [r1]!
1140
1141	@ Invert the ShiftRows step (see .Lschedule_mangle_both). Note going
1142	@ forwards cancels inverting for which direction we cycle r3. We use r3
1143	@ rather than r8 to avoid a callee-saved register.
1144	vld1.64	{q1}, [r3]
1145	vtbl.8	d4, {q0}, d2
1146	vtbl.8	d5, {q0}, d3
1147	add	r3, r3, #64-16
1148	and	r3, r3, #~(1<<6)
1149	vmov	q0, q2
1150
1151	@ Handle the last key differently.
1152	subs	r2, r2, #1
1153	beq	.Loop_dec_key_to_bsaes_last
1154
1155	@ Undo the basis change and reapply the S-box affine transform.
1156	bl	_vpaes_schedule_transform
1157
1158	@ Rotate each word by 8 bits (cycle the rows) and then byte-swap. We
1159	@ combine the two operations in .Lk_decrypt_transform.
1160	@
1161	@ TODO(davidben): Where does the rotation come from?
1162	vtbl.8	d2, {q0}, d24
1163	vtbl.8	d3, {q0}, d25
1164
1165	vst1.64	{q1}, [r0]!
1166	b	.Loop_dec_key_to_bsaes
1167
1168.Loop_dec_key_to_bsaes_last:
1169	@ The final key only inverts ShiftRows (already done in the loop). See
1170	@ .Lschedule_am_decrypting. Its basis is not transformed.
1171	vrev32.8	q0, q0
1172	vst1.64	{q0}, [r0]!
1173
1174	@ Wipe registers which contained key material.
1175	veor	q0, q0, q0
1176	veor	q1, q1, q1
1177	veor	q2, q2, q2
1178
1179	ldmia	sp!, {r11, pc}	@ return
1180.size	vpaes_decrypt_key_to_bsaes,.-vpaes_decrypt_key_to_bsaes
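@ CTR32 encryption: r0 = in, r1 = out, r2 = number of 16-byte blocks,
@ r3 = key, and the fifth argument on the stack points at the 16-byte IV
@ whose final 32-bit word is the big-endian counter, incremented once per
@ block.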
1181.globl	vpaes_ctr32_encrypt_blocks
1182.hidden	vpaes_ctr32_encrypt_blocks
1183.type	vpaes_ctr32_encrypt_blocks,%function
1184.align	4
1185vpaes_ctr32_encrypt_blocks:
1186	mov	ip, sp
1187	stmdb	sp!, {r7,r8,r9,r10,r11, lr}
1188	@ This function uses q4-q7 (d8-d15), which are callee-saved.
1189	vstmdb	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
1190
1191	cmp	r2, #0
1192	@ r8 is passed on the stack.
1193	ldr	r8, [ip]
1194	beq	.Lctr32_done
1195
1196	@ _vpaes_encrypt_core expects the key in r2, so swap r2 and r3.
1197	mov	r9, r3
1198	mov	r3, r2
1199	mov	r2, r9
1200
1201	@ Load the IV and counter portion.
1202	ldr	r7, [r8, #12]
1203	vld1.8	{q7}, [r8]
1204
1205	bl	_vpaes_preheat
1206	rev	r7, r7		@ The counter is big-endian.
1207
1208.Lctr32_loop:
1209	vmov	q0, q7
1210	vld1.8	{q6}, [r0]!		@ Load input ahead of time
1211	bl	_vpaes_encrypt_core
1212	veor	q0, q0, q6		@ XOR input and result
1213	vst1.8	{q0}, [r1]!
1214	subs	r3, r3, #1
1215	@ Update the counter.
1216	add	r7, r7, #1
1217	rev	r9, r7
1218	vmov.32	d15[1], r9
1219	bne	.Lctr32_loop
1220
1221.Lctr32_done:
1222	vldmia	sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
1223	ldmia	sp!, {r7,r8,r9,r10,r11, pc}	@ return
1224.size	vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks
1225#endif  // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
1226