/* SPDX-License-Identifier: GPL-2.0-only */

#include <cpu/x86/post_code.h>
#include <arch/ram_segs.h>

/* Place the stack in the .bss section. It's not necessary to define it in
 * the linker script. */
	.section .bss, "aw", @nobits
.global _stack
.global _estack
.global _stack_size

.align 16
_stack:
.space CONFIG_STACK_SIZE
_estack:
.set _stack_size, _estack - _stack
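/*
 * _stack and _estack bound a CONFIG_STACK_SIZE-byte region, and
 * _stack_size is an absolute symbol holding its length. As an
 * illustration (assumed declarations, not part of this file), C code
 * could reference the region as:
 *
 *	extern u8 _stack[];
 *	extern u8 _estack[];
 */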

	.section ".text._start", "ax", @progbits
#if ENV_X86_64
	.code64
#else
	.code32
#endif
	.globl _start
_start:
	cli
#if ENV_X86_64
	movabs	$gdtaddr, %rax
	lgdt	(%rax)
#else
	lgdt	%cs:gdtaddr
	ljmp	$RAM_CODE_SEG, $1f
#endif
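	/*
	 * The ljmp above reloaded %cs with RAM_CODE_SEG in the 32-bit
	 * path; in the 64-bit path %cs is switched to RAM_CODE_SEG64 via
	 * SetCodeSelector below. Here the data segment registers are
	 * loaded with RAM_DATA_SEG.
	 */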
1:	movl	$RAM_DATA_SEG, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss
	xor	%eax, %eax /* zero out the fs and gs segment selectors */
	movl	%eax, %fs
	movl	%eax, %gs /* %gs will later be used for cpu_info */
#if ENV_X86_64
	mov	$RAM_CODE_SEG64, %ecx
	call	SetCodeSelector
#endif

	post_code(POSTCODE_ENTRY_C_START)		/* post 13 */

	cld

#if ENV_X86_64
	mov	%rdi, %rax
	movabs	%rax, _cbmem_top_ptr
	movabs	$_stack, %rdi
#else
	/* The return address is at 0(%esp), the calling argument at 4(%esp) */
	movl	4(%esp), %eax
	movl	%eax, _cbmem_top_ptr
	leal	_stack, %edi
#endif
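	/*
	 * Illustrative C equivalent of the block above, assuming an
	 * entry point that takes cbmem_top as its only argument (the
	 * prototype is assumed here, not taken from this file):
	 *
	 *	void _start(uintptr_t cbmem_top)
	 *	{
	 *		_cbmem_top_ptr = cbmem_top;
	 *		...
	 *	}
	 *
	 * In the 64-bit SysV ABI the argument arrives in %rdi; in the
	 * 32-bit ABI it sits on the stack above the return address.
	 */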

#if ENV_X86_64
	/** Poison the stack. Code should not count on the
	 * stack being full of zeros. This stack poisoning
	 * recently uncovered a bug in the broadcast SIPI
	 * code.
	 */
	movabs	$_estack, %rcx
	sub	%rdi, %rcx
	shr	$3, %rcx   /* assumes the stack size is a multiple of 8 */
	movq	$0xDEADBEEFDEADBEEF, %rax
	rep
	stosq
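	/*
	 * The rep stosq above is equivalent to this illustrative C
	 * (a sketch, not part of the build):
	 *
	 *	for (uint64_t *p = (uint64_t *)_stack;
	 *	     p < (uint64_t *)_estack; p++)
	 *		*p = 0xDEADBEEFDEADBEEFull;
	 */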

	/* Set the new stack pointer, with enforced 16-byte alignment. */
	movabs	$_estack, %rsp
	movq	$(0xfffffffffffffff0), %rax
	and	%rax, %rsp
#else
	/** Poison the stack. Code should not count on the
	 * stack being full of zeros. This stack poisoning
	 * recently uncovered a bug in the broadcast SIPI
	 * code.
	 */
	movl	$_estack, %ecx
	subl	%edi, %ecx
	shrl	$2, %ecx   /* assumes the stack size is a multiple of 4 */
	movl	$0xDEADBEEF, %eax
	rep
	stosl

	/* Set the new stack pointer, with enforced 16-byte alignment. */
	movl	$_estack, %esp
	andl	$(0xfffffff0), %esp
#endif

	/*
	 *	Now we are finished. Memory is up, data is copied and
	 *	bss is cleared. Call the main routine and let it do
	 *	the rest.
	 */
	post_code(POSTCODE_PRE_HARDWAREMAIN)	/* post 6e */

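	/*
	 * Align the stack to 16 bytes once more before calling into C;
	 * the SysV calling conventions expect an aligned stack.
	 */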
#if ENV_X86_64
	movq	$0xFFFFFFFFFFFFFFF0, %rax
	and	%rax, %rsp
#else
	andl	$0xFFFFFFF0, %esp
#endif

#if CONFIG(ASAN_IN_RAMSTAGE)
	call asan_init
#endif

#if CONFIG(GDB_WAIT)
	call gdb_hw_init
	call gdb_stub_breakpoint
#endif
	call	main
	/* NOTREACHED */
.Lhlt:
	post_code(POSTCODE_DEAD_CODE)	/* post ee */
	hlt
	jmp	.Lhlt

#if CONFIG(GDB_WAIT)

	.globl gdb_stub_breakpoint
gdb_stub_breakpoint:
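	/*
	 * Convert the return address into a fake interrupt frame so the
	 * common handler can treat this as vector 32. The pushes below
	 * mimic what the hardware pushes on an interrupt (flags, CS,
	 * return IP), followed by the error code and vector number that
	 * interrupt stubs conventionally push before the common handler.
	 */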
#if ENV_X86_64
	pop	%rax	/* Return address */
	pushfq
	mov	%cs, %rbx
	push	%rbx
	push	%rax	/* Return address */
	push	$0	/* No error code */
	push	$32	/* vector 32 is user defined */
#else
	popl	%eax	/* Return address */
	pushfl
	pushl	%cs
	pushl	%eax	/* Return address */
	pushl	$0	/* No error code */
	pushl	$32	/* vector 32 is user defined */
#endif
	jmp	int_hand
#endif

	.globl gdt, gdt_end
	.global per_cpu_segment_descriptors, per_cpu_segment_selector

gdtaddr:
	.word	gdt_end - gdt - 1
#if ENV_X86_64
	.quad	gdt
#else
	.long	gdt		/* we know the offset */
#endif
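
	/*
	 * gdtaddr is the pseudo-descriptor operand for lgdt: a 16-bit
	 * limit (size of the GDT minus one) followed by the linear base
	 * address (32 bits here, 64 bits under ENV_X86_64).
	 */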

	.data

	/* This is the GDT for the C part of coreboot (the code compiled
	 * with GCC). It is different from the GDT in the assembly part
	 * of coreboot, which is defined in gdt_init.S.
	 *
	 * When the machine is initially started, we use a very simple
	 * GDT from ROM (the one in gdt_init.S) which only contains the
	 * entries we need for protected mode.
	 *
	 * When we're executing code from RAM, we want to do more complex
	 * things, like initializing PCI option ROMs in real mode, or doing
	 * a resume from a suspend to RAM.
	 */
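	/*
	 * Each GDT entry below is 8 bytes, laid out (per the Intel SDM) as:
	 *	.word	limit[15:0], base[15:0]
	 *	.byte	base[23:16], access, flags|limit[19:16], base[31:24]
	 *
	 * Worked example, the flat code segment
	 * (.word 0xffff, 0x0000; .byte 0x00, 0x9b, 0xcf, 0x00):
	 *	base   = 0x00000000
	 *	limit  = 0xfffff with G=1 (4 KiB granularity) => 4 GiB
	 *	access = 0x9b: present, DPL 0, code, execute/read, accessed
	 *	flags  = 0xc: G=1, D=1 (32-bit default operand size)
	 */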
gdt:
	/* selgdt 0, unused */
	.word	0x0000, 0x0000		/* dummy */
	.byte	0x00, 0x00, 0x00, 0x00

	/* selgdt 8, unused */
	.word	0x0000, 0x0000		/* dummy */
	.byte	0x00, 0x00, 0x00, 0x00

	/* selgdt 0x10, flat code segment */
	.word	0xffff, 0x0000
	.byte	0x00, 0x9b, 0xcf, 0x00 /* G=1 and limit[19:16]=0xf, so we
					* get a 4 GiB limit
					*/

	/* selgdt 0x18, flat data segment */
	.word	0xffff, 0x0000
#if ENV_X86_64
	.byte	0x00, 0x92, 0xcf, 0x00
#else
	.byte	0x00, 0x93, 0xcf, 0x00
#endif

	/* selgdt 0x20, unused */
	.word	0x0000, 0x0000		/* dummy */
	.byte	0x00, 0x00, 0x00, 0x00

	/* The next two entries are used for executing VGA option ROMs */

	/* selgdt 0x28, 16-bit 64k code at 0x00000000 */
	.word	0xffff, 0x0000
	.byte	0, 0x9a, 0, 0

	/* selgdt 0x30, 16-bit 64k data at 0x00000000 */
	.word	0xffff, 0x0000
	.byte	0, 0x92, 0, 0

	/* The next two entries are used for the ACPI S3 resume */

	/* selgdt 0x38, flat 16-bit data segment */
	.word	0x0000, 0x0000		/* dummy */
	.byte	0x00, 0x93, 0x8f, 0x00 /* G=1 and limit[19:16]=0xf */

	/* selgdt 0x40, flat 16-bit code segment */
	.word	0xffff, 0x0000
	.byte	0x00, 0x9b, 0x8f, 0x00 /* G=1 and limit[19:16]=0xf, so we
					* get a 4 GiB limit
					*/

#if ENV_X86_64
	/* selgdt 0x48, flat x64 code segment */
	.word	0xffff, 0x0000
	.byte	0x00, 0x9b, 0xaf, 0x00
#endif
per_cpu_segment_descriptors:
	.rept CONFIG_MAX_CPUS
	/* flat data segment */
	.word	0xffff, 0x0000
#if ENV_X86_64
	.byte	0x00, 0x92, 0xcf, 0x00
#else
	.byte	0x00, 0x93, 0xcf, 0x00
#endif
	.endr
gdt_end:

/* Segment selector pointing to the first per_cpu_segment_descriptor. */
per_cpu_segment_selector:
	.long	per_cpu_segment_descriptors - gdt
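
	/*
	 * One descriptor is reserved per CPU, so CPU n's data segment
	 * can be addressed with the selector
	 * per_cpu_segment_selector + n * 8 (8 bytes per GDT entry).
	 * The descriptor bases are presumably patched at runtime to
	 * point at each CPU's own data (used for cpu_info via %gs).
	 */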

	.section ".text._start", "ax", @progbits
#if ENV_X86_64
SetCodeSelector:
	# Save rsp because iret will align it to a 16-byte boundary.
	mov	%rsp, %rdx

	# Use iret to jump to a 64-bit offset in a new code segment;
	# iret will pop cs:rip, flags, then ss:rsp.
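	#
	# Stack layout consumed by iretq (per the Intel SDM), from the
	# top of the stack at the time of the iretq below:
	#	RIP    = setCodeSelectorLongJump
	#	CS     = new selector passed in %rcx
	#	RFLAGS
	#	RSP    = stale value; the real rsp is restored from %rdx
	#	SS
	#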
	mov	%ss, %ax	# Need to push ss, but the push-ss
	push	%rax		# instruction is not valid in 64-bit mode,
				# so go through %rax.
	push	%rsp
	pushfq
	push	%rcx		# %cx is the code segment selector from the caller
	movabs	$setCodeSelectorLongJump, %rax
	push	%rax

	# The iret will continue at the next instruction, with the new
	# cs value loaded.
	iretq

setCodeSelectorLongJump:
	# Restore rsp; it might not have been 16-byte aligned on entry.
	mov	%rdx, %rsp
	ret
#endif