
Linux boot flow walkthrough (ARM as an example) <1>

2012-01-30 18:10
==============================================
This article is an original post on this blog. Reposts are welcome; please credit the source:
http://blog.csdn.net/gdt_a20/article/details/7220389
==============================================

Taking ARM as an example, this article walks through the kernel boot process.

Kernel version: linux-3.2.tar.gz

1. arch/arm/kernel/head.S

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
 * r1 = machine nr, r2 = atags or dtb pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
	.arm

	__HEAD				@ #define __HEAD .section ".head.text","ax"
ENTRY(stext)

 THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)

	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
						@ and irqs disabled
						@ (mask IRQ and FIQ, switch to SVC mode)
	mrc	p15, 0, r9, c0, c0		@ get processor id (read the CPU ID register)
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid (see excerpt #1 below)
	movs	r10, r5				@ invalid processor (r5=0)? zero means the lookup failed
 THUMB(	it	eq )			@ force fixup-able long branch encoding
	beq	__error_p			@ yes, error 'p'

#ifndef CONFIG_XIP_KERNEL
	adr	r3, 2f
	ldmia	r3, {r4, r8}
	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
	add	r8, r8, r4			@ PHYS_OFFSET
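To make the arithmetic concrete (assuming, purely for illustration, PAGE_OFFSET = 0xc0000000 and RAM starting at physical 0x30000000): the literal word at label 2f holds its own link-time address, and the word after it holds PAGE_OFFSET. adr loads the run-time (physical) address of 2f into r3, so after the ldmia, r4 = r3 - r4 = PHYS_OFFSET - PAGE_OFFSET, and r8 = PAGE_OFFSET + (PHYS_OFFSET - PAGE_OFFSET) = 0x30000000, i.e. the physical start of RAM.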

==========

#1: arch/arm/kernel/head-common.S

==========

/*
 * Read processor ID register (CP#15, CR0), and look up in the linker-built
 * supported processor list.  Note that we can't use the absolute addresses
 * for the __proc_info lists since we aren't running with the MMU on
 * (and therefore, we are not in the correct address space).  We have to
 * calculate the offset.
 *
 *	r9 = cpuid
 * Returns:
 *	r3, r4, r6 corrupted
 *	r5 = proc_info pointer in physical address space
 *	r9 = cpuid (preserved)
 */
	__CPUINIT
__lookup_processor_type:
	adr	r3, __lookup_processor_type_data	@ PC-relative: run-time address of the data block below
	ldmia	r3, {r4 - r6}			@ r4 = link-time address of that block (.),
						@ r5 = __proc_info_begin, r6 = __proc_info_end
	sub	r3, r3, r4			@ get offset between virt&phys
						@ (run-time address - link-time address = phys/virt offset)
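With that offset in hand, the lookup itself is just a masked compare over the linker-built proc_info list, after fixing up the begin/end pointers. A minimal user-space sketch of the idea in C (the table entries and ID values below are made up for illustration):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Minimal stand-in for struct proc_info_list: just the match fields. */
    struct proc_info {
        uint32_t cpu_val;
        uint32_t cpu_mask;
    };

    /* Illustrative table standing in for the linker-built
     * __proc_info_begin..__proc_info_end region (made-up entries). */
    static const struct proc_info proc_list[] = {
        { 0x410fb760, 0xff0ffff0 },   /* a specific CPU ID pattern        */
        { 0x000f0000, 0x000f0000 },   /* a broad "new-style CPUID" entry  */
    };

    /* What __lookup_processor_type does once the pointers are physical:
     * walk the list and compare (cpuid & mask) against val. */
    static const struct proc_info *lookup_processor_type(uint32_t cpuid)
    {
        for (size_t i = 0; i < sizeof(proc_list) / sizeof(proc_list[0]); i++)
            if ((cpuid & proc_list[i].cpu_mask) == proc_list[i].cpu_val)
                return &proc_list[i];
        return NULL;                  /* the assembly returns r5 = 0 on failure */
    }

    int main(void)
    {
        const struct proc_info *p = lookup_processor_type(0x410fb767);
        printf("%s\n", p ? "match" : "no match");
        return 0;
    }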

==========

Continuing in arch/arm/kernel/head.S

==========

/*
 * r1 = machine no, r2 = atags or dtb,
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 */
	bl	__vet_atags			@ see #1 below (head-common.S)
#ifdef CONFIG_SMP_ON_UP
	bl	__fixup_smp			@ not covered here
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
	bl	__fixup_pv_table		@ not covered here
#endif
	bl	__create_page_tables		@ see #2 below

==========

#1: arch/arm/kernel/head-common.S

==========

/* Determine validity of the r2 atags pointer.  The heuristic requires
 * that the pointer be aligned, in the first 16k of physical RAM and
 * that the ATAG_CORE marker is first and present.  If CONFIG_OF_FLATTREE
 * is selected, then it will also accept a dtb pointer.  Future revisions
 * of this function may be more lenient with the physical address and
 * may also be able to move the ATAGS block if necessary.
 *
 * Returns:
 *  r2 either valid atags pointer, valid dtb pointer, or zero
 *  r5, r6 corrupted
 */
__vet_atags:
	tst	r2, #0x3			@ aligned? if not, give up
	bne	1f

	ldr	r5, [r2, #0]			@ read the first word into r5
#ifdef CONFIG_OF_FLATTREE
	ldr	r6, =OF_DT_MAGIC		@ is it a DTB? accept a device tree blob
	cmp	r5, r6
	beq	2f
#endif
	cmp	r5, #ATAG_CORE_SIZE		@ is first tag ATAG_CORE? must be first
	cmpne	r5, #ATAG_CORE_SIZE_EMPTY
	bne	1f
	ldr	r5, [r2, #4]
	ldr	r6, =ATAG_CORE
	cmp	r5, r6
	bne	1f

2:	mov	pc, lr				@ atag/dtb pointer is ok

1:	mov	r2, #0
	mov	pc, lr
ENDPROC(__vet_atags)
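In C terms the heuristic amounts to the following rough sketch (not kernel code; the constants are hard-coded from memory of the 3.2-era headers, and the function name vet_atags is made up for illustration):

    #include <stdint.h>
    #include <stddef.h>

    #define ATAG_CORE            0x54410001u
    #define ATAG_CORE_SIZE       5u          /* words in a full ATAG_CORE tag   */
    #define ATAG_CORE_SIZE_EMPTY 2u          /* words in an empty ATAG_CORE tag */
    #define DTB_MAGIC_LE         0xedfe0dd0u /* 0xd00dfeed as read by a little-endian CPU */

    /* Returns the pointer unchanged if it looks like valid ATAGs or a DTB,
     * otherwise NULL, mirroring what the assembly does with r2. */
    static const uint32_t *vet_atags(const uint32_t *r2)
    {
        if ((uintptr_t)r2 & 0x3)                 /* must be word aligned */
            return NULL;

        uint32_t first = r2[0];

        if (first == DTB_MAGIC_LE)               /* device tree blob: accept */
            return r2;

        if (first != ATAG_CORE_SIZE &&           /* first tag must be ATAG_CORE */
            first != ATAG_CORE_SIZE_EMPTY)
            return NULL;

        if (r2[1] != ATAG_CORE)
            return NULL;

        return r2;
    }

    int main(void)
    {
        uint32_t fake_atags[2] = { ATAG_CORE_SIZE, ATAG_CORE };
        return vet_atags(fake_atags) ? 0 : 1;
    }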

============

@#2:__create_page_tables

============

/*
 * Setup the initial page tables.  We only setup the barest
 * amount which are required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
	pgtbl	r4, r8				@ page table address

============

	.ltorg
	.align
__enable_mmu_loc:
	.long	.
	.long	__enable_mmu
	.long	__enable_mmu_end
==============


/*
 * Then map boot params address in r2 or
 * the first 1MB of ram if boot params address is not specified.
 */

Since 3.0 the kernel only double-maps (identity-maps) what it actually needs, namely the MMU-enabling code and the boot params / DTB area, instead of the old mechanical double mapping of the first 8MB of RAM.

	mov	pc, lr
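Each first-level entry written by __create_page_tables is a 1MB "section" descriptor: the physical base in the top bits, type and access bits in the bottom. A minimal sketch of that index/flags arithmetic (the real flag bits come from the selected proc_info at run time, so PMD_FLAGS below is a placeholder, and the addresses are examples only):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define SECTION_SHIFT   20u                 /* 1MB sections                  */
    #define PTRS_PER_PGD    4096u               /* 4096 first-level entries      */
    #define PMD_TYPE_SECT   0x2u                /* "section" descriptor type     */
    #define PMD_FLAGS       0x0u                /* cache/AP bits: placeholder    */

    /* Write one 1MB section entry, the way __create_page_tables stores
     * descriptors into the table at r4. */
    static void map_section(uint32_t *pgd, uint32_t virt, uint32_t phys)
    {
        pgd[virt >> SECTION_SHIFT] =
            (phys & ~((1u << SECTION_SHIFT) - 1)) | PMD_TYPE_SECT | PMD_FLAGS;
    }

    int main(void)
    {
        static uint32_t pgd[PTRS_PER_PGD];      /* 16KB swapper_pg_dir stand-in   */
        memset(pgd, 0, sizeof(pgd));

        uint32_t kernel_phys = 0x30008000;      /* example PHYS_OFFSET + TEXT_OFFSET */
        uint32_t kernel_virt = 0xc0008000;      /* PAGE_OFFSET + TEXT_OFFSET         */

        /* identity mapping so the code survives the instant the MMU turns on */
        map_section(pgd, kernel_phys, kernel_phys);
        /* the "real" kernel mapping at PAGE_OFFSET */
        map_section(pgd, kernel_virt, kernel_phys);

        printf("pgd[%u] = %#x\n",
               (unsigned)(kernel_virt >> SECTION_SHIFT),
               (unsigned)pgd[kernel_virt >> SECTION_SHIFT]);
        return 0;
    }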

Back in arch/arm/kernel/head.S:

/*
 * The following calls CPU specific code in a position independent
 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
 * xxx_proc_info structure selected by __lookup_processor_type
 * above.  On return, the CPU will be ready for the MMU to be
 * turned on, and r0 will hold the CPU control register value.
 */
	ldr	r13, =__mmap_switched		@ address to jump to after
						@ mmu has been enabled
	adr	lr, BSYM(1f)			@ return (PIC) address
	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
 THUMB(	mov	pc, r12				)
1:	b	__enable_mmu
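The add pc, r10, #PROCINFO_INITFUNC line is just an indirect call through a code pointer stored at a fixed offset inside the proc_info entry chosen by __lookup_processor_type (on an ARMv7 core this lands in something like __v7_setup in arch/arm/mm/proc-v7.S). A C-flavoured sketch of that indirection, with a made-up structure layout:

    #include <stdio.h>

    /* Very rough stand-in for struct proc_info_list: only the one field that
     * matters for this call; the real structure has many more members and a
     * different layout. */
    struct proc_info {
        void (*initfunc)(void);     /* sits at offset PROCINFO_INITFUNC in the real struct */
    };

    static void cpu_specific_setup(void)
    {
        /* In the kernel this is the per-CPU setup routine, which prepares the
         * core and hands back the control-register value in r0. */
        puts("cpu-specific setup");
    }

    int main(void)
    {
        struct proc_info entry = { .initfunc = cpu_specific_setup };
        struct proc_info *r10 = &entry;     /* chosen earlier by __lookup_processor_type */

        r10->initfunc();                    /* what "add pc, r10, #PROCINFO_INITFUNC" amounts to */
        return 0;
    }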

============

Entering __enable_mmu:

/*
 * Setup common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r4  = page table pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
	b	__turn_mmu_on
ENDPROC(__enable_mmu)
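domain_val(dom, type) simply shifts a two-bit access type into that domain's slot in the Domain Access Control Register (two bits per domain, 16 domains). A quick sketch; the domain numbers below are my recollection of the 3.2-era asm/domain.h values, so treat them as illustrative:

    #include <stdio.h>
    #include <stdint.h>

    /* Two DACR bits per domain: 0b01 = client (checked), 0b11 = manager. */
    #define DOMAIN_CLIENT   1u
    #define DOMAIN_MANAGER  3u

    /* Linux domain numbers (assumed 3.2-era values). */
    #define DOMAIN_KERNEL   0u
    #define DOMAIN_TABLE    0u
    #define DOMAIN_USER     1u
    #define DOMAIN_IO       2u

    #define domain_val(dom, type)  ((uint32_t)(type) << ((dom) * 2))

    int main(void)
    {
        uint32_t dacr = domain_val(DOMAIN_USER,   DOMAIN_MANAGER) |
                        domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |
                        domain_val(DOMAIN_TABLE,  DOMAIN_MANAGER) |
                        domain_val(DOMAIN_IO,     DOMAIN_CLIENT);

        printf("DACR = %#x\n", dacr);   /* 0x1f with the values above */
        return 0;
    }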

==========

/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	5
__turn_mmu_on:
	mov	r0, r0
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg (the MMU comes on here)
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
	mov	r3, r3
	mov	r3, r13				@ jump to r13 = __mmap_switched (a virtual address)
	mov	pc, r3
__enable_mmu_end:
ENDPROC(__turn_mmu_on)

==========

# arch/arm/kernel/head-common.S
/*
 * The following fragment of code is executed with the MMU on in MMU mode,
 * and uses absolute addresses; this is not position independent.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags/dtb pointer
 *  r9  = processor ID
 */
	__INIT
__mmap_switched:
	adr	r3, __mmap_switched_data

	ldmia	r3!, {r4, r5, r6, r7}
	cmp	r4, r5				@ Copy data segment if needed
1:	cmpne	r5, r6
	ldrne	fp, [r4], #4
	strne	fp, [r5], #4
	bne	1b

	mov	fp, #0				@ Clear BSS (and zero fp)
1:	cmp	r6, r7
	strcc	fp, [r6],#4
	bcc	1b

 ARM(	ldmia	r3, {r4, r5, r6, r7, sp})
 THUMB(	ldmia	r3, {r4, r5, r6, r7}	)
 THUMB(	ldr	sp, [r3, #16]		)
	str	r9, [r4]			@ Save processor ID
	str	r1, [r5]			@ Save machine type
	str	r2, [r6]			@ Save atags pointer
	bic	r4, r0, #CR_A			@ Clear 'A' bit
	stmia	r7, {r0, r4}			@ Save control register values
	b	start_kernel
ENDPROC(__mmap_switched)
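Before C code can run, .data may need copying to its run address and the BSS must be zeroed, which is exactly what the two loops above do; the real begin/end addresses come from the __mmap_switched_data table that r3 points at. A self-contained sketch with stand-in arrays (all names and sizes made up):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for the addresses that __mmap_switched_data provides:
     * where .data lives in the image versus where it must run, plus the
     * BSS bounds. All of these are made up for illustration. */
    static uint32_t data_load[4] = { 1, 2, 3, 4 };       /* load address of .data  */
    static uint32_t data_run[4];                          /* run address of .data   */
    static uint32_t bss[8] = { 9, 9, 9, 9, 9, 9, 9, 9 };  /* ".bss" region          */

    int main(void)
    {
        /* Copy the data segment if load and run addresses differ
         * (the first loop in __mmap_switched). */
        if (data_load != data_run)
            for (size_t i = 0; i < 4; i++)
                data_run[i] = data_load[i];

        /* Zero the BSS (the second loop), so statics start out as 0. */
        for (size_t i = 0; i < 8; i++)
            bss[i] = 0;

        /* __mmap_switched then stores r9/r1/r2 into their global variables
         * (processor ID, machine number, atags/dtb pointer) and branches to
         * start_kernel(), never to return. */
        printf("data_run[0]=%u bss[0]=%u\n", (unsigned)data_run[0], (unsigned)bss[0]);
        return 0;
    }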

===========



From here on the code works with absolute (virtual) addresses: the data segment is copied if necessary, the BSS is cleared, and execution branches to start_kernel(), entering the C environment.