
On the Linux/ARM context switch: context_switch

2013-11-09 17:18


The function lives in kernel/sched/core.c.

As the comment in the code below says, a process switch consists of two main parts:

Part one: switching the process address space.

Part two: switching each process's CPU register state.

Since every user process has its own address space, each process's page tables differ, so on a process switch the page tables have to be switched accordingly.

Kernel threads, on the other hand, all share the same kernel address space, so no page-table switch is needed for them.

/*
 * context_switch - switch to the new MM and the new
 * thread's register state.
 */
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	struct mm_struct *mm, *oldmm;

	prepare_task_switch(rq, prev, next);

	mm = next->mm;
	oldmm = prev->active_mm;
	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_start_context_switch(prev);

	if (!mm) {	// next is a kernel thread with no user space of its own: keep using the previous task's address space
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);	// next is a user process: switch the address space (page tables)

	if (!prev->mm) {
		prev->active_mm = NULL;
		rq->prev_mm = oldmm;
	}
	/*
	 * Since the runqueue lock will be released by the next
	 * task (which is an invalid locking op but in the case
	 * of the scheduler it's an obvious special-case), so we
	 * do an early lockdep release here:
	 */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif

	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);	// the actual register and stack switch

	barrier();
	/*
	 * this_rq must be evaluated again because prev may have moved
	 * CPUs since it called schedule(), thus the 'rq' on its stack
	 * frame will be invalid.
	 */
	finish_task_switch(this_rq(), prev);
}
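
To make the !mm branch above concrete: a kernel thread has task->mm == NULL and runs on a borrowed active_mm, namely that of whichever task ran before it. Below is a minimal userspace mock of just this decision; the struct names echo the kernel's, but this is illustrative code, not kernel code:

#include <stdio.h>

/* mock types carrying only the fields this sketch needs */
struct mm_struct { unsigned long pgd; };
struct task { struct mm_struct *mm, *active_mm; };

static void pick_mm(struct task *prev, struct task *next)
{
	if (!next->mm)					/* kernel thread: no user space */
		next->active_mm = prev->active_mm;	/* borrow the previous mm */
	else						/* user process */
		next->active_mm = next->mm;		/* switch to its own page tables */
}

int main(void)
{
	struct mm_struct user_mm = { 0x8000 };
	struct task user = { &user_mm, &user_mm };
	struct task kthread = { NULL, NULL };

	pick_mm(&user, &kthread);
	printf("kthread borrows pgd %#lx\n", kthread.active_mm->pgd);
	return 0;
}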


In essence, switch_mm installs the new process's page-table base address into the page-directory base register (the TTB register). The call flow is as follows:

arch/arm/include/asm/mmu_context.h

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
		__check_kvm_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
	else
		cpu_switch_mm(mm->pgd, mm);
}
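
How does the cpu_switch_mm call above reach a concrete implementation? In kernels of this era it is a thin macro in arch/arm/include/asm/proc-fns.h; paraphrased from memory (check your own tree), it reads roughly:

/* note the virt_to_phys(): the hardware TTBR wants a physical address */
#define cpu_switch_mm(pgd, mm)	cpu_do_switch_mm(virt_to_phys(pgd), mm)

cpu_do_switch_mm is then resolved, through the processor vector (multi-CPU builds) or glue macros (single-CPU builds), to the per-core routine, which on ARMv7 is cpu_v7_switch_mm.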

The cpu_switch_mm call above eventually lands in cpu_v7_switch_mm, shown below.
On entry, r0 holds the physical address of the new pgd (mm->pgd) and r1 points to mm; the MM_CONTEXT_ID offset is defined in arch/arm/kernel/asm-offsets.c.
TTB is short for "translation table base", i.e. the page-table base.
arch/arm/mm/proc-v7-2level.S
/*
 *	cpu_v7_switch_mm(pgd_phys, tsk)
 *
 *	Set the translation table base pointer to be pgd_phys
 *
 *	- pgd_phys - physical address of new TTB
 *
 *	It is assumed that:
 *	- we are not using split page tables
 */
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
	mov	r2, #0
	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
#ifdef CONFIG_ARM_ERRATA_430973
	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
#endif
#ifdef CONFIG_ARM_ERRATA_754322
	dsb
#endif
	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
	isb
	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
	isb
#endif
	mov	pc, lr
ENDPROC(cpu_v7_switch_mm)
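
The two mcr instructions above write CONTEXTIDR (CP15 c13) and TTBR0 (CP15 c2). For readers who want to inspect these registers from C, the same encodings work in the read direction (mrc). A minimal sketch, assuming an ARMv7 target in privileged mode; it will not compile for non-ARM targets:

/* read translation table base register 0 (counterpart of
 * "mcr p15, 0, r0, c2, c0, 0" above) */
static inline unsigned long read_ttbr0(void)
{
	unsigned long ttb;
	asm volatile("mrc p15, 0, %0, c2, c0, 0" : "=r" (ttb));
	return ttb;
}

/* read the context ID register (counterpart of
 * "mcr p15, 0, r1, c13, c0, 1" above) */
static inline unsigned long read_contextidr(void)
{
	unsigned long id;
	asm volatile("mrc p15, 0, %0, c13, c0, 1" : "=r" (id));
	return id;
}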


Back in context_switch, the switch_to call (arch/arm/include/asm/switch_to.h)
--> __switch_to (arch/arm/kernel/entry-armv.S).
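
For reference, switch_to itself is only a thin macro around __switch_to; in kernels of this vintage it reads roughly as follows (paraphrased, not a verbatim quote):

#define switch_to(prev, next, last)					\
do {									\
	last = __switch_to(prev,					\
			   task_thread_info(prev),			\
			   task_thread_info(next));			\
} while (0)

This matches the register comment in the assembly below: r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info.
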
In essence, __switch_to saves the outgoing task's register context into that task's thread_info->cpu_context structure,
then restores the incoming task's thread_info->cpu_context values into the CPU registers, so that the stack, the pc and the general-purpose registers all switch over to the new task.
The thread_info->cpu_context structure is as follows:

struct cpu_context_save {
	__u32	r4;
	__u32	r5;
	__u32	r6;
	__u32	r7;
	__u32	r8;
	__u32	r9;
	__u32	sl;		/* sl is an alias for r10 */
	__u32	fp;
	__u32	sp;
	__u32	pc;
	__u32	extra[2];	/* Xscale 'acc' register, etc */
};
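
The TI_CPU_SAVE offset that the assembly below adds to the thread_info pointer (like MM_CONTEXT_ID earlier) is generated by arch/arm/kernel/asm-offsets.c as an offsetof() value. A standalone mock of that mechanism; the thread_info field layout here is invented purely for illustration:

#include <stddef.h>
#include <stdio.h>

struct cpu_context_save {
	unsigned int r4, r5, r6, r7, r8, r9, sl, fp, sp, pc;
	unsigned int extra[2];
};

/* invented layout, just to demonstrate the offsetof() trick */
struct thread_info_mock {
	unsigned long flags;
	int preempt_count;
	struct cpu_context_save cpu_context;
};

int main(void)
{
	/* asm-offsets.c emits constants like this into a generated
	 * header, which the .S files then include */
	printf("TI_CPU_SAVE (mock) = %zu\n",
	       offsetof(struct thread_info_mock, cpu_context));
	return 0;
}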

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE		@ ip = &(prev thread_info)->cpu_context
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0				@ preserve prev (r0) across the notifier call below
	add	r4, r2, #TI_CPU_SAVE		@ r4 = &(next thread_info)->cpu_context
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5				@ return prev: becomes 'last' in switch_to()
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)


Because __switch_to is entered through an ordinary function call, its register saving follows the ARM APCS (ARM Procedure Call Standard), but not exactly.

Where it differs from APCS:

When the outgoing task's register context is saved, lr is stored rather than pc. At that point lr holds the return address, i.e. the line after switch_to in context_switch: the start of the barrier() call. When the task is later switched back in, this saved value is loaded into pc, so execution resumes right there.

Where it matches APCS:

r0-r3 are not saved: under APCS these four registers carry function arguments, and the caller does not expect them to survive the call.

r4-r10 are saved and restored: under APCS the compiler may keep local values in these registers, so, since they will be modified, the callee must save them on entry and restore them on exit.

Finally, we know that when handling an interrupt, Linux on ARM also saves the cpsr register. Why is there no need to save it on a context switch?
This question puzzled me for the length of one cigarette:

One cigarette later, it made sense. An interrupt is asynchronous: it can arrive after any instruction whatsoever, and cpsr (the condition flags) affects how the following instructions execute. If the instruction right after the interrupted one depends on cpsr, and the interrupt handler modified cpsr without saving and restoring it, then on return from the interrupt the next instruction would run with the wrong flags and produce a logic error.

A context switch, by contrast, returns to a fixed, known address, and the instructions that follow are known in advance. As long as the code at the resume point does not depend on the cpsr flags, cpsr need not be saved; and the compiler guarantees exactly that, since APCS treats the condition flags as clobbered across any function call.
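
A small illustration of that guarantee (a hypothetical example, not kernel code): a compiler never keeps condition flags live across an explicit call; any flags-producing comparison is materialized into a register first, so the callee, and any context switch that happens inside it, may clobber cpsr freely.

#include <stdio.h>

static void g(void)
{
	/* stands in for any call that may context-switch away;
	 * it is allowed to trash the cpsr flags */
}

static int f(int a)
{
	int positive = (a > 0);		/* cmp result stored in a register */
	g();				/* flags may be clobbered here */
	return positive ? 1 : 2;	/* tests the register, not stale flags */
}

int main(void)
{
	printf("%d\n", f(5));		/* prints 1 */
	return 0;
}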