
The Linux 2.6 kernel kthread (kernel thread) mechanism explained (with my own annotations)

2011-07-08 12:47
struct kthread {
        int should_stop;
        struct completion exited;
};
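A note on how this little structure is reached later: struct kthread lives on the created thread's own stack (see kthread() further down), and the rest of kthread.c finds it through task->vfork_done, which kthread() points at the exited field. In the kernel version this excerpt comes from, the helper macro and the kthread_should_stop() that thread functions call look roughly like the following (reconstructed from memory, not part of the excerpt above):

#define to_kthread(tsk) \
        container_of((tsk)->vfork_done, struct kthread, exited)

int kthread_should_stop(void)
{
        /* read the should_stop flag that kthread_stop() sets for us */
        return to_kthread(current)->should_stop;
}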
struct kthread_create_info
{
        /* Information passed to kthread() from kthreadd. */
        int (*threadfn)(void *data);    /* the thread body: the routine that does the actual work */
        void *data;

        /* Result passed back to kthread_create() from kthreadd. */
        struct task_struct *result;
        struct completion done;

        struct list_head list;
};
struct task_struct *kthread_create(int (*threadfn)(void *data),
                                   void *data,
                                   const char namefmt[],
                                   ...)
{
        struct kthread_create_info create;      /* wrapper describing the thread we want created */

        create.threadfn = threadfn;
        create.data = data;
        init_completion(&create.done);          /* initialize the completion */

        spin_lock(&kthread_create_lock);        /* the global creation list must be locked before touching it */
        list_add_tail(&create.list, &kthread_create_list);      /* add the request to the global list while holding the lock */
        spin_unlock(&kthread_create_lock);

        /*
         * Wake up kthreadd.  Note: in the 2.4 kernel wake_up_process() merely
         * put the target task at the front of the run queue; in 2.6 it has
         * been rewritten almost beyond recognition.
         */
        wake_up_process(kthreadd_task);
        /*
         * The caller sleeps here.  Once some other task calls complete() on
         * this completion, this task is woken and continues from this point.
         */
        wait_for_completion(&create.done);

        if (!IS_ERR(create.result)) {
                struct sched_param param = { .sched_priority = 0 };
                va_list args;

                va_start(args, namefmt);
                vsnprintf(create.result->comm, sizeof(create.result->comm),
                          namefmt, args);
                va_end(args);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
                 */
                sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
                set_cpus_allowed_ptr(create.result, cpu_all_mask);
        }
        return create.result;
}
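The kthread_run() mentioned in the next comment does not appear in this excerpt; it is just a thin wrapper that calls kthread_create() and immediately wakes the new thread. In the 2.6 kernels it is defined in include/linux/kthread.h roughly as follows:

#define kthread_run(threadfn, data, namefmt, ...)                          \
({                                                                         \
        struct task_struct *__k                                            \
                = kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
        if (!IS_ERR(__k))                                                  \
                wake_up_process(__k);                                      \
        __k;                                                               \
})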
/* I have arranged the functions here in the order of the kthread_run() call path. */
int kthreadd(void *unused)
{
        /* note: "current" here is no longer the task that called kthread_run(); it is kthreadd itself */
        struct task_struct *tsk = current;

        /* Setup a clean context for our children to inherit. */
        set_task_comm(tsk, "kthreadd");
        ignore_signals(tsk);
        set_cpus_allowed_ptr(tsk, cpu_all_mask);
        set_mems_allowed(node_possible_map);

        current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);  /* mark kthreadd as TASK_INTERRUPTIBLE */
                if (list_empty(&kthread_create_list))   /* if the global creation list is empty, */
                        schedule();                     /* yield the CPU and sleep */
                __set_current_state(TASK_RUNNING);      /* otherwise (or once woken) go back to TASK_RUNNING */

                spin_lock(&kthread_create_lock);
                /* re-check the creation list after resuming; it buffers pending creation requests */
                while (!list_empty(&kthread_create_list)) {
                        struct kthread_create_info *create;

                        /* take the kthread_create_info queued by kthread_create() off the list */
                        create = list_entry(kthread_create_list.next,
                                            struct kthread_create_info, list);
                        list_del_init(&create->list);   /* remove it from the list, much like acknowledging an interrupt */
                        spin_unlock(&kthread_create_lock);

                        create_kthread(create);         /* the function that actually creates the kernel thread */

                        spin_lock(&kthread_create_lock);
                }
                spin_unlock(&kthread_create_lock);
        }

        return 0;
}
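For completeness: kthreadd itself is not created through this path. It is spawned exactly once at boot, in rest_init() in init/main.c, along the lines of the following two lines (simplified from memory):

        pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
        kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);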
static void create_kthread(struct kthread_create_info *create)
{
        int pid;

        /* We want our own signal handler (we take no signals by default). */
        /* This is where the kernel thread is really created; underneath it boils down to do_fork(). */
        pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
                /* kthread_create() is waiting in wait_for_completion(); on failure,
                 * complete it here so the caller can return the error.  (On success
                 * kthread() itself completes it once it has filled in ->result; a
                 * thread made with kernel_thread() still has to be woken before it runs.) */
                create->result = ERR_PTR(pid);
                complete(&create->done);
        }
}
/*
 * This is the newly created kernel thread itself -- the "upper layer" built on
 * top of kthread_create_info; the kernel thread in the true sense.
 */
static int kthread(void *_create)
{
        /* Copy data: it's on kthread's stack */
        struct kthread_create_info *create = _create;
        int (*threadfn)(void *data) = create->threadfn;
        void *data = create->data;
        struct kthread self;
        int ret;

        self.should_stop = 0;                   /* indicates whether the thread has been asked to stop */
        init_completion(&self.exited);          /* initialize the completion in the kthread structure */
        current->vfork_done = &self.exited;

        /* OK, tell user we're spawned, wait for stop or wakeup */
        __set_current_state(TASK_UNINTERRUPTIBLE);      /* mark the new thread as about to sleep */
        create->result = current;                       /* fill in the result field of the wrapper structure */
        complete(&create->done);        /* lets the kthread_create() caller return from wait_for_completion() */
        schedule();     /* the CPU goes back to the creator; this thread sleeps until wake_up_process() */

        /* only after being woken does the thread body actually run */
        ret = -EINTR;
        if (!self.should_stop)
                ret = threadfn(data);

        /* we can't just return, we must preserve "self" on stack */
        do_exit(ret);
}

There is still something about the synchronization logic of the completions that I am not sure of; I will fix this up later. If anyone understands it clearly, please let me know. Thanks.
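To tie the pieces together, here is a minimal, hypothetical usage sketch (all names below are made up for illustration): the thread function loops until kthread_should_stop() returns true, and kthread_stop() is the counterpart that sets should_stop, wakes the thread, and then waits on the exited completion shown in struct kthread at the top.

/* hypothetical example module; identifiers are invented for illustration */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *my_task;

static int my_threadfn(void *data)
{
        /* loop until kthread_stop() asks us to exit */
        while (!kthread_should_stop()) {
                /* ... one unit of work ... */
                msleep(1000);
        }
        return 0;       /* this value is what kthread_stop() returns */
}

static int __init my_init(void)
{
        /* kthread_run() = kthread_create() + wake_up_process() */
        my_task = kthread_run(my_threadfn, NULL, "my_worker");
        return IS_ERR(my_task) ? PTR_ERR(my_task) : 0;
}

static void __exit my_exit(void)
{
        /* sets should_stop, wakes the thread, then waits on the exited completion */
        kthread_stop(my_task);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");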