
Work queues (workqueue): create_workqueue / schedule_work / queue_work

I. Introduction to workqueue

A workqueue, like a tasklet, lets kernel code request that a function be called at some future time (as LDD3 puts it).

Each workqueue is backed by one or more kernel threads.

Differences between workqueue and tasklet:

1. A tasklet is implemented on top of softirqs and runs in softirq context, so tasklet code must be atomic.

A workqueue is implemented with kernel threads, so it has no such restriction; best of all, a workqueue function is allowed to sleep.

PS: my driver module called a sleepable function from a timer callback, which is exactly why it triggered a "scheduling while atomic" warning (see the sketch after this list for how such work can be deferred).

Kernel timers are also implemented on top of softirqs.

2. A tasklet always runs on the CPU that originally submitted it; a workqueue does not necessarily.

3. A tasklet cannot be given a delay (not even a short one); a workqueue can be scheduled with a delay.
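Below is a minimal sketch of that idea (the names my_drv_timer_fn, my_drv_work_fn and the my_drv_* variables are hypothetical, not from this article, and the pre-2.6.30 timer callback signature used elsewhere in this article is assumed): the timer callback runs in atomic context and must not sleep, so it only queues a work item, and the sleepable processing happens later in a workqueue thread.

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

static struct work_struct my_drv_work;
static struct timer_list my_drv_timer;

/* Process context (workqueue thread): sleeping is allowed here. */
static void my_drv_work_fn(struct work_struct *work)
{
	msleep(10);	/* a sleepable call that would be illegal in the timer */
	printk(KERN_INFO "deferred work done\n");
}

/* Softirq (atomic) context: must not sleep, so just defer the work. */
static void my_drv_timer_fn(unsigned long data)
{
	schedule_work(&my_drv_work);
	mod_timer(&my_drv_timer, jiffies + HZ);	/* re-arm for one second later */
}

static int __init my_drv_init(void)
{
	INIT_WORK(&my_drv_work, my_drv_work_fn);
	setup_timer(&my_drv_timer, my_drv_timer_fn, 0);
	mod_timer(&my_drv_timer, jiffies + HZ);
	return 0;
}

static void __exit my_drv_exit(void)
{
	del_timer_sync(&my_drv_timer);
	cancel_work_sync(&my_drv_work);
}

module_init(my_drv_init);
module_exit(my_drv_exit);
MODULE_LICENSE("GPL");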

II. The workqueue API

The workqueue API changed after kernel 2.6.20.

#include <linux/workqueue.h>

struct workqueue_struct;
struct work_struct;

struct workqueue_struct *create_workqueue(const char *name);
void destroy_workqueue(struct workqueue_struct *queue);

INIT_WORK(_work, _func);
INIT_DELAYED_WORK(_work, _func);

int queue_work(struct workqueue_struct *wq, struct work_struct *work);
int queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay);
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay);

int cancel_work_sync(struct work_struct *work);
int cancel_delayed_work_sync(struct delayed_work *dwork);

void flush_workqueue(struct workqueue_struct *wq);

Workqueue programming interface (No. / function / description):

1. create_workqueue
Creates a workqueue and spawns one kernel thread for each CPU in the system. Parameter:
@name: name of the workqueue

2. create_singlethread_workqueue
Creates a workqueue backed by a single kernel thread. Parameter:
@name: name of the workqueue

3. destroy_workqueue
Releases a workqueue. Parameter:
@wq: pointer to the workqueue to release

4. schedule_work
Schedules a work item for execution; the item is queued on the workqueue provided by the system, keventd_wq. Parameter:
@work: pointer to the work item

5. schedule_delayed_work
Like schedule_work, but runs the work item after a delay. Parameters:
@dwork: pointer to the delayed work item
@delay: delay time (in jiffies)

6. queue_work
Schedules a work item on a specified workqueue. Parameters:
@wq: pointer to the target workqueue
@work: pointer to the work item

7. queue_delayed_work
Like queue_work, but with an additional delay parameter.

The following example uses a workqueue without a delay.

(The code is based on kernel 2.6.24.)

struct my_work_struct {
	int test;
	struct work_struct save;
};

struct my_work_struct test_work;
struct workqueue_struct *test_workqueue;

void do_save(struct work_struct *p_work)
{
	/* Recover the containing my_work_struct from the embedded work_struct. */
	struct my_work_struct *p_test_work = container_of(p_work, struct my_work_struct, save);

	printk("%d\n", p_test_work->test);
}

void test_init(void)
{
	test_workqueue = create_workqueue("test_workqueue");
	if (!test_workqueue)
		panic("Failed to create test_workqueue\n");

	INIT_WORK(&test_work.save, do_save);
	queue_work(test_workqueue, &test_work.save);
}

void test_destroy(void)
{
	if (test_workqueue)
		destroy_workqueue(test_workqueue);
}
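For completeness, here is a minimal sketch of the delayed variant (the names test_delayed_work, do_delayed_save, test_delayed_init and test_delayed_exit are hypothetical, not from the original example): the item is initialized with INIT_DELAYED_WORK and queued with queue_delayed_work, which runs it after the given number of jiffies.

struct delayed_work test_delayed_work;

void do_delayed_save(struct work_struct *p_work)
{
	printk("delayed work executed\n");
}

void test_delayed_init(void)
{
	INIT_DELAYED_WORK(&test_delayed_work, do_delayed_save);
	/* Run do_delayed_save() on test_workqueue roughly 2 seconds from now. */
	queue_delayed_work(test_workqueue, &test_delayed_work, 2 * HZ);
}

void test_delayed_exit(void)
{
	/* Cancel, or wait for, the pending delayed work before tearing down. */
	cancel_delayed_work_sync(&test_delayed_work);
}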

III. How workqueue is implemented

Workqueues are not implemented on top of softirqs; they are implemented with kernel threads.





First, creating a workqueue really comes down to creating kernel threads:

create_workqueue("tap_workqueue")

--> __create_workqueue(“tap_workqueue”, 0, 0)

--> __create_workqueue_key((name), (singlethread), (freezeable), NULL, NULL){

wq = kzalloc(sizeof(*wq), GFP_KERNEL);

wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);

wq->name = name;

wq->singlethread = singlethread;

wq->freezeable = freezeable;

INIT_LIST_HEAD(&wq->list);

for_each_possible_cpu(cpu) {

cwq = init_cpu_workqueue(wq, cpu);

err = create_workqueue_thread(cwq, cpu);

start_workqueue_thread(cwq, cpu);

}

}

create_workqueue_thread creates a kernel thread running worker_thread (linux_2_6_24/kernel/workqueue.c):

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;
	return 0;
}

The kernel thread worker_thread does something very simple: it loops forever, repeatedly executing whatever is on the workqueue's worklist.

(linux_2_6_24/kernel/workqueue.c)

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	/* Define a wait-queue entry for this thread. */
	DEFINE_WAIT(wait);

	/* freezeable is normally 0. */
	if (cwq->wq->freezeable)
		set_freezable();

	/* Raise the thread's priority. */
	set_user_nice(current, -5);

	for (;;) {
		/* Wait on cwq->more_work; whoever calls queue_work() will call
		 * wake_up(&cwq->more_work) to wake this thread up. */
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		/* If the worklist is empty, schedule away. */
		if (!freezing(current) && !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		/* Once scheduled back in, stop waiting: someone woke up
		 * cwq->more_work, so there is work to process. */
		finish_wait(&cwq->more_work, &wait);

		/* A no-op here, since power management is not configured. */
		try_to_freeze();

		if (kthread_should_stop())
			break;

		/* run_workqueue() processes every work item on the queue in turn. */
		run_workqueue(cwq);
	}

	return 0;
}

run_workqueue() processes every pending work item on the queue in turn:

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * this we need to take into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
		lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
		f(work);	/* invoke the work item's func */
		lock_release(&lockdep_map, 1, _THIS_IP_);
		lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

Adding a work item to the worklist of a specified workqueue (linux_2_6_24/kernel/workqueue.c):

int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

IV. The shared queue

In fact the kernel has a workqueue of its own, called keventd_wq; this workqueue is also known as the "shared queue".

do_basic_setup --> init_workqueues --> create_workqueue("events");

If a driver module only needs simple workqueue functionality, it can use the shared queue instead of creating a queue of its own.

The shared queue comes with its own set of APIs:

int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}

int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
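As a minimal usage sketch of the shared queue (the names my_event_fn, my_event_work, my_irq_handler and my_cleanup are hypothetical, not from the kernel source), a driver can declare a work item statically, queue it from atomic context such as an interrupt handler, and flush it on unload:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

static void my_event_fn(struct work_struct *work)
{
	/* Process context: may sleep here. */
	printk(KERN_INFO "running on the shared queue\n");
}

static DECLARE_WORK(my_event_work, my_event_fn);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/* Defer the heavy lifting to keventd's worker thread. */
	schedule_work(&my_event_work);
	return IRQ_HANDLED;
}

static void my_cleanup(void)
{
	/* Make sure our work item has finished before unloading. */
	flush_scheduled_work();
}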