linux 中断处理(基于linux-3.7.2 arm linux)
2013-02-17 13:05
363 查看
所有中断入口都是由异常向量,再到中断向量的
汇编到C的中断入口asm_do_IRQ
arch/arm/kernel/irq.c
86 /*
87 * asm_do_IRQ is the interface to be used from assembly code.
88 */
89 asmlinkage void __exception_irq_entry
90 asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
91 {
92 handle_IRQ(irq, regs);
93 }
在同一文件中
58 /*
59 * handle_IRQ handles all hardware IRQ's. Decoded IRQs should
60 * not come via this function. Instead, they should provide their
61 * own 'handler'. Used by platform code implementing C-based 1st
62 * level decoding.
63 */
64 void handle_IRQ(unsigned int irq, struct pt_regs *regs)
65 {
66 struct pt_regs *old_regs = set_irq_regs(regs);
67
68 irq_enter();
69
70 /*
71 * Some hardware gives randomly wrong interrupts. Rather
72 * than crashing, do something sensible.
73 */
74 if (unlikely(irq >= nr_irqs)) {
75 if (printk_ratelimit())
76 printk(KERN_WARNING "Bad IRQ%u\n", irq);
77 ack_bad_irq(irq);
78 } else {
79 generic_handle_irq(irq);
80 }
81
82 irq_exit();
83 set_irq_regs(old_regs);
84 }
可以看到asm_do_IRQ调用了handle_IRQ,而handle_IRQ又调用了generic_handle_irq(irq);
kernel/irq/irqdesc.c
303 /**
304 * generic_handle_irq - Invoke the handler for a particular irq
305 * @irq: The irq number to handle
306 *
307 */
308 int generic_handle_irq(unsigned int irq)
309 {
310 struct irq_desc *desc = irq_to_desc(irq);
311
312 if (!desc)
313 return -EINVAL;
314 generic_handle_irq_desc(irq, desc);
315 return 0;
316 }
317 EXPORT_SYMBOL_GPL(generic_handle_irq);
include/linux/irqdesc.h
/*
* Architectures call this to let the generic IRQ layer
* handle an interrupt. If the descriptor is attached to an
* irqchip-style controller then we call the ->handle_irq() handler,
* and it calls __do_IRQ() if it's attached to an irqtype-style controller.
*/
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
desc->handle_irq(irq, desc);
}
可见中断是通过irq_desc结构体的handle_irq函数指针调用的,那么这个handle_irq函数是在哪里被注册(设置)的呢?
中断初始化
115 void __init init_IRQ(void)
116 {
117 machine_desc->init_irq();
118 }
异常向量表
arch/arm/kernel/entry-armv.S
1163 .globl __vectors_start
1164 __vectors_start:
1165 ARM( swi SYS_ERROR0 )
1166 THUMB( svc #0 )
1167 THUMB( nop )
1168 W(b) vector_und + stubs_offset
1169 W(ldr) pc, .LCvswi + stubs_offset
1170 W(b) vector_pabt + stubs_offset
1171 W(b) vector_dabt + stubs_offset
1172 W(b) vector_addrexcptn + stubs_offset
1173 W(b) vector_irq + stubs_offset
1174 W(b) vector_fiq + stubs_offset
中断向量的拷贝
arch/arm/kernel/traps.c
820 void __init early_trap_init(void *vectors_base)
821 {
822 unsigned long vectors = (unsigned long)vectors_base;
823 extern char __stubs_start[], __stubs_end[];
824 extern char __vectors_start[], __vectors_end[];
825 extern char __kuser_helper_start[], __kuser_helper_end[];
826 int kuser_sz = __kuser_helper_end - __kuser_helper_start;
827
828 vectors_page = vectors_base;
829
830 /*
831 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
832 * into the vector page, mapped at 0xffff0000, and ensure these
833 * are visible to the instruction stream.
834 */
835 memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
836 memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
837 memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
838
839 /*
840 * Do processor specific fixups for the kuser helpers
841 */
842 kuser_get_tls_init(vectors);
843
844 /*
845 * Copy signal return handlers into the vector page, and
846 * set sigreturn to be a pointer to these.
847 */
848 memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
849 sigreturn_codes, sizeof(sigreturn_codes));
850
851 flush_icache_range(vectors, vectors + PAGE_SIZE);
852 modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
853 }
arch/arm/mm/mmu.c
1085 /*
1086 * Set up the device mappings. Since we clear out the page tables for all
1087 * mappings above VMALLOC_START, we will remove any debug device mappings.
1088 * This means you have to be careful how you debug this function, or any
1089 * called function. This means you can't use any function or debugging
1090 * method which may touch any device, otherwise the kernel _will_ crash.
1091 */
1092 static void __init devicemaps_init(struct machine_desc *mdesc)
1093 {
1094 struct map_desc map;
1095 unsigned long addr;
1096 void *vectors;
1097
1098 /*
1099 * Allocate the vector page early.
1100 */
1101 vectors = early_alloc(PAGE_SIZE);
1102
1103 early_trap_init(vectors);
…………
由这里调用early_trap_init
在同一文件中paging_init调用 devicemaps_init
1207 /*
1208 * paging_init() sets up the page tables, initialises the zone memory
1209 * maps, and sets up the zero page, bad page and bad page tables.
1210 */
1211 void __init paging_init(struct machine_desc *mdesc)
1212 {
1213 void *zero_page;
1214
1215 memblock_set_current_limit(arm_lowmem_limit);
1216
1217 build_mem_type_table();
1218 prepare_page_table();
1219 map_lowmem();
1220 dma_contiguous_remap();
1221 devicemaps_init(mdesc);
1222 kmap_init();
1223
1224 top_pmd = pmd_off_k(0xffff0000);
1225
1226 /* allocate the zero page. */
1227 zero_page = early_alloc(PAGE_SIZE);
1228
1229 bootmem_init();
1230
1231 empty_zero_page = virt_to_page(zero_page);
1232 __flush_dcache_page(NULL, empty_zero_page);
1233 }
arch/arm/kernel/setup.c
722 void __init setup_arch(char **cmdline_p)
723 {
724 struct machine_desc *mdesc;
725
726 setup_processor();
727 mdesc = setup_machine_fdt(__atags_pointer);
728 if (!mdesc)
729 mdesc = setup_machine_tags(__atags_pointer, machine_arch_type);
730 machine_desc = mdesc;
731 machine_name = mdesc->name;
732
733 setup_dma_zone(mdesc);
734
735 if (mdesc->restart_mode)
736 reboot_setup(&mdesc->restart_mode);
737
738 init_mm.start_code = (unsigned long) _text;
739 init_mm.end_code = (unsigned long) _etext;
740 init_mm.end_data = (unsigned long) _edata;
741 init_mm.brk = (unsigned long) _end;
742
743 /* populate cmd_line too for later use, preserving boot_command_line */
744 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
745 *cmdline_p = cmd_line;
746
747 parse_early_param();
748
749 sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
750 sanity_check_meminfo();
751 arm_memblock_init(&meminfo, mdesc);
752
753 paging_init(mdesc);
在setup_arch调用了 paging_init
setup_arch由start_kernel调用
init/main.c
468 asmlinkage void __init start_kernel(void)
469 {
……
499 setup_arch(&command_line);
…………
}
include/linux/interrupt.h
中断注册函数
129 static inline int __must_check
130 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
131 const char *name, void *dev)
132 {
133 return request_threaded_irq(irq, handler, NULL, flags, name, dev);
134 }
kernel/irq/manage.c
1376 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1377 irq_handler_t thread_fn, unsigned long irqflags,
1378 const char *devname, void *dev_id)
1379 {
1380 struct irqaction *action;
1381 struct irq_desc *desc;
1382 int retval;
1383
1384 /*
1385 * Sanity-check: shared interrupts must pass in a real dev-ID,
1386 * otherwise we'll have trouble later trying to figure out
1387 * which interrupt is which (messes up the interrupt freeing
1388 * logic etc).
1389 */
1390 if ((irqflags & IRQF_SHARED) && !dev_id)
1391 return -EINVAL;
1392
1393 desc = irq_to_desc(irq);
1394 if (!desc)
1395 return -EINVAL;
1396
1397 if (!irq_settings_can_request(desc) ||
1398 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1399 return -EINVAL;
1400
1401 if (!handler) {
1402 if (!thread_fn)
1403 return -EINVAL;
1404 handler = irq_default_primary_handler;
1405 }
1406
1407 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1408 if (!action)
1409 return -ENOMEM;
1410
1411 action->handler = handler;
1412 action->thread_fn = thread_fn;
1413 action->flags = irqflags;
1414 action->name = devname;
1415 action->dev_id = dev_id;
1416
1417 chip_bus_lock(desc);
1418 retval = __setup_irq(irq, desc, action);
1419 chip_bus_sync_unlock(desc);
在这里,将申请的中断处理函数等信息填入struct irqaction *action;
__setup_irq再将action链入desc->action链表,从而与该中断号的irq_desc关联
111 struct irq_desc *irq_to_desc(unsigned int irq)
112 {
113 return radix_tree_lookup(&irq_desc_tree, irq);
114 }
115 EXPORT_SYMBOL(irq_to_desc);
include/linux/irqnr.h
14 #define irq_to_desc(irq) (&irq_desc[irq])
kernel/irq/irqdesc.c
243 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
244 [0 ... NR_IRQS-1] = {
245 .handle_irq = handle_bad_irq,
246 .depth = 1,
247 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
248 }
249 };
include/linux/interrupt.h
106 struct irqaction {
107 irq_handler_t handler;
108 void *dev_id;
109 void __percpu *percpu_dev_id;
110 struct irqaction *next;
111 irq_handler_t thread_fn;
112 struct task_struct *thread;
113 unsigned int irq;
114 unsigned int flags;
115 unsigned long thread_flags;
116 unsigned long thread_mask;
117 const char *name;
118 struct proc_dir_entry *dir;
119 } ____cacheline_internodealigned_in_smp;
这个结构体就是用户中断处理函数关联入的地方
例子:定时器中就是利用setup_irq(irq_number, &s5p_clock_event_irq);函数将用户中断链入
252 static struct irqaction s5p_clock_event_irq = {
253 .name = "s5p_time_irq",
254 .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
255 .handler = s5p_clock_event_isr,
256 .dev_id = &time_event_device,
257 };
kernel/irq/chip.c
462 /**
463 * handle_edge_irq - edge type IRQ handler
464 * @irq: the interrupt number
465 * @desc: the interrupt description structure for this irq
466 *
467 * Interrupt occures on the falling and/or rising edge of a hardware
468 * signal. The occurrence is latched into the irq controller hardware
469 * and must be acked in order to be reenabled. After the ack another
470 * interrupt can happen on the same source even before the first one
471 * is handled by the associated event handler. If this happens it
472 * might be necessary to disable (mask) the interrupt depending on the
473 * controller hardware. This requires to reenable the interrupt inside
474 * of the loop which handles the interrupts which have arrived while
475 * the handler was running. If all pending interrupts are handled, the
476 * loop is left.
477 */
478 void
479 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
480 {
…………
519 handle_irq_event(desc);
…………
359 /**
360 * handle_level_irq - Level type irq handler
361 * @irq: the interrupt number
362 * @desc: the interrupt description structure for this irq
363 *
364 * Level type interrupts are active as long as the hardware line has
365 * the active level. This may require to mask the interrupt and unmask
366 * it after the associated handler has acknowledged the device, so the
367 * interrupt line is back to inactive.
368 */
369 void
370 handle_level_irq(unsigned int irq, struct irq_desc *desc)
371 {
372 raw_spin_lock(&desc->lock);
373 mask_ack_irq(desc);
374
375 if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
376 if (!irq_check_poll(desc))
377 goto out_unlock;
378
379 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
380 kstat_incr_irqs_this_cpu(irq, desc);
381
382 /*
383 * If its disabled or no action available
384 * keep it masked and get out of here
385 */
386 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
387 desc->istate |= IRQS_PENDING;
388 goto out_unlock;
389 }
390
391 handle_irq_event(desc);
392
393 cond_unmask_irq(desc);
394
395 out_unlock:
396 raw_spin_unlock(&desc->lock);
397 }
398 EXPORT_SYMBOL_GPL(handle_level_irq);
kernel/irq/handle.c
182 irqreturn_t handle_irq_event(struct irq_desc *desc)
183 {
184 struct irqaction *action = desc->action;
185 irqreturn_t ret;
186
187 desc->istate &= ~IRQS_PENDING;
188 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
189 raw_spin_unlock(&desc->lock);
190
191 ret = handle_irq_event_percpu(desc, action);
192
193 raw_spin_lock(&desc->lock);
194 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
195 return ret;
196 }
kernel/irq/handle.c
132 irqreturn_t
133 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
134 {
135 irqreturn_t retval = IRQ_NONE;
136 unsigned int flags = 0, irq = desc->irq_data.irq;
137
138 do {
139 irqreturn_t res;
140
141 trace_irq_handler_entry(irq, action);
142 res = action->handler(irq, action->dev_id);
143 trace_irq_handler_exit(irq, action, res);
144
145 if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n",
146 irq, action->handler))
147 local_irq_disable();
148
149 switch (res) {
150 case IRQ_WAKE_THREAD:
151 /*
152 * Catch drivers which return WAKE_THREAD but
153 * did not set up a thread function
154 */
155 if (unlikely(!action->thread_fn)) {
156 warn_no_thread(irq, action);
157 break;
158 }
159
160 irq_wake_thread(desc, action);
161
162 /* Fall through to add to randomness */
163 case IRQ_HANDLED:
164 flags |= action->flags;
165 break;
166
167 default:
168 break;
169 }
170
171 retval |= res;
172 action = action->next;
173 } while (action);
174
最终会调用到142行的res = action->handler(irq, action->dev_id);
include/linux/irqdesc.h
38 struct irq_desc {
39 struct irq_data irq_data;
40 unsigned int __percpu *kstat_irqs;
41 irq_flow_handler_t handle_irq;
42 #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
43 irq_preflow_handler_t preflow_handler;
44 #endif
45 struct irqaction *action; /* IRQ action list */
46 unsigned int status_use_accessors;
47 unsigned int core_internal_state__do_not_mess_with_it;
48 unsigned int depth; /* nested irq disables */
49 unsigned int wake_depth; /* nested wake enables */
50 unsigned int irq_count; /* For detecting broken IRQs */
51 unsigned long last_unhandled; /* Aging timer for unhandled count */
52 unsigned int irqs_unhandled;
53 raw_spinlock_t lock;
54 struct cpumask *percpu_enabled;
55 #ifdef CONFIG_SMP
56 const struct cpumask *affinity_hint;
57 struct irq_affinity_notify *affinity_notify;
58 #ifdef CONFIG_GENERIC_PENDING_IRQ
59 cpumask_var_t pending_mask;
60 #endif
61 #endif
62 unsigned long threads_oneshot;
63 atomic_t threads_active;
64 wait_queue_head_t wait_for_threads;
65 #ifdef CONFIG_PROC_FS
66 struct proc_dir_entry *dir;
67 #endif
68 struct module *owner;
69 const char *name;
70 } ____cacheline_internodealigned_in_smp;
arch/arm/plat-samsung/s5p-irq-eint.c
202 static int __init s5p_init_irq_eint(void)
203 {
204 int irq;
205
206 for (irq = IRQ_EINT(0); irq <= IRQ_EINT(15); irq++)
207 irq_set_chip(irq, &s5p_irq_vic_eint);
208
209 for (irq = IRQ_EINT(16); irq <= IRQ_EINT(31); irq++) {
210 irq_set_chip_and_handler(irq, &s5p_irq_eint, handle_level_irq);
211 set_irq_flags(irq, IRQF_VALID);
212 }
213
214 irq_set_chained_handler(IRQ_EINT16_31, s5p_irq_demux_eint16_31);
215 return 0;
216 }
217
218 arch_initcall(s5p_init_irq_eint);
s5pv210通过irq_set_chip_and_handler将外部中断的流控处理函数设置为handle_level_irq(按电平触发方式处理)
irq_set_chained_handler则把s5p_irq_demux_eint16_31设置为IRQ_EINT16_31的链式(demux)处理函数,用于把合并上报的中断分发到EINT16~31各自的中断号
汇编到C的中断入口asm_do_IRQ
arch/arm/kernel/irq.c
86 /*
87 * asm_do_IRQ is the interface to be used from assembly code.
88 */
89 asmlinkage void __exception_irq_entry
90 asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
91 {
92 handle_IRQ(irq, regs);
93 }
在同一文件中
58 /*
59 * handle_IRQ handles all hardware IRQ's. Decoded IRQs should
60 * not come via this function. Instead, they should provide their
61 * own 'handler'. Used by platform code implementing C-based 1st
62 * level decoding.
63 */
64 void handle_IRQ(unsigned int irq, struct pt_regs *regs)
65 {
66 struct pt_regs *old_regs = set_irq_regs(regs);
67
68 irq_enter();
69
70 /*
71 * Some hardware gives randomly wrong interrupts. Rather
72 * than crashing, do something sensible.
73 */
74 if (unlikely(irq >= nr_irqs)) {
75 if (printk_ratelimit())
76 printk(KERN_WARNING "Bad IRQ%u\n", irq);
77 ack_bad_irq(irq);
78 } else {
79 generic_handle_irq(irq);
80 }
81
82 irq_exit();
83 set_irq_regs(old_regs);
84 }
可以看到asm_do_IRQ调用了handle_IRQ,而handle_IRQ又调用了generic_handle_irq(irq);
kernel/irq/irqdesc.c
303 /**
304 * generic_handle_irq - Invoke the handler for a particular irq
305 * @irq: The irq number to handle
306 *
307 */
308 int generic_handle_irq(unsigned int irq)
309 {
310 struct irq_desc *desc = irq_to_desc(irq);
311
312 if (!desc)
313 return -EINVAL;
314 generic_handle_irq_desc(irq, desc);
315 return 0;
316 }
317 EXPORT_SYMBOL_GPL(generic_handle_irq);
include/linux/irqdesc.h
/*
* Architectures call this to let the generic IRQ layer
* handle an interrupt. If the descriptor is attached to an
* irqchip-style controller then we call the ->handle_irq() handler,
* and it calls __do_IRQ() if it's attached to an irqtype-style controller.
*/
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
desc->handle_irq(irq, desc);
}
可见中断是通过irq_desc结构体的handle_irq函数指针调用的,那么这个handle_irq函数是在哪里被注册(设置)的呢?
中断初始化
115 void __init init_IRQ(void)
116 {
117 machine_desc->init_irq();
118 }
异常向量表
arch/arm/kernel/entry-armv.S
1163 .globl __vectors_start
1164 __vectors_start:
1165 ARM( swi SYS_ERROR0 )
1166 THUMB( svc #0 )
1167 THUMB( nop )
1168 W(b) vector_und + stubs_offset
1169 W(ldr) pc, .LCvswi + stubs_offset
1170 W(b) vector_pabt + stubs_offset
1171 W(b) vector_dabt + stubs_offset
1172 W(b) vector_addrexcptn + stubs_offset
1173 W(b) vector_irq + stubs_offset
1174 W(b) vector_fiq + stubs_offset
中断向量的拷贝
arch/arm/kernel/traps.c
820 void __init early_trap_init(void *vectors_base)
821 {
822 unsigned long vectors = (unsigned long)vectors_base;
823 extern char __stubs_start[], __stubs_end[];
824 extern char __vectors_start[], __vectors_end[];
825 extern char __kuser_helper_start[], __kuser_helper_end[];
826 int kuser_sz = __kuser_helper_end - __kuser_helper_start;
827
828 vectors_page = vectors_base;
829
830 /*
831 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
832 * into the vector page, mapped at 0xffff0000, and ensure these
833 * are visible to the instruction stream.
834 */
835 memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
836 memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
837 memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
838
839 /*
840 * Do processor specific fixups for the kuser helpers
841 */
842 kuser_get_tls_init(vectors);
843
844 /*
845 * Copy signal return handlers into the vector page, and
846 * set sigreturn to be a pointer to these.
847 */
848 memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
849 sigreturn_codes, sizeof(sigreturn_codes));
850
851 flush_icache_range(vectors, vectors + PAGE_SIZE);
852 modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
853 }
arch/arm/mm/mmu.c
1085 /*
1086 * Set up the device mappings. Since we clear out the page tables for all
1087 * mappings above VMALLOC_START, we will remove any debug device mappings.
1088 * This means you have to be careful how you debug this function, or any
1089 * called function. This means you can't use any function or debugging
1090 * method which may touch any device, otherwise the kernel _will_ crash.
1091 */
1092 static void __init devicemaps_init(struct machine_desc *mdesc)
1093 {
1094 struct map_desc map;
1095 unsigned long addr;
1096 void *vectors;
1097
1098 /*
1099 * Allocate the vector page early.
1100 */
1101 vectors = early_alloc(PAGE_SIZE);
1102
1103 early_trap_init(vectors);
…………
由这里调用early_trap_init
在同一文件中paging_init调用 devicemaps_init
1207 /*
1208 * paging_init() sets up the page tables, initialises the zone memory
1209 * maps, and sets up the zero page, bad page and bad page tables.
1210 */
1211 void __init paging_init(struct machine_desc *mdesc)
1212 {
1213 void *zero_page;
1214
1215 memblock_set_current_limit(arm_lowmem_limit);
1216
1217 build_mem_type_table();
1218 prepare_page_table();
1219 map_lowmem();
1220 dma_contiguous_remap();
1221 devicemaps_init(mdesc);
1222 kmap_init();
1223
1224 top_pmd = pmd_off_k(0xffff0000);
1225
1226 /* allocate the zero page. */
1227 zero_page = early_alloc(PAGE_SIZE);
1228
1229 bootmem_init();
1230
1231 empty_zero_page = virt_to_page(zero_page);
1232 __flush_dcache_page(NULL, empty_zero_page);
1233 }
arch/arm/kernel/setup.c
722 void __init setup_arch(char **cmdline_p)
723 {
724 struct machine_desc *mdesc;
725
726 setup_processor();
727 mdesc = setup_machine_fdt(__atags_pointer);
728 if (!mdesc)
729 mdesc = setup_machine_tags(__atags_pointer, machine_arch_type);
730 machine_desc = mdesc;
731 machine_name = mdesc->name;
732
733 setup_dma_zone(mdesc);
734
735 if (mdesc->restart_mode)
736 reboot_setup(&mdesc->restart_mode);
737
738 init_mm.start_code = (unsigned long) _text;
739 init_mm.end_code = (unsigned long) _etext;
740 init_mm.end_data = (unsigned long) _edata;
741 init_mm.brk = (unsigned long) _end;
742
743 /* populate cmd_line too for later use, preserving boot_command_line */
744 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
745 *cmdline_p = cmd_line;
746
747 parse_early_param();
748
749 sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
750 sanity_check_meminfo();
751 arm_memblock_init(&meminfo, mdesc);
752
753 paging_init(mdesc);
在setup_arch调用了 paging_init
setup_arch由start_kernel调用
init/main.c
468 asmlinkage void __init start_kernel(void)
469 {
……
499 setup_arch(&command_line);
…………
}
include/linux/interrupt.h
中断注册函数
129 static inline int __must_check
130 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
131 const char *name, void *dev)
132 {
133 return request_threaded_irq(irq, handler, NULL, flags, name, dev);
134 }
kernel/irq/manage.c
1376 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1377 irq_handler_t thread_fn, unsigned long irqflags,
1378 const char *devname, void *dev_id)
1379 {
1380 struct irqaction *action;
1381 struct irq_desc *desc;
1382 int retval;
1383
1384 /*
1385 * Sanity-check: shared interrupts must pass in a real dev-ID,
1386 * otherwise we'll have trouble later trying to figure out
1387 * which interrupt is which (messes up the interrupt freeing
1388 * logic etc).
1389 */
1390 if ((irqflags & IRQF_SHARED) && !dev_id)
1391 return -EINVAL;
1392
1393 desc = irq_to_desc(irq);
1394 if (!desc)
1395 return -EINVAL;
1396
1397 if (!irq_settings_can_request(desc) ||
1398 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1399 return -EINVAL;
1400
1401 if (!handler) {
1402 if (!thread_fn)
1403 return -EINVAL;
1404 handler = irq_default_primary_handler;
1405 }
1406
1407 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1408 if (!action)
1409 return -ENOMEM;
1410
1411 action->handler = handler;
1412 action->thread_fn = thread_fn;
1413 action->flags = irqflags;
1414 action->name = devname;
1415 action->dev_id = dev_id;
1416
1417 chip_bus_lock(desc);
1418 retval = __setup_irq(irq, desc, action);
1419 chip_bus_sync_unlock(desc);
在这里,将申请的中断处理函数等信息填入struct irqaction *action;
__setup_irq再将action链入desc->action链表,从而与该中断号的irq_desc关联
111 struct irq_desc *irq_to_desc(unsigned int irq)
112 {
113 return radix_tree_lookup(&irq_desc_tree, irq);
114 }
115 EXPORT_SYMBOL(irq_to_desc);
include/linux/irqnr.h
14 #define irq_to_desc(irq) (&irq_desc[irq])
kernel/irq/irqdesc.c
243 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
244 [0 ... NR_IRQS-1] = {
245 .handle_irq = handle_bad_irq,
246 .depth = 1,
247 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
248 }
249 };
include/linux/interrupt.h
106 struct irqaction {
107 irq_handler_t handler;
108 void *dev_id;
109 void __percpu *percpu_dev_id;
110 struct irqaction *next;
111 irq_handler_t thread_fn;
112 struct task_struct *thread;
113 unsigned int irq;
114 unsigned int flags;
115 unsigned long thread_flags;
116 unsigned long thread_mask;
117 const char *name;
118 struct proc_dir_entry *dir;
119 } ____cacheline_internodealigned_in_smp;
这个结构体就是用户中断处理函数关联入的地方
例子:定时器中就是利用setup_irq(irq_number, &s5p_clock_event_irq);函数将用户中断链入
252 static struct irqaction s5p_clock_event_irq = {
253 .name = "s5p_time_irq",
254 .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
255 .handler = s5p_clock_event_isr,
256 .dev_id = &time_event_device,
257 };
kernel/irq/chip.c
462 /**
463 * handle_edge_irq - edge type IRQ handler
464 * @irq: the interrupt number
465 * @desc: the interrupt description structure for this irq
466 *
467 * Interrupt occures on the falling and/or rising edge of a hardware
468 * signal. The occurrence is latched into the irq controller hardware
469 * and must be acked in order to be reenabled. After the ack another
470 * interrupt can happen on the same source even before the first one
471 * is handled by the associated event handler. If this happens it
472 * might be necessary to disable (mask) the interrupt depending on the
473 * controller hardware. This requires to reenable the interrupt inside
474 * of the loop which handles the interrupts which have arrived while
475 * the handler was running. If all pending interrupts are handled, the
476 * loop is left.
477 */
478 void
479 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
480 {
…………
519 handle_irq_event(desc);
…………
359 /**
360 * handle_level_irq - Level type irq handler
361 * @irq: the interrupt number
362 * @desc: the interrupt description structure for this irq
363 *
364 * Level type interrupts are active as long as the hardware line has
365 * the active level. This may require to mask the interrupt and unmask
366 * it after the associated handler has acknowledged the device, so the
367 * interrupt line is back to inactive.
368 */
369 void
370 handle_level_irq(unsigned int irq, struct irq_desc *desc)
371 {
372 raw_spin_lock(&desc->lock);
373 mask_ack_irq(desc);
374
375 if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
376 if (!irq_check_poll(desc))
377 goto out_unlock;
378
379 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
380 kstat_incr_irqs_this_cpu(irq, desc);
381
382 /*
383 * If its disabled or no action available
384 * keep it masked and get out of here
385 */
386 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
387 desc->istate |= IRQS_PENDING;
388 goto out_unlock;
389 }
390
391 handle_irq_event(desc);
392
393 cond_unmask_irq(desc);
394
395 out_unlock:
396 raw_spin_unlock(&desc->lock);
397 }
398 EXPORT_SYMBOL_GPL(handle_level_irq);
kernel/irq/handle.c
182 irqreturn_t handle_irq_event(struct irq_desc *desc)
183 {
184 struct irqaction *action = desc->action;
185 irqreturn_t ret;
186
187 desc->istate &= ~IRQS_PENDING;
188 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
189 raw_spin_unlock(&desc->lock);
190
191 ret = handle_irq_event_percpu(desc, action);
192
193 raw_spin_lock(&desc->lock);
194 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
195 return ret;
196 }
kernel/irq/handle.c
132 irqreturn_t
133 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
134 {
135 irqreturn_t retval = IRQ_NONE;
136 unsigned int flags = 0, irq = desc->irq_data.irq;
137
138 do {
139 irqreturn_t res;
140
141 trace_irq_handler_entry(irq, action);
142 res = action->handler(irq, action->dev_id);
143 trace_irq_handler_exit(irq, action, res);
144
145 if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n",
146 irq, action->handler))
147 local_irq_disable();
148
149 switch (res) {
150 case IRQ_WAKE_THREAD:
151 /*
152 * Catch drivers which return WAKE_THREAD but
153 * did not set up a thread function
154 */
155 if (unlikely(!action->thread_fn)) {
156 warn_no_thread(irq, action);
157 break;
158 }
159
160 irq_wake_thread(desc, action);
161
162 /* Fall through to add to randomness */
163 case IRQ_HANDLED:
164 flags |= action->flags;
165 break;
166
167 default:
168 break;
169 }
170
171 retval |= res;
172 action = action->next;
173 } while (action);
174
最终会调用到142行的res = action->handler(irq, action->dev_id);
include/linux/irqdesc.h
38 struct irq_desc {
39 struct irq_data irq_data;
40 unsigned int __percpu *kstat_irqs;
41 irq_flow_handler_t handle_irq;
42 #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
43 irq_preflow_handler_t preflow_handler;
44 #endif
45 struct irqaction *action; /* IRQ action list */
46 unsigned int status_use_accessors;
47 unsigned int core_internal_state__do_not_mess_with_it;
48 unsigned int depth; /* nested irq disables */
49 unsigned int wake_depth; /* nested wake enables */
50 unsigned int irq_count; /* For detecting broken IRQs */
51 unsigned long last_unhandled; /* Aging timer for unhandled count */
52 unsigned int irqs_unhandled;
53 raw_spinlock_t lock;
54 struct cpumask *percpu_enabled;
55 #ifdef CONFIG_SMP
56 const struct cpumask *affinity_hint;
57 struct irq_affinity_notify *affinity_notify;
58 #ifdef CONFIG_GENERIC_PENDING_IRQ
59 cpumask_var_t pending_mask;
60 #endif
61 #endif
62 unsigned long threads_oneshot;
63 atomic_t threads_active;
64 wait_queue_head_t wait_for_threads;
65 #ifdef CONFIG_PROC_FS
66 struct proc_dir_entry *dir;
67 #endif
68 struct module *owner;
69 const char *name;
70 } ____cacheline_internodealigned_in_smp;
arch/arm/plat-samsung/s5p-irq-eint.c
202 static int __init s5p_init_irq_eint(void)
203 {
204 int irq;
205
206 for (irq = IRQ_EINT(0); irq <= IRQ_EINT(15); irq++)
207 irq_set_chip(irq, &s5p_irq_vic_eint);
208
209 for (irq = IRQ_EINT(16); irq <= IRQ_EINT(31); irq++) {
210 irq_set_chip_and_handler(irq, &s5p_irq_eint, handle_level_irq);
211 set_irq_flags(irq, IRQF_VALID);
212 }
213
214 irq_set_chained_handler(IRQ_EINT16_31, s5p_irq_demux_eint16_31);
215 return 0;
216 }
217
218 arch_initcall(s5p_init_irq_eint);
s5pv210通过irq_set_chip_and_handler将外部中断的流控处理函数设置为handle_level_irq(按电平触发方式处理)
irq_set_chained_handler则把s5p_irq_demux_eint16_31设置为IRQ_EINT16_31的链式(demux)处理函数,用于把合并上报的中断分发到EINT16~31各自的中断号
相关文章推荐
- arm-Linux中断处理体系结构与处理流程分析
- ARM Linux对中断的处理--中断注册方法
- ARM linux的中断处理过程
- Linux内核中断处理过程分析-基于arm平台
- linux for arm的中断处理流程[转载自:http://hi.baidu.com/wudx05/blog/item/5314935c834f4e41fbf2c0dc.html]
- ARM Linux对中断的处理--中断处理
- ARM Linux对中断的处理--相关数据结构
- ucOS-II基于ARM920T的中断处理过程
- <<Linux内核完全剖析 --基于0.12内核>>学习笔记 第4章 80x86保护模式及其编程 4.6 中断和异常处理
- ARM Linux对中断的处理--中断处理
- [ARM&Linux]Linux下中断处理的上下文保存与切换的一些细节
- Linux中断 - ARM中断处理过程
- [ARM笔记]嵌入式Linux中断处理程序架构
- 基于嵌入式linux和ARM设计的无线家庭网关
- 嵌入式arm学习总结(七)--中断-基于S3C2440
- 基于ARM 的Linux 的启动分析报告——ARM+Linux的启动分析(1)
- linux中断处理原理分析
- linux0.11内核中断处理
- Linux下中断处理程序源码分析