
MTK Kernel Boot Flow Source Code Analysis 4: start_kernel (Part 1)

2017-04-18 17:52
http://blog.csdn.net/xichangbao/article/details/52888351

1 start_kernel

start_kernel is the first C function executed during kernel boot. It completes the kernel's initialization by calling a long series of init functions. This first part analyzes the code up to local_irq_enable.

1.0 start_kernel

Defined in init/main.c

asmlinkage void __init start_kernel(void)
{
    char * command_line;
    extern const struct kernel_param __start___param[], __stop___param[];

    /*
     * Need to run as early as possible, to initialize the
     * lockdep hash:
     */
    lockdep_init(); // Initialize the hash tables of the kernel lock-dependency checker (deadlock detection)
    smp_setup_processor_id(); // Get the current CPU number; returns 0 on single-core CPUs
    debug_objects_early_init(); // Early initialization of the debug-objects infrastructure

    cgroup_init_early(); // Early initialization of Control Groups

    local_irq_disable(); // Disable interrupts on the current CPU
    early_boot_irqs_disabled = true;

/*
 * Interrupts are still disabled. Do necessary setups, then
 * enable them
 */
    boot_cpu_init(); // Mark the boot CPU as online/active/present/possible
    page_address_init();  // Initialize the page_address mechanism for highmem; not used on ARM
    pr_notice("%s", linux_banner);
    setup_arch(&command_line); // Architecture-specific kernel initialization
    /*
     * Set up the the initial canary ASAP:
     */
    boot_init_stack_canary(); // Initialize the stack canary, a guard word that protects against stack-smashing attacks
    mm_init_owner(&init_mm, &init_task); // init_mm.owner = &init_task
    mm_init_cpumask(&init_mm);
    setup_command_line(command_line); // Save copies of the command line
    setup_nr_cpu_ids(); // Compute nr_cpu_ids (number of possible CPUs)
    setup_per_cpu_areas(); // Allocate each CPU's copy of the per-cpu variables
    smp_prepare_boot_cpu();    /* arch-specific boot-cpu hooks */

    build_all_zonelists(NULL, NULL); // Build the memory zone (zonelist) lists
    page_alloc_init(); // Page allocator init (registers a CPU-hotplug notifier)

    pr_notice("Kernel command line: %s\n", boot_command_line);
    parse_early_param(); // Parse boot parameters that need 'early' handling; setup_arch has already called this once
    parse_args("Booting kernel", static_command_line, __start___param,
           __stop___param - __start___param,
           -1, -1, &unknown_bootoption); // Parse the boot parameters on the command line

    jump_label_init(); // Initialize statically defined jump labels (static keys)

    /*
     * These use large bootmem allocations and must precede
     * kmem_cache_init()
     */
    setup_log_buf(0); // Allocate a boot-time log buffer with memblock_alloc
    pidhash_init(); // Initialize the PID hash table
    vfs_caches_init_early(); // Early init of the dentry and inode hash tables
    sort_main_extable(); // Sort the kernel exception table
    trap_init(); // Initialize trap/exception handling; empty on ARM
    mm_init(); // Set up the kernel memory allocators: hand over to the buddy system, start the slab allocator, set up the vmalloc area

    /*
     * Set up the scheduler prior starting any interrupts (such as the
     * timer interrupt). Full topology setup happens at smp_init()
     * time - but meanwhile we still have a functioning scheduler.
     */
    sched_init(); // Initialize the process scheduler
    /*
     * Disable preemption - early bootup scheduling is extremely
     * fragile until we cpu_idle() for the first time.
     */
    preempt_disable(); // Disable kernel preemption
    if (WARN(!irqs_disabled(), "Interrupts were enabled *very* early, fixing it\n"))
        local_irq_disable(); // Disable local interrupts
    idr_init_cache(); // Create the slab cache for the IDR (integer ID management) mechanism
    perf_event_init(); // Initialize the perf events subsystem
    rcu_init(); // Initialize the RCU (Read-Copy-Update) mechanism
    tick_nohz_init(); // Initialize the dynamic tick (NO_HZ) framework
    radix_tree_init(); // Initialize the kernel radix tree
    /* init some links before init_ISA_irqs() */
    early_irq_init(); // Not used on arm64
    init_IRQ(); // Initialize interrupt handling
    tick_init(); // Initialize the clock tick controller
    init_timers(); // Initialize kernel timers
    hrtimers_init(); // Initialize high-resolution timers
    softirq_init(); // Initialize softirqs
    timekeeping_init();  // Initialize the timekeeping globals
    time_init(); // Architecture clock/timer initialization
    profile_init(); // Initialize the kernel profiling tool (profile)
    call_function_init(); // Initialize the SMP cross-CPU function-call (IPI) infrastructure
    WARN(!irqs_disabled(), "Interrupts were enabled early\n");
    early_boot_irqs_disabled = false;
    local_irq_enable(); // Enable interrupts on the current CPU

    kmem_cache_init_late();

    /*
     * HACK ALERT! This is early. We're enabling the console before
     * we've done PCI setups etc, and console_init() must be aware of
     * this. But we do want output early, in case something goes wrong.
     */
    console_init();
    if (panic_later)
        panic(panic_later, panic_param);

    lockdep_info();

    /*
     * Need to run this when irqs are enabled, because it wants
     * to self-test [hard/soft]-irqs on/off lock inversion bugs
     * too:
     */
    locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
    if (initrd_start && !initrd_below_start_ok &&
        page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
        pr_crit("initrd overwritten (0x%08lx < 0x%08lx) - disabling it.\n",
            page_to_pfn(virt_to_page((void *)initrd_start)),
            min_low_pfn);
        initrd_start = 0;
    }
#endif
    page_cgroup_init();
    debug_objects_mem_init();
    kmemleak_init();
    setup_per_cpu_pageset();
    numa_policy_init();
    if (late_time_init)
        late_time_init();
    sched_clock_init();
    calibrate_delay();
    pidmap_init();
    anon_vma_init();
#ifdef CONFIG_X86
    if (efi_enabled(EFI_RUNTIME_SERVICES))
        efi_enter_virtual_mode();
#endif
    thread_info_cache_init();
    cred_init();
    fork_init(totalram_pages);
    proc_caches_init();
    buffer_init();
    key_init();
    security_init();
    dbg_late_init();
    vfs_caches_init(totalram_pages);
    signals_init();
    /* rootfs populating might need page-writeback */
    page_writeback_init();
#ifdef CONFIG_PROC_FS
    proc_root_init();
#endif
    cgroup_init();
    cpuset_init();
    taskstats_init_early();
    delayacct_init();

    check_bugs();

    acpi_early_init(); /* before LAPIC and SMP init */
    sfi_init_late();

    if (efi_enabled(EFI_RUNTIME_SERVICES)) {
        efi_late_init();
        efi_free_boot_services();
    }

    ftrace_init();

    /* Do the rest non-__init'ed, we're now alive */
    rest_init();
}

1.1 lockdep_init

Defined in kernel/lockdep.c

void lockdep_init(void)
{
    int i;

    /*
     * Some architectures have their own start_kernel()
     * code which calls lockdep_init(), while we also
     * call lockdep_init() from the start_kernel() itself,
     * and we want to initialize the hashes only once:
     */
    if (lockdep_initialized)
        return;

    for (i = 0; i < CLASSHASH_SIZE; i++)
        INIT_LIST_HEAD(classhash_table + i);

    for (i = 0; i < CHAINHASH_SIZE; i++)
        INIT_LIST_HEAD(chainhash_table + i);

    lockdep_initialized = 1;
}
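
The two loops above just fill hash tables with empty list heads. As a point of reference, the following standalone userspace sketch (illustrative table size, simplified list_head) shows what INIT_LIST_HEAD over such a table amounts to:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define CLASSHASH_SIZE 1024                     /* illustrative, not the kernel's value */
static struct list_head classhash_table[CLASSHASH_SIZE];

static void INIT_LIST_HEAD(struct list_head *list)
{
    list->next = list;                           /* an empty list points at itself */
    list->prev = list;
}

int main(void)
{
    for (int i = 0; i < CLASSHASH_SIZE; i++)
        INIT_LIST_HEAD(classhash_table + i);
    printf("bucket 0 empty: %d\n", classhash_table[0].next == &classhash_table[0]);
    return 0;
}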

1.2 debug_objects_early_init

Defined in lib/debugobjects.c

void __init debug_objects_early_init(void)
{
    int i;

    for (i = 0; i < ODEBUG_HASH_SIZE; i++)
        raw_spin_lock_init(&obj_hash[i].lock);

    for (i = 0; i < ODEBUG_POOL_SIZE; i++)
        hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

1.3 cgroup_init_early

Defined in kernel/cgroup.c

int __init cgroup_init_early(void)
{
    int i;
    atomic_set(&init_css_set.refcount, 1);
    INIT_LIST_HEAD(&init_css_set.cg_links);
    INIT_LIST_HEAD(&init_css_set.tasks);
    INIT_HLIST_NODE(&init_css_set.hlist);
    css_set_count = 1;
    init_cgroup_root(&rootnode);
    root_count = 1;
    init_task.cgroups = &init_css_set;

    init_css_set_link.cg = &init_css_set;
    init_css_set_link.cgrp = dummytop;
    list_add(&init_css_set_link.cgrp_link_list,
         &rootnode.top_cgroup.css_sets);
    list_add(&init_css_set_link.cg_link_list,
         &init_css_set.cg_links);

    for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
        struct cgroup_subsys *ss = subsys[i];

        /* at bootup time, we don't worry about modular subsystems */
        if (!ss || ss->module)
            continue;

        BUG_ON(!ss->name);
        BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
        BUG_ON(!ss->css_alloc);
        BUG_ON(!ss->css_free);
        if (ss->subsys_id != i) {
            printk(KERN_ERR "cgroup: Subsys %s id == %d\n",
                   ss->name, ss->subsys_id);
            BUG();
        }

        if (ss->early_init)
            cgroup_init_subsys(ss);
    }
    return 0;
}

1.4 local_irq_disable

trace_hardirqs_off_caller is defined in kernel/lockdep.c

#define local_irq_disable() \
    do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)

void trace_hardirqs_off(void)
{
    trace_hardirqs_off_caller(CALLER_ADDR0);
}

void trace_hardirqs_off_caller(unsigned long ip)
{
    struct task_struct *curr = current;

    time_hardirqs_off(CALLER_ADDR0, ip);

    if (unlikely(!debug_locks || current->lockdep_recursion))
        return;

    /*
     * So we're supposed to get called after you mask local IRQs, but for
     * some reason the hardware doesn't quite think you did a proper job.
     */
    if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
        return;

    if (curr->hardirqs_enabled) {
        /*
         * We have done an ON -> OFF transition:
         */
        curr->hardirqs_enabled = 0;
        curr->hardirq_disable_ip = ip;
        curr->hardirq_disable_event = ++curr->irq_events;
        debug_atomic_inc(hardirqs_off_events);
    } else
        debug_atomic_inc(redundant_hardirqs_off);
}

1.5 boot_cpu_init

Defined in init/main.c

static void __init boot_cpu_init(void)
{
    int cpu = smp_processor_id();
    /* Mark the boot cpu "present", "online" etc for SMP and UP case */
    set_cpu_online(cpu, true);
    set_cpu_active(cpu, true);
    set_cpu_present(cpu, true);
    set_cpu_possible(cpu, true);
}
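
The set_cpu_online/active/present/possible helpers each set the boot CPU's bit in a cpumask bitmap. A minimal userspace sketch of that bit manipulation (single-word masks here, not the kernel's cpumask API):

#include <stdio.h>

static unsigned long cpu_online_bits;
static unsigned long cpu_possible_bits;

static void set_cpu_bit(unsigned long *mask, int cpu, int set)
{
    if (set)
        *mask |= 1UL << cpu;                     /* mark the CPU in this mask */
    else
        *mask &= ~(1UL << cpu);
}

int main(void)
{
    int cpu = 0;                                 /* the boot CPU */
    set_cpu_bit(&cpu_online_bits, cpu, 1);
    set_cpu_bit(&cpu_possible_bits, cpu, 1);
    printf("online mask = 0x%lx\n", cpu_online_bits);
    return 0;
}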

1.6 setup_per_cpu_areas

Defined in mm/percpu.c

void __init setup_per_cpu_areas(void)
{
    unsigned long delta;
    unsigned int cpu;
    int rc;

    /*
     * Always reserve area for module percpu variables.  That's
     * what the legacy allocator did.
     */
    rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
                    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
                    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
    if (rc < 0)
        panic("Failed to initialize percpu areas.");

    delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
    for_each_possible_cpu(cpu)
        __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
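
After this, CPU n's copy of any per-cpu variable is found by adding __per_cpu_offset[n] to the variable's link-time address. A userspace sketch of that address arithmetic (the storage layout and offsets here are fabricated for illustration):

#include <stdio.h>
#include <stddef.h>

#define NR_CPUS 4

/* stand-in for a DEFINE_PER_CPU(int, counter) variable in .data..percpu */
static int counter_template;

/* stand-in for the per-cpu chunks allocated by pcpu_embed_first_chunk() */
static int percpu_storage[NR_CPUS];

/* offset from the template to cpu's own copy, like __per_cpu_offset[cpu] */
static ptrdiff_t per_cpu_offset_sketch[NR_CPUS];

static int *per_cpu_ptr_sketch(int *ptr, int cpu)
{
    return (int *)((char *)ptr + per_cpu_offset_sketch[cpu]);
}

int main(void)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        per_cpu_offset_sketch[cpu] =
            (char *)&percpu_storage[cpu] - (char *)&counter_template;

    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        *per_cpu_ptr_sketch(&counter_template, cpu) = cpu * 10;

    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu%d counter = %d\n", cpu, percpu_storage[cpu]);
    return 0;
}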

1.7 build_all_zonelists

Defined in mm/page_alloc.c

void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
{
    set_zonelist_order();

    if (system_state == SYSTEM_BOOTING) {
        __build_all_zonelists(NULL);
        mminit_verify_zonelist();
        cpuset_init_current_mems_allowed();
    } else {
        /* we have to stop all cpus to guarantee there is no user
           of zonelist */
#ifdef CONFIG_MEMORY_HOTPLUG
        if (zone)
            setup_zone_pageset(zone);
#endif
        stop_machine(__build_all_zonelists, pgdat, NULL);
        /* cpuset refresh routine should be here */
    }
    vm_total_pages = nr_free_pagecache_pages();
    /*
     * Disable grouping by mobility if the number of pages in the
     * system is too low to allow the mechanism to work. It would be
     * more accurate, but expensive to check per-zone. This check is
     * made on memory-hotadd so a system can start with mobility
     * disabled and enable it later
     */
    if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
        page_group_by_mobility_disabled = 1;
    else
        page_group_by_mobility_disabled = 0;

    printk("Built %i zonelists in %s order, mobility grouping %s.  "
        "Total pages: %ld\n",
            nr_online_nodes,
            zonelist_order_name[current_zonelist_order],
            page_group_by_mobility_disabled ? "off" : "on",
            vm_total_pages);
#ifdef CONFIG_NUMA
    printk("Policy zone: %s\n", zone_names[policy_zone]);
#endif
}

1.8 page_alloc_init

Defined in mm/page_alloc.c

void __init page_alloc_init(void)
{
    hotcpu_notifier(page_alloc_cpu_notify, 0);
}

static int page_alloc_cpu_notify(struct notifier_block *self,
                 unsigned long action, void *hcpu)
{
    int cpu = (unsigned long)hcpu;

    if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
        lru_add_drain_cpu(cpu);
        drain_pages(cpu);

        /*
         * Spill the event counters of the dead processor
         * into the current processors event counters.
         * This artificially elevates the count of the current
         * processor.
         */
        vm_events_fold_cpu(cpu);

        /*
         * Zero the differential counters of the dead processor
         * so that the vm statistics are consistent.
         *
         * This is only okay since the processor is dead and cannot
         * race with what we are doing.
         */
        refresh_cpu_vm_stats(cpu);
    }
    return NOTIFY_OK;
}

1.9 parse_args

Defined in kernel/params.c

/* Args looks like "foo=bar,bar2 baz=fuz wiz". */
int parse_args(const char *doing,
           char *args,
           const struct kernel_param *params,
           unsigned num,
           s16 min_level,
           s16 max_level,
           int (*unknown)(char *param, char *val, const char *doing))
{
    char *param, *val;

    /* Chew leading spaces */
    args = skip_spaces(args);

    if (*args)
        pr_debug("doing %s, parsing ARGS: '%s'\n", doing, args);

    while (*args) {
        int ret;
        int irq_was_disabled;

        args = next_arg(args, &param, &val);
        irq_was_disabled = irqs_disabled();
        ret = parse_one(param, val, doing, params, num,
                min_level, max_level, unknown);
        if (irq_was_disabled && !irqs_disabled())
            pr_warn("%s: option '%s' enabled irq's!\n",
                doing, param);

        switch (ret) {
        case -ENOENT:
            pr_err("%s: Unknown parameter `%s'\n", doing, param);
            return ret;
        case -ENOSPC:
            pr_err("%s: `%s' too large for parameter `%s'\n",
                   doing, val ?: "", param);
            return ret;
        case 0:
            break;
        default:
            pr_err("%s: `%s' invalid for parameter `%s'\n",
                   doing, val ?: "", param);
            return ret;
        }
    }

    /* All parsed OK. */
    return 0;
}
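
parse_args peels one "param=value" token off the command line at a time and dispatches it. A simplified userspace sketch of that loop (no quoting support, no kernel_param table; the handler is replaced by a printf):

#include <stdio.h>
#include <ctype.h>

/* split off one "param[=val]" token and return a pointer past it */
static char *next_arg_sketch(char *args, char **param, char **val)
{
    *param = args;
    *val = NULL;

    while (*args && !isspace((unsigned char)*args)) {
        if (*args == '=' && !*val) {
            *args = '\0';                        /* terminate the name */
            *val = args + 1;                     /* value starts after '=' */
        }
        args++;
    }
    if (*args)
        *args++ = '\0';                          /* terminate the token */
    while (isspace((unsigned char)*args))
        args++;                                  /* chew trailing spaces */
    return args;
}

int main(void)
{
    char cmdline[] = "console=ttyS0,115200 root=/dev/ram rw loglevel=8";
    char *args = cmdline, *param, *val;

    while (*args) {
        args = next_arg_sketch(args, &param, &val);
        printf("param='%s' val='%s'\n", param, val ? val : "(none)");
    }
    return 0;
}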

1.10 jump_label_init

Defined in kernel/jump_label.c

void __init jump_label_init(void)
{
    struct jump_entry *iter_start = __start___jump_table;
    struct jump_entry *iter_stop = __stop___jump_table;
    struct static_key *key = NULL;
    struct jump_entry *iter;

    jump_label_lock();
    jump_label_sort_entries(iter_start, iter_stop);

    for (iter = iter_start; iter < iter_stop; iter++) {
        struct static_key *iterk;

        iterk = (struct static_key *)(unsigned long)iter->key;
        arch_jump_label_transform_static(iter, jump_label_type(iterk));
        if (iterk == key)
            continue;

        key = iterk;
        /*
         * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
         */
        *((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
        key->next = NULL;
#endif
    }
    jump_label_unlock();
}
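
The odd-looking *((unsigned long *)&key->entries) += (unsigned long)iter; works because the jump_entry array is aligned, so the low bit of key->entries is free to carry the JUMP_LABEL_TRUE_BRANCH flag, and adding the aligned address preserves that bit. A userspace sketch of this pointer-tagging trick (struct layout and flag handling simplified):

#include <stdio.h>
#include <stdint.h>

#define TRUE_BRANCH_FLAG 1UL

struct jump_entry_sketch { long code, target, key; };

int main(void)
{
    static struct jump_entry_sketch entries[2];  /* aligned, so the low bit is 0 */
    uintptr_t tagged = TRUE_BRANCH_FLAG;         /* flag already set in the low bit */

    tagged += (uintptr_t)&entries[0];            /* same idea as the kernel's += */

    struct jump_entry_sketch *p =
        (struct jump_entry_sketch *)(tagged & ~TRUE_BRANCH_FLAG);
    printf("entries=%p flag=%lu\n",
           (void *)p, (unsigned long)(tagged & TRUE_BRANCH_FLAG));
    return 0;
}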

1.11 setup_log_buf

Defined in kernel/printk.c

void __init setup_log_buf(int early)
{
    unsigned long flags;
    char *new_log_buf;
    int free;

    if (!new_log_buf_len)
        return;

    if (early) {
        unsigned long mem;

        mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
        if (!mem)
            return;
        new_log_buf = __va(mem);
    } else {
        new_log_buf = alloc_bootmem_nopanic(new_log_buf_len);
    }

    if (unlikely(!new_log_buf)) {
        pr_err("log_buf_len: %ld bytes not available\n",
            new_log_buf_len);
        return;
    }

    raw_spin_lock_irqsave(&logbuf_lock, flags);
    log_buf_len = new_log_buf_len;
    log_buf = new_log_buf;
    new_log_buf_len = 0;
    free = __LOG_BUF_LEN - log_next_idx;
    memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
    raw_spin_unlock_irqrestore(&logbuf_lock, flags);

    pr_info("log_buf_len: %d\n", log_buf_len);
    pr_info("early log buf free: %d(%d%%)\n",
        free, (free * 100) / __LOG_BUF_LEN);
}

1.12 vfs_caches_init_early

Defined in fs/dcache.c

void __init vfs_caches_init_early(void)
{
    dcache_init_early();
    inode_init_early();
}

static void __init dcache_init_early(void)
{
    unsigned int loop;

    /* If hashes are distributed across NUMA nodes, defer
     * hash allocation until vmalloc space is available.
     */
    if (hashdist)
        return;

    dentry_hashtable =
        alloc_large_system_hash("Dentry cache",
                    sizeof(struct hlist_bl_head),
                    dhash_entries,
                    13,
                    HASH_EARLY,
                    &d_hash_shift,
                    &d_hash_mask,
                    0,
                    0);

    for (loop = 0; loop < (1U << d_hash_shift); loop++)
        INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}

void __init inode_init_early(void)
{
    unsigned int loop;

    /* If hashes are distributed across NUMA nodes, defer
     * hash allocation until vmalloc space is available.
     */
    if (hashdist)
        return;

    inode_hashtable =
        alloc_large_system_hash("Inode-cache",
                    sizeof(struct hlist_head),
                    ihash_entries,
                    14,
                    HASH_EARLY,
                    &i_hash_shift,
                    &i_hash_mask,
                    0,
                    0);

    for (loop = 0; loop < (1U << i_hash_shift); loop++)
        INIT_HLIST_HEAD(&inode_hashtable[loop]);
}
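
alloc_large_system_hash hands back the table together with a shift and a mask; a dentry (or inode) then lands in bucket hash & mask. A tiny userspace sketch of how the shift, mask and bucket index relate (the shift and hash value below are made up):

#include <stdio.h>

int main(void)
{
    unsigned int d_hash_shift = 13;              /* e.g. 8192 buckets */
    unsigned int d_hash_mask  = (1U << d_hash_shift) - 1;
    unsigned long hash = 0x7f4a7c15UL;           /* some hash value */

    printf("buckets = %u, bucket index = %lu\n",
           1U << d_hash_shift, hash & d_hash_mask);
    return 0;
}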

1.13 perf_event_init

Defined in kernel/events/core.c

void __init perf_event_init(void)
{
    int ret;

    idr_init(&pmu_idr);

    perf_event_init_all_cpus();
    init_srcu_struct(&pmus_srcu);
    perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
    perf_pmu_register(&perf_cpu_clock, NULL, -1);
    perf_pmu_register(&perf_task_clock, NULL, -1);
    perf_tp_register();
    perf_cpu_notifier(perf_cpu_notify);
    register_reboot_notifier(&perf_reboot_notifier);

    ret = init_hw_breakpoint();
    WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

    /* do not patch jump label more than once per second */
    jump_label_rate_limit(&perf_sched_events, HZ);

    /*
     * Build time assertion that we keep the data_head at the intended
     * location.  IOW, validation we got the __reserved[] size right.
     */
    BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
             != 1024);
}

1.14 rcu_init

Defined in kernel/rcutree.c

void __init rcu_init(void)
{
    int cpu;

    rcu_bootup_announce();
    rcu_init_geometry();
    rcu_init_one(&rcu_sched_state, &rcu_sched_data);
    rcu_init_one(&rcu_bh_state, &rcu_bh_data);
    __rcu_init_preempt();
    open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);

    /*
     * We don't need protection against CPU-hotplug here because
     * this is called early in boot, before either interrupts
     * or the scheduler are operational.
     */
    cpu_notifier(rcu_cpu_notify, 0);
    for_each_online_cpu(cpu)
        rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
}

1.15 tick_nohz_init

Defined in kernel/time/tick-sched.c

void __init tick_nohz_init(void)
{
    int cpu;

    if (!have_nohz_full_mask) {
        if (tick_nohz_init_all() < 0)
            return;
    }

    cpu_notifier(tick_nohz_cpu_down_callback, 0);

    /* Make sure full dynticks CPU are also RCU nocbs */
    for_each_cpu(cpu, nohz_full_mask) {
        if (!rcu_is_nocb_cpu(cpu)) {
            pr_warning("NO_HZ: CPU %d is not RCU nocb: "
                   "cleared from nohz_full range", cpu);
            cpumask_clear_cpu(cpu, nohz_full_mask);
        }
    }

    cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask);
    pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);
}

1.16 radix_tree_init

Defined in lib/radix-tree.c

void __init radix_tree_init(void)
{
    radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
            sizeof(struct radix_tree_node), 0,
            SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
            radix_tree_node_ctor);
    radix_tree_init_maxindex();
    hotcpu_notifier(radix_tree_callback, 0);
}

1.17 init_timers

Defined in kernel/timer.c

void __init init_timers(void)
{
    int err;

    /* ensure there are enough low bits for flags in timer->base pointer */
    BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);

    err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
                   (void *)(long)smp_processor_id());
    init_timer_stats();

    BUG_ON(err != NOTIFY_OK);
    register_cpu_notifier(&timers_nb);
    open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

1.18 hrtimers_init

Defined in kernel/hrtimer.c

void __init hrtimers_init(void)
{
    hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
              (void *)(long)smp_processor_id());
    register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
    open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
#endif
}

1.19 softirq_init

Defined in kernel/softirq.c

void __init softirq_init(void)
{
    int cpu;

    for_each_possible_cpu(cpu) {
        int i;

        per_cpu(tasklet_vec, cpu).tail =
            &per_cpu(tasklet_vec, cpu).head;
        per_cpu(tasklet_hi_vec, cpu).tail =
            &per_cpu(tasklet_hi_vec, cpu).head;
        for (i = 0; i < NR_SOFTIRQS; i++)
            INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
    }

    register_hotcpu_notifier(&remote_softirq_cpu_notifier);

    open_softirq(TASKLET_SOFTIRQ, tasklet_action);
    open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
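
Note how tail is initialized to the address of head: tasklet_vec is a singly linked list kept with a tail pointer-to-pointer so that appends are O(1). A userspace sketch of that list layout (simplified types, no locking):

#include <stdio.h>

struct tasklet { struct tasklet *next; int nr; };
struct tasklet_head { struct tasklet *head; struct tasklet **tail; };

static void tasklet_head_init(struct tasklet_head *h)
{
    h->head = NULL;
    h->tail = &h->head;                          /* empty list: tail points at head */
}

static void tasklet_append(struct tasklet_head *h, struct tasklet *t)
{
    t->next = NULL;
    *h->tail = t;                                /* link after the current last element */
    h->tail = &t->next;
}

int main(void)
{
    struct tasklet_head vec;
    struct tasklet a = { .nr = 1 }, b = { .nr = 2 };

    tasklet_head_init(&vec);
    tasklet_append(&vec, &a);
    tasklet_append(&vec, &b);

    for (struct tasklet *t = vec.head; t; t = t->next)
        printf("tasklet %d\n", t->nr);
    return 0;
}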

1.20 timekeeping_init

Defined in kernel/time/timekeeping.c

void __init timekeeping_init(void)
{
    struct timekeeper *tk = &timekeeper;
    struct clocksource *clock;
    unsigned long flags;
    struct timespec now, boot, tmp;

    read_persistent_clock(&now);

    if (!timespec_valid_strict(&now)) {
        pr_warn("WARNING: Persistent clock returned invalid value!\n"
            "         Check your CMOS/BIOS settings.\n");
        now.tv_sec = 0;
        now.tv_nsec = 0;
    } else if (now.tv_sec || now.tv_nsec)
        persistent_clock_exist = true;

    read_boot_clock(&boot);
    if (!timespec_valid_strict(&boot)) {
        pr_warn("WARNING: Boot clock returned invalid value!\n"
            "         Check your CMOS/BIOS settings.\n");
        boot.tv_sec = 0;
        boot.tv_nsec = 0;
    }

    raw_spin_lock_irqsave(&timekeeper_lock, flags);
    write_seqcount_begin(&timekeeper_seq);
    ntp_init();

    clock = clocksource_default_clock();
    if (clock->enable)
        clock->enable(clock);
    tk_setup_internals(tk, clock);

    tk_set_xtime(tk, &now);
    tk->raw_time.tv_sec = 0;
    tk->raw_time.tv_nsec = 0;
    if (boot.tv_sec == 0 && boot.tv_nsec == 0)
        boot = tk_xtime(tk);

    set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
    tk_set_wall_to_mono(tk, tmp);

    tmp.tv_sec = 0;
    tmp.tv_nsec = 0;
    tk_set_sleep_time(tk, tmp);

    memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));

    write_seqcount_end(&timekeeper_seq);
    raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
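
wall_to_monotonic is set to the negated boot time via set_normalized_timespec, which folds tv_nsec back into the [0, NSEC_PER_SEC) range. A userspace sketch of that normalization (the boot timestamp below is made up):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

struct timespec_sketch { long tv_sec; long tv_nsec; };

static void set_normalized(struct timespec_sketch *ts, long sec, long nsec)
{
    while (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; sec++; }
    while (nsec < 0)             { nsec += NSEC_PER_SEC; sec--; }
    ts->tv_sec = sec;
    ts->tv_nsec = nsec;
}

int main(void)
{
    /* assumed value read from the persistent clock at boot */
    struct timespec_sketch boot = { 1492505520, 500000000 };
    struct timespec_sketch wall_to_mono;

    set_normalized(&wall_to_mono, -boot.tv_sec, -boot.tv_nsec);
    printf("wall_to_monotonic = %ld.%09ld\n",
           wall_to_mono.tv_sec, wall_to_mono.tv_nsec);
    return 0;
}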

1.21 time_init

Defined in arch/arm64/kernel/time.c

void __init time_init(void)
{
    u32 arch_timer_rate;

    clocksource_of_init();

    arch_timer_rate = arch_timer_get_rate();
    if (!arch_timer_rate)
        panic("Unable to initialise architected timer.\n");

    /* Cache the sched_clock multiplier to save a divide in the hot path. */
    sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;

    /* Calibrate the delay loop directly */
    lpj_fine = arch_timer_rate / HZ;
}
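
The two derived values are simple divisions: sched_clock_mult is nanoseconds per counter tick and lpj_fine is counter ticks per jiffy. A userspace sketch with an assumed 13 MHz architected timer and HZ=100 (both values are assumptions for illustration; the real ones come from the device tree and kernel config):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

int main(void)
{
    unsigned long arch_timer_rate = 13000000;    /* assumed: 13 MHz timer */
    unsigned long hz = 100;                      /* assumed: HZ=100 */

    unsigned long sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
    unsigned long lpj_fine = arch_timer_rate / hz;

    printf("sched_clock_mult = %lu ns per counter tick\n", sched_clock_mult);
    printf("lpj_fine = %lu counter ticks per jiffy\n", lpj_fine);
    return 0;
}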

1.22 profile_init

Defined in kernel/profile.c

int __ref profile_init(void)
{
    int buffer_bytes;
    if (!prof_on)
        return 0;

    /* only text is profiled */
    prof_len = (_etext - _stext) >> prof_shift;
    buffer_bytes = prof_len*sizeof(atomic_t);

    if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
        return -ENOMEM;

    cpumask_copy(prof_cpu_mask, cpu_possible_mask);

    prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
    if (prof_buffer)
        return 0;

    prof_buffer = alloc_pages_exact(buffer_bytes,
                    GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
    if (prof_buffer)
        return 0;

    prof_buffer = vzalloc(buffer_bytes);
    if (prof_buffer)
        return 0;

    free_cpumask_var(prof_cpu_mask);
    return -ENOMEM;
}
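
profile_init tries progressively more forgiving allocators: kzalloc, then alloc_pages_exact, then vzalloc, and only fails when all three do. A userspace sketch of that tiered-fallback pattern (the thresholds and allocators below are stand-ins, not the kernel APIs):

#include <stdio.h>
#include <stdlib.h>

/* stand-ins for kzalloc / alloc_pages_exact / vzalloc */
static void *try_small(size_t n)   { return n <= 4096 ? calloc(1, n) : NULL; }
static void *try_pages(size_t n)   { return n <= (1 << 20) ? calloc(1, n) : NULL; }
static void *try_virtual(size_t n) { return calloc(1, n); }

int main(void)
{
    size_t buffer_bytes = 64 * 1024;
    void *buf;

    if ((buf = try_small(buffer_bytes)) ||       /* cheapest tier first */
        (buf = try_pages(buffer_bytes)) ||
        (buf = try_virtual(buffer_bytes))) {
        printf("allocated %zu bytes\n", buffer_bytes);
        free(buf);
        return 0;
    }
    return 1;                                    /* every tier failed */
}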

1.23 call_function_init

Defined in kernel/smp.c

void __init call_function_init(void)
{
    void *cpu = (void *)(long)smp_processor_id();
    int i;

    for_each_possible_cpu(i) {
        struct call_single_queue *q = &per_cpu(call_single_queue, i);

        raw_spin_lock_init(&q->lock);
        INIT_LIST_HEAD(&q->list);
    }

    hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
    register_cpu_notifier(&hotplug_cfd_notifier);
}