您的位置:首页 > 其它

内核与用户空间共享内存之mmap

2014-12-15 18:31 288 查看
http://blog.chinaunix.net/uid-24148050-id-359666.html

一、用到的API与数据结构

先看用户空间使用的API

#include <sys/mman.h>

void *mmap(void *start,
//映射的范围首地址,通常设NULL,让系统自动选地址,映射成功后返回该地址

size_t length, //映射的范围的大小

int prot, //映射区的保护属性 PROT_EXEC PROT_READ PROT_WRITE PROT_NONE

int flags, //映射区的属性,注意MAP_SHARED、MAP_PRIVATE必选其一

int fd, //文件描述符

off_t offsize);//偏移量,后面讲

返回值:若映射成功则返回映射区的内存起始地址,否则返回MAP_FAILED(-1),错误原因存于errno 中。

另外注意: offsize参数是有限制的。。。(以下摘自man手册)

EINVAL We don’t like start or length or offset.

(E.g., they are too large, or not aligned on a PAGESIZE boundary.)

在字符设备驱动模块里,有一个stuct file_operations结构

其中的fop->mmap() 指向你自己的mmap钩子函数。

用户空间里对一个字符设备文件进行mmap()系统调用后,最终会调用驱动模块里的mmap钩子函数

在mmap钩子函数里,需要调用下面这个API

int remap_pfn_range(struct vm_area_struct *vma,
//这个结构很重要!!后面讲

unsigned long virt_addr, //要映射的范围的首地址

unsigned long pfn, //要映射的范围对应的物理内存的页帧号!!重要

unsigned long size, //要映射的范围的大小

pgprot_t prot); //PROTECT属性,mmap()中来的

内核维护VMA的链表和树形结构(即vm_area_struct),我们没事别建新的VMA,否则会打破这种组织结构。

用户空间每次调用mmap(),内核都会新建一个vm_area_struct数据结构,我们只关注几个重要的成员

struct vm_area_struct{

unsigned long vm_start; //要映射的范围的首地址

unsigned long vm_end; //要映射的范围的尾地址

pgprot_t vm_page_prot; //来自mmap()的形参prot

struct file *vm_file; //字符设备文件所对应的file数据结构

unsigned long vm_pgoff; //来自mmap()的形参offsize,注意它是以PAGE为单位的,不是字节

unsigned long vm_flags; //来自mmap()的形参flags

}

在你的mmap钩子函数里,象下面这样就可以了

int my_mmap(struct file *filp, struct
vm_area_struct *vma){

//......省略,page很重要,其他的参数一般照下面就可以了

remap_pfn_range(vma, vma->vm_start, page,

(vma->vm_end - vma->vm_start), vma->vm_page_prot);

//......省略

}

二、代码示例

内核模块里分配内存,通过proc把此内存的EA地址、大小告诉用户空间。

用户空间通过proc获取内存相关信息后,调用mmap,往内存里写字符串。

内核模块代码:

#include <linux/module.h>

#include <linux/types.h>

#include <linux/fs.h>

#include <asm/io.h>

#include <asm/system.h>

#include <linux/cdev.h>

#include <linux/proc_fs.h>

#include <linux/mm.h>

#define PROC_SHM_MAP_DIR "shm_dir"

#define PROC_SHM_MAP_INFO "shm_info"

#define PAGE_ORDER 0

#define PAGES_NUMBER 1

static int dbg_major = 215;

struct cdev dgb_dev;

struct proc_dir_entry *proc_shm_map_dir;

unsigned long kernel_memaddr = 0;

unsigned long kernel_memsize = 0;

/*
 * /proc read callback (old create_proc_info_entry() signature).
 * Reports "<phys-addr-hex> <size-decimal>\n" so user space can locate
 * and mmap() the shared kernel buffer.
 *
 * Fix: the original format string was "lx %lu\n" — the '%' before "lx"
 * was missing, so the physical address was emitted as the literal text
 * "lx" and the user-space sscanf() could never parse it.
 */
int get_shm_proc_info(char *page, char **start, off_t off, int count)
{
	return sprintf(page, "%lx %lu\n", __pa(kernel_memaddr), kernel_memsize);
}

/*
 * Create /proc/shm_dir/shm_info, the entry that exports the shared
 * buffer's physical address and size to user space.
 */
void create_shm_proc(void)
{
	proc_shm_map_dir = proc_mkdir(PROC_SHM_MAP_DIR, NULL);
	if (!proc_shm_map_dir) {
		printk("proc create error!\n");
		return;
	}

	create_proc_info_entry(PROC_SHM_MAP_INFO, 0, proc_shm_map_dir,
			       get_shm_proc_info);
}

/* Remove /proc/shm_dir/shm_info, then the /proc/shm_dir directory itself. */
void destroy_shm_proc(void)
{
	remove_proc_entry(PROC_SHM_MAP_INFO, proc_shm_map_dir);
	remove_proc_entry(PROC_SHM_MAP_DIR, NULL);
}

int shm_mmap(struct file *filp, struct
vm_area_struct *vma)

{

unsigned long page;

page = virt_to_phys((void *)kernel_memaddr) >> PAGE_SHIFT;

if( remap_pfn_range(vma, vma->vm_start, page, (vma->vm_end - vma->vm_start),

vma->vm_page_prot) )

return -1;

vma->vm_flags |= VM_RESERVED;

printk("remap_pfn_rang page:[%lu] ok.\n", page);

return 0;

}

int create_shm_mem(void)

{

if( NULL == (kernel_memaddr =__get_free_pages(GFP_KERNEL, PAGE_ORDER)
)) {

printk("alloc kernel memory failed!\n");

return -1;

}

SetPageReserved(virt_to_page(kernel_memaddr)); // important !! only
1 page

kernel_memsize = PAGES_NUMBER * PAGE_SIZE;

printk("The kernel mem addr=lx, size=%lu\n",__pa(kernel_memaddr), kernel_memsize);

return 0;

}

/*
 * Dump the string user space wrote into the shared page, then clear the
 * Reserved flag and return the page to the allocator.
 */
void destroy_shm_mem(void)
{
	printk("The string written by user is: %s\n",
	       (unsigned char *)kernel_memaddr);

	ClearPageReserved(virt_to_page(kernel_memaddr));	/* important !! */
	free_pages(kernel_memaddr, PAGE_ORDER);
}

/* Character-device operations: only the mmap hook is implemented. */
static const struct file_operations dbg_fops ={

.owner = THIS_MODULE,

.mmap = shm_mmap,

};

/* Module exit: tear everything down in reverse order of dbg_init(). */
static void dbg_exit(void)
{
	cdev_del(&dgb_dev);
	unregister_chrdev_region(MKDEV(dbg_major, 0), 1);
	destroy_shm_proc();
	destroy_shm_mem();
}

int dbg_init(void)

{

int result;

dev_t devno ;

struct cdev *p_cdev = &dgb_dev;

if (dbg_major){

devno =MKDEV(dbg_major,0);

result = register_chrdev_region(devno,1, "leon");

}

else{

result=alloc_chrdev_region(&devno, 0, 1,"leon");

dbg_major = MAJOR(devno);

}

if (result < 0)

return result;

printk("the major device No. is %d\n", dbg_major );

cdev_init(p_cdev,&dbg_fops);

p_cdev->owner = THIS_MODULE;

result = cdev_add(p_cdev,devno,1);

if(result){

printk(KERN_NOTICE "Error %d while adding dbg",result);

return result;

}

create_shm_mem();

create_shm_proc();

return 0;

}

MODULE_AUTHOR("leonwang202");

MODULE_LICENSE("Dual BSD/GPL");

module_init(dbg_init);

module_exit(dbg_exit);

用户空间代码:

#include <stdio.h>

#include <stdlib.h>

#include <unistd.h>

#include <string.h>

#include <fcntl.h>

#include <sys/stat.h>

#include <sys/types.h>

#include <sys/mman.h>

#include <errno.h>

/*
 * Write argv[1] into the kernel module's shared page via the driver's
 * mmap hook (/dev/enetdbg).  The buffer's physical address and size are
 * read from /proc/shm_dir/shm_info first.
 *
 * Fixes vs. the original:
 *  - sscanf format restored to "%lx %lu": the '%' before "lx" had been
 *    lost, so parsing always failed against the driver's output.
 *  - read() result is checked and the buffer is NUL-terminated (read()
 *    does not terminate strings).
 *  - The copy into the mapping is bounded by the mapped size instead of
 *    an unbounded strcpy().
 *  - fd is closed on the mmap-failure path.
 */
int main(int argc, char *argv[])
{
	unsigned long phymem_addr, phymem_size;
	char *map_addr;
	char s[256];
	ssize_t n;
	int fd;

	if (argc != 2) {
		printf("Usage: %s string\n", argv[0]);
		return 0;
	}

	fd = open("/proc/shm_dir/shm_info", O_RDONLY);
	if (fd < 0) {
		printf("cannot open file /proc/shm_dir/shm_info\n");
		return 0;
	}
	n = read(fd, s, sizeof(s) - 1);
	close(fd);
	if (n <= 0) {
		printf("data format from /proc/shm_dir/shm_info error!\n");
		return -1;
	}
	s[n] = '\0';

	if (sscanf(s, "%lx %lu", &phymem_addr, &phymem_size) != 2) {
		printf("data format from /proc/shm_dir/shm_info error!\n");
		return -1;
	}
	printf("phymem_addr=%lx, phymem_size=%lu\n", phymem_addr, phymem_size);

	fd = open("/dev/enetdbg", O_RDWR | O_NONBLOCK);
	if (fd < 0) {
		printf("open /dev/enetdbg error!\n");
		return -1;
	}

	/* offset 0: the driver's shm_mmap() never looks at vm_pgoff */
	map_addr = mmap(NULL, phymem_size, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);
	if (map_addr == MAP_FAILED) {
		printf("mmap() error:[%d]\n", errno);
		close(fd);
		return -1;
	}

	snprintf(map_addr, phymem_size, "%s", argv[1]);	/* bounded copy */

	munmap(map_addr, phymem_size);
	close(fd);
	return 0;
}

注意:我们在shm_mmap里没使用vm->pgoff,所以mmap()的形参offsize可以无视。

三、使用/dev/mem

想偷懒的,可以使用/dev/mem,就不用费劲写内核模块里的mmap钩子函数了

(此用法在帖子http://bbs.chinaunix.net/thread-2017863-1-1.html中有写)

内核代码不用改,图干净的话只要把mmap钩子函数相关的东东全删掉就ok了

用户空间代码得改:

#include <stdio.h>

#include <stdlib.h>

#include <unistd.h>

#include <string.h>

#include <fcntl.h>

#include <sys/stat.h>

#include <sys/types.h>

#include <sys/mman.h>

#include <errno.h>

/*
 * /dev/mem variant: write argv[1] into the kernel's shared page with no
 * driver-side mmap hook — /dev/mem's own mmap implementation honours the
 * file offset, so the physical address is passed as the mmap offset.
 *
 * Fixes vs. the original:
 *  - sscanf format restored to "%lx %lu" (the '%' before "lx" was lost,
 *    so parsing always failed).
 *  - read() result is checked and the buffer is NUL-terminated.
 *  - mmap()'s result is checked against MAP_FAILED before use; the
 *    original dereferenced it unconditionally.
 *  - The copy is bounded by the mapped size instead of strcpy().
 * Note: the offset must be page-aligned; phymem_addr is, because the
 * kernel buffer comes from __get_free_pages().
 */
int main(int argc, char *argv[])
{
	unsigned long phymem_addr, phymem_size;
	char *map_addr;
	char s[256];
	ssize_t n;
	int fd;

	if (argc != 2) {
		printf("Usage: %s string\n", argv[0]);
		return 0;
	}

	fd = open("/proc/shm_dir/shm_info", O_RDONLY);
	if (fd < 0) {
		printf("cannot open file /proc/shm_dir/shm_info\n");
		return 0;
	}
	n = read(fd, s, sizeof(s) - 1);
	close(fd);
	if (n <= 0) {
		printf("data format from /proc/shm_dir/shm_info error!\n");
		return -1;
	}
	s[n] = '\0';

	if (sscanf(s, "%lx %lu", &phymem_addr, &phymem_size) != 2) {
		printf("data format from /proc/shm_dir/shm_info error!\n");
		return -1;
	}
	printf("phymem_addr=%lx, phymem_size=%lu\n", phymem_addr, phymem_size);

	fd = open("/dev/mem", O_RDWR | O_NONBLOCK);
	if (fd < 0) {
		printf("open /dev/mem error!\n");
		exit(0);
	}

	/* Offset selects the physical range: /dev/mem's mmap uses vm_pgoff. */
	map_addr = mmap(NULL, phymem_size, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, (off_t)phymem_addr);
	if (map_addr == MAP_FAILED) {
		printf("mmap() error:[%d]\n", errno);
		close(fd);
		return -1;
	}

	snprintf(map_addr, phymem_size, "%s", argv[1]);	/* bounded copy */

	munmap(map_addr, phymem_size);
	close(fd);
	return 0;
}

注意:

1.你的linux环境下没有/dev/mem的话,敲命令 mknod /dev/mem c 1 1 原因后面讲

2.这次mmap()形参offsize不能无视,因为创建/dev/mem的内核模块的mmap钩子函数里用到了vm->pgoff

看到底是哪个模块创建了/dev/mem

start_kernel --> kernel_init --> do_basic_setup --> do_initcalls --> chr_dev_init

文件 linux_2_6_24/drivers/char/mem.c 中 chr_dev_init() 函数

/*
 * Abridged from linux-2.6.24 drivers/char/mem.c: registers major 1
 * ("mem" — shows as "1 mem" in /proc/devices) and creates a device node
 * for every entry of devlist[].
 *
 * Transcription fixes vs. the blog's copy: the loop increment "i++" had
 * been garbled to "i ", the loop variable was undeclared in the
 * excerpt, and the int function had no return.
 */
static int __init chr_dev_init(void)
{
	int i;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	/* One device per table entry: major 1, minor taken from devlist[]. */
	for (i = 0; i < ARRAY_SIZE(devlist); i++)
		device_create(mem_class, NULL,
			      MKDEV(MEM_MAJOR, devlist[i].minor),
			      devlist[i].name);

	return 0;
}

/* Minor-device table for major 1, quoted from drivers/char/mem.c. */
static const struct {

unsigned int minor;

char *name;

umode_t mode;

const struct file_operations *fops;

} devlist[] = { /* list of minor devices */ /* wow — that's a lot of major-1 char devices... */

{1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops}, /* hence "mknod /dev/mem c 1 1" */

{2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},

{3, "null", S_IRUGO | S_IWUGO, &null_fops},

#ifdef CONFIG_DEVPORT

{4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},

#endif

{5, "zero", S_IRUGO | S_IWUGO, &zero_fops},

{7, "full", S_IRUGO | S_IWUGO, &full_fops},

{8, "random", S_IRUGO | S_IWUSR, &random_fops},

{9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops},

{11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops},

#ifdef CONFIG_CRASH_DUMP

{12,"oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},

#endif

};

---------------------------------- 华丽的分割线 -------------------------------------------

若共享小块连续内存,上面例子用get_free_pages可以分配多达几M的连续空间(MAX_ORDER目前是11)。

若共享大块连续内存,就得靠uboot帮忙了,给linux kernel传参数的时候,用mem=大小

《ldd3》中的分配内存那章有专门一节讲解,通过API

void *alloc_bootmem(unsigned long size);

void free_bootmem(unsigned long addr, unsigned long size);
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: