Linux 内核设计与实现2:https://developer.aliyun.com/article/1597349
5、vmalloc 函数
vmalloc 函数的工作方式类似于 kmalloc(),只不过 vmalloc 分配的内存虚拟地址是连续的,而物理地址则无须连续。
// include/linux/vmalloc.h void *vmalloc(unsigned long size);
// mm/vmalloc.c void *vmalloc(unsigned long size) { return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, -1, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc);
6、slab
(1)kmem_cache 结构体
slab 根据配置不同,引用了不同的文件,具体如下:
#ifdef CONFIG_SLUB #include <linux/slub_def.h> #elif defined(CONFIG_SLOB) #include <linux/slob_def.h> #else #include <linux/slab_def.h> #endif
以下列举的是文件 slab_def.h 的信息。
// include/linux/slab_def.h struct kmem_cache { /* 1) per-cpu data, touched during every alloc/free */ struct array_cache *array[NR_CPUS]; /* 2) Cache tunables. Protected by cache_chain_mutex */ unsigned int batchcount; unsigned int limit; unsigned int shared; unsigned int buffer_size; u32 reciprocal_buffer_size; /* 3) touched by every alloc & free from the backend */ unsigned int flags; /* constant flags */ unsigned int num; /* # of objs per slab */ /* 4) cache_grow/shrink */ /* order of pgs per slab (2^n) */ unsigned int gfporder; /* force GFP flags, e.g. GFP_DMA */ gfp_t gfpflags; size_t colour; /* cache colouring range */ unsigned int colour_off; /* colour offset */ struct kmem_cache *slabp_cache; unsigned int slab_size; unsigned int dflags; /* dynamic flags */ /* constructor func */ void (*ctor)(void *obj); /* 5) cache creation/removal */ const char *name; struct list_head next; /* 6) statistics */ #ifdef CONFIG_DEBUG_SLAB unsigned long num_active; unsigned long num_allocations; unsigned long high_mark; unsigned long grown; unsigned long reaped; unsigned long errors; unsigned long max_freeable; unsigned long node_allocs; unsigned long node_frees; unsigned long node_overflow; atomic_t allochit; atomic_t allocmiss; atomic_t freehit; atomic_t freemiss; /* * If debugging is enabled, then the allocator can add additional * fields and/or padding to every object. buffer_size contains the total * object size including these internal fields, the following two * variables contain the offset to the user object and its size. */ int obj_offset; int obj_size; #endif /* CONFIG_DEBUG_SLAB */ /* * We put nodelists[] at the end of kmem_cache, because we want to size * this array to nr_node_ids slots instead of MAX_NUMNODES * (see kmem_cache_init()) * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache * is statically defined, so we reserve the max number of nodes. */ struct kmem_list3 *nodelists[MAX_NUMNODES]; /* * Do not add fields after nodelists[] */ };
kmem_list3 结构体
// mm/slab.c struct kmem_list3 { struct list_head slabs_partial; /* partial list first, better asm code */ struct list_head slabs_full; struct list_head slabs_free; unsigned long free_objects; unsigned int free_limit; unsigned int colour_next; /* Per-node cache coloring */ spinlock_t list_lock; struct array_cache *shared; /* shared per node */ struct array_cache **alien; /* on other nodes */ unsigned long next_reap; /* updated without locking */ int free_touched; /* updated without locking */ };
(2)slab 结构体
// mm/slab.c struct slab { struct list_head list; unsigned long colouroff; void *s_mem; /* including colour offset */ unsigned int inuse; /* num of objs active in slab */ kmem_bufctl_t free; unsigned short nodeid; };
(3)函数
// mm/slab.c static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid); struct kmem_cache * kmem_cache_create (const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); void kmem_cache_destroy(struct kmem_cache *cachep); void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags); void kmem_cache_free(struct kmem_cache *cachep, void *objp);
7、栈
内核栈一般为 8K(两页),可在编译时配置,范围为 4~16K。当单页(1 页)内核栈选项被激活时,中断处理程序会获得自己的栈(中断栈),不再占用被中断进程的内核栈。
8、高端内存映射
// include/linux/highmem.h static inline void *kmap(struct page *page) { might_sleep(); return page_address(page); } static inline void kunmap(struct page *page) { } static inline void *kmap_atomic(struct page *page, enum km_type idx); #define kunmap_atomic(addr, idx) do { pagefault_enable(); } while (0)
9、分配函数的选择
在这么多分配函数和方法中,有时并不能搞清楚到底该选择哪种方式分配——但这确实很重要。如果你需要连续的物理页,就可以使用某个低级页分配器或 kmalloc()。这是内核中内存分配的常用方式,也是大多数情况下你自己应该使用的内存分配方式。回忆一下,传递给这些函数的两个最常用的标志是 GFP_ATOMIC 和 GFP_KERNEL。GFP_ATOMIC 表示进行不睡眠的高优先级分配,这是中断处理程序和其他不能睡眠的代码段的需要。对于可以睡眠的代码(比如没有持有自旋锁的进程上下文代码),则应该使用 GFP_KERNEL 获取所需的内存。这个标志表示如果有必要,分配时可以睡眠。
如果你想从高端内存进行分配,就使用 alloc_pages()。alloc_pages() 函数返回一个指向 struct page 结构的指针,而不是一个指向某个逻辑地址的指针。因为高端内存很可能并没有被映射,因此,访问它的唯一方式就是通过相应的 struct page 结构。为了获得真正的指针,应该调用 kmap(),把高端内存映射到内核的逻辑地址空间。
如果你不需要物理上连续的页,而仅仅需要虚拟地址上连续的页,那么就使用 vmalloc()(不过要记住 vmalloc() 相对 kmalloc() 来说,有一定的性能损失)。vmalloc() 函数分配的内存虚地址是连续的,但它本身并不保证物理上的连续。这与用户空间的分配非常类似,它也是把物理内存块映射到连续的逻辑地址空间上。
如果你要创建和撤销很多大的数据结构,那么考虑建立 slab 高速缓存。slab 层会给每个处理器维持一个对象高速缓存(空闲链表),这种高速缓存会极大地提高对象分配和回收的性能。slab 层不是频繁地分配和释放内存,而是为你把事先分配好的对象存放到高速缓存中。当你需要一块新的内存来存放数据结构时,slab 层一般无须另外去分配内存,而只需要从高速缓存中得到一个对象就可以了。
十、虚拟文件系统
1、VFS
VFS中有四个主要的对象类型,它们分别是:
- 超级块对象,它代表一个具体的已安装文件系统。
- 索引节点对象,它代表一个具体文件。
- 目录项对象,它代表一个目录项,是路径的一个组成部分。
- 文件对象,它代表由进程打开的文件。
2、超级块
(1)super_block 结构体
// include/linux/fs.h struct super_block { /* 指向所有超级块的链表 */ struct list_head s_list; /* Keep this first */ /* 设备标识符 */ dev_t s_dev; /* search index; _not_ kdev_t */ /* 修改(脏)标志 */ unsigned char s_dirt; /* 以位为单位的块大小 */ unsigned char s_blocksize_bits; /* 以字节为单位的块大小 */ unsigned long s_blocksize; /* 文件大小上限 */ loff_t s_maxbytes; /* Max file size */ /* 文件系统类型 */ struct file_system_type *s_type; /* 超级块方法 */ const struct super_operations *s_op; /* 磁盘限额方法 */ const struct dquot_operations *dq_op; /* 限额控制方法 */ const struct quotactl_ops *s_qcop; /* 导出方法 */ const struct export_operations *s_export_op; /* 挂载标志 */ unsigned long s_flags; /* 文件系统的幻数 */ unsigned long s_magic; /* 目录挂载点 */ struct dentry *s_root; /* 卸载信号量 */ struct rw_semaphore s_umount; /* 超级块互斥体 */ struct mutex s_lock; /* 超级块引用计数 */ int s_count; /* 尚未同步标志 */ int s_need_sync; /* 活动引用计数 */ atomic_t s_active; #ifdef CONFIG_SECURITY /* 安全模块 */ void *s_security; #endif /* 扩展的属性操作 */ struct xattr_handler **s_xattr; /* inodes 链表 */ struct list_head s_inodes; /* all inodes */ /* 匿名目录项 */ struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */ /* 被分配文件链表 */ struct list_head s_files; /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */ /* 未被使用目录项链表 */ struct list_head s_dentry_lru; /* unused dentry lru */ /* 链表中目录项的数目 */ int s_nr_dentry_unused; /* # of dentry on lru */ /* 相关的块设备 */ struct block_device *s_bdev; /* */ struct backing_dev_info *s_bdi; /* 存储磁盘信息 */ struct mtd_info *s_mtd; /* 该类型文件系统 */ struct list_head s_instances; /* 限额相关选项 */ struct quota_info s_dquot; /* Diskquota specific options */ /* frozen 标志位 */ int s_frozen; /* 冻结的等待队列 */ wait_queue_head_t s_wait_unfrozen; /* 文本名字 */ char s_id[32]; /* Informational name */ /* 文件系统特殊信息 */ void *s_fs_info; /* Filesystem private info */ /* 安装权限 */ fmode_t s_mode; /* 时间戳粒度 */ /* Granularity of c/m/atime in ns. Cannot be worse than a second */ u32 s_time_gran; /* * The next field is for VFS *only*. 
No filesystems have any business * even looking at it. You had been warned. */ /* */ struct mutex s_vfs_rename_mutex; /* Kludge */ /* * Filesystem subtype. If non-empty the filesystem type field * in /proc/mounts will be "type.subtype" */ /* 子类型名称 */ char *s_subtype; /* * Saved mount options for lazy filesystems using * generic_show_options() */ /* 已存安装选项 */ char *s_options; };
(2)super_operations 结构体
struct super_operations { struct inode *(*alloc_inode)(struct super_block *sb); void (*destroy_inode)(struct inode *); void (*dirty_inode) (struct inode *); int (*write_inode) (struct inode *, struct writeback_control *wbc); void (*drop_inode) (struct inode *); void (*delete_inode) (struct inode *); void (*put_super) (struct super_block *); void (*write_super) (struct super_block *); int (*sync_fs)(struct super_block *sb, int wait); int (*freeze_fs) (struct super_block *); int (*unfreeze_fs) (struct super_block *); int (*statfs) (struct dentry *, struct kstatfs *); int (*remount_fs) (struct super_block *, int *, char *); void (*clear_inode) (struct inode *); void (*umount_begin) (struct super_block *); int (*show_options)(struct seq_file *, struct vfsmount *); int (*show_stats)(struct seq_file *, struct vfsmount *); #ifdef CONFIG_QUOTA ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); #endif int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); };
3、索引节点
(1)inode 结构体
// include/linux/fs.h struct inode { /* 散列表 */ struct hlist_node i_hash; /* 索引节点链表 */ struct list_head i_list; /* backing dev IO list */ /* 超级块链表 */ struct list_head i_sb_list; /* 目录项链表 */ struct list_head i_dentry; /* 节点号 */ unsigned long i_ino; /* 引用计数 */ atomic_t i_count; /* 硬链接数 */ unsigned int i_nlink; /* 使用者的 id */ uid_t i_uid; /* 使用组的 id */ gid_t i_gid; /* 实际设备标识符 */ dev_t i_rdev; /* 以位为单位的块大小 */ unsigned int i_blkbits; /* 版本号 */ u64 i_version; /* 以字节为单位的文件大小 */ loff_t i_size; #ifdef __NEED_I_SIZE_ORDERED /* 对 i_size 进行串行计数 */ seqcount_t i_size_seqcount; #endif /* 最后访问时间 */ struct timespec i_atime; /* 最后修改时间 */ struct timespec i_mtime; /* 最后改变时间 */ struct timespec i_ctime; /* 文件的块数 */ blkcnt_t i_blocks; /* 使用的字节数 */ unsigned short i_bytes; /* 访问权限 */ umode_t i_mode; /* 自旋锁 */ spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ /* */ struct mutex i_mutex; /* 嵌入 i_sem 内部 */ struct rw_semaphore i_alloc_sem; /* 索引节点操作表 */ const struct inode_operations *i_op; /* 缺省的索引节点操作 */ const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ /* 相关的超级块 */ struct super_block *i_sb; /* 文件锁链表 */ struct file_lock *i_flock; /* 相关的地址映射 */ struct address_space *i_mapping; /* 设备地址映射 */ struct address_space i_data; #ifdef CONFIG_QUOTA /* 索引节点的磁盘限额 */ struct dquot *i_dquot[MAXQUOTAS]; #endif /* 块设备链表 */ struct list_head i_devices; union { /* 管道信息 */ struct pipe_inode_info *i_pipe; /* 块设备驱动 */ struct block_device *i_bdev; /* 字符设备驱动 */ struct cdev *i_cdev; }; /* */ __u32 i_generation; #ifdef CONFIG_FSNOTIFY /* */ __u32 i_fsnotify_mask; /* all events this inode cares about */ /* */ struct hlist_head i_fsnotify_mark_entries; /* fsnotify mark entries */ #endif #ifdef CONFIG_INOTIFY /* 索引节点通知监测链表 */ struct list_head inotify_watches; /* watches on this inode */ /* 保护 inotify_watches */ struct mutex inotify_mutex; /* protects the watches list */ #endif /* 状态标志 */ unsigned long i_state; /* 第一次弄脏数据的时间 */ unsigned long dirtied_when; /* jiffies of first dirtying */ /* 
文件系统标志 */ unsigned int i_flags; /* 写者计数 */ atomic_t i_writecount; #ifdef CONFIG_SECURITY /* 安全模块 */ void *i_security; #endif #ifdef CONFIG_FS_POSIX_ACL /* */ struct posix_acl *i_acl; /* */ struct posix_acl *i_default_acl; #endif /* fs 私有指针 */ void *i_private; /* fs or device private pointer */ };
(2)inode_operations 结构体
struct inode_operations { int (*create) (struct inode *,struct dentry *,int, struct nameidata *); struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *); int (*link) (struct dentry *,struct inode *,struct dentry *); int (*unlink) (struct inode *,struct dentry *); int (*symlink) (struct inode *,struct dentry *,const char *); int (*mkdir) (struct inode *,struct dentry *,int); int (*rmdir) (struct inode *,struct dentry *); int (*mknod) (struct inode *,struct dentry *,int,dev_t); int (*rename) (struct inode *, struct dentry *, struct inode *, struct dentry *); int (*readlink) (struct dentry *, char __user *,int); void * (*follow_link) (struct dentry *, struct nameidata *); void (*put_link) (struct dentry *, struct nameidata *, void *); void (*truncate) (struct inode *); int (*permission) (struct inode *, int); int (*check_acl)(struct inode *, int); int (*setattr) (struct dentry *, struct iattr *); int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*removexattr) (struct dentry *, const char *); void (*truncate_range)(struct inode *, loff_t, loff_t); long (*fallocate)(struct inode *inode, int mode, loff_t offset, loff_t len); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); };
4、目录项
(1)dentry 结构体
// include/linux/dcache.h struct dentry { /* 使用记数 */ atomic_t d_count; /* 目录项标识 */ unsigned int d_flags; /* protected by d_lock */ /* 单目录项锁 */ spinlock_t d_lock; /* per dentry lock */ /* 是登录点的目录项吗? */ int d_mounted; /* 相关联的索引节点 */ struct inode *d_inode; /* Where the name belongs to - NULL is * negative */ /* * The next three fields are touched by __d_lookup. Place them here * so they all fit in a cache line. */ /* 散列表 */ struct hlist_node d_hash; /* lookup hash list */ /* 父目录的目录项对象 */ struct dentry *d_parent; /* parent directory */ /* 目录项名称 */ struct qstr d_name; /* 未使用的链表 */ struct list_head d_lru; /* LRU list */ /* * d_child and d_rcu can share memory */ union { /* 目录项内部形成的链表 */ struct list_head d_child; /* child of parent list */ /* RCU 加锁 */ struct rcu_head d_rcu; } d_u; /* 子目录链表 */ struct list_head d_subdirs; /* our children */ /* 索引节点别名链表 */ struct list_head d_alias; /* inode alias list */ /* 重置时间 */ unsigned long d_time; /* used by d_revalidate */ /* 目录项操作指针 */ const struct dentry_operations *d_op; /* 文件的超级块 */ struct super_block *d_sb; /* The root of the dentry tree */ /* 文件系统特有数据 */ void *d_fsdata; /* fs-specific data */ /* 短文件名 */ unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */ };
(2)dentry_operations 结构体
// include/linux/dcache.h struct dentry_operations { int (*d_revalidate)(struct dentry *, struct nameidata *); int (*d_hash) (struct dentry *, struct qstr *); int (*d_compare) (struct dentry *, struct qstr *, struct qstr *); int (*d_delete)(struct dentry *); void (*d_release)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char *(*d_dname)(struct dentry *, char *, int); };
5、文件
(1)file 结构体
// include/linux/fs.h struct file { /* * fu_list becomes invalid after file_free is called and queued via * fu_rcuhead for RCU freeing */ union { /* 文件对象链表 */ struct list_head fu_list; /* 释放之后的 RCU 链表 */ struct rcu_head fu_rcuhead; } f_u; /* 包含目录项 */ struct path f_path; #define f_dentry f_path.dentry #define f_vfsmnt f_path.mnt /* 文件操作表 */ const struct file_operations *f_op; /* 单个文件结构锁 */ spinlock_t f_lock; /* f_ep_links, f_flags, no IRQ */ /* 文件对象的使用计数 */ atomic_long_t f_count; /* 当打开文件时所指定的标志 */ unsigned int f_flags; /* 文件的访问模式 */ fmode_t f_mode; /* 文件当前的位移量(文件指针) */ loff_t f_pos; /* 拥有者通过信号进行异步 I/O 数据的传送 */ struct fown_struct f_owner; /* 文件的信任状 */ const struct cred *f_cred; /* 预读状态 */ struct file_ra_state f_ra; /* 版本号 */ u64 f_version; #ifdef CONFIG_SECURITY /* 安全模块 */ void *f_security; #endif /* needed for tty driver, and maybe others */ /* tty 设备驱动的钩子 */ void *private_data; #ifdef CONFIG_EPOLL /* Used by fs/eventpoll.c to link all the hooks to this file */ /* 事件池链表 */ struct list_head f_ep_links; #endif /* #ifdef CONFIG_EPOLL */ /* 页缓存映射 */ struct address_space *f_mapping; #ifdef CONFIG_DEBUG_WRITECOUNT /* 调试状态 */ unsigned long f_mnt_write_state; #endif };
(2)file_operations 结构体
// include/linux/fs.h struct file_operations { struct module *owner; loff_t (*llseek) (struct file *, loff_t, int); ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t); ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t); int (*readdir) (struct file *, void *, filldir_t); unsigned int (*poll) (struct file *, struct poll_table_struct *); int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*mmap) (struct file *, struct vm_area_struct *); int (*open) (struct inode *, struct file *); int (*flush) (struct file *, fl_owner_t id); int (*release) (struct inode *, struct file *); int (*fsync) (struct file *, struct dentry *, int datasync); int (*aio_fsync) (struct kiocb *, int datasync); int (*fasync) (int, struct file *, int); int (*lock) (struct file *, int, struct file_lock *); ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock) (struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); int (*setlease)(struct file *, long, struct file_lock **); };
6、和文件系统相关的数据结构
(1)file_system_type 结构体
文件系统类型
// include/linux/fs.h struct file_system_type { /* 文件系统的名字 */ const char *name; /* 文件系统类型标志 */ int fs_flags; /* 用来从磁盘中读取超级块 */ int (*get_sb) (struct file_system_type *, int, const char *, void *, struct vfsmount *); /* 用来终止访问超级块 */ void (*kill_sb) (struct super_block *); /* 文件系统模块 */ struct module *owner; /* 链表中下一个文件系统类型 */ struct file_system_type * next; /* 超级块对象链表 */ struct list_head fs_supers; /* 剩下的几个字段运行时使锁生效 */ struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; struct lock_class_key i_alloc_sem_key; };
(2)vfsmount 结构体
VFS 文件安装点
// include/linux/mount.h struct vfsmount { /* 散列表 */ struct list_head mnt_hash; /* 父文件系统 */ struct vfsmount *mnt_parent; /* fs we are mounted on */ /* 安装点的目录项 */ struct dentry *mnt_mountpoint; /* dentry of mountpoint */ /* 该文件系统的根目录项 */ struct dentry *mnt_root; /* root of the mounted tree */ /* 该文件系统的超级块 */ struct super_block *mnt_sb; /* pointer to superblock */ /* 子文件系统链表 */ struct list_head mnt_mounts; /* list of children, anchored here */ /* 子文件系统链表 */ struct list_head mnt_child; /* and going through their mnt_child */ /* 安装标志 */ int mnt_flags; /* 4 bytes hole on 64bits arches */ /* 设备文件名 */ const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */ /* 描述符链表 */ struct list_head mnt_list; /* 在到期链表中的入口 */ struct list_head mnt_expire; /* link in fs-specific expiry list */ /* 在共享安装链表中的入口 */ struct list_head mnt_share; /* circular list of shared mounts */ /* 从安装链表 */ struct list_head mnt_slave_list;/* list of slave mounts */ /* 从安装链表中的入口 */ struct list_head mnt_slave; /* slave list entry */ /* 从安装链表的主入 */ struct vfsmount *mnt_master; /* slave is on master->mnt_slave_list */ /* 相关的命名空间 */ struct mnt_namespace *mnt_ns; /* containing namespace */ /* 安装标识符 */ int mnt_id; /* mount identifier */ /* 组标识符 */ int mnt_group_id; /* peer group identifier */ /* * We put mnt_count & mnt_expiry_mark at the end of struct vfsmount * to let these frequently modified fields in a separate cache line * (so that reads of mnt_flags wont ping-pong on SMP machines) */ /* 使用计数 */ atomic_t mnt_count; /* 如果标记为到期,则值为真 */ int mnt_expiry_mark; /* true if marked for expiry */ /* 钉住进程计数 */ int mnt_pinned; /* 镜像引用计数 */ int mnt_ghosts; #ifdef CONFIG_SMP /* 写者引用计数 */ int __percpu *mnt_writers; #else /* 写者引用计数 */ int mnt_writers; #endif };
7、和进程相关的数据结构
(1)files_struct 结构体
该结构体由进程描述符中的 files 域指向。
// include/linux/fdtable.h struct files_struct { /* * read mostly part */ /* 结构的使用计数 */ atomic_t count; /* 指向其他 fd 表的指针 */ struct fdtable *fdt; /* 基 fd 表 */ struct fdtable fdtab; /* * written part on a separate cache line in SMP */ /* 单个文件的锁? */ spinlock_t file_lock ____cacheline_aligned_in_smp; /* 缓存下一个可用的 fd */ int next_fd; /* exec() 时关闭的文件描述符链表 */ struct embedded_fd_set close_on_exec_init; /* 打开的文件描述符链表 */ struct embedded_fd_set open_fds_init; /* 缺省的文件对象数组 */ struct file * fd_array[NR_OPEN_DEFAULT]; };
(2)fs_struct 结构体
该结构体包含文件系统和进程相关的信息。
// include/linux/fs_struct.h struct fs_struct { int users; // 用户数目 rwlock_t lock; // 保护该结构体的锁 int umask; // 掩码 int in_exec; // 当前正在执行的文件 struct path root; // 根目录路径 struct path pwd; // 当前工作目录的路径 };
(3)mnt_namespace 结构体
单进程命名空间,它使得每一个进程在系统中都看到唯一的安装文件系统——不仅是唯一的根目录,而且是唯一的文件系统层次结构。
// include/linux/mnt_namespace.h struct mnt_namespace { atomic_t count; // 结构的使用计数 struct vfsmount * root; // 根目录的安装点对象 struct list_head list; // 安装点链表 wait_queue_head_t poll; // 轮询的等待队列 int event; // 事件计数 };
十一、块 I/O 层
1、缓冲区
(1)buffer_head
// include/linux/buffer_head.h struct buffer_head { /* 缓冲区状态标志 */ unsigned long b_state; /* buffer state bitmap (see above) */ /* 页面中的缓冲区 */ struct buffer_head *b_this_page;/* circular list of page's buffers */ /* 存储缓冲区的页面 */ struct page *b_page; /* the page this bh is mapped to */ /* 起始块号 */ sector_t b_blocknr; /* start block number */ /* 映像的大小 */ size_t b_size; /* size of mapping */ /* 页面内的数据指针 */ char *b_data; /* pointer to data within the page */ /* 相关联的块设备 */ struct block_device *b_bdev; /* I/O 完成方法 */ bh_end_io_t *b_end_io; /* I/O completion */ /* io 完成方法 */ void *b_private; /* reserved for b_end_io */ /* 相关的映射链表 */ struct list_head b_assoc_buffers; /* associated with another mapping */ /* 相关的地址空间 */ struct address_space *b_assoc_map; /* mapping this buffer is associated with */ /* 缓冲区使用计数 */ atomic_t b_count; /* users using this buffer_head */ };
(2)bh_state_bits
b_state 域表示缓冲区的状态,合法的标志存放在 bh_state_bits 枚举中。
// include/linux/buffer_head.h enum bh_state_bits { BH_Uptodate, /* Contains valid data */ BH_Dirty, /* Is dirty */ BH_Lock, /* Is locked */ BH_Req, /* Has been submitted for I/O */ BH_Mapped, /* Has a disk mapping */ BH_New, /* Disk mapping was newly created by get_block */ BH_Async_Read, /* Is under end_buffer_async_read I/O */ BH_Async_Write, /* Is under end_buffer_async_write I/O */ BH_Delay, /* Buffer is not yet allocated on disk */ BH_Boundary, /* Block is followed by a discontiguity */ BH_Write_EIO, /* I/O error on write */ BH_Unwritten, /* Buffer is allocated on disk but not written */ BH_Quiet, /* Buffer Error Prinks to be quiet */ BH_Meta, /* Buffer contains metadata */ BH_Prio, /* Buffer should be submitted with REQ_PRIO */ BH_Defer_Completion, /* Defer AIO completion to workqueue */ BH_PrivateStart,/* not a state bit, but the first bit available * for private allocation by other entities */ };
2、bio 结构体
(1)bio 结构体
// include/linux/bio.h struct bio { /* 磁盘上相关的扇区 */ sector_t bi_sector; /* device address in 512 byte sectors */ /* 请求链表 */ struct bio *bi_next; /* request queue link */ /* 相关的块设备 */ struct block_device *bi_bdev; /* 状态和命令标志 */ unsigned long bi_flags; /* status, command, etc */ /* 读还是写 */ unsigned long bi_rw; /* bottom bits READ/WRITE, * top bits priority */ /* bio_vecs 偏移的个数 */ unsigned short bi_vcnt; /* how many bio_vec's */ /* bio_io_vect 的当前索引 */ unsigned short bi_idx; /* current index into bvl_vec */ /* Number of segments in this BIO after * physical address coalescing is performed. */ /* 结合后的片段数目 */ unsigned int bi_phys_segments; /* I/O 计数 */ unsigned int bi_size; /* residual I/O count */ /* * To keep track of the max segment size, we account for the * sizes of the first and last mergeable segments in this bio. */ /* 第一个可合并的段大小 */ unsigned int bi_seg_front_size; /* 最后一个可合并的段大小 */ unsigned int bi_seg_back_size; /* bio_vecs 数目上限 */ unsigned int bi_max_vecs; /* max bvl_vecs we can hold */ /* 结束 CPU */ unsigned int bi_comp_cpu; /* completion CPU */ /* 使用计数 */ atomic_t bi_cnt; /* pin count */ /* bio_vecs 链表 */ struct bio_vec *bi_io_vec; /* the actual vec list */ /* I/O 完成方法 */ bio_end_io_t *bi_end_io; /* 拥有者的私有方法 */ void *bi_private; #if defined(CONFIG_BLK_DEV_INTEGRITY) /* */ struct bio_integrity_payload *bi_integrity; /* data integrity */ #endif /* 撤销方法 */ bio_destructor_t *bi_destructor; /* destructor */ /* * We can inline a number of vecs at the end of the bio, to avoid * double allocations for a small number of bio_vecs. This member * MUST obviously be kept at the very end of the bio. */ /* 内嵌 bio 向量 */ struct bio_vec bi_inline_vecs[0]; };
(2)bio_vec 结构体
// include/linux/bio.h struct bio_vec { /* 指向这个缓冲区所驻留的物理页 */ struct page *bv_page; /* 这个缓冲区以字节为单位的大小 */ unsigned int bv_len; /* 缓冲区所驻留的页中以字节为单位的偏移量 */ unsigned int bv_offset; };
(3)新老方法对比
缓冲区头和新的 bio 结构体之间存在显著差别。bio 结构体代表的是 I/O 操作,它可以包括内存中的一个或多个页;而另一方面,buffer_head 结构体代表的是一个缓冲区,它描述的仅仅是磁盘中的一个块。因为缓冲区头关联的是单独页中的单独磁盘块,所以它可能会引起不必要的分割,将请求按块为单位划分,只能在以后再重新组合。由于 bio 结构体是轻量级的,它描述的块可以不需要连续存储区,并且不需要分割 I/O 操作。
利用 bio 结构体代替 buffer_head 结构体还有以下好处:
bio 结构体很容易处理高端内存,因为它处理的是物理页而不是直接指针。
bio 结构体既可以代表普通页 I/O,同时也可以代表直接 I/O(指那些不通过页高速缓存的 I/O 操作——请参考第 16 章中对页高速缓存的讨论)。
bio 结构体便于执行分散—集中(矢量化的)块 I/O 操作,操作中的数据可取自多个物理页面。
bio 结构体相比缓冲区头属于轻量级的结构体。因为它只需要包含块 I/O 操作所需的信息就行了,不用包含与缓冲区本身相关的不必要信息。
但是还是需要缓冲区头这个概念,毕竟它还负责描述磁盘块到页面的映射。bio 结构体不包含任何和缓冲区相关的状态信息——它仅仅是一个矢量数组,描述一个或多个单独块 I/O 操作的数据片段和相关信息。在当前设置中,当 bio 结构体描述当前正在使用的 I/O 操作时,buffer_head 结构体仍然需要包含缓冲区信息。内核通过这两种结构分别保存各自的信息,可以保证每种结构所含的信息量尽可能地少。
3、请求队列
(1)request_queue
// include/linux/blkdev.h struct request_queue { /* * Together with queue_head for cacheline sharing */ struct list_head queue_head; struct request *last_merge; struct elevator_queue *elevator; /* * the queue request freelist, one for reads and one for writes */ struct request_list rq; request_fn_proc *request_fn; make_request_fn *make_request_fn; prep_rq_fn *prep_rq_fn; unplug_fn *unplug_fn; merge_bvec_fn *merge_bvec_fn; prepare_flush_fn *prepare_flush_fn; softirq_done_fn *softirq_done_fn; rq_timed_out_fn *rq_timed_out_fn; dma_drain_needed_fn *dma_drain_needed; lld_busy_fn *lld_busy_fn; /* * Dispatch queue sorting */ sector_t end_sector; struct request *boundary_rq; /* * Auto-unplugging state */ struct timer_list unplug_timer; int unplug_thresh; /* After this many requests */ unsigned long unplug_delay; /* After this many jiffies */ struct work_struct unplug_work; struct backing_dev_info backing_dev_info; /* * The queue owner gets to use this for whatever they like. * ll_rw_blk doesn't touch it. */ void *queuedata; /* * queue needs bounce pages for pages above this limit */ gfp_t bounce_gfp; /* * various queue flags, see QUEUE_* below */ unsigned long queue_flags; /* * protects queue structures from reentrancy. ->__queue_lock should * _never_ be used directly, it is queue private. always use * ->queue_lock. 
*/ spinlock_t __queue_lock; spinlock_t *queue_lock; /* * queue kobject */ struct kobject kobj; /* * queue settings */ unsigned long nr_requests; /* Max # of requests */ unsigned int nr_congestion_on; unsigned int nr_congestion_off; unsigned int nr_batching; void *dma_drain_buffer; unsigned int dma_drain_size; unsigned int dma_pad_mask; unsigned int dma_alignment; struct blk_queue_tag *queue_tags; struct list_head tag_busy_list; unsigned int nr_sorted; unsigned int in_flight[2]; unsigned int rq_timeout; struct timer_list timeout; struct list_head timeout_list; struct queue_limits limits; /* * sg stuff */ unsigned int sg_timeout; unsigned int sg_reserved_size; int node; #ifdef CONFIG_BLK_DEV_IO_TRACE struct blk_trace *blk_trace; #endif /* * reserved for flush operations */ unsigned int ordered, next_ordered, ordseq; int orderr, ordcolor; struct request pre_flush_rq, bar_rq, post_flush_rq; struct request *orig_bar_rq; struct mutex sysfs_lock; #if defined(CONFIG_BLK_DEV_BSG) struct bsg_class_device bsg_dev; #endif };
(2)request
// include/linux/blkdev.h struct request { struct list_head queuelist; struct call_single_data csd; struct request_queue *q; unsigned int cmd_flags; enum rq_cmd_type_bits cmd_type; unsigned long atomic_flags; int cpu; /* the following two fields are internal, NEVER access directly */ unsigned int __data_len; /* total data len */ sector_t __sector; /* sector cursor */ struct bio *bio; struct bio *biotail; struct hlist_node hash; /* merge hash */ /* * The rb_node is only used inside the io scheduler, requests * are pruned when moved to the dispatch queue. So let the * completion_data share space with the rb_node. */ union { struct rb_node rb_node; /* sort/lookup */ void *completion_data; }; /* * two pointers are available for the IO schedulers, if they need * more they have to dynamically allocate it. */ void *elevator_private; void *elevator_private2; struct gendisk *rq_disk; unsigned long start_time; /* Number of scatter-gather DMA addr+len pairs after * physical address coalescing is performed. */ unsigned short nr_phys_segments; unsigned short ioprio; int ref_count; void *special; /* opaque pointer available for LLD use */ char *buffer; /* kaddr of the current segment if available */ int tag; int errors; /* * when request is used as a packet command carrier */ unsigned char __cmd[BLK_MAX_CDB]; unsigned char *cmd; unsigned short cmd_len; unsigned int extra_len; /* length of alignment and padding */ unsigned int sense_len; unsigned int resid_len; /* residual count */ void *sense; unsigned long deadline; struct list_head timeout_list; unsigned int timeout; int retries; /* * completion callback. */ rq_end_io_fn *end_io; void *end_io_data; /* for bidi */ struct request *next_rq; };
十二、进程地址空间
内核除了管理本身的内存外,还必须管理用户空间中进程的内存。我们称这个内存为进程地址空间,也就是系统中每个用户空间进程所看到的内存。
1、内存描述符 mm_struct
内核使用内存描述符结构体表示进程的地址空间,该结构包含了和进程地址空间有关的全部信息。内存描述符由 mm_struct 结构体表示,定义在文件 include/linux/sched.h 中,sched.h 包含了头文件 mm_types.h 。
// include/linux/mm_types.h
// Memory descriptor: everything the kernel keeps about one process address
// space. (Quoted kernel source; Chinese comments translated to English.)
struct mm_struct {
	/* linked list of memory areas */
	struct vm_area_struct * mmap;		/* list of VMAs */
	/* red-black tree of the VMAs */
	struct rb_root mm_rb;
	/* most recently used memory area */
	struct vm_area_struct * mmap_cache;	/* last find_vma result */
#ifdef CONFIG_MMU
	/* picks an unused virtual range for a new mapping */
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	/* hook invoked when a mapping is torn down */
	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
#endif
	/* base address of the mmap area */
	unsigned long mmap_base;		/* base of mmap area */
	/* size of the task's virtual address space */
	unsigned long task_size;		/* size of task vm space */
	/* size of the largest hole below free_area_cache */
	unsigned long cached_hole_size;		/* if non-zero, the largest hole below free_area_cache */
	/* first hole of the address space */
	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
	/* page global directory */
	pgd_t * pgd;
	/* number of users sharing this address space */
	atomic_t mm_users;			/* How many users with user space? */
	/* primary reference counter */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	/* number of memory areas */
	int map_count;				/* number of VMAs */
	/* memory-area semaphore */
	struct rw_semaphore mmap_sem;
	/* page table lock */
	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	/* node on the list of all mm_structs */
	struct list_head mmlist;		/* List of maybe swapped mm's.
						 * These are globally strung
						 * together off init_mm.mmlist,
						 * and are protected
						 * by mmlist_lock */

	unsigned long hiwater_rss;		/* High-watermark of RSS usage */
	unsigned long hiwater_vm;		/* High-water virtual memory usage */

	unsigned long total_vm, locked_vm, shared_vm, exec_vm;
	unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
	/* start address of the code section */
	unsigned long start_code;
	/* end address of the code section */
	unsigned long end_code;
	/* start address of the data section */
	unsigned long start_data;
	/* end address of the data section */
	unsigned long end_data;
	/* start address of the heap */
	unsigned long start_brk;
	/* end address of the heap */
	unsigned long brk;
	/* start address of the process stack */
	unsigned long start_stack;
	/* start address of the command-line arguments */
	unsigned long arg_start;
	/* end address of the command-line arguments */
	unsigned long arg_end;
	/* start address of the environment variables */
	unsigned long env_start;
	/* end address of the environment variables */
	unsigned long env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	/* lazy TLB switch mask */
	cpumask_t cpu_vm_mask;

	/* Architecture-specific MM context */
	/* architecture-specific data */
	mm_context_t context;

	/* Swap token stuff */
	/*
	 * Last value of global fault stamp as seen by this process.
	 * In other words, this value gives an indication of how long
	 * it has been since this task got the token.
	 * Look at mm/thrash.c
	 */
	unsigned int faultstamp;
	unsigned int token_priority;
	unsigned int last_interval;

	/* status flags */
	unsigned long flags;			/* Must use atomic bitops to access the bits */

	/* core dump support */
	struct core_state *core_state;		/* coredumping support */
#ifdef CONFIG_AIO
	/* AIO I/O list lock */
	spinlock_t ioctx_lock;
	/* AIO I/O list */
	struct hlist_head ioctx_list;
#endif
#ifdef CONFIG_MM_OWNER
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm.  All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct *owner;
#endif
#ifdef CONFIG_PROC_FS
	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file *exe_file;
	unsigned long num_exe_file_vmas;
#endif
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
};
在进程的进程描述符(在 &lt;linux/sched.h&gt; 中定义的 task_struct 结构体就表示进程描述符)中,mm 域存放着该进程使用的内存描述符,所以 current->mm 便指向当前进程的内存描述符。fork() 函数利用 copy_mm() 函数复制父进程的内存描述符,也就是 current->mm 域给其子进程,而子进程中的 mm_struct 结构体实际是通过文件 kernel/fork.c 中的 allocate_mm() 宏从 mm_cachep slab 缓存中分配得到的。通常,每个进程都有唯一的 mm_struct 结构体,即唯一的进程地址空间。
如果父进程希望和其子进程共享地址空间,可以在调用 clone() 时,设置 CLONE_VM 标志。我们把这样的进程称作线程。回忆第 3 章,是否共享地址空间几乎是进程和 Linux 中所谓的线程间本质上的唯一区别。除此以外,Linux 内核并不区别对待它们,线程对内核来说仅仅是一个共享特定资源的进程而已。
(1)mm_struct 与内核线程
内核线程没有进程地址空间,也没有相关的内存描述符。所以内核线程对应的进程描述符中 mm 域为空。
2、虚拟内存区域
(1)vm_area_struct
内存区域由 vm_area_struct 结构体描述,内存区域在 Linux 内核中也经常称作虚拟内存区域(virtual memory areas, VMAs)。
// include/linux/mm_types.h
// Memory area (VMA) descriptor: one contiguous interval of a process
// address space. (Quoted kernel source; Chinese comments translated.)
struct vm_area_struct {
	/* associated mm_struct */
	struct mm_struct * vm_mm;	/* The address space we belong to. */
	/* start address of the interval */
	unsigned long vm_start;		/* Our start address within vm_mm. */
	/* end address of the interval */
	unsigned long vm_end;		/* The first byte after our end address within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	/* VMA list */
	struct vm_area_struct *vm_next;

	/* access permissions */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	/* flags */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/* this VMA's node in the red-black tree */
	struct rb_node vm_rb;

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap prio tree, or
	 * linkage to the list of like vmas hanging off its node, or
	 * linkage of vma in the address_space->i_mmap_nonlinear list.
	 */
	union {
		struct {
			struct list_head list;
			void *parent;	/* aligns with prio_tree_node parent */
			struct vm_area_struct *head;
		} vm_set;
		struct raw_prio_tree_node prio_tree_node;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	/* anon_vma entries */
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */
	/* anonymous VMA object */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	/* associated operations table */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	/* offset within the mapped file */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units, *not* PAGE_CACHE_SIZE */
	/* mapped file, if any */
	struct file * vm_file;		/* File we map to (can be NULL). */
	/* private data */
	void * vm_private_data;		/* was vm_pte (shared mem) */
	unsigned long vm_truncate_count;/* truncate_count or restart_addr */

#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
};
(2)VMA 标志
vm_flags 可为如下值,其定义在 include/linux/mm.h 文件中
(3)vm_operations_struct
// include/linux/mm.h struct vm_operations_struct { void (*open)(struct vm_area_struct * area); void (*close)(struct vm_area_struct * area); int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); /* notification that a previously read-only page is about to become * writable, if an error is returned it will cause a SIGBUS */ int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); /* called by access_process_vm when get_user_pages() fails, typically * for use by special VMAs that can switch between memory and hardware */ int (*access)(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); #ifdef CONFIG_NUMA /* * set_policy() op must add a reference to any non-NULL @new mempolicy * to hold the policy upon return. Caller should pass NULL @new to * remove a policy and fall back to surrounding context--i.e. do not * install a MPOL_DEFAULT policy, nor the task or system default * mempolicy. */ int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new); /* * get_policy() op must add reference [mpol_get()] to any policy at * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure * in mm/mempolicy.c will do this automatically. * get_policy() must NOT add a ref if the policy at (vma,addr) is not * marked as MPOL_SHARED. vma policies are protected by the mmap_sem. * If no [shared/vma] mempolicy exists at the addr, get_policy() op * must return NULL--i.e., do not "fallback" to task or system default * policy. */ struct mempolicy *(*get_policy)(struct vm_area_struct *vma, unsigned long addr); int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from, const nodemask_t *to, unsigned long flags); #endif };
(4)内存区域的树型结构和内存区域的链表结构
上文讨论过,可以通过内存描述符中的 mmap 和 mm_rb 域之一访问内存区域。这两个域各自独立地指向与内存描述符相关的全体内存区域对象。其实,它们包含完全相同的 vm_area_struct 结构体的指针,仅仅组织方法不同。
mmap 域使用单向链表连接所有的内存区域对象。每一个 vm_area_struct 结构体通过自身的 vm_next 域被连入链表,所有的区域按地址增长的方向排序,mmap 域指向链表中第一个内存区域,链中最后一个结构体的 vm_next 指针指向空。
mm_rb 域使用红黑树连接所有的内存区域对象。mm_rb 域指向红黑树的根节点,地址空间中每一个 vm_area_struct 结构体通过自身的 vm_rb 域连接到树中。
红黑树是一种二叉树,树中的每一个元素称为一个节点,最初的节点称为树根。红黑树的多数节点都有两个子节点:一个左子节点和一个右子节点,不过也有节点只有一个子节点的情况。树末端的节点称为叶子节点,它们没有子节点。红黑树中的所有节点都遵从:左边节点值小于右边节点值;另外每个节点都被配以红色或黑色(要么红要么黑,所以叫做红黑树)。着色的规则为:红节点的子节点必须为黑色,并且树中从任一节点到其叶子的每条路径都必须包含同样数目的黑色节点;注意根节点总为黑色。红黑树的搜索、插入、删除等操作的复杂度都为 O(log(n))。
链表用于需要遍历全部节点的时候,而红 -黑树适用于在地址空间中定位特定内存区域的时候。内核为了内存区域上的各种不同操作都能获得高性能,所以同时使用了这两种数据结构。
(5)实际使用中的内存区域
可以使用 /proc 文件系统和 pmap 工具查看给定进程的内存空间和其中所含的内存区域。我们来看一个非常简单的用户空间程序的例子:
/*
 * Minimal do-nothing program used to demonstrate inspecting a live
 * process's address space with /proc/<pid>/maps and pmap: it simply
 * spins forever so the process stays alive.
 *
 * Fix: the original wrote "int main(int, char **argv)" — an unnamed
 * parameter is invalid in a C function definition (prior to C23), so
 * the example would not compile as C. Name the parameter argc.
 */
int main(int argc, char **argv)
{
	(void)argc;	/* unused: the program takes no arguments */
	(void)argv;

	for (;;) {
		/* busy-wait forever; kill the process to stop it */
	}

	return 0;	/* unreachable */
}
查看 /proc/pid/maps 显示了该进程地址空间中的全部内存区域:
cat /proc/32698/maps # 每行数据格式如下: # 开始-结束 访问权限 偏移 主设备号:次设备号 i节点 文件 00400000-00401000 r-xp 00000000 08:01 3549836 /home/liuqz/learnCPlus/c6/build/c6 00600000-00601000 r--p 00000000 08:01 3549836 /home/liuqz/learnCPlus/c6/build/c6 00601000-00602000 rw-p 00001000 08:01 3549836 /home/liuqz/learnCPlus/c6/build/c6 7f0b474cb000-7f0b4768b000 r-xp 00000000 08:01 77337148 /lib/x86_64-linux-gnu/libc-2.23.so 7f0b4768b000-7f0b4788b000 ---p 001c0000 08:01 77337148 /lib/x86_64-linux-gnu/libc-2.23.so 7f0b4788b000-7f0b4788f000 r--p 001c0000 08:01 77337148 /lib/x86_64-linux-gnu/libc-2.23.so 7f0b4788f000-7f0b47891000 rw-p 001c4000 08:01 77337148 /lib/x86_64-linux-gnu/libc-2.23.so 7f0b47891000-7f0b47895000 rw-p 00000000 00:00 0 7f0b47895000-7f0b478bb000 r-xp 00000000 08:01 77332571 /lib/x86_64-linux-gnu/ld-2.23.so 7f0b47a9c000-7f0b47a9f000 rw-p 00000000 00:00 0 7f0b47aba000-7f0b47abb000 r--p 00025000 08:01 77332571 /lib/x86_64-linux-gnu/ld-2.23.so 7f0b47abb000-7f0b47abc000 rw-p 00026000 08:01 77332571 /lib/x86_64-linux-gnu/ld-2.23.so 7f0b47abc000-7f0b47abd000 rw-p 00000000 00:00 0 7ffd4dcdc000-7ffd4dcfd000 rw-p 00000000 00:00 0 [stack] 7ffd4ddcb000-7ffd4ddce000 r--p 00000000 00:00 0 [vvar] 7ffd4ddce000-7ffd4ddd0000 r-xp 00000000 00:00 0 [vdso] ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]
Linux 内核设计与实现4:https://developer.aliyun.com/article/1597351