Process descriptor (task_struct)
The data structure that describes a process; you can think of it as the set of the process's attributes. Things such as the process state and the process identifier (PID) are all packed into this descriptor, which the kernel defines as struct task_struct.
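As a rough mental model, here is a heavily simplified sketch of the kind of fields task_struct carries. The field names mirror real members of struct task_struct in include/linux/sched.h, but the types and layout below are purely illustrative; the real structure has hundreds of members.

#include <sys/types.h>

/* Illustrative sketch only -- NOT the real struct task_struct. */
struct task_struct_sketch {
    volatile long state;              /* -1 unrunnable, 0 runnable, >0 stopped */
    void *stack;                      /* kernel stack (thread_info lives at its base) */
    pid_t pid;                        /* process identifier */
    pid_t tgid;                       /* thread group id */
    struct task_struct_sketch *parent;   /* parent process */
    struct mm_struct *mm;             /* memory (address space) descriptor */
    struct files_struct *files;       /* open file table */
    struct signal_struct *signal;     /* signal handling state */
};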
Process control block (PCB)
A data structure in the operating-system kernel that records a process's state and bookkeeping information. In Linux, task_struct is what plays the role of the PCB.
Process states
fork()
fork() returns twice, once in the parent and once in the child: in the parent it returns the child's pid, and in the child it returns 0.
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
    pid_t pid;

    /* fork another process */
    pid = fork();
    if (pid < 0) {
        /* error occurred */
        fprintf(stderr, "Fork Failed!\n");
        exit(-1);
    } else if (pid == 0) {
        /* child process */
        printf("This is Child Process!\n");
    } else {
        /* parent process */
        printf("This is Parent Process!\n");
        /* parent will wait for the child to complete */
        wait(NULL);
        printf("Child Complete!\n");
    }
    return 0;
}
From user space, fork traps into the kernel through a system call (historically the int 0x80 software interrupt on 32-bit x86), and the kernel-side system call implementation does the actual process creation.
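Before looking at the kernel-side stubs below, here is a minimal user-space sketch that issues the raw system call directly instead of going through the glibc fork() wrapper. It assumes an architecture (such as x86) that still exposes __NR_fork; on some architectures fork is only provided via clone.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    /* Equivalent to fork(): traps into the kernel, which ends up in do_fork(). */
    long pid = syscall(SYS_fork);
    if (pid < 0) {
        perror("SYS_fork");
        return 1;
    }
    if (pid == 0) {
        printf("child: SYS_fork returned 0\n");
        _exit(0);
    }
    printf("parent: SYS_fork returned child pid %ld\n", pid);
    wait(NULL);
    return 0;
}

Either way, the request lands in the kernel stubs shown next.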
//fork
#ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
{
#ifdef CONFIG_MMU
    return do_fork(SIGCHLD, 0, 0, NULL, NULL);
#else
    /* can not support in nommu mode */
    return -EINVAL;
#endif
}
#endif

//vfork
#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{
    return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
                   0, NULL, NULL);
}
#endif

//clone
#ifdef __ARCH_WANT_SYS_CLONE
#ifdef CONFIG_CLONE_BACKWARDS
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
                int __user *, parent_tidptr,
                int, tls_val,
                int __user *, child_tidptr)
#elif defined(CONFIG_CLONE_BACKWARDS2)
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
                int __user *, parent_tidptr,
                int __user *, child_tidptr,
                int, tls_val)
#elif defined(CONFIG_CLONE_BACKWARDS3)
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
                int, stack_size,
                int __user *, parent_tidptr,
                int __user *, child_tidptr,
                int, tls_val)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
                int __user *, parent_tidptr,
                int __user *, child_tidptr,
                int, tls_val)
#endif
{
    return do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr);
}
#endif
From the code above it is clear that whether a process is created with fork, vfork, or clone, everything ultimately goes through do_fork(). Next we can trace into the code of do_fork() (partial code, trimmed by the author):
long do_fork(unsigned long clone_flags,
             unsigned long stack_start,
             unsigned long stack_size,
             int __user *parent_tidptr,
             int __user *child_tidptr)
{
    // Pointer to the new process descriptor
    struct task_struct *p;
    int trace = 0;     // ptrace handling elided in this excerpt
    long nr;
    //……
    // Duplicate the process descriptor; copy_process() returns a task_struct pointer.
    p = copy_process(clone_flags, stack_start, stack_size,
                     child_tidptr, NULL, trace);
    if (!IS_ERR(p)) {
        struct completion vfork;
        struct pid *pid;

        trace_sched_process_fork(current, p);
        // Get the pid from the newly created process descriptor
        pid = get_task_pid(p, PIDTYPE_PID);
        nr = pid_vnr(pid);

        if (clone_flags & CLONE_PARENT_SETTID)
            put_user(nr, parent_tidptr);

        // For vfork(), set up the vfork completion the parent will wait on.
        if (clone_flags & CLONE_VFORK) {
            p->vfork_done = &vfork;
            init_completion(&vfork);
            get_task_struct(p);
        }

        // Put the child on the scheduler's run queue so it can be picked to run.
        wake_up_new_task(p);

        // fork is complete; the child is about to start running.
        if (unlikely(trace))
            ptrace_event_pid(trace, pid);

        // For vfork, block the parent until the child exits or execs.
        if (clone_flags & CLONE_VFORK) {
            if (!wait_for_vfork_done(p, &vfork))
                ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
        }

        put_pid(pid);
    } else {
        nr = PTR_ERR(p);
    }
    return nr;
}
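The CLONE_VFORK handling above is what gives vfork() its user-visible behaviour: the parent blocks on the vfork completion until the child exits or execs. Here is a minimal user-space sketch of that semantics (standard POSIX calls only; per the vfork contract the child limits itself to write() and _exit()):

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    pid_t pid = vfork();
    if (pid < 0) {
        perror("vfork");
        exit(1);
    }
    if (pid == 0) {
        /* Child runs first, borrowing the parent's address space;
         * it must only exec or _exit(). */
        const char msg[] = "child: running while parent is blocked\n";
        write(STDOUT_FILENO, msg, sizeof(msg) - 1);
        _exit(0);
    }
    /* The parent only gets here once the kernel's wait_for_vfork_done()
     * has completed, i.e. after the child has exited (or exec'd). */
    printf("parent: resumed after child finished\n");
    wait(NULL);
    return 0;
}

Back in the kernel, the bulk of do_fork()'s work happens in copy_process(), shown next (again trimmed):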
static struct task_struct *copy_process(unsigned long clone_flags,
                                        unsigned long stack_start,
                                        unsigned long stack_size,
                                        int __user *child_tidptr,
                                        struct pid *pid,
                                        int trace)
{
    int retval;
    // Pointer to the new process descriptor
    struct task_struct *p;
    //……
    // Duplicate the current task_struct
    p = dup_task_struct(current);
    //……
    // Initialize the task's rt-mutex state
    rt_mutex_init_task(p);

    // Check whether this user already has more processes than its resource limit allows
    if (atomic_read(&p->real_cred->user->processes) >=
            task_rlimit(p, RLIMIT_NPROC)) {
        if (p->real_cred->user != INIT_USER &&
            !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
            goto bad_fork_free;
    }
    //……
    // Check against the global max_threads limit, which is derived from the amount of memory
    if (nr_threads >= max_threads)
        goto bad_fork_cleanup_count;
    //……
    // Initialize the spinlock
    spin_lock_init(&p->alloc_lock);
    // Initialize pending signals
    init_sigpending(&p->pending);
    // Initialize POSIX CPU timers
    posix_cpu_timers_init(p);
    //……
    // Set up the scheduler state for the new task and mark it TASK_RUNNING
    retval = sched_fork(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_policy;

    retval = perf_event_init_task(p);
    if (retval)
        goto bad_fork_cleanup_policy;
    retval = audit_alloc(p);
    if (retval)
        goto bad_fork_cleanup_perf;

    /* copy all the process information: filesystem, signal handlers,
       signals, memory management, namespaces, I/O context, ... */
    shm_init_task(p);
    retval = copy_semundo(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_audit;
    retval = copy_files(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_semundo;
    retval = copy_fs(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_files;
    retval = copy_sighand(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_fs;
    retval = copy_signal(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_sighand;
    retval = copy_mm(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_signal;
    retval = copy_namespaces(clone_flags, p);
    if (retval)
        goto bad_fork_cleanup_mm;
    retval = copy_io(clone_flags, p);

    // Set up the child's kernel stack / thread context
    retval = copy_thread(clone_flags, stack_start, stack_size, p);

    // Allocate a new pid for the new process
    if (pid != &init_struct_pid) {
        retval = -ENOMEM;
        pid = alloc_pid(p->nsproxy->pid_ns_for_children);
        if (!pid)
            goto bad_fork_cleanup_io;
    }

    // Record the child's pid
    p->pid = pid_nr(pid);
    //……
    // Return the new task_struct
    return p;
    //…… (error-handling labels bad_fork_* elided)
}
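Whether each copy_*() call above shares or duplicates a resource is decided by the clone flags. The sketch below uses the glibc clone() wrapper with CLONE_FILES, in which case copy_files() shares the parent's file table instead of copying it, so a close() in the child is visible to the parent. The flag names are the kernel's; the rest of the example is illustrative.

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static int child_fn(void *arg)
{
    (void)arg;
    /* Because of CLONE_FILES the child and parent share one file table,
     * so this close() also closes fd 1 for the parent. */
    close(STDOUT_FILENO);
    return 0;
}

int main(void)
{
    enum { STACK_SIZE = 1024 * 1024 };
    char *stack = malloc(STACK_SIZE);
    if (!stack)
        return 1;

    /* clone() takes the top of the child stack on x86 (stacks grow downward). */
    pid_t pid = clone(child_fn, stack + STACK_SIZE,
                      CLONE_FILES | SIGCHLD, NULL);
    if (pid < 0) {
        perror("clone");
        return 1;
    }

    waitpid(pid, NULL, 0);
    /* This write fails: stdout was closed through the shared file table. */
    if (write(STDOUT_FILENO, "still open?\n", 12) < 0)
        perror("write after child closed fd 1");
    free(stack);
    return 0;
}

copy_process() itself starts by duplicating the parent's task_struct with dup_task_struct():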
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
    struct task_struct *tsk;
    struct thread_info *ti;
    int node = tsk_fork_get_node(orig);
    int err;

    // Allocate a task_struct on the chosen node
    tsk = alloc_task_struct_node(node);
    if (!tsk)
        return NULL;

    // Allocate a thread_info, which sits at the bottom of the process's kernel stack
    ti = alloc_thread_info_node(tsk, node);
    if (!ti)
        goto free_tsk;

    // Point the new task at its own kernel stack
    tsk->stack = ti;
    //……
    return tsk;
}
The kernel stack and the thread_info share a single allocation, described by thread_union:

union thread_union {
    struct thread_info thread_info;
    unsigned long stack[THREAD_SIZE/sizeof(long)];
};
So once dup_task_struct() has run, the child's task_struct is identical to the parent's except for tsk->stack, which points at the child's own, newly allocated kernel stack.
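Because thread_info sits at the bottom of a THREAD_SIZE-aligned thread_union, kernels of this generation could recover the current thread_info simply by masking the stack pointer. The following stand-alone sketch imitates that trick in user space; THREAD_SIZE, fake_thread_info and the whole setup are illustrative, not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE 8192   /* typical value on 32-bit x86: two pages */

struct fake_thread_info { int cpu; };   /* stand-in for struct thread_info */

int main(void)
{
    /* Stand-in for the kernel's THREAD_SIZE-aligned thread_union allocation. */
    unsigned char *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
    if (!stack)
        return 1;

    struct fake_thread_info *ti = (struct fake_thread_info *)stack;  /* at the stack base */
    ti->cpu = 3;

    /* Pretend this is the current stack pointer, somewhere inside the stack. */
    uintptr_t sp = (uintptr_t)stack + THREAD_SIZE - 128;

    /* Masking off the low bits recovers the thread_info, old-x86 style. */
    struct fake_thread_info *found =
        (struct fake_thread_info *)(sp & ~((uintptr_t)THREAD_SIZE - 1));

    printf("recovered thread_info at %p, cpu=%d\n", (void *)found, found->cpu);
    free(stack);
    return 0;
}

Next, sched_fork(), called from copy_process(), prepares the scheduler state of the new task: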
int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
    unsigned long flags;
    int cpu = get_cpu();

    __sched_fork(clone_flags, p);
    // Mark the child TASK_RUNNING
    p->state = TASK_RUNNING;
    //……
    // Assign the child to a CPU
    set_task_cpu(p, cpu);

    put_cpu();
    return 0;
}
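From user space, a task's scheduling state shows up as the third field of /proc/<pid>/stat (R running or runnable, S interruptible sleep, D uninterruptible sleep, Z zombie, T stopped). Here is a small sketch that reads it for the current process; the parse is simplified and assumes the command name contains no spaces.

#include <stdio.h>

int main(void)
{
    /* Field 3 of /proc/self/stat is the state letter (R, S, D, Z, T, ...). */
    FILE *f = fopen("/proc/self/stat", "r");
    if (!f) {
        perror("fopen");
        return 1;
    }

    int pid;
    char comm[64], state;
    if (fscanf(f, "%d %63s %c", &pid, comm, &state) == 3)
        printf("pid=%d comm=%s state=%c\n", pid, comm, state);
    fclose(f);
    return 0;
}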
Finally, copy_thread() (the 32-bit x86 version is shown here) fills in the child's saved register state and decides where it will start executing:

int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p)
{
    // The child's saved register frame (pt_regs) at the top of its kernel stack
    struct pt_regs *childregs = task_pt_regs(p);
    struct task_struct *tsk;
    int err;

    p->thread.sp = (unsigned long) childregs;
    p->thread.sp0 = (unsigned long) (childregs + 1);
    memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

    if (unlikely(p->flags & PF_KTHREAD)) {
        // Kernel thread
        memset(childregs, 0, sizeof(struct pt_regs));
        p->thread.ip = (unsigned long) ret_from_kernel_thread;
        task_user_gs(p) = __KERNEL_STACK_CANARY;
        childregs->ds = __USER_DS;
        childregs->es = __USER_DS;
        childregs->fs = __KERNEL_PERCPU;
        childregs->bx = sp;     /* function */
        childregs->bp = arg;
        childregs->orig_ax = -1;
        childregs->cs = __KERNEL_CS | get_kernel_rpl();
        childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
        p->thread.io_bitmap_ptr = NULL;
        return 0;
    }

    // Copy the parent's current registers to the child
    *childregs = *current_pt_regs();
    // Zero the child's eax, which is why fork() returns 0 in the child
    childregs->ax = 0;
    if (sp)
        childregs->sp = sp;

    // Set the child's ip to ret_from_fork, so that is where the child starts executing
    p->thread.ip = (unsigned long) ret_from_fork;
    //……
    return err;
}
This copy_thread() code answers two rather important questions:

1. childregs->ax = 0; — the child's eax is set to 0, which is why fork() returns 0 in the child.
2. p->thread.ip = (unsigned long) ret_from_fork; — the child's instruction pointer is set to the start of ret_from_fork, so when the new process is first scheduled it begins executing at ret_from_fork.
Copyright notice: this is an original post by the author and may not be reproduced without permission. (Source: http://blog.luoyuanhang.com)
Original article: http://blog.csdn.net/luoyhang003/article/details/47125787