The Linux process control block: task_struct

Posted: 2016-06-01 22:52:07


In Linux, the process control block (PCB) is implemented by the task_struct data structure, defined in include/linux/sched.h.
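
Before diving into the full definition, here is a minimal sketch (not part of the original article) of how kernel code typically reaches these fields: the `current` macro points at the task_struct of the running task, and for_each_process() walks the global task list that is linked through the `tasks` member. It assumes a kernel of roughly the same vintage as the listing below, where these declarations are all visible through <linux/sched.h>; the module name taskinfo is made up for illustration.

/*
 * Minimal sketch, assuming a ~4.x kernel where task_struct, current and
 * for_each_process() are available via <linux/sched.h>. Purely illustrative.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>    /* task_struct, current, for_each_process() */

static int __init taskinfo_init(void)
{
    struct task_struct *p;

    /* `current` is the task_struct of whoever is executing this code. */
    pr_info("loaded by pid=%d comm=%s\n", current->pid, current->comm);

    /* Walk every process via the list linked through task_struct.tasks. */
    rcu_read_lock();
    for_each_process(p)
        pr_info("pid=%d tgid=%d state=%ld comm=%s\n",
                p->pid, p->tgid, p->state, p->comm);
    rcu_read_unlock();

    return 0;
}

static void __exit taskinfo_exit(void) { }

module_init(taskinfo_init);
module_exit(taskinfo_exit);
MODULE_LICENSE("GPL");

The fields it touches (pid, tgid, state, comm, tasks) all appear in the structure below.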

I have to say, it is really, really long... ↓

struct task_struct {
    volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
    void *stack;
    atomic_t usage;
    unsigned int flags; /* per process flags, defined below */
    unsigned int ptrace;

#ifdef CONFIG_SMP
    struct llist_node wake_entry;
    int on_cpu;
    unsigned int wakee_flips;
    unsigned long wakee_flip_decay_ts;
    struct task_struct *last_wakee;

    int wake_cpu;
#endif
    int on_rq;

    int prio, static_prio, normal_prio;
    unsigned int rt_priority;
    const struct sched_class *sched_class;
    struct sched_entity se;
    struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
    struct task_group *sched_task_group;
#endif
    struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
    /* list of struct preempt_notifier: */
    struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
    unsigned int btrace_seq;
#endif

    unsigned int policy;
    int nr_cpus_allowed;
    cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
    int rcu_read_lock_nesting;
    union rcu_special rcu_read_unlock_special;
    struct list_head rcu_node_entry;
    struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
    unsigned long rcu_tasks_nvcsw;
    bool rcu_tasks_holdout;
    struct list_head rcu_tasks_holdout_list;
    int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_SCHED_INFO
    struct sched_info sched_info;
#endif

    struct list_head tasks;
#ifdef CONFIG_SMP
    struct plist_node pushable_tasks;
    struct rb_node pushable_dl_tasks;
#endif

    struct mm_struct *mm, *active_mm;
    /* per-thread vma caching */
    u32 vmacache_seqnum;
    struct vm_area_struct *vmacache[VMACACHE_SIZE];
#if defined(SPLIT_RSS_COUNTING)
    struct task_rss_stat    rss_stat;
#endif
/* task state */
    int exit_state;
    int exit_code, exit_signal;
    int pdeath_signal;  /*  The signal sent when the parent dies  */
    unsigned long jobctl;   /* JOBCTL_*, siglock protected */

    /* Used for emulating ABI behavior of previous Linux versions */
    unsigned int personality;

    /* scheduler bits, serialized by scheduler locks */
    unsigned sched_reset_on_fork:1;
    unsigned sched_contributes_to_load:1;
    unsigned sched_migrated:1;
    unsigned sched_remote_wakeup:1;
    unsigned :0; /* force alignment to the next boundary */

    /* unserialized, strictly 'current' */
    unsigned in_execve:1; /* bit to tell LSMs we're in execve */
    unsigned in_iowait:1;
#ifdef CONFIG_MEMCG
    unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
    unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
    unsigned brk_randomized:1;
#endif

    unsigned long atomic_flags; /* Flags needing atomic access. */

    struct restart_block restart_block;

    pid_t pid;
    pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
    /* Canary value for the -fstack-protector gcc feature */
    unsigned long stack_canary;
#endif
    /*
     * pointers to (original) parent process, youngest child, younger sibling,
     * older sibling, respectively.  (p->father can be replaced with
     * p->real_parent->pid)
     */
    struct task_struct __rcu *real_parent; /* real parent process */
    struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
    /*
     * children/sibling forms the list of my natural children
     */
    struct list_head children;  /* list of my children */
    struct list_head sibling;   /* linkage in my parent's children list */
    struct task_struct *group_leader;   /* threadgroup leader */

    /*
     * ptraced is the list of tasks this task is using ptrace on.
     * This includes both natural children and PTRACE_ATTACH targets.
     * p->ptrace_entry is p's link on the p->parent->ptraced list.
     */
    struct list_head ptraced;
    struct list_head ptrace_entry;

    /* PID/PID hash table linkage. */
    struct pid_link pids[PIDTYPE_MAX];
    struct list_head thread_group;
    struct list_head thread_node;

    struct completion *vfork_done;      /* for vfork() */
    int __user *set_child_tid;      /* CLONE_CHILD_SETTID */
    int __user *clear_child_tid;        /* CLONE_CHILD_CLEARTID */

    cputime_t utime, stime, utimescaled, stimescaled;
    cputime_t gtime;
    struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
    seqcount_t vtime_seqcount;
    unsigned long long vtime_snap;
    enum {
        /* Task is sleeping or running in a CPU with VTIME inactive */
        VTIME_INACTIVE = 0,
        /* Task runs in userspace in a CPU with VTIME active */
        VTIME_USER,
        /* Task runs in kernelspace in a CPU with VTIME active */
        VTIME_SYS,
    } vtime_snap_whence;
#endif

#ifdef CONFIG_NO_HZ_FULL
    atomic_t tick_dep_mask;
#endif
    unsigned long nvcsw, nivcsw; /* context switch counts */
    u64 start_time;     /* monotonic time in nsec */
    u64 real_start_time;    /* boot based time in nsec */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
    unsigned long min_flt, maj_flt;

    struct task_cputime cputime_expires;
    struct list_head cpu_timers[3];

/* process credentials */
    const struct cred __rcu *real_cred; /* objective and real subjective task
                     * credentials (COW) */
    const struct cred __rcu *cred;  /* effective (overridable) subjective task
                     * credentials (COW) */
    char comm[TASK_COMM_LEN]; /* executable name excluding path
                     - access with [gs]et_task_comm (which lock
                       it with task_lock())
                     - initialized normally by setup_new_exec */
/* file system info */
    struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
    struct sysv_sem sysvsem;
    struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
    unsigned long last_switch_count;
#endif
/* filesystem information */
    struct fs_struct *fs;
/* open file information */
    struct files_struct *files;
/* namespaces */
    struct nsproxy *nsproxy;
/* signal handlers */
    struct signal_struct *signal;
    struct sighand_struct *sighand;

    sigset_t blocked, real_blocked;
    sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
    struct sigpending pending;

    unsigned long sas_ss_sp;
    size_t sas_ss_size;
    unsigned sas_ss_flags;

    struct callback_head *task_works;

    struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
    kuid_t loginuid;
    unsigned int sessionid;
#endif
    struct seccomp seccomp;

/* Thread group tracking */
    u32 parent_exec_id;
    u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
    spinlock_t alloc_lock;

    /* Protection of the PI data structures: */
    raw_spinlock_t pi_lock;

    struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
    /* PI waiters blocked on a rt_mutex held by this task */
    struct rb_root pi_waiters;
    struct rb_node *pi_waiters_leftmost;
    /* Deadlock detection and priority inheritance handling */
    struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
    /* mutex deadlock detection */
    struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
    unsigned int irq_events;
    unsigned long hardirq_enable_ip;
    unsigned long hardirq_disable_ip;
    unsigned int hardirq_enable_event;
    unsigned int hardirq_disable_event;
    int hardirqs_enabled;
    int hardirq_context;
    unsigned long softirq_disable_ip;
    unsigned long softirq_enable_ip;
    unsigned int softirq_disable_event;
    unsigned int softirq_enable_event;
    int softirqs_enabled;
    int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
    u64 curr_chain_key;
    int lockdep_depth;
    unsigned int lockdep_recursion;
    struct held_lock held_locks[MAX_LOCK_DEPTH];
    gfp_t lockdep_reclaim_gfp;
#endif
#ifdef CONFIG_UBSAN
    unsigned int in_ubsan;
#endif

/* journalling filesystem info */
    void *journal_info;

/* stacked block device info */
    struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
    struct blk_plug *plug;
#endif

/* VM state */
    struct reclaim_state *reclaim_state;

    struct backing_dev_info *backing_dev_info;

    struct io_context *io_context;

    unsigned long ptrace_message;
    siginfo_t *last_siginfo; /* For ptrace use.  */
    struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
    u64 acct_rss_mem1;  /* accumulated rss usage */
    u64 acct_vm_mem1;   /* accumulated virtual memory usage */
    cputime_t acct_timexpd; /* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
    nodemask_t mems_allowed;    /* Protected by alloc_lock */
    seqcount_t mems_allowed_seq;    /* Seqence no to catch updates */
    int cpuset_mem_spread_rotor;
    int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
    /* Control Group info protected by css_set_lock */
    struct css_set __rcu *cgroups;
    /* cg_list protected by css_set_lock and tsk->alloc_lock */
    struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
    struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
    struct compat_robust_list_head __user *compat_robust_list;
#endif
    struct list_head pi_state_list;
    struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
    struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
    struct mutex perf_event_mutex;
    struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
    unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
    struct mempolicy *mempolicy;    /* Protected by alloc_lock */
    short il_next;
    short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
    int numa_scan_seq;
    unsigned int numa_scan_period;
    unsigned int numa_scan_period_max;
    int numa_preferred_nid;
    unsigned long numa_migrate_retry;
    u64 node_stamp;         /* migration stamp  */
    u64 last_task_numa_placement;
    u64 last_sum_exec_runtime;
    struct callback_head numa_work;

    struct list_head numa_entry;
    struct numa_group *numa_group;

    /*
     * numa_faults is an array split into four regions:
     * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
     * in this precise order.
     *
     * faults_memory: Exponential decaying average of faults on a per-node
     * basis. Scheduling placement decisions are made based on these
     * counts. The values remain static for the duration of a PTE scan.
     * faults_cpu: Track the nodes the process was running on when a NUMA
     * hinting fault was incurred.
     * faults_memory_buffer and faults_cpu_buffer: Record faults per node
     * during the current scan window. When the scan completes, the counts
     * in faults_memory and faults_cpu decay and these values are copied.
     */
    unsigned long *numa_faults;
    unsigned long total_numa_faults;

    /*
     * numa_faults_locality tracks if faults recorded during the last
     * scan window were remote/local or failed to migrate. The task scan
     * period is adapted based on the locality of the faults with different
     * weights depending on whether they were shared or private faults
     */
    unsigned long numa_faults_locality[3];

    unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
    struct tlbflush_unmap_batch tlb_ubc;
#endif

    struct rcu_head rcu;

    /*
     * cache last used pipe for splice
     */
    struct pipe_inode_info *splice_pipe;

    struct page_frag task_frag;

#ifdef  CONFIG_TASK_DELAY_ACCT
    struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
    int make_it_fail;
#endif
    /*
     * when (nr_dirtied >= nr_dirtied_pause), it's time to call
     * balance_dirty_pages() for some dirty throttling pause
     */
    int nr_dirtied;
    int nr_dirtied_pause;
    unsigned long dirty_paused_when; /* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
    int latency_record_count;
    struct latency_record latency_record[LT_SAVECOUNT];
#endif
    /*
     * time slack values; these are used to round up poll() and
     * select() etc timeout values. These are in nanoseconds.
     */
    u64 timer_slack_ns;
    u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
    unsigned int kasan_depth;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
    /* Index of current stored address in ret_stack */
    int curr_ret_stack;
    /* Stack of return addresses for return function tracing */
    struct ftrace_ret_stack *ret_stack;
    /* time stamp for last schedule */
    unsigned long long ftrace_timestamp;
    /*
     * Number of functions that haven't been traced
     * because of depth overrun.
     */
    atomic_t trace_overrun;
    /* Pause for the tracing */
    atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
    /* state flags for use by tracers */
    unsigned long trace;
    /* bitmask and counter of trace recursion */
    unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_KCOV
    /* Coverage collection mode enabled for this task (0 if disabled). */
    enum kcov_mode kcov_mode;
    /* Size of the kcov_area. */
    unsigned    kcov_size;
    /* Buffer for coverage collection. */
    void        *kcov_area;
    /* kcov desciptor wired with this task or NULL. */
    struct kcov *kcov;
#endif
#ifdef CONFIG_MEMCG
    struct mem_cgroup *memcg_in_oom;
    gfp_t memcg_oom_gfp_mask;
    int memcg_oom_order;

    /* number of pages to reclaim on returning to userland */
    unsigned int memcg_nr_pages_over_high;
#endif
#ifdef CONFIG_UPROBES
    struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
    unsigned int    sequential_io;
    unsigned int    sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
    unsigned long   task_state_change;
#endif
    int pagefault_disabled;
#ifdef CONFIG_MMU
    struct task_struct *oom_reaper_list;
#endif
/* CPU-specific state of this task */
    struct thread_struct thread;
/*
 * WARNING: on x86, 'thread_struct' contains a variable-sized
 * structure.  It *MUST* be at the end of 'task_struct'.
 *
 * Do not put anything below here!
 */
};
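
A few of the fields above deserve a second look: real_parent, children and sibling are what tie each task into the process tree. The fragment below is a hedged sketch (not from the original article) of how that linkage is typically walked: a parent's children list head is iterated with list_for_each_entry(), using each child's own sibling node as the link, exactly as the comments in the structure describe. It assumes the caller already holds the tasklist_lock read lock (or otherwise knows the list cannot change underneath it), and the helper name print_children is invented for illustration.

#include <linux/kernel.h>
#include <linux/sched.h>    /* task_struct */
#include <linux/list.h>     /* list_for_each_entry() */

/*
 * Hypothetical helper: list every direct child of @parent.
 * @parent->children is the list head; each child is linked in through its
 * own `sibling` member. Caller must hold read_lock(&tasklist_lock) so the
 * list stays stable while we walk it.
 */
static void print_children(struct task_struct *parent)
{
    struct task_struct *child;

    list_for_each_entry(child, &parent->children, sibling)
        pr_info("child of %d: pid=%d comm=%s\n",
                parent->pid, child->pid, child->comm);
}

Called as print_children(current), this prints the tasks whose real_parent pointer refers back to the current task.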


Original article: http://www.cnblogs.com/qiaopei/p/5551141.html
