/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */
#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
static int ptrace_trapping_sleep_fn(void *flags)
{
	schedule();
	return 0;
}
/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}
/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	spin_lock(&child->sighand->siglock);

	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}
/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}
static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	spin_lock_irq(&task->sighand->siglock);
	if (__fatal_signal_pending(task))
		wake_up_state(task, __TASK_TRACED);
	else
		task->state = TASK_TRACED;
	spin_unlock_irq(&task->sighand->siglock);
}
/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}
static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
	else
		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}
/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	int dumpable = 0;

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */
	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	tcred = __task_cred(task);
	if (uid_eq(cred->uid, tcred->euid) &&
	    uid_eq(cred->uid, tcred->suid) &&
	    uid_eq(cred->uid, tcred->uid)  &&
	    gid_eq(cred->gid, tcred->egid) &&
	    gid_eq(cred->gid, tcred->sgid) &&
	    gid_eq(cred->gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	rcu_read_lock();
	if (dumpable != SUID_DUMP_USER &&
	    !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	return security_ptrace_access_check(task, mode);
}
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;

	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);

	return !err;
}
static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
		flags |= PT_PTRACE_CAP;
	task->ptrace = flags;

	__ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
			    ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}
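/*
 * Example (userspace sketch, not part of this file): how the two attach
 * flavours above look from a tracer.  PTRACE_ATTACH forces a stop that
 * must be reaped with waitpid(); PTRACE_SEIZE leaves the tracee running
 * and takes ptrace option bits in the data argument.  The pid is assumed
 * to name a process the caller is allowed to trace.
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	static int attach_stopping(pid_t pid)
 *	{
 *		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
 *			return -1;
 *		// ATTACH sends SIGSTOP; consume the resulting stop.
 *		return waitpid(pid, NULL, 0) == -1 ? -1 : 0;
 *	}
 *
 *	static int attach_running(pid_t pid)
 *	{
 *		// SEIZE does not trap the tracee; options ride in "data".
 *		return (int) ptrace(PTRACE_SEIZE, pid, NULL,
 *				    (void *) PTRACE_O_TRACEEXIT);
 *	}
 */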
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
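/*
 * Example (userspace sketch, not part of this file): the canonical
 * PTRACE_TRACEME pattern served by the helper above.  The child requests
 * tracing by its parent and execs; the kernel stops it with SIGTRAP before
 * the new program runs, giving the parent a window to set things up.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		pid_t pid = fork();
 *
 *		if (pid == 0) {
 *			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *			execlp("ls", "ls", (char *) NULL);
 *			_exit(127);			// exec failed
 *		}
 *		waitpid(pid, NULL, 0);		// child stopped at exec
 *		ptrace(PTRACE_CONT, pid, NULL, NULL);
 *		waitpid(pid, NULL, 0);		// reap the exit
 *		return 0;
 *	}
 */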
/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;

	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);

	return ret;
}
/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}
static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	bool dead = false;

	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/*
	 * This child can be already killed. Make sure de_thread() or
	 * our sub-thread doing do_wait() didn't do release_task() yet.
	 */
	if (child->ptrace) {
		child->exit_code = data;
		dead = __ptrace_detach(current, child);
	}
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);
	if (unlikely(dead))
		release_task(child);

	return 0;
}
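/*
 * Example (userspace sketch, not part of this file): the data argument of
 * PTRACE_DETACH carries an optional signal, which is what the
 * valid_signal() check above vets.  The tracee must currently be in a
 * ptrace-stop for detach to succeed.
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *
 *	static int detach(pid_t pid, int sig)
 *	{
 *		// sig == 0 resumes the tracee silently; a nonzero value
 *		// is delivered to it as it resumes.
 *		return (int) ptrace(PTRACE_DETACH, pid, NULL,
 *				    (void *)(long) sig);
 *	}
 */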
/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct task_struct *p, *n;
	LIST_HEAD(ptrace_dead);

	if (likely(list_empty(&tracer->ptraced)))
		return;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, &ptrace_dead);
	}

	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&tracer->ptraced));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	write_lock_irq(&tasklist_lock);
}
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
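/*
 * Example (userspace sketch, not part of this file): ptrace_readdata() and
 * ptrace_writedata() above chunk through access_process_vm(); a tracer
 * restricted to the classic requests makes the same transfer one word at a
 * time.  A minimal reader:
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <string.h>
 *	#include <errno.h>
 *
 *	static int read_tracee(pid_t pid, unsigned long src,
 *			       char *dst, size_t len)
 *	{
 *		while (len > 0) {
 *			size_t n = len < sizeof(long) ? len : sizeof(long);
 *			long word;
 *
 *			errno = 0;	// the word comes back in-band
 *			word = ptrace(PTRACE_PEEKDATA, pid,
 *				      (void *) src, NULL);
 *			if (word == -1 && errno != 0)
 *				return -1;
 *			memcpy(dst, &word, n);
 *			src += n;
 *			dst += n;
 *			len -= n;
 *		}
 *		return 0;
 *	}
 */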
static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}
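/*
 * Example (userspace sketch, not part of this file): the option bits
 * replaced wholesale above are set from the tracer while the tracee is
 * stopped, typically right after the first stop is observed:
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *
 *	static int set_options(pid_t pid)
 *	{
 *		// Tag syscall stops (bit 7 of the stop signal) and report
 *		// execve()s as PTRACE_EVENT_EXEC stops.
 *		return (int) ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *				    (void *)(PTRACE_O_TRACESYSGOOD |
 *					     PTRACE_O_TRACEEXEC));
 *	}
 */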
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}
static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}
static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		siginfo_t info;
		s32 off = arg.off + i;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (off >= 0) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(is_compat_task())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}
		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}
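/*
 * Example (userspace sketch, not part of this file): driving the
 * PTRACE_PEEKSIGINFO loop above.  addr points at the argument block and
 * data at the output array; glibc exposes the argument struct as
 * struct __ptrace_peeksiginfo_args (field names assumed from that header).
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <signal.h>
 *
 *	static long peek_pending(pid_t pid, siginfo_t *out, int nr)
 *	{
 *		struct __ptrace_peeksiginfo_args args = {
 *			.off	= 0,	// start of the queue
 *			.flags	= 0,	// per-thread queue, not shared
 *			.nr	= nr,
 *		};
 *
 *		// Returns how many siginfo entries were copied, without
 *		// dequeueing any signals.
 *		return ptrace(PTRACE_PEEKSIGINFO, pid, &args, out);
 *	}
 */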
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU_SINGLESTEP
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif
static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}
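/*
 * Example (userspace sketch, not part of this file): PTRACE_SYSCALL arms
 * TIF_SYSCALL_TRACE as above, so the tracee traps twice per system call
 * (entry and exit).  A strace-like loop, starting from a tracee that is
 * already in ptrace-stop:
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	static void trace_syscalls(pid_t pid)
 *	{
 *		int status;
 *
 *		while (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) == 0) {
 *			if (waitpid(pid, &status, 0) == -1 ||
 *			    !WIFSTOPPED(status))
 *				break;	// tracee exited or was killed
 *			// entry stop, then exit stop: inspect registers
 *			// here, e.g. via PTRACE_GETREGSET below.
 *		}
 *	}
 */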
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}
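/*
 * Example (userspace sketch, not part of this file): from the tracer's
 * side, the regset type goes in addr as an NT_* ELF note number and data
 * points at a struct iovec; the kernel clamps iov_len as above and reports
 * back how much it filled in.  The register struct is per-architecture
 * (x86-64 assumed here).
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <sys/user.h>
 *	#include <elf.h>
 *
 *	static int get_gp_regs(pid_t pid, struct user_regs_struct *regs)
 *	{
 *		struct iovec iov = {
 *			.iov_base = regs,
 *			.iov_len  = sizeof(*regs),
 *		};
 *
 *		if (ptrace(PTRACE_GETREGSET, pid,
 *			   (void *) NT_PRSTATUS, &iov) == -1)
 *			return -1;
 *		// iov.iov_len now holds the byte count actually written.
 *		return 0;
 *	}
 */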
/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif
int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per-se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif
	default:
		break;
	}

	return ret;
}
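/*
 * Example (userspace sketch, not part of this file): PTRACE_INTERRUPT and
 * PTRACE_LISTEN only apply to seized tracees, matching the "seized" checks
 * above.  A tracer can trap a running tracee, then park it with LISTEN so
 * that the next async event re-traps it instead of holding it stopped:
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	static int observe(pid_t pid)
 *	{
 *		int status;
 *
 *		if (ptrace(PTRACE_SEIZE, pid, NULL, NULL) == -1)
 *			return -1;
 *		ptrace(PTRACE_INTERRUPT, pid, NULL, NULL);
 *		waitpid(pid, &status, 0);	// PTRACE_EVENT_STOP trap
 *		ptrace(PTRACE_LISTEN, pid, NULL, NULL);
 *		waitpid(pid, &status, 0);	// re-trap on the next event
 *		return 0;
 *	}
 */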
static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}
#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif
SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
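/*
 * Example (userspace sketch, not part of this file): generic_ptrace_peekdata()
 * above returns the word via put_user(), but the glibc ptrace() wrapper
 * hands it back as the return value, so -1 is ambiguous; callers must
 * clear errno first:
 *
 *	#include <sys/ptrace.h>
 *	#include <errno.h>
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *) addr, NULL);
 *	if (word == -1 && errno != 0)
 *		return -1;	// a real error, not the value -1
 */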
#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ptrace_get_breakpoints(struct task_struct *tsk)
{
	if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
		return 0;

	return -1;
}

void ptrace_put_breakpoints(struct task_struct *tsk)
{
	if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt))
		flush_ptrace_hw_breakpoint(tsk);
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */