kernel-2.6.18-128.1.10.el5.src.rpm

 Documentation/utrace.txt                |  579 ++++++++++
 arch/alpha/kernel/asm-offsets.c         |    2 
 arch/alpha/kernel/entry.S               |    4 
 arch/arm/kernel/ptrace.c                |   36 -
 arch/arm26/kernel/ptrace.c              |   32 -
 arch/frv/kernel/ptrace.c                |   15 
 arch/i386/kernel/entry.S                |    7 
 arch/i386/kernel/i387.c                 |  143 +-
 arch/i386/kernel/process.c              |    3 
 arch/i386/kernel/ptrace.c               |  863 ++++++++------
 arch/i386/kernel/signal.c               |   37 -
 arch/i386/kernel/vm86.c                 |    7 
 arch/ia64/ia32/ia32_entry.S             |    2 
 arch/ia64/ia32/sys_ia32.c               |  537 +++++++++
 arch/ia64/kernel/asm-offsets.c          |    2 
 arch/ia64/kernel/fsys.S                 |   16 
 arch/ia64/kernel/mca.c                  |    2 
 arch/ia64/kernel/ptrace.c               | 1680 ++++++++++++++--------------
 arch/ia64/kernel/signal.c               |    4 
 arch/mips/kernel/ptrace.c               |   21 
 arch/mips/kernel/sysirix.c              |    2 
 arch/powerpc/kernel/Makefile            |    4 
 arch/powerpc/kernel/asm-offsets.c       |    2 
 arch/powerpc/kernel/process.c           |    5 
 arch/powerpc/kernel/ptrace-common.h     |  161 ---
 arch/powerpc/kernel/ptrace.c            |  959 ++++++++++------
 arch/powerpc/kernel/ptrace32.c          |  436 -------
 arch/powerpc/kernel/signal_32.c         |   55 +
 arch/powerpc/kernel/signal_64.c         |    3 
 arch/powerpc/kernel/sys_ppc32.c         |    5 
 arch/powerpc/lib/sstep.c                |    3 
 arch/powerpc/platforms/cell/spufs/run.c |    2 
 arch/ppc/kernel/asm-offsets.c           |    2 
 arch/s390/kernel/Makefile               |    2 
 arch/s390/kernel/compat_linux.c         |    3 
 arch/s390/kernel/compat_signal.c        |    5 
 arch/s390/kernel/process.c              |    3 
 arch/s390/kernel/ptrace.c               | 1073 ++++++++----------
 arch/s390/kernel/signal.c               |    3 
 arch/s390/kernel/traps.c                |    6 
 arch/sparc64/Makefile                   |    0 
 arch/sparc64/kernel/Makefile            |    2 
 arch/sparc64/kernel/binfmt_aout32.c     |    2 
 arch/sparc64/kernel/entry.S             |    6 
 arch/sparc64/kernel/process.c           |    3 
 arch/sparc64/kernel/ptrace.c            | 1221 +++++++++++---------
 arch/sparc64/kernel/signal.c            |    2 
 arch/sparc64/kernel/signal32.c          |    2 
 arch/sparc64/kernel/sys_sparc32.c       |    3 
 arch/sparc64/kernel/systbls.S           |    4 
 arch/x86_64/ia32/fpu32.c                |   92 +-
 arch/x86_64/ia32/ia32_aout.c            |    6 
 arch/x86_64/ia32/ia32_signal.c          |    8 
 arch/x86_64/ia32/ia32entry.S            |    2 
 arch/x86_64/ia32/ptrace32.c             |  715 ++++++++----
 arch/x86_64/ia32/sys_ia32.c             |    5 
 arch/x86_64/kernel/process.c            |    5 
 arch/x86_64/kernel/ptrace.c             |  648 +++++++----
 arch/x86_64/kernel/signal.c             |   28 
 arch/x86_64/kernel/traps.c              |    8 
 arch/x86_64/mm/fault.c                  |    4 
 drivers/connector/cn_proc.c             |    4 
 fs/binfmt_aout.c                        |    6 
 fs/binfmt_elf.c                         |    6 
 fs/binfmt_elf_fdpic.c                   |    7 
 fs/binfmt_flat.c                        |    3 
 fs/binfmt_som.c                         |    2 
 fs/exec.c                               |   11 
 fs/proc/array.c                         |   14 
 fs/proc/base.c                          |   17 
 include/asm-i386/i387.h                 |   13 
 include/asm-i386/signal.h               |    4 
 include/asm-i386/thread_info.h          |    7 
 include/asm-i386/tracehook.h            |   49 +
 include/asm-ia64/elf.h                  |   24 
 include/asm-ia64/tracehook.h            |   83 +
 include/asm-powerpc/tracehook.h         |   80 +
 include/asm-s390/tracehook.h            |   53 +
 include/asm-sparc64/tracehook.h         |   44 +
 include/asm-x86_64/fpu32.h              |    3 
 include/asm-x86_64/thread_info.h        |    2 
 include/asm-x86_64/tracehook.h          |   54 +
 include/linux/init_task.h               |    3 
 include/linux/ptrace.h                  |  224 +++-
 include/linux/sched.h                   |   25 
 include/linux/tracehook.h               |  707 ++++++++++++
 include/linux/utrace.h                  |  504 ++++++++
 init/Kconfig                            |   29 
 kernel/Makefile                         |    1 
 kernel/exit.c                           |  254 +---
 kernel/fork.c                           |   62 -
 kernel/ptrace.c                         | 1812 +++++++++++++++++++++++++-----
 kernel/signal.c                         |  211 +---
 kernel/sys.c                            |    2 
 kernel/timer.c                          |    4 
 kernel/utrace.c                         | 1859 +++++++++++++++++++++++++++++++
 security/selinux/hooks.c                |   54 +
 security/selinux/include/objsec.h       |    1 
 98 files changed, 10662 insertions(+), 5068 deletions(-)
 create mode 100644 Documentation/utrace.txt
 delete arch/powerpc/kernel/ptrace-common.h
 delete arch/powerpc/kernel/ptrace32.c
 create mode 100644 include/asm-i386/tracehook.h
 create mode 100644 include/asm-ia64/tracehook.h
 create mode 100644 include/asm-powerpc/tracehook.h
 create mode 100644 include/asm-s390/tracehook.h
 create mode 100644 include/asm-sparc64/tracehook.h
 create mode 100644 include/asm-x86_64/tracehook.h
 create mode 100644 include/linux/tracehook.h
 create mode 100644 include/linux/utrace.h
 create mode 100644 kernel/utrace.c

--- linux-2.6/include/asm-powerpc/tracehook.h.utrace-ptrace-compat
+++ linux-2.6/include/asm-powerpc/tracehook.h
@@ -0,0 +1,80 @@
+/*
+ * Tracing hooks, PowerPC CPU support
+ */
+
+#ifndef _ASM_TRACEHOOK_H
+#define _ASM_TRACEHOOK_H	1
+
+#include <linux/sched.h>
+#include <asm/ptrace.h>
+
+/*
+ * See linux/tracehook.h for the descriptions of what these need to do.
+ */
+
+#define ARCH_HAS_SINGLE_STEP	(1)
+
+static inline void tracehook_enable_single_step(struct task_struct *task)
+{
+	struct pt_regs *regs = task->thread.regs;
+	if (regs != NULL) {
+#if defined(CONFIG_PPC32) && (defined(CONFIG_40x) || defined(CONFIG_BOOKE))
+		task->thread.dbcr0 = DBCR0_IDM | DBCR0_IC;
+		regs->msr |= MSR_DE;
+#else
+		regs->msr |= MSR_SE;
+#endif
+	}
+	set_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+
+static inline void tracehook_disable_single_step(struct task_struct *task)
+{
+	struct pt_regs *regs = task->thread.regs;
+	if (regs != NULL) {
+#if defined(CONFIG_PPC32) && (defined(CONFIG_40x) || defined(CONFIG_BOOKE))
+		task->thread.dbcr0 = 0;
+		regs->msr &= ~MSR_DE;
+#else
+		regs->msr &= ~MSR_SE;
+#endif
+	}
+	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+
+static inline int tracehook_single_step_enabled(struct task_struct *tsk)
+{
+	return test_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+}
+
+static inline void tracehook_enable_syscall_trace(struct task_struct *tsk)
+{
+	set_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
+}
+
+static inline void tracehook_disable_syscall_trace(struct task_struct *tsk)
+{
+	clear_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
+}
+
+static inline void tracehook_abort_syscall(struct pt_regs *regs)
+{
+	regs->orig_gpr3 = -1L;
+}
+
+
+extern const struct utrace_regset_view utrace_ppc_native_view;
+static inline const struct utrace_regset_view *
+utrace_native_view(struct task_struct *tsk)
+{
+#ifdef CONFIG_PPC64
+	extern const struct utrace_regset_view utrace_ppc32_view;
+
+	if (test_tsk_thread_flag(tsk, TIF_32BIT))
+		return &utrace_ppc32_view;
+#endif
+	return &utrace_ppc_native_view;
+}
+
+
+#endif
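
For illustration only, here is a sketch of how arch-independent code might drive the hooks this header defines. The helper name resume_singlestep is hypothetical and not part of the patch; on powerpc the enable path sets MSR_SE (or DBCR0_IC on 40x/BOOKE) plus TIF_SINGLESTEP, exactly as defined above.

static void resume_singlestep(struct task_struct *tsk, unsigned long flags)
{
#ifdef ARCH_HAS_SINGLE_STEP
	/* flags carries UTRACE_ACTION_* bits from <linux/utrace.h>. */
	if (flags & UTRACE_ACTION_SINGLESTEP)
		tracehook_enable_single_step(tsk);
	else if (tracehook_single_step_enabled(tsk))
		tracehook_disable_single_step(tsk);
#endif
}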
--- linux-2.6/include/asm-ia64/elf.h.utrace-ptrace-compat
+++ linux-2.6/include/asm-ia64/elf.h
@@ -154,6 +154,30 @@ extern void ia64_init_addr_space (void);
 #define ELF_NGREG	128	/* we really need just 72 but let's leave some headroom... */
 #define ELF_NFPREG	128	/* f0 and f1 could be omitted, but so what... */
 
+/* elf_gregset_t register offsets */
+#define ELF_GR_0_OFFSET     0
+#define ELF_NAT_OFFSET     (32 * sizeof(elf_greg_t))
+#define ELF_PR_OFFSET      (33 * sizeof(elf_greg_t))
+#define ELF_BR_0_OFFSET    (34 * sizeof(elf_greg_t))
+#define ELF_CR_IIP_OFFSET  (42 * sizeof(elf_greg_t))
+#define ELF_CFM_OFFSET     (43 * sizeof(elf_greg_t))
+#define ELF_CR_IPSR_OFFSET (44 * sizeof(elf_greg_t))
+#define ELF_GR_OFFSET(i)   (ELF_GR_0_OFFSET + (i) * sizeof(elf_greg_t))
+#define ELF_BR_OFFSET(i)   (ELF_BR_0_OFFSET + (i) * sizeof(elf_greg_t))
+#define ELF_AR_RSC_OFFSET  (45 * sizeof(elf_greg_t))
+#define ELF_AR_BSP_OFFSET  (46 * sizeof(elf_greg_t))
+#define ELF_AR_BSPSTORE_OFFSET (47 * sizeof(elf_greg_t))
+#define ELF_AR_RNAT_OFFSET (48 * sizeof(elf_greg_t))
+#define ELF_AR_CCV_OFFSET  (49 * sizeof(elf_greg_t))
+#define ELF_AR_UNAT_OFFSET (50 * sizeof(elf_greg_t))
+#define ELF_AR_FPSR_OFFSET (51 * sizeof(elf_greg_t))
+#define ELF_AR_PFS_OFFSET  (52 * sizeof(elf_greg_t))
+#define ELF_AR_LC_OFFSET   (53 * sizeof(elf_greg_t))
+#define ELF_AR_EC_OFFSET   (54 * sizeof(elf_greg_t))
+#define ELF_AR_CSD_OFFSET  (55 * sizeof(elf_greg_t))
+#define ELF_AR_SSD_OFFSET  (56 * sizeof(elf_greg_t))
+#define ELF_AR_END_OFFSET  (57 * sizeof(elf_greg_t))
+
 typedef unsigned long elf_fpxregset_t;
 
 typedef unsigned long elf_greg_t;
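
As a quick check of the layout above, with elf_greg_t being an 8-byte unsigned long on ia64: the 32 general registers occupy bytes 0 through 255, ELF_NAT_OFFSET is 32 * 8 = 256, the eight branch registers run from ELF_BR_OFFSET(0) = 272 through ELF_BR_OFFSET(7) = 328, and ELF_AR_END_OFFSET = 57 * 8 = 456 is the total span in bytes that these offsets cover.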
--- linux-2.6/include/asm-ia64/tracehook.h.utrace-ptrace-compat
+++ linux-2.6/include/asm-ia64/tracehook.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2006 Intel Corp.
+ *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ *	and Bibo Mao <bibo.mao@intel.com> adapted from i386.
+ *
+ * 	Tracing hooks, ia64 CPU support
+ */
+
+#ifndef _ASM_TRACEHOOK_H
+#define _ASM_TRACEHOOK_H	1
+
+#include <linux/sched.h>
+#include <asm/ptrace.h>
+
+/*
+ * See linux/tracehook.h for the descriptions of what these need to do.
+ */
+
+#define ARCH_HAS_SINGLE_STEP	(1)
+#define ARCH_HAS_BLOCK_STEP	(1)
+
+static inline void tracehook_enable_single_step(struct task_struct *tsk)
+{
+	struct pt_regs *pt = task_pt_regs(tsk);
+	ia64_psr(pt)->ss = 1;
+}
+
+static inline void tracehook_disable_single_step(struct task_struct *tsk)
+{
+	struct pt_regs *pt = task_pt_regs(tsk);
+	ia64_psr(pt)->ss = 0;
+}
+
+static inline void tracehook_enable_block_step(struct task_struct *tsk)
+{
+	struct pt_regs *pt = task_pt_regs(tsk);
+	ia64_psr(pt)->tb = 1;
+}
+
+static inline void tracehook_disable_block_step(struct task_struct *tsk)
+{
+	struct pt_regs *pt = task_pt_regs(tsk);
+	ia64_psr(pt)->tb = 0;
+}
+
+static inline void tracehook_enable_syscall_trace(struct task_struct *tsk)
+{
+	set_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
+}
+
+static inline void tracehook_disable_syscall_trace(struct task_struct *tsk)
+{
+	clear_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
+}
+
+static inline int tracehook_single_step_enabled(struct task_struct *tsk)
+{
+	struct pt_regs *pt = task_pt_regs(tsk);
+	return ia64_psr(pt)->ss;
+}
+
+static inline void tracehook_abort_syscall(struct pt_regs *regs)
+{
+	if (IS_IA32_PROCESS(regs))
+		regs->r1 = -1UL;
+	else
+		regs->r15 = -1UL;
+}
+
+extern const struct utrace_regset_view utrace_ia64_native;
+static inline const struct utrace_regset_view *
+utrace_native_view(struct task_struct *tsk)
+{
+#ifdef CONFIG_IA32_SUPPORT
+	extern const struct utrace_regset_view utrace_ia32_view;
+	if (IS_IA32_PROCESS(task_pt_regs(tsk)))
+		return &utrace_ia32_view;
+#endif
+	return &utrace_ia64_native;
+}
+
+
+#endif	/* asm/tracehook.h */
--- linux-2.6/include/asm-i386/i387.h.utrace-ptrace-compat
+++ linux-2.6/include/asm-i386/i387.h
@@ -126,17 +126,12 @@ extern int save_i387( struct _fpstate __
 extern int restore_i387( struct _fpstate __user *buf );
 
 /*
- * ptrace request handers...
+ * ptrace request handlers...
  */
-extern int get_fpregs( struct user_i387_struct __user *buf,
-		       struct task_struct *tsk );
-extern int set_fpregs( struct task_struct *tsk,
-		       struct user_i387_struct __user *buf );
+extern int get_fpregs(struct user_i387_struct *, struct task_struct *);
+extern int set_fpregs(struct task_struct *, const struct user_i387_struct *);
+extern void updated_fpxregs(struct task_struct *tsk);
 
-extern int get_fpxregs( struct user_fxsr_struct __user *buf,
-			struct task_struct *tsk );
-extern int set_fpxregs( struct task_struct *tsk,
-			struct user_fxsr_struct __user *buf );
 
 /*
  * FPU state for core dumps...
--- linux-2.6/include/asm-i386/signal.h.utrace-ptrace-compat
+++ linux-2.6/include/asm-i386/signal.h
@@ -221,10 +221,8 @@ struct pt_regs;
 
 #define ptrace_signal_deliver(regs, cookie)		\
 	do {						\
-		if (current->ptrace & PT_DTRACE) {	\
-			current->ptrace &= ~PT_DTRACE;	\
+		if (test_and_clear_thread_flag(TIF_FORCED_TF)) \
 			(regs)->eflags &= ~TF_MASK;	\
-		}					\
 	} while (0)
 
 #endif /* __KERNEL__ */
--- linux-2.6/include/asm-i386/thread_info.h.utrace-ptrace-compat
+++ linux-2.6/include/asm-i386/thread_info.h
@@ -135,13 +135,13 @@ static inline struct thread_info *curren
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_SINGLESTEP		4	/* restore singlestep on return to user mode */
 #define TIF_IRET		5	/* return with iret */
-#define TIF_SYSCALL_EMU		6	/* syscall emulation active */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
 #define TIF_MEMDIE		16
 #define TIF_DEBUG		17	/* uses debug registers */
 #define TIF_IO_BITMAP		18	/* uses I/O bitmap */
+#define TIF_FORCED_TF		19	/* true if TF in eflags artificially */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -149,17 +149,16 @@ static inline struct thread_info *curren
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
 #define _TIF_IRET		(1<<TIF_IRET)
-#define _TIF_SYSCALL_EMU	(1<<TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_DEBUG		(1<<TIF_DEBUG)
 #define _TIF_IO_BITMAP		(1<<TIF_IO_BITMAP)
+#define _TIF_FORCED_TF		(1<<TIF_FORCED_TF)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK \
-  (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-		  _TIF_SECCOMP | _TIF_SYSCALL_EMU))
+  (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP))
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK	(0x0000FFFF & ~_TIF_SECCOMP)
 
--- linux-2.6/include/asm-i386/tracehook.h.utrace-ptrace-compat
+++ linux-2.6/include/asm-i386/tracehook.h
@@ -0,0 +1,49 @@
+/*
+ * Tracing hooks, i386 CPU support
+ */
+
+#ifndef _ASM_TRACEHOOK_H
+#define _ASM_TRACEHOOK_H	1
+
+#include <linux/sched.h>
+#include <asm/ptrace.h>
+
+/*
+ * See linux/tracehook.h for the descriptions of what these need to do.
+ */
+
+#define ARCH_HAS_SINGLE_STEP	(1)
+
+/* These two are defined in arch/i386/kernel/ptrace.c.  */
+void tracehook_enable_single_step(struct task_struct *tsk);
+void tracehook_disable_single_step(struct task_struct *tsk);
+
+static inline int tracehook_single_step_enabled(struct task_struct *tsk)
+{
+	return test_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+}
+
+static inline void tracehook_enable_syscall_trace(struct task_struct *tsk)
+{
+	set_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
+}
+
+static inline void tracehook_disable_syscall_trace(struct task_struct *tsk)
+{
+	clear_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
+}
+
+static inline void tracehook_abort_syscall(struct pt_regs *regs)
+{
+	regs->orig_eax = -1;
+}
+
+extern const struct utrace_regset_view utrace_i386_native;
+static inline const struct utrace_regset_view *
+utrace_native_view(struct task_struct *tsk)
+{
+	return &utrace_i386_native;
+}
+
+
+#endif
--- linux-2.6/include/asm-s390/tracehook.h.utrace-ptrace-compat
+++ linux-2.6/include/asm-s390/tracehook.h
@@ -0,0 +1,53 @@
+/*
+ * Tracing hooks, s390/s390x support.
+ */
+
+#ifndef _ASM_TRACEHOOK_H
+#define _ASM_TRACEHOOK_H	1
+
+#include <linux/sched.h>
+#include <asm/ptrace.h>
+
+/*
+ * See linux/tracehook.h for the descriptions of what these need to do.
+ */
+
+#define ARCH_HAS_SINGLE_STEP	(1)
+
+/* These three are defined in arch/s390/kernel/ptrace.c.  */
+void tracehook_enable_single_step(struct task_struct *tsk);
+void tracehook_disable_single_step(struct task_struct *tsk);
+int tracehook_single_step_enabled(struct task_struct *tsk);
+
+
+static inline void tracehook_enable_syscall_trace(struct task_struct *tsk)
+{
+	set_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
+}
+
+static inline void tracehook_disable_syscall_trace(struct task_struct *tsk)
+{
+	clear_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
+}
+
+static inline void tracehook_abort_syscall(struct pt_regs *regs)
+{
+	regs->gprs[2] = -1L;
+}
+
+
+extern const struct utrace_regset_view utrace_s390_native_view;
+static inline const struct utrace_regset_view *
+utrace_native_view(struct task_struct *tsk)
+{
+#ifdef CONFIG_COMPAT
+	extern const struct utrace_regset_view utrace_s390_compat_view;
+
+	if (test_tsk_thread_flag(tsk, TIF_31BIT))
+		return &utrace_s390_compat_view;
+#endif
+	return &utrace_s390_native_view;
+}
+
+
+#endif
--- linux-2.6/include/asm-x86_64/fpu32.h.utrace-ptrace-compat
+++ linux-2.6/include/asm-x86_64/fpu32.h
@@ -7,4 +7,7 @@ int restore_i387_ia32(struct task_struct
 int save_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf, 
 		   struct pt_regs *regs, int fsave);
 
+int get_fpregs32(struct user_i387_ia32_struct *, struct task_struct *);
+int set_fpregs32(struct task_struct *, const struct user_i387_ia32_struct *);
+
 #endif
--- linux-2.6/include/asm-x86_64/thread_info.h.utrace-ptrace-compat
+++ linux-2.6/include/asm-x86_64/thread_info.h
@@ -119,6 +119,7 @@ static inline struct thread_info *stack_
 #define TIF_FORK		18	/* ret_from_fork */
 #define TIF_ABI_PENDING		19
 #define TIF_MEMDIE		20
+#define TIF_FORCED_TF		21	/* true if TF in eflags artificially */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -131,6 +132,7 @@ static inline struct thread_info *stack_
 #define _TIF_IA32		(1<<TIF_IA32)
 #define _TIF_FORK		(1<<TIF_FORK)
 #define _TIF_ABI_PENDING	(1<<TIF_ABI_PENDING)
+#define _TIF_FORCED_TF		(1<<TIF_FORCED_TF)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK \
--- linux-2.6/include/asm-x86_64/tracehook.h.utrace-ptrace-compat
+++ linux-2.6/include/asm-x86_64/tracehook.h
@@ -0,0 +1,54 @@
+/*
+ * Tracing hooks, x86-64 CPU support
+ */
+
+#ifndef _ASM_TRACEHOOK_H
+#define _ASM_TRACEHOOK_H	1
+
+#include <linux/sched.h>
+#include <asm/ptrace.h>
+#include <asm/proto.h>
+
+/*
+ * See linux/tracehook.h for the descriptions of what these need to do.
+ */
+
+#define ARCH_HAS_SINGLE_STEP	(1)
+
+/* These two are defined in arch/x86_64/kernel/ptrace.c.  */
+void tracehook_enable_single_step(struct task_struct *tsk);
+void tracehook_disable_single_step(struct task_struct *tsk);
+
+static inline int tracehook_single_step_enabled(struct task_struct *tsk)
+{
+	return test_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+}
+
+static inline void tracehook_enable_syscall_trace(struct task_struct *tsk)
+{
+	set_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
+}
+
+static inline void tracehook_disable_syscall_trace(struct task_struct *tsk)
+{
+	clear_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
+}
+
+static inline void tracehook_abort_syscall(struct pt_regs *regs)
+{
+	regs->orig_rax = -1L;
+}
+
+extern const struct utrace_regset_view utrace_x86_64_native, utrace_ia32_view;
+static inline const struct utrace_regset_view *
+utrace_native_view(struct task_struct *tsk)
+{
+#ifdef CONFIG_IA32_EMULATION
+	if (test_tsk_thread_flag(tsk, TIF_IA32))
+		return &utrace_ia32_view;
+#endif
+	return &utrace_x86_64_native;
+}
+
+
+#endif
--- linux-2.6/include/asm-sparc64/tracehook.h.utrace-ptrace-compat
+++ linux-2.6/include/asm-sparc64/tracehook.h
@@ -0,0 +1,44 @@
+/*
+ * Tracing hooks, SPARC64 CPU support
+ */
+
+#ifndef _ASM_TRACEHOOK_H
+#define _ASM_TRACEHOOK_H	1
+
+
+#include <linux/sched.h>
+#include <asm/ptrace.h>
+
+/*
+ * See linux/tracehook.h for the descriptions of what these need to do.
+ */
+
+
+static inline void tracehook_enable_syscall_trace(struct task_struct *tsk)
+{
+	set_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
+}
+
+static inline void tracehook_disable_syscall_trace(struct task_struct *tsk)
+{
+	clear_tsk_thread_flag(tsk, TIF_SYSCALL_TRACE);
+}
+
+static inline void tracehook_abort_syscall(struct pt_regs *regs)
+{
+	regs->u_regs[UREG_G1] = -1L;
+}
+
+extern const struct utrace_regset_view utrace_sparc64_native_view;
+static inline const struct utrace_regset_view *
+utrace_native_view(struct task_struct *tsk)
+{
+#ifdef CONFIG_COMPAT
+	extern const struct utrace_regset_view utrace_sparc32_view;
+	if (test_tsk_thread_flag(tsk, TIF_32BIT))
+		return &utrace_sparc32_view;
+#endif
+	return &utrace_sparc64_native_view;
+}
+
+#endif
--- linux-2.6/include/linux/sched.h.utrace-ptrace-compat
+++ linux-2.6/include/linux/sched.h
@@ -769,7 +769,6 @@ struct task_struct {
 	struct thread_info *thread_info;
 	atomic_t usage;
 	unsigned long flags;	/* per process flags, defined below */
-	unsigned long ptrace;
 
 	int lock_depth;		/* BKL lock depth */
 
@@ -800,12 +799,6 @@ struct task_struct {
 #endif
 
 	struct list_head tasks;
-	/*
-	 * ptrace_list/ptrace_children forms the list of my children
-	 * that were stolen by a ptracer.
-	 */
-	struct list_head ptrace_children;
-	struct list_head ptrace_list;
 
 	struct mm_struct *mm, *active_mm;
 
@@ -820,15 +813,13 @@ struct task_struct {
 	pid_t pid;
 	pid_t tgid;
 	/* 
-	 * pointers to (original) parent process, youngest child, younger sibling,
+	 * pointers to parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with 
 	 * p->parent->pid)
 	 */
-	struct task_struct *real_parent; /* real parent process (when being debugged) */
 	struct task_struct *parent;	/* parent process */
 	/*
-	 * children/sibling forms the list of my children plus the
-	 * tasks I'm ptracing.
+	 * children/sibling forms the list of my children
 	 */
 	struct list_head children;	/* list of my children */
 	struct list_head sibling;	/* linkage in my parent's children list */
@@ -900,6 +891,11 @@ struct task_struct {
 	struct audit_context *audit_context;
 	seccomp_t seccomp;
 
+#ifdef CONFIG_UTRACE
+	struct utrace *utrace;
+	unsigned long utrace_flags;
+#endif
+
 /* Thread group tracking */
    	u32 parent_exec_id;
    	u32 self_exec_id;
@@ -953,8 +949,6 @@ struct task_struct {
 
 	struct io_context *io_context;
 
-	unsigned long ptrace_message;
-	siginfo_t *last_siginfo; /* For ptrace use.  */
 /*
  * current io wait handle: wait queue entry to use for io waits
  * If this thread is processing aio, this points at the waitqueue
@@ -989,6 +983,10 @@ struct task_struct {
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
 
+#ifdef CONFIG_PTRACE
+	struct list_head ptracees;
+#endif
+
 	/*
 	 * cache last used pipe for splice
 	 */
@@ -1226,6 +1224,7 @@ extern int kill_pg_info(int, struct sigi
 extern int kill_proc_info(int, struct siginfo *, pid_t);
 extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t, u32);
 extern void do_notify_parent(struct task_struct *, int);
+extern void do_notify_parent_cldstop(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
--- linux-2.6/include/linux/utrace.h.utrace-ptrace-compat
+++ linux-2.6/include/linux/utrace.h
@@ -0,0 +1,504 @@
+/*
+ * User Debugging Data & Event Rendezvous
+ *
+ * This interface allows for notification of interesting events in a thread.
+ * It also mediates access to thread state such as registers.
+ * Multiple unrelated users can be associated with a single thread.
+ * We call each of these a tracing engine.
+ *
+ * A tracing engine starts by calling utrace_attach on the chosen thread,
+ * passing in a set of hooks (struct utrace_engine_ops), and some associated
+ * data.  This produces a struct utrace_attached_engine, which is the handle
+ * used for all other operations.  An attached engine has its ops vector,
+ * its data, and a flags word controlled by utrace_set_flags.
+ *
+ * Each engine's flags word contains two kinds of flags: events of
+ * interest, and action state flags.
+ *
+ * For each event flag that is set, that engine will get the
+ * appropriate ops->report_* callback when the event occurs.  The
+ * struct utrace_engine_ops need not provide callbacks for an event
+ * unless the engine sets one of the associated event flags.
+ *
+ * Action state flags change the normal behavior of the thread.
+ * These bits are in UTRACE_ACTION_STATE_MASK; these can be OR'd into
+ * flags set with utrace_set_flags.  Also, every callback that returns
+ * an action value can reset these bits for the engine (see below).
+ *
+ * The bits UTRACE_ACTION_STATE_MASK of all attached engines are OR'd
+ * together, so each action is in force as long as any engine requests it.
+ * As long as some engine sets the UTRACE_ACTION_QUIESCE flag, the thread
+ * will block and not resume running user code.  When the last engine
+ * clears its UTRACE_ACTION_QUIESCE flag, the thread will resume running.
+ */
+
+#ifndef _LINUX_UTRACE_H
+#define _LINUX_UTRACE_H	1
+
+#include <linux/list.h>
+#include <linux/rcupdate.h>
+#include <linux/signal.h>
+
+struct linux_binprm;
+struct pt_regs;
+struct utrace_regset;
+struct utrace_regset_view;
+
+
+/*
+ * Flags in task_struct.utrace_flags and utrace_attached_engine.flags.
+ * Low four bits are UTRACE_ACTION_STATE_MASK bits (below).
+ * Higher bits are events of interest.
+ */
+
+#define UTRACE_FIRST_EVENT	4
+#define UTRACE_EVENT_BITS	(BITS_PER_LONG - UTRACE_FIRST_EVENT)
+#define UTRACE_EVENT_MASK	(-1UL &~ UTRACE_ACTION_STATE_MASK)
+
+enum utrace_events {
+	_UTRACE_EVENT_QUIESCE,	/* Tracing requests stop.  */
+	_UTRACE_EVENT_REAP,  	/* Zombie reaped, no more tracing possible.  */
+	_UTRACE_EVENT_CLONE,	/* Successful clone/fork/vfork just done.  */
+	_UTRACE_EVENT_VFORK_DONE, /* vfork woke from waiting for child.  */
+	_UTRACE_EVENT_EXEC,	/* Successful execve just completed.  */
+	_UTRACE_EVENT_EXIT,	/* Thread exit in progress.  */
+	_UTRACE_EVENT_DEATH,	/* Thread has died.  */
+	_UTRACE_EVENT_SYSCALL_ENTRY, /* User entered kernel for system call. */
+	_UTRACE_EVENT_SYSCALL_EXIT, /* Returning to user after system call.  */
+	_UTRACE_EVENT_SIGNAL,	/* Signal delivery will run a user handler.  */
+	_UTRACE_EVENT_SIGNAL_IGN, /* No-op signal to be delivered.  */
+	_UTRACE_EVENT_SIGNAL_STOP, /* Signal delivery will suspend.  */
+	_UTRACE_EVENT_SIGNAL_TERM, /* Signal delivery will terminate.  */
+	_UTRACE_EVENT_SIGNAL_CORE, /* Signal delivery will dump core.  */
+	_UTRACE_EVENT_JCTL,	/* Job control stop or continue completed.  */
+	_UTRACE_NEVENTS
+};
+#define UTRACE_EVENT_BIT(type)	(UTRACE_FIRST_EVENT + _UTRACE_EVENT_##type)
+#define UTRACE_EVENT(type)	(1UL << UTRACE_EVENT_BIT(type))
+
+/*
+ * All the kinds of signal events.  These all use the report_signal callback.
+ */
+#define UTRACE_EVENT_SIGNAL_ALL	(UTRACE_EVENT(SIGNAL) \
+				 | UTRACE_EVENT(SIGNAL_IGN) \
+				 | UTRACE_EVENT(SIGNAL_STOP) \
+				 | UTRACE_EVENT(SIGNAL_TERM) \
+				 | UTRACE_EVENT(SIGNAL_CORE))
+/*
+ * Both kinds of syscall events; these call the report_syscall_entry and
+ * report_syscall_exit callbacks, respectively.
+ */
+#define UTRACE_EVENT_SYSCALL	\
+	(UTRACE_EVENT(SYSCALL_ENTRY) | UTRACE_EVENT(SYSCALL_EXIT))
+
+
+/*
+ * Action flags, in return value of callbacks.
+ *
+ * UTRACE_ACTION_RESUME (zero) is the return value to do nothing special.
+ * For each particular callback, some bits in UTRACE_ACTION_OP_MASK can
+ * be set in the return value to change the thread's behavior (see below).
+ *
+ * If UTRACE_ACTION_NEWSTATE is set, then the UTRACE_ACTION_STATE_MASK
+ * bits in the return value replace the engine's flags as in utrace_set_flags
+ * (but the event flags remain unchanged).
+ *
+ * If UTRACE_ACTION_HIDE is set, then the callbacks to other engines
+ * should be suppressed for this event.  This is appropriate only when
+ * the event was artificially provoked by something this engine did,
+ * such as setting a breakpoint.
+ *
+ * If UTRACE_ACTION_DETACH is set, this engine is detached as by utrace_detach.
+ * The action bits in UTRACE_ACTION_OP_MASK work as normal, but the engine's
+ * UTRACE_ACTION_STATE_MASK bits will no longer affect the thread.
+ */
+#define UTRACE_ACTION_RESUME	0x0000 /* Continue normally after event.  */
+#define UTRACE_ACTION_HIDE	0x0010 /* Hide event from other tracing.  */
+#define UTRACE_ACTION_DETACH	0x0020 /* Detach me, state flags ignored.  */
+#define UTRACE_ACTION_NEWSTATE	0x0040 /* Replace state bits.  */
+
+/*
+ * These flags affect the state of the thread until they are changed via
+ * utrace_set_flags or by the next callback to the same engine that uses
+ * UTRACE_ACTION_NEWSTATE.
+ */
+#define UTRACE_ACTION_QUIESCE	0x0001 /* Stay quiescent after callbacks.  */
+#define UTRACE_ACTION_SINGLESTEP 0x0002 /* Resume for one instruction.  */
+#define UTRACE_ACTION_BLOCKSTEP 0x0004 /* Resume until next branch.  */
+#define UTRACE_ACTION_NOREAP	0x0008 /* Inhibit parent SIGCHLD and wait.  */
+#define UTRACE_ACTION_STATE_MASK 0x000f /* Lasting state bits.  */
+
+/* These flags have meanings specific to the particular event report hook.  */
+#define UTRACE_ACTION_OP_MASK	0xff00
+
+/*
+ * Action flags in return value and argument of report_signal callback.
+ */
+#define UTRACE_SIGNAL_DELIVER	0x0100 /* Deliver according to sigaction.  */
+#define UTRACE_SIGNAL_IGN	0x0200 /* Ignore the signal.  */
+#define UTRACE_SIGNAL_TERM	0x0300 /* Terminate the process.  */
+#define UTRACE_SIGNAL_CORE	0x0400 /* Terminate with core dump.  */
+#define UTRACE_SIGNAL_STOP	0x0500 /* Deliver as absolute stop.  */
+#define UTRACE_SIGNAL_TSTP	0x0600 /* Deliver as job control stop.  */
+#define UTRACE_SIGNAL_HOLD	0x1000 /* Flag, push signal back on queue.  */
+/*
+ * This value is passed to a report_signal callback after a signal
+ * handler is entered while UTRACE_ACTION_SINGLESTEP is in force.
+ * For this callback, no signal will actually be delivered regardless
+ * of the return value, and the other callback parameters are null.
+ */
+#define UTRACE_SIGNAL_HANDLER	0x0700
+
+/* Action flag in return value of report_jctl.  */
+#define UTRACE_JCTL_NOSIGCHLD	0x0100 /* Do not notify the parent.  */
+
+
+/*
+ * Flags for utrace_attach.  If UTRACE_ATTACH_CREATE is not specified,
+ * you only look up an existing engine already attached to the
+ * thread.  If UTRACE_ATTACH_MATCH_* bits are set, only consider
+ * matching engines.  If UTRACE_ATTACH_EXCLUSIVE is set, attempting to
+ * attach a second (matching) engine fails with -EEXIST.
+ */
+#define UTRACE_ATTACH_CREATE		0x0010 /* Attach a new engine.  */
+#define UTRACE_ATTACH_EXCLUSIVE		0x0020 /* Refuse if existing match.  */
+#define UTRACE_ATTACH_MATCH_OPS		0x0001 /* Match engines on ops.  */
+#define UTRACE_ATTACH_MATCH_DATA	0x0002 /* Match engines on data.  */
+#define UTRACE_ATTACH_MATCH_MASK	0x000f
+
+
+/*
+ * Per-thread structure task_struct.utrace points to.
+ *
+ * The task itself never has to worry about this going away after
+ * some event is found set in task_struct.utrace_flags.
+ * Once created, this pointer is changed only when the task is quiescent
+ * (TASK_TRACED or TASK_STOPPED with the siglock held, or dead).
+ *
+ * For other parties, the pointer to this is protected by RCU and
+ * task_lock.  Since call_rcu is never used while the thread is alive and
+ * using this struct utrace, we can overlay the RCU data structure used
+ * only for a dead struct with some local state used only for a live utrace
+ * on an active thread.
+ */
+struct utrace
+{
+	union {
+		struct rcu_head dead;
+		struct {
+			struct task_struct *cloning;
+			struct utrace_signal *signal;
+		} live;
+		struct {
+			int report_death; /* report_death running */
+			int reap; /* release_task called */
+		} exit;
+	} u;
+
+	struct list_head engines;
+	spinlock_t lock;
+};
+#define utrace_lock(utrace)	spin_lock(&(utrace)->lock)
+#define utrace_unlock(utrace)	spin_unlock(&(utrace)->lock)
+
+
+/*
+ * Per-engine per-thread structure.
+ *
+ * The task itself never has to worry about engines detaching while
+ * it's doing event callbacks.  These structures are freed only when
+ * the task is quiescent.  For other parties, the list is protected
+ * by RCU and utrace_lock.
+ */
+struct utrace_attached_engine
+{
+	struct list_head entry;	/* Entry on thread's utrace.engines list.  */
+	struct rcu_head rhead;
+
+	const struct utrace_engine_ops *ops;
+	unsigned long data;
+
+	unsigned long flags;
+};
+
+
+struct utrace_engine_ops
+{
+	/*
+	 * Event reporting hooks.
+	 *
+	 * Return values contain UTRACE_ACTION_* flag bits.
+	 * The UTRACE_ACTION_OP_MASK bits are specific to each kind of event.
+	 *
+	 * All report_* hooks are called with no locks held, in a generally
+	 * safe environment when we will be returning to user mode soon.
+	 * It is fine to block for memory allocation and the like, but all
+	 * hooks are *asynchronous* and must not block on external events.
+	 * If you want the thread to block, request UTRACE_ACTION_QUIESCE in
+	 * your hook; then later wake it up with utrace_set_flags.
+	 *
+	 */
+
+	/*
+	 * Event reported for parent, before child might run.
+	 * The PF_STARTING flag prevents other engines from attaching
+	 * before this one has its chance.
+	 */
+	u32 (*report_clone)(struct utrace_attached_engine *engine,
+			    struct task_struct *parent,
+			    unsigned long clone_flags,
+			    struct task_struct *child);
+
+	/*
+	 * Event reported for parent using CLONE_VFORK or vfork system call.
+	 * The child has died or exec'd, so the vfork parent has unblocked
+	 * and is about to return child_pid.
+	 */
+	u32 (*report_vfork_done)(struct utrace_attached_engine *engine,
+				 struct task_struct *parent, pid_t child_pid);
+
+	/*
+	 * Event reported after UTRACE_ACTION_QUIESCE is set, when the target
+	 * thread is quiescent.  Either it's the current thread, or it's in
+	 * TASK_TRACED or TASK_STOPPED and will not resume running until the
+	 * UTRACE_ACTION_QUIESCE flag is no longer asserted by any engine.
+	 */
+	u32 (*report_quiesce)(struct utrace_attached_engine *engine,
+			      struct task_struct *tsk);
+
+	/*
+	 * Thread dequeuing a signal to be delivered.
+	 * The action and *return_ka values say what UTRACE_ACTION_RESUME
+	 * will do (possibly already influenced by another tracing engine).
+	 * An UTRACE_SIGNAL_* return value overrides the signal disposition.
+	 * The *info data (including info->si_signo) can be changed at will.
+ * Changing *return_ka affects the sigaction that will be used.
+	 * The *orig_ka value is the one in force before other tracing
+	 * engines intervened.
+	 */
+	u32 (*report_signal)(struct utrace_attached_engine *engine,
+			     struct task_struct *tsk,
+			     struct pt_regs *regs,
+			     u32 action, siginfo_t *info,
+			     const struct k_sigaction *orig_ka,
+			     struct k_sigaction *return_ka);
+
+	/*
+	 * Job control event completing, about to send SIGCHLD to parent
+	 * with CLD_STOPPED or CLD_CONTINUED as given in type.
+ * UTRACE_JCTL_NOSIGCHLD in the return value inhibits that.
+	 */
+	u32 (*report_jctl)(struct utrace_attached_engine *engine,
+			   struct task_struct *tsk,
+			   int type);
+
+	/*
+	 * Thread has just completed an exec.
+	 * The initial user register state is handy to be tweaked directly.
+	 */
+	u32 (*report_exec)(struct utrace_attached_engine *engine,
+			   struct task_struct *tsk,
+			   const struct linux_binprm *bprm,
+			   struct pt_regs *regs);
+
+	/*
+	 * Thread has entered the kernel to request a system call.
+	 * The user register state is handy to be tweaked directly.
+	 */
+	u32 (*report_syscall_entry)(struct utrace_attached_engine *engine,
+				    struct task_struct *tsk,
+				    struct pt_regs *regs);
+
+	/*
+	 * Thread is about to leave the kernel after a system call request.
+	 * The user register state is handy to be tweaked directly.
+	 */
+	u32 (*report_syscall_exit)(struct utrace_attached_engine *engine,
+				   struct task_struct *tsk,
+				   struct pt_regs *regs);
+
+	/*
+	 * Thread is exiting and cannot be prevented from doing so,
+	 * but all its state is still live.  The *code value will be
+	 * the wait result seen by the parent, and can be changed by
+	 * this engine or others.  The orig_code value is the real
+	 * status, not changed by any tracing engine.
+	 */
+	u32 (*report_exit)(struct utrace_attached_engine *engine,
+			   struct task_struct *tsk,
+			   long orig_code, long *code);
+
+	/*
+	 * Thread is really dead now.  If UTRACE_ACTION_NOREAP is in force,
+	 * it remains an unreported zombie.  Otherwise, it might be reaped
+	 * by its parent, or self-reap immediately.  Though the actual
+	 * reaping may happen in parallel, a report_reap callback will
+	 * always be ordered after a report_death callback.
+	 */
+	u32 (*report_death)(struct utrace_attached_engine *engine,
+			    struct task_struct *tsk);
+
+	/*
+	 * Called when someone reaps the dead task (parent, init, or self).
+	 * No more callbacks are made after this one.
+	 * The engine is always detached.
+	 * There is nothing more a tracing engine can do about this thread.
+	 */
+	void (*report_reap)(struct utrace_attached_engine *engine,
+			    struct task_struct *tsk);
+
+	/*
+	 * Miscellaneous hooks.  These are not associated with event reports.
+	 * Any of these may be null if the engine has nothing to say.
+	 * These hooks are called in more constrained environments and should
+	 * not block or do very much.
+	 */
+
+	/*
+	 * Return nonzero iff the caller task should be allowed to access
+	 * the memory of the target task via /proc/PID/mem and so forth,
+	 * by dint of this engine's attachment to the target.
+	 */
+	int (*allow_access_process_vm)(struct utrace_attached_engine *engine,
+				       struct task_struct *target,
+				       struct task_struct *caller);
+
+	/*
+	 * Return LSM_UNSAFE_* bits that apply to the exec in progress
+	 * due to tracing done by this engine.  These bits indicate that
+	 * someone is able to examine the process and so a set-UID or similar
+	 * privilege escalation may not be safe to permit.
+	 *
+	 * Called with task_lock held.
+	 */
+	int (*unsafe_exec)(struct utrace_attached_engine *engine,
+			   struct task_struct *target);
+
+	/*
+	 * Return the task_struct for the task using ptrace on this one, or
+	 * NULL.  Always called with rcu_read_lock held to keep the
+	 * returned struct alive.
+	 *
+	 * At exec time, this may be called with task_lock(target) still
+	 * held from when unsafe_exec was just called.  In that case it
+	 * must give results consistent with those unsafe_exec results,
+	 * i.e. non-NULL if any LSM_UNSAFE_PTRACE_* bits were set.
+	 *
+	 * The value is also used to display after "TracerPid:" in
+	 * /proc/PID/status, where it is called with only rcu_read_lock held.
+	 *
+	 * If this engine returns NULL, another engine may supply the result.
+	 */
+	struct task_struct *(*tracer_task)(struct utrace_attached_engine *,
+					   struct task_struct *target);
+};
+
+
+/***
+ *** These are the exported entry points for tracing engines to use.
+ ***/
+
+/*
+ * Attach a new tracing engine to a thread, or look up attached engines.
+ * See UTRACE_ATTACH_* flags, above.  The caller must ensure that the
+ * target thread does not get freed, i.e. hold a ref or be its parent.
+ */
+struct utrace_attached_engine *utrace_attach(struct task_struct *target,
+					     int flags,
+					     const struct utrace_engine_ops *,
+					     unsigned long data);
+
+/*
+ * Detach a tracing engine from a thread.  After this, the engine
+ * data structure is no longer accessible, and the thread might be reaped.
+ * The thread will start running again if it was being kept quiescent
+ * and no longer has any attached engines asserting UTRACE_ACTION_QUIESCE.
+ *
+ * If the target thread is not already quiescent, then a callback to this
+ * engine might be in progress or about to start on another CPU.  If it's
+ * quiescent when utrace_detach is called, then after successful return
+ * it's guaranteed that no more callbacks to the ops vector will be done.
+ * The only exception is SIGKILL (and exec by another thread in the group),
+ * which breaks quiescence and can cause asynchronous DEATH and/or REAP
+ * callbacks even when UTRACE_ACTION_QUIESCE is set.  In that event,
+ * utrace_detach fails with -ESRCH or -EALREADY to indicate that the
+ * report_reap or report_death callbacks have begun or will run imminently.
+ */
+int utrace_detach(struct task_struct *target,
+		  struct utrace_attached_engine *engine);
+
+/*
+ * Change the flags for a tracing engine.
+ * This resets the event flags and the action state flags.
+ * If UTRACE_ACTION_QUIESCE and UTRACE_EVENT(QUIESCE) are set,
+ * this will cause a report_quiesce callback soon, maybe immediately.
+ * If UTRACE_ACTION_QUIESCE was set before and is no longer set by
+ * any engine, this will wake the thread up.
+ *
+ * This fails with -EALREADY and does nothing if you try to clear
+ * UTRACE_EVENT(DEATH) when the report_death callback may already have
+ * begun, if you try to clear UTRACE_EVENT(REAP) when the report_reap
+ * callback may already have begun, if you try to newly set
+ * UTRACE_ACTION_NOREAP when the target may already have sent its
+ * parent SIGCHLD, or if you try to newly set UTRACE_EVENT(DEATH),
+ * UTRACE_EVENT(QUIESCE), or UTRACE_ACTION_QUIESCE, when the target is
+ * already dead or dying.  It can fail with -ESRCH when the target has
+ * already been detached (including forcible detach on reaping).  If
+ * the target was quiescent before the call, then after a successful
+ * call, no event callbacks not requested in the new flags will be
+ * made, and a report_quiesce callback will always be made if
+ * requested.  These rules provide for coherent synchronization based
+ * on quiescence, even when SIGKILL is breaking quiescence.
+ */
+int utrace_set_flags(struct task_struct *target,
+		     struct utrace_attached_engine *engine,
+		     unsigned long flags);
+
+/*
+ * Cause a specified signal delivery in the target thread, which must be
+ * quiescent (or the current thread).  The action has UTRACE_SIGNAL_* bits
+ * as returned from a report_signal callback.  If ka is non-null, it gives
+ * the sigaction to follow for UTRACE_SIGNAL_DELIVER; otherwise, the
+ * installed sigaction at the time of delivery is used.
+ */
+int utrace_inject_signal(struct task_struct *target,
+			 struct utrace_attached_engine *engine,
+			 u32 action, siginfo_t *info,
+			 const struct k_sigaction *ka);
+
+/*
+ * Prepare to access thread's machine state, see <linux/tracehook.h>.
+ * The given thread must be quiescent (or the current thread).
+ * When this returns, the struct utrace_regset calls may be used to
+ * interrogate or change the thread's state.  Do not cache the returned
+ * pointer when the thread can resume.  You must call utrace_regset to
+ * ensure that context switching has completed and consistent state is
+ * available.
+ */
+const struct utrace_regset *utrace_regset(struct task_struct *target,
+					  struct utrace_attached_engine *,
+					  const struct utrace_regset_view *,
+					  int which);
+
+
+/*
+ * Hooks in <linux/tracehook.h> call these entry points to the utrace dispatch.
+ */
+int utrace_quiescent(struct task_struct *, struct utrace_signal *);
+void utrace_release_task(struct task_struct *);
+int utrace_get_signal(struct task_struct *, struct pt_regs *,
+		      siginfo_t *, struct k_sigaction *);
+void utrace_report_clone(unsigned long clone_flags, struct task_struct *child);
+void utrace_report_vfork_done(pid_t child_pid);
+void utrace_report_exit(long *exit_code);
+void utrace_report_death(struct task_struct *, struct utrace *);
+int utrace_report_jctl(int type);
+void utrace_report_exec(struct linux_binprm *bprm, struct pt_regs *regs);
+void utrace_report_syscall(struct pt_regs *regs, int is_exit);
+struct task_struct *utrace_tracer_task(struct task_struct *);
+int utrace_allow_access_process_vm(struct task_struct *);
+int utrace_unsafe_exec(struct task_struct *);
+void utrace_signal_handler_singlestep(struct task_struct *, struct pt_regs *);
+
+
+#endif	/* linux/utrace.h */
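
To make the attach/flags protocol described at the top of this header concrete, here is a minimal, hypothetical engine that asks only for syscall-entry reports. All demo_* names are invented for illustration; the sketch assumes utrace_attach reports failure via the kernel's ERR_PTR convention.

#include <linux/utrace.h>
#include <linux/err.h>

/* Hypothetical example engine; not part of this patch. */
static u32 demo_report_syscall_entry(struct utrace_attached_engine *engine,
				     struct task_struct *tsk,
				     struct pt_regs *regs)
{
	/* Observe the entry, then let the thread run normally. */
	return UTRACE_ACTION_RESUME;
}

static const struct utrace_engine_ops demo_ops = {
	.report_syscall_entry = demo_report_syscall_entry,
};

static int demo_attach(struct task_struct *target)
{
	struct utrace_attached_engine *engine;

	engine = utrace_attach(target, UTRACE_ATTACH_CREATE, &demo_ops, 0);
	if (IS_ERR(engine))
		return PTR_ERR(engine);

	/* Request report_syscall_entry callbacks from now on. */
	return utrace_set_flags(target, engine, UTRACE_EVENT(SYSCALL_ENTRY));
}

A later utrace_detach(target, engine) ends the callbacks, subject to the quiescence and SIGKILL caveats documented above.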
--- linux-2.6/include/linux/ptrace.h.utrace-ptrace-compat
+++ linux-2.6/include/linux/ptrace.h
@@ -49,66 +49,184 @@
 #include <asm/ptrace.h>
 
 #ifdef __KERNEL__
+#include <linux/compiler.h>
+#include <linux/types.h>
+struct task_struct;
+struct siginfo;
+struct rusage;
+
+
+extern int ptrace_may_attach(struct task_struct *task);
+
+
+#ifdef CONFIG_PTRACE
+#include <asm/tracehook.h>
+struct utrace_attached_engine;
+struct utrace_regset_view;
+
 /*
- * Ptrace flags
+ * These must be defined by arch code to handle machine-specific ptrace
+ * requests such as PTRACE_PEEKUSR and PTRACE_GETREGS.  It returns -ENOSYS
+ * for any request it does not handle; machine-independent code then handles
+ * such requests.  It can change *request and then return -ENOSYS so that a
+ * machine-specific alias is handled as a generic request.
  *
- * The owner ship rules for task->ptrace which holds the ptrace
- * flags is simple.  When a task is running it owns it's task->ptrace
- * flags.  When the a task is stopped the ptracer owns task->ptrace.
- */
-
-#define PT_PTRACED	0x00000001
-#define PT_DTRACE	0x00000002	/* delayed trace (used on m68k, i386) */
-#define PT_TRACESYSGOOD	0x00000004
-#define PT_PTRACE_CAP	0x00000008	/* ptracer can follow suid-exec */
-#define PT_TRACE_FORK	0x00000010
-#define PT_TRACE_VFORK	0x00000020
-#define PT_TRACE_CLONE	0x00000040
-#define PT_TRACE_EXEC	0x00000080
-#define PT_TRACE_VFORK_DONE	0x00000100
-#define PT_TRACE_EXIT	0x00000200
-#define PT_ATTACHED	0x00000400	/* parent != real_parent */
-
-#define PT_TRACE_MASK	0x000003f4
-
-/* single stepping state bits (used on ARM and PA-RISC) */
-#define PT_SINGLESTEP_BIT	31
-#define PT_SINGLESTEP		(1<<PT_SINGLESTEP_BIT)
-#define PT_BLOCKSTEP_BIT	30
-#define PT_BLOCKSTEP		(1<<PT_BLOCKSTEP_BIT)
-
-#include <linux/compiler.h>		/* For unlikely.  */
-#include <linux/sched.h>		/* For struct task_struct.  */
-
-
-extern long arch_ptrace(struct task_struct *child, long request, long addr, long data);
-extern struct task_struct *ptrace_get_task_struct(pid_t pid);
-extern int ptrace_traceme(void);
-extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
-extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
-extern int ptrace_attach(struct task_struct *tsk);
-extern int ptrace_detach(struct task_struct *, unsigned int);
-extern void ptrace_disable(struct task_struct *);
-extern int ptrace_check_attach(struct task_struct *task, int kill);
-extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
-extern void ptrace_notify(int exit_code);
-extern void __ptrace_link(struct task_struct *child,
-			  struct task_struct *new_parent);
-extern void __ptrace_unlink(struct task_struct *child);
-extern void ptrace_untrace(struct task_struct *child);
-extern int ptrace_may_attach(struct task_struct *task);
+ * This code should NOT access task machine state directly.  Instead it
+ * should use the utrace_regset accessors.  The functions below make this easy.
+ *
+ * Any nonzero return value should be for an error.  If the return value of
+ * the ptrace syscall should be a nonzero success value, this returns zero
+ * and sets *retval to the value--which might have any bit pattern at all,
+ * including one that looks like -ENOSYS or another error code.
+ */
+extern fastcall int arch_ptrace(long *request, struct task_struct *child,
+				struct utrace_attached_engine *engine,
+				unsigned long addr, unsigned long data,
+				long *retval);
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+
+extern fastcall int arch_compat_ptrace(compat_long_t *request,
+				       struct task_struct *child,
+				       struct utrace_attached_engine *engine,
+				       compat_ulong_t a, compat_ulong_t d,
+				       compat_long_t *retval);
+#endif
+
+/*
+ * Convenience function doing access to a single utrace_regset for ptrace.
+ * The offset and size are in bytes, giving the location in the regset data.
+ */
+extern fastcall int ptrace_regset_access(struct task_struct *child,
+					 struct utrace_attached_engine *engine,
+					 const struct utrace_regset_view *view,
+					 int setno, unsigned long offset,
+					 unsigned int size, void __user *data,
+					 int write);
 
-static inline void ptrace_link(struct task_struct *child,
-			       struct task_struct *new_parent)
+/*
+ * Convenience wrapper for doing access to a whole utrace_regset for ptrace.
+ */
+static inline int ptrace_whole_regset(struct task_struct *child,
+				      struct utrace_attached_engine *engine,
+				      long data, int setno, int write)
 {
-	if (unlikely(child->ptrace))
-		__ptrace_link(child, new_parent);
+	return ptrace_regset_access(child, engine, utrace_native_view(current),
+				    setno, 0, -1, (void __user *)data, write);
 }
-static inline void ptrace_unlink(struct task_struct *child)
+
+/*
+ * Convenience function doing access to a single slot in a utrace_regset.
+ * The regno value gives a slot number plus regset->bias.
+ * The value accessed is regset->size bytes long.
+ */
+extern fastcall int ptrace_onereg_access(struct task_struct *child,
+					 struct utrace_attached_engine *engine,
+					 const struct utrace_regset_view *view,
+					 int setno, unsigned long regno,
+					 void __user *data, int write);
+
+
+/*
+ * An array of these describes the layout of the virtual struct user
+ * accessed by PEEKUSR/POKEUSR, or the structure used by GETREGS et al.
+ * The array is terminated by an element with .end of zero.
+ * An element describes the range [.start, .end) of struct user offsets,
+ * measured in bytes; it maps to the regset in the view's regsets array
+ * at the index given by .regset, at .offset bytes into that regset's data.
+ * If .regset is -1, then the [.start, .end) range reads as zero.
+ */
+struct ptrace_layout_segment {
+	unsigned int start, end, regset, offset;
+};
+
+/*
+ * Convenience function for doing access to a ptrace compatibility layout.
+ * The offset and size are in bytes.
+ */
+extern fastcall int ptrace_layout_access(
+	struct task_struct *child, struct utrace_attached_engine *engine,
+	const struct utrace_regset_view *view,
+	const struct ptrace_layout_segment layout[],
+	unsigned long offset, unsigned int size,
+	void __user *data, void *kdata, int write);
+
+
+/* Convenience wrapper for the common PTRACE_PEEKUSR implementation.  */
+static inline int ptrace_peekusr(struct task_struct *child,
+				 struct utrace_attached_engine *engine,
+				 const struct ptrace_layout_segment layout[],
+				 unsigned long addr, long data)
+{
+	return ptrace_layout_access(child, engine, utrace_native_view(current),
+				    layout, addr, sizeof(long),
+				    (unsigned long __user *)data, NULL, 0);
+}
+
+/* Convenience wrapper for the common PTRACE_POKEUSR implementation.  */
+static inline int ptrace_pokeusr(struct task_struct *child,
+				 struct utrace_attached_engine *engine,
+				 const struct ptrace_layout_segment layout[],
+				 unsigned long addr, long data)
+{
+	return ptrace_layout_access(child, engine, utrace_native_view(current),
+				    layout, addr, sizeof(long),
+				    NULL, &data, 1);
+}
+
+#ifdef CONFIG_COMPAT
+/* Convenience wrapper for the common PTRACE_PEEKUSR implementation.  */
+static inline int ptrace_compat_peekusr(
+	struct task_struct *child, struct utrace_attached_engine *engine,
+	const struct ptrace_layout_segment layout[],
+	compat_ulong_t addr, compat_ulong_t data)
+{
+	compat_ulong_t *udata = (compat_ulong_t __user *) (unsigned long) data;
+	return ptrace_layout_access(child, engine, utrace_native_view(current),
+				    layout, addr, sizeof(compat_ulong_t),
+				    udata, NULL, 0);
+}
+
+/* Convenience wrapper for the common PTRACE_POKEUSR implementation.  */
+static inline int ptrace_compat_pokeusr(
+	struct task_struct *child, struct utrace_attached_engine *engine,
+	const struct ptrace_layout_segment layout[],
+	compat_ulong_t addr, compat_ulong_t data)
 {
-	if (unlikely(child->ptrace))
-		__ptrace_unlink(child);
+	return ptrace_layout_access(child, engine, utrace_native_view(current),
+				    layout, addr, sizeof(compat_ulong_t),
+				    NULL, &data, 1);
 }
+#endif
+
+
+/*
+ * Called in do_exit, after setting PF_EXITING, no locks are held.
+ */
+void ptrace_exit(struct task_struct *tsk);
+
+/*
+ * Called in do_wait, with tasklist_lock held for reading.
+ * This reports any ptrace-child that is ready as do_wait would a normal child.
+ * If there are no ptrace children, returns -ECHILD.
+ * If there are some ptrace children but none reporting now, returns 0.
+ * In those cases the tasklist_lock is still held so next_thread(tsk) works.
+ * For any other return value, tasklist_lock is released before return.
+ */
+int ptrace_do_wait(struct task_struct *tsk,
+		   pid_t pid, int options, struct siginfo __user *infop,
+		   int __user *stat_addr, struct rusage __user *rusagep);
+#else
+static inline void ptrace_exit(struct task_struct *tsk) { }
+static inline int ptrace_do_wait(struct task_struct *tsk,
+				 pid_t pid, int options,
+				 struct siginfo __user *infop,
+				 int __user *stat_addr,
+				 struct rusage __user *rusagep)
+{
+	return -ECHILD;
+}
+#endif
 
 
 #ifndef force_successful_syscall_return
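
To illustrate the ptrace_layout_segment table described in the hunk above: a hypothetical arch whose virtual struct user begins with 17 long-sized general registers in regset 0, followed by an unused gap, could describe it like this (demo_uarea_layout is an invented name).

/* Hypothetical layout table; not part of this patch. */
static const struct ptrace_layout_segment demo_uarea_layout[] = {
	{ 0,   136, 0,  0 },	/* bytes 0..135: 17 GPRs at regset 0, offset 0 */
	{ 136, 160, -1, 0 },	/* bytes 136..159: hole, reads as zero */
	{ 0,   0,   0,  0 },	/* terminator: .end == 0 */
};

Given that table, ptrace_peekusr resolves a PTRACE_PEEKUSR at addr 8 to bytes 8..15 of regset 0, while a peek inside the hole reads back zero.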
--- linux-2.6/include/linux/init_task.h.utrace-ptrace-compat
+++ linux-2.6/include/linux/init_task.h
@@ -98,9 +98,6 @@ extern struct group_info init_groups;
 	.ioprio		= 0,						\
 	.time_slice	= HZ,						\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
-	.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children),		\
-	.ptrace_list	= LIST_HEAD_INIT(tsk.ptrace_list),		\
-	.real_parent	= &tsk,						\
 	.parent		= &tsk,						\
 	.children	= LIST_HEAD_INIT(tsk.children),			\
 	.sibling	= LIST_HEAD_INIT(tsk.sibling),			\
--- linux-2.6/include/linux/tracehook.h.utrace-ptrace-compat
+++ linux-2.6/include/linux/tracehook.h
@@ -0,0 +1,707 @@
+/*
+ * Tracing hooks
+ *
+ * This file defines hook entry points called by core code where
+ * user tracing/debugging support might need to do something.
+ * These entry points are called tracehook_*.  Each hook declared below
+ * has a detailed comment giving the context (locking et al) from
+ * which it is called, and the meaning of its return value (if any).
+ *
+ * We also declare here tracehook_* functions providing access to low-level
+ * interrogation and control of threads.  These functions must be called
+ * on either the current thread or on a quiescent thread.  We say a
+ * thread is "quiescent" if it is in TASK_STOPPED or TASK_TRACED state,
+ * we are guaranteed it will not be woken up and return to user mode, and
+ * we have called wait_task_inactive on it.
+ */
+
+#ifndef _LINUX_TRACEHOOK_H
+#define _LINUX_TRACEHOOK_H	1
+
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+struct linux_binprm;
+struct pt_regs;
+
+
+/*
+ * The machine-specific asm/tracehook.h file is responsible for declaring
+ * the following entry points.  These can be called only on a quiescent thread,
+ * or the current thread when it is about to return to user mode.
+ *
+ * Single-step control.  When enabled, the next instruction or syscall exit
+ * produces a SIGTRAP.  Enabling or disabling redundantly is harmless.
+ *
+ *	void tracehook_enable_single_step(struct task_struct *tsk);
+ *	void tracehook_disable_single_step(struct task_struct *tsk);
+ *	int tracehook_single_step_enabled(struct task_struct *tsk);
+ *
+ * If those calls are defined, #define ARCH_HAS_SINGLE_STEP to nonzero.
+ * Do not #define it if these calls are never available in this kernel config.
+ * If defined, the value of ARCH_HAS_SINGLE_STEP can be constant or variable.
+ * It should evaluate to nonzero if the hardware is able to support
+ * tracehook_enable_single_step.  If it's a variable expression, it
+ * should be one that can be evaluated in modules, i.e. uses exported symbols.
+ *
+ * Block-step control (trap on control transfer), when available.
+ * tracehook_disable_block_step will be called after tracehook_enable_single_step.
+ * When enabled, the next jump, or other control transfer or syscall exit,
+ * produces a SIGTRAP.  Enabling or disabling redundantly is harmless.
+ *
+ *	void tracehook_enable_block_step(struct task_struct *tsk);
+ *	void tracehook_disable_block_step(struct task_struct *tsk);
+ *	int tracehook_block_step_enabled(struct task_struct *tsk);
+ *
+ * If those calls are defined, #define ARCH_HAS_BLOCK_STEP to nonzero.
+ * Do not #define it if these calls are never available in this kernel config.
+ * If defined, the value of ARCH_HAS_BLOCK_STEP can be constant or variable.
+ * It should evaluate to nonzero if the hardware is able to support
+ * tracehook_enable_block_step.  If it's a variable expression, it
+ * should be one that can be evaluated in modules, i.e. uses exported symbols.
+ *
+ * Control system call tracing.  When enabled a syscall entry or exit
+ * produces a call to tracehook_report_syscall, below.
+ *
+ *	void tracehook_enable_syscall_trace(struct task_struct *tsk);
+ *	void tracehook_disable_syscall_trace(struct task_struct *tsk);
+ *
+ * When stopped in tracehook_report_syscall for syscall entry,
+ * abort the syscall so no kernel function is called.
+ * If the register state was not otherwise updated before,
+ * this produces an -ENOSYS error return as for an invalid syscall number.
+ *
+ *	void tracehook_abort_syscall(struct pt_regs *regs);
+ *
+ * Return the regset view (see below) that is native for the given process.
+ * For example, what it would access when it called ptrace.
+ * Throughout the life of the process, this only changes at exec.
+ *
+ *	const struct utrace_regset_view *utrace_native_view(struct task_struct *);
+ *
+ ***/
+
+
+/*
+ * This data structure describes a machine resource we call a register set.
+ * This is part of the state of an individual thread, not necessarily
+ * actual CPU registers per se.  A register set consists of a number of
+ * similar slots, given by ->n.  Each slot is ->size bytes, and aligned to
+ * ->align bytes (which is at least ->size).
+ *
+ * As described above, these entry points can be called on the current
+ * thread or on a quiescent thread.  The pos argument must be aligned
+ * according to ->align; the count argument must be a multiple of ->size.
+ * These functions are not responsible for checking for invalid arguments.
+ *
+ * When there is a natural value to use as an index, ->bias gives the
+ * difference between the natural index and the slot index for the
+ * register set.  For example, x86 GDT segment descriptors form a regset;
+ * the segment selector produces a natural index, but only a subset of
+ * that index space is available as a regset (the TLS slots); subtracting
+ * ->bias from a segment selector index value computes the regset slot.
+ */
+struct utrace_regset {
+	unsigned int n;		/* Number of slots (registers).  */
+	unsigned int size;	/* Size in bytes of a slot (register).  */
+	unsigned int align;	/* Required alignment, in bytes.  */
+	unsigned int bias;	/* Bias from natural indexing.  */
+
+	/*
+	 * Return -ENODEV if not available on the hardware found.
+	 * Return 0 if no interesting state in this thread.
+	 * Return >0 number of ->size units of interesting state.
+	 * Any get call fetching state beyond that number will
+	 * see the default initialization state for this data,
+	 * so a caller that knows what the default state is need
+	 * not copy it all out.
+	 * This call is optional; the pointer is NULL if there is
+	 * no inexpensive check to yield a value < .n.
+	 */
+	int (*active)(struct task_struct *, const struct utrace_regset *);
+
+	/*
+	 * Fetch and store register values.  Return 0 on success; -EIO or
+	 * -ENODEV are usual failure returns.  The pos and count values are
+	 * in bytes, but must be properly aligned.  If kbuf is non-null,
+	 * that buffer is used and ubuf is ignored.  If kbuf is NULL, then
+	 * ubuf gives a userland pointer to access directly, and an -EFAULT
+	 * return value is possible.
+	 */
+	int (*get)(struct task_struct *, const struct utrace_regset *,
+		   unsigned int pos, unsigned int count,
+		   void *kbuf, void __user *ubuf);
+	int (*set)(struct task_struct *, const struct utrace_regset *,
+		   unsigned int pos, unsigned int count,
+		   const void *kbuf, const void __user *ubuf);
+
+	/*
+	 * This call is optional; usually the pointer is NULL.
+	 * When provided, there is some user memory associated
+	 * with this regset's hardware, such as memory backing
+	 * cached register data on register window machines; the
+	 * regset's data controls what user memory is used
+	 * (e.g. via the stack pointer value).
+	 *
+	 * Write register data back to user memory.  If the
+	 * immediate flag is nonzero, it must be written to the
+	 * user memory so uaccess/access_process_vm can see it
+	 * when this call returns; if zero, then it must be
+	 * written back by the time the task completes a context
+	 * switch (as synchronized with wait_task_inactive).
+	 * Return 0 on success or if there was nothing to do,
+	 * -EFAULT for a memory problem (bad stack pointer or
+	 * whatever), or -EIO for a hardware problem.
+	 */
+	int (*writeback)(struct task_struct *, const struct utrace_regset *,
+			 int immediate);
+};
+
+/*
+ * A regset view is a collection of regsets (struct utrace_regset, above).
+ * This describes all the state of a thread that can be seen from a given
+ * architecture/ABI environment.  More than one view might refer to the
+ * same utrace_regset, or more than one regset might refer to the same
+ * machine-specific state in the thread.  For example, a 32-bit thread's
+ * state could be examined from the 32-bit view or from the 64-bit view.
+ * Either method reaches the same thread register state, doing appropriate
+ * widening or truncation.
+ */
+struct utrace_regset_view {
+	const char *name;	/* Identifier, e.g. ELF_PLATFORM string.  */
+
+	const struct utrace_regset *regsets;
+	unsigned int n;
+
+	/*
+	 * EM_* value for which this is the native view, if any.
+	 */
+	u16 e_machine;
+};
+
+
+/*
+ * These two are helpers for writing regset get/set functions in arch code.
+ * Use one or more calls sequentially for each chunk of regset data stored
+ * contiguously in memory.  Call with constants for start_pos and end_pos,
+ * giving the range of byte positions in the regset that data corresponds
+ * to; end_pos can be -1 if this chunk is at the end of the regset layout.
+ * Each call updates the arguments to point past its chunk.
+ */
+
+static inline int
+utrace_regset_copyout(unsigned int *pos, unsigned int *count,
+		      void **kbuf, void __user **ubuf,
+		      const void *data, int start_pos, int end_pos)
+{
+	if (*count == 0)
+		return 0;
+	BUG_ON(*pos < start_pos);
+	if (end_pos < 0 || *pos < end_pos) {
+		unsigned int copy = (end_pos < 0 ? *count
+				     : min(*count, end_pos - *pos));
+		data += *pos - start_pos;
+		if (*kbuf) {
+			memcpy(*kbuf, data, copy);
+			*kbuf += copy;
+		}
+		else if (copy_to_user(*ubuf, data, copy))
+			return -EFAULT;
+		else
+			*ubuf += copy;
+		*pos += copy;
+		*count -= copy;
+	}
+	return 0;
+}
+
+static inline int
+utrace_regset_copyin(unsigned int *pos, unsigned int *count,
+		     const void **kbuf, const void __user **ubuf,
+		     void *data, int start_pos, int end_pos)
+{
+	if (*count == 0)
+		return 0;
+	BUG_ON(*pos < start_pos);
+	if (end_pos < 0 || *pos < end_pos) {
+		unsigned int copy = (end_pos < 0 ? *count
+				     : min(*count, end_pos - *pos));
+		data += *pos - start_pos;
+		if (*kbuf) {
+			memcpy(data, *kbuf, copy);
+			*kbuf += copy;
+		}
+		else if (copy_from_user(data, *ubuf, copy))
+			return -EFAULT;
+		else
+			*ubuf += copy;
+		*pos += copy;
+		*count -= copy;
+	}
+	return 0;
+}
+
+/*
+ * These two parallel the two above, but for portions of a regset layout
+ * that always read as all-zero or for which writes are ignored.
+ */
+static inline int
+utrace_regset_copyout_zero(unsigned int *pos, unsigned int *count,
+			   void **kbuf, void __user **ubuf,
+			   int start_pos, int end_pos)
+{
+	if (*count == 0)
+		return 0;
+	BUG_ON(*pos < start_pos);
+	if (end_pos < 0 || *pos < end_pos) {
+		unsigned int copy = (end_pos < 0 ? *count
+				     : min(*count, end_pos - *pos));
+		if (*kbuf) {
+			memset(*kbuf, 0, copy);
+			*kbuf += copy;
+		}
+		else if (clear_user(*ubuf, copy))
+			return -EFAULT;
+		else
+			*ubuf += copy;
+		*pos += copy;
+		*count -= copy;
+	}
+	return 0;
+}
+
+static inline int
+utrace_regset_copyin_ignore(unsigned int *pos, unsigned int *count,
+			    const void **kbuf, const void __user **ubuf,
+			    int start_pos, int end_pos)
+{
+	if (*count == 0)
+		return 0;
+	BUG_ON(*pos < start_pos);
+	if (end_pos < 0 || *pos < end_pos) {
+		unsigned int copy = (end_pos < 0 ? *count
+				     : min(*count, end_pos - *pos));
+		if (*kbuf)
+			*kbuf += copy;
+		else
+			*ubuf += copy;
+		*pos += copy;
+		*count -= copy;
+	}
+	return 0;
+}
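+
+/*
+ * An illustrative sketch only (not part of the interface): how an
+ * arch regset get() function might use the helpers above.  The
+ * function name, struct foo_regs, and task_foo_regs() are
+ * hypothetical assumptions, kept under #if 0 so they are not compiled.
+ */
+#if 0
+static int foo_regs_get(struct task_struct *target,
+			const struct utrace_regset *regset,
+			unsigned int pos, unsigned int count,
+			void *kbuf, void __user *ubuf)
+{
+	/* Hypothetical accessor for this thread's saved registers.  */
+	const struct foo_regs *regs = task_foo_regs(target);
+	int ret;
+
+	/* Copy out the contiguous register block at regset offset 0.  */
+	ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				    regs, 0, sizeof(*regs));
+	if (ret)
+		return ret;
+
+	/* Anything beyond the hardware state reads as zero.  */
+	return utrace_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+					  sizeof(*regs), -1);
+}
+#endif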
+
+/**/
+
+
+/***
+ ***
+ *** Following are entry points from core code, where the user debugging
+ *** support can affect the normal behavior.  The locking situation is
+ *** described for each call.
+ ***
+ ***/
+
+#ifdef CONFIG_UTRACE
+#include <linux/utrace.h>
+#endif
+
+
+/*
+ * Called in copy_process when setting up the copied task_struct,
+ * with tasklist_lock held for writing.
+ */
+static inline void tracehook_init_task(struct task_struct *child)
+{
+#ifdef CONFIG_UTRACE
+	child->utrace_flags = 0;
+	child->utrace = NULL;
+#endif
+}
+
+/*
+ * Called from release_task, no locks held.
+ * After this, there should be no tracing entanglements.
+ */
+static inline void tracehook_release_task(struct task_struct *p)
+{
+#ifdef CONFIG_UTRACE
+	smp_mb();
+	if (p->utrace != NULL)
+		utrace_release_task(p);
+#endif
+}
+
+/*
+ * Return nonzero to trigger a BUG_ON crash in release_task.
+ * This should verify that there is no tracing-related state
+ * still affecting the task_struct about to be released.
+ * Called with tasklist_lock held for writing.
+ */
+static inline int tracehook_check_released(struct task_struct *p)
+{
+#ifdef CONFIG_UTRACE
+	return unlikely(p->utrace != NULL);
+#endif
+	return 0;
+}
+
+/*
+ * do_notify_parent_cldstop calls this when it's about to generate a SIGCHLD
+ * for a job control stop.  Return nonzero to prevent that signal generation.
+ * Called with tasklist_lock held for reading, sometimes with irqs disabled.
+ */
+static inline int tracehook_notify_cldstop(struct task_struct *tsk,
+					   const siginfo_t *info)
+{
+#ifdef CONFIG_UTRACE
+	if (tsk->utrace_flags & UTRACE_ACTION_NOREAP)
+		return 1;
+#endif
+	return 0;
+}
+
+/*
+ * exit_notify calls this with tasklist_lock held for writing.
+ * Return nonzero to prevent any normal SIGCHLD generation for this
+ * thread's death (i.e. when it is not ignored and its thread group is
+ * empty).  This call must set *noreap to 0, or to 1 to force this thread
+ * to become a zombie when it would normally reap itself.
+ * The *death_cookie is passed to tracehook_report_death (below).
+ */
+static inline int tracehook_notify_death(struct task_struct *tsk,
+					 int *noreap, void **death_cookie)
+{
+	*death_cookie = NULL;
+#ifdef CONFIG_UTRACE
+	*death_cookie = tsk->utrace;
+	if (tsk->utrace_flags & UTRACE_ACTION_NOREAP) {
+		*noreap = 1;
+		return 1;
+	}
+#endif
+	*noreap = 0;
+	return 0;
+}
+
+/*
+ * Return zero iff tracing doesn't care to examine this fatal signal,
+ * so it can short-circuit normal delivery directly to a group exit.
+ * Called with tsk->sighand->siglock held.
+ */
+static inline int tracehook_consider_fatal_signal(struct task_struct *tsk,
+						  int sig)
+{
+#ifdef CONFIG_UTRACE
+	return (tsk->utrace_flags & (UTRACE_EVENT(SIGNAL_TERM)
+				     | UTRACE_EVENT(SIGNAL_CORE)));
+#endif
+	return 0;
+}
+
+/*
+ * Return zero iff tracing doesn't care to examine this ignored signal,
+ * so it can short-circuit normal delivery and never even get queued.
+ * Either the handler is SIG_DFL and sig's default is ignore, or it's SIG_IGN.
+ * Called with tsk->sighand->siglock held.
+ */
+static inline int tracehook_consider_ignored_signal(struct task_struct *tsk,
+						    int sig, void *handler)
+{
+#ifdef CONFIG_UTRACE
+	return (tsk->utrace_flags & UTRACE_EVENT(SIGNAL_IGN));
+#endif
+	return 0;
+}
+
+
+/*
+ * Called with the siglock held when computing tsk's signal_pending flag.
+ * Return nonzero to force the signal_pending flag on, so that
+ * tracehook_get_signal will be called before the next return to user mode.
+ */
+static inline int tracehook_induce_sigpending(struct task_struct *tsk)
+{
+#ifdef CONFIG_UTRACE
+	return unlikely(tsk->utrace_flags & UTRACE_ACTION_QUIESCE);
+#endif
+	return 0;
+}
+
+/*
+ * Called with the siglock held before dequeuing pending signals.
+ * Return zero to check for a real pending signal normally.
+ * Return -1 after releasing the siglock to repeat the check.
+ * Return a signal number to induce an artificial signal delivery,
+ * setting *info and *return_ka to specify its details and behavior.
+ */
+static inline int tracehook_get_signal(struct task_struct *tsk,
+				       struct pt_regs *regs,
+				       siginfo_t *info,
+				       struct k_sigaction *return_ka)
+{
+#ifdef CONFIG_UTRACE
+	if (unlikely(tsk->utrace_flags))
+		return utrace_get_signal(tsk, regs, info, return_ka);
+#endif
+	return 0;
+}
+
+/*
+ * Called with no locks held when about to stop for job control;
+ * we are already in TASK_STOPPED state, about to call schedule.
+ * Return zero if the normal SIGCHLD should be generated; it will be
+ * generated only if last_one is true, meaning this is the last thread
+ * in the thread group to stop.
+ */
+static inline int tracehook_finish_stop(int last_one)
+{
+#ifdef CONFIG_UTRACE
+	if (current->utrace_flags & UTRACE_EVENT(JCTL))
+		return utrace_report_jctl(CLD_STOPPED);
+#endif
+
+	return 0;
+}
+
+/*
+ * Called with tasklist_lock held for reading, for an event notification stop.
+ * We are already in TASK_TRACED.  Return zero to go back to running,
+ * or nonzero to actually stop until resumed.
+ */
+static inline int tracehook_stop_now(void)
+{
+	return 0;
+}
+
+
+/*
+ * Return nonzero if the child's parent (current) should be prevented
+ * from seeing its child in TASK_STOPPED state when it waits with WSTOPPED.
+ * Called with tasklist_lock held for reading.
+ */
+static inline int tracehook_inhibit_wait_stopped(struct task_struct *child)
+{
+#ifdef CONFIG_UTRACE
+	return (child->utrace_flags & UTRACE_ACTION_NOREAP);
+#endif
+	return 0;
+}
+
+/*
+ * Return nonzero if the child's parent (current) should be prevented
+ * from seeing its child in EXIT_ZOMBIE state when it waits with WEXITED.
+ * Called with tasklist_lock held for reading.
+ */
+static inline int tracehook_inhibit_wait_zombie(struct task_struct *child)
+{
+#ifdef CONFIG_UTRACE
+	return (child->utrace_flags & UTRACE_ACTION_NOREAP);
+#endif
+	return 0;
+}
+
+/*
+ * Return nonzero if the child's parent (current) should be prevented
+ * from seeing its child resuming after job stop when it waits with WCONTINUED.
+ * Called with tasklist_lock held for reading.
+ */
+static inline int tracehook_inhibit_wait_continued(struct task_struct *child)
+{
+#ifdef CONFIG_UTRACE
+	return (child->utrace_flags & UTRACE_ACTION_NOREAP);
+#endif
+	return 0;
+}
+
+
+/*
+ * Return LSM_UNSAFE_* bits applied to an exec because of tracing.
+ * Called with task_lock(tsk) held.
+ */
+static inline int tracehook_unsafe_exec(struct task_struct *tsk)
+{
+#ifdef CONFIG_UTRACE
+	if (tsk->utrace_flags)
+		return utrace_unsafe_exec(tsk);
+#endif
+	return 0;
+}
+
+/*
+ * Return the task_struct for the task using ptrace on this one, or NULL.
+ * Must be called with rcu_read_lock held to keep the returned struct alive.
+ *
+ * At exec time, this may be called with task_lock(p) still held from when
+ * tracehook_unsafe_exec was just called.
+ *
+ * The value is also displayed after "TracerPid:" in /proc/PID/status,
+ * where it is called with only rcu_read_lock held.
+ */
+static inline struct task_struct *tracehook_tracer_task(struct task_struct *p)
+{
+#ifdef CONFIG_UTRACE
+	if (p->utrace_flags)
+		return utrace_tracer_task(p);
+#endif
+	return NULL;
+}
+
+/*
+ * Return nonzero if the current task should be allowed to use
+ * access_process_vm on the given task.
+ */
+static inline int tracehook_allow_access_process_vm(struct task_struct *tsk)
+{
+	if (tsk == current)
+		return 1;
+#ifdef CONFIG_UTRACE
+	if (tsk->utrace_flags)
+		return utrace_allow_access_process_vm(tsk);
+#endif
+	return 0;
+}
+
+
+/***
+ ***
+ *** Following declarations are hook stubs where core code reports
+ *** events.  These are called without locks, from the thread having the
+ *** event.  In all tracehook_report_* calls, no locks are held and the thread
+ *** is in a state close to returning to user mode with little baggage to
+ *** unwind, except as noted below for tracehook_report_clone.  It is generally
+ *** OK to block in these places if you want the user thread to be suspended.
+ ***
+ ***/
+
+/*
+ * Thread has just become a zombie (exit_state==EXIT_ZOMBIE) or is about to
+ * self-reap (exit_state==EXIT_DEAD).  If normal reaping is not inhibited,
+ * tsk->exit_state might be changing in parallel.  The death_cookie was
+ * passed back by tracehook_notify_death (above).
+ */
+static inline void tracehook_report_death(struct task_struct *tsk,
+					  int exit_state, void *death_cookie)
+{
+#ifdef CONFIG_UTRACE
+	smp_mb();
+	if (tsk->utrace_flags & (UTRACE_EVENT(DEATH) | UTRACE_ACTION_QUIESCE))
+		utrace_report_death(tsk, death_cookie);
+#endif
+}
+
+/*
+ * exec completed, we are shortly going to return to user mode.
+ * The freshly initialized register state can be seen and changed here.
+ */
+static inline void tracehook_report_exec(struct linux_binprm *bprm,
+				    struct pt_regs *regs)
+{
+#ifdef CONFIG_UTRACE
+	if (current->utrace_flags & UTRACE_EVENT(EXEC))
+		utrace_report_exec(bprm, regs);
+#endif
+}
+
+/*
+ * Called from do_exit, we are about to exit.  The code returned to the
+ * parent for wait can be changed here.
+ */
+static inline void tracehook_report_exit(long *exit_code)
+{
+#ifdef CONFIG_UTRACE
+	if (current->utrace_flags & UTRACE_EVENT(EXIT))
+		utrace_report_exit(exit_code);
+#endif
+}
+
+/*
+ * Called after a child is set up, but before it has been started or
+ * been given its CLONE_STOPPED initial stop.  (See also tracehook_init_task.)
+ * This is not a good place to block, because the child has not started yet.
+ * Suspend the child here if desired, and block in clone_complete (below).
+ * This must prevent the child from self-reaping if clone_complete uses
+ * the task_struct pointer; otherwise it might have died and been released
+ * by the time tracehook_report_clone_complete is called.
+ */
+static inline void tracehook_report_clone(unsigned long clone_flags,
+					  struct task_struct *child)
+{
+#ifdef CONFIG_UTRACE
+	if (current->utrace_flags & UTRACE_EVENT(CLONE))
+		utrace_report_clone(clone_flags, child);
+#endif
+}
+
+/*
+ * Called after the child has started running, shortly after
+ * tracehook_report_clone.  This is just before the clone/fork syscall
+ * returns, or blocks for vfork child completion if (clone_flags &
+ * CLONE_VFORK).  The child pointer may be invalid if a self-reaping
+ * child died and tracehook_report_clone took no action to prevent it
+ * from self-reaping.
+ */
+static inline void tracehook_report_clone_complete(unsigned long clone_flags,
+						   pid_t pid,
+						   struct task_struct *child)
+{
+#ifdef CONFIG_UTRACE
+	if (current->utrace_flags & UTRACE_ACTION_QUIESCE)
+		utrace_quiescent(current, NULL);
+#endif
+}
+
+/*
+ * Called after a CLONE_VFORK parent has waited for the child to complete.
+ * The clone/vfork system call will return immediately after this.
+ * The child pointer may be invalid if a self-reaping child died and
+ * tracehook_report_clone took no action to prevent it from self-reaping.
+ */
+static inline void tracehook_report_vfork_done(struct task_struct *child,
+					       pid_t child_pid)
+{
+#ifdef CONFIG_UTRACE
+	if (current->utrace_flags & UTRACE_EVENT(VFORK_DONE))
+		utrace_report_vfork_done(child_pid);
+#endif
+}
+
+/*
+ * Called for system call entry or exit.
+ */
+static inline void tracehook_report_syscall(struct pt_regs *regs, int is_exit)
+{
+#ifdef CONFIG_UTRACE
+	if (current->utrace_flags & (is_exit ? UTRACE_EVENT(SYSCALL_EXIT)
+				     : UTRACE_EVENT(SYSCALL_ENTRY)))
+		utrace_report_syscall(regs, is_exit);
+#endif
+}
+
+/*
+ * Called after system call exit if single/block-stepped into the syscall.
+ */
+static inline void tracehook_report_syscall_step(struct pt_regs *regs)
+{
+}
+
+/*
+ * Called when a signal handler has been set up.
+ * Register and stack state reflects the user handler about to run.
+ * Signal mask changes have already been made.
+ */
+static inline void tracehook_report_handle_signal(int sig,
+						  const struct k_sigaction *ka,
+						  const sigset_t *oldset,
+						  struct pt_regs *regs)
+{
+#ifdef CONFIG_UTRACE
+	struct task_struct *tsk = current;
+	if ((tsk->utrace_flags & UTRACE_EVENT_SIGNAL_ALL)
+	    && (tsk->utrace_flags & (UTRACE_ACTION_SINGLESTEP
+				     | UTRACE_ACTION_BLOCKSTEP)))
+		utrace_signal_handler_singlestep(tsk, regs);
+#endif
+}
+
+
+#endif	/* <linux/tracehook.h> */
--- linux-2.6/Documentation/utrace.txt.utrace-ptrace-compat
+++ linux-2.6/Documentation/utrace.txt
@@ -0,0 +1,579 @@
+DRAFT DRAFT DRAFT	WORK IN PROGRESS	DRAFT DRAFT DRAFT
+
+This is work in progress and likely to change.
+
+
+	Roland McGrath <roland@redhat.com>
+
+---
+
+		User Debugging Data & Event Rendezvous
+		---- --------- ---- - ----- ----------
+
+See linux/utrace.h for all the declarations used here.
+See also linux/tracehook.h for the utrace_regset declarations.
+
+UTRACE is infrastructure code for tracing and controlling user
+threads.  This is the foundation for writing tracing engines, which
+can be loadable kernel modules.  The UTRACE interfaces provide three
+basic facilities:
+
+* Thread event reporting
+
+  Tracing engines can request callbacks for events of interest in
+  the thread: signals, system calls, exit, exec, clone, etc.
+
+* Core thread control
+
+  Tracing engines can prevent a thread from running (keeping it in
+  TASK_TRACED state), or make it single-step or block-step (when
+  hardware supports it).  Engines can cause a thread to abort system
+  calls, change the behavior of signals, and inject signal-style
+  actions at will.
+
+* Thread machine state access
+
+  Tracing engines can read and write a thread's registers and
+  similar per-thread CPU state.
+
+
+	Tracing engines
+	------- -------
+
+The basic actors in UTRACE are the thread and the tracing engine.
+A tracing engine is some body of code that calls into the utrace_*
+interfaces, represented by a struct utrace_engine_ops.  (Usually it's a
+kernel module, though the legacy ptrace support is a tracing engine
+that is not in a kernel module.)  The UTRACE interface operates on
+individual threads (struct task_struct).  If an engine wants to
+treat several threads as a group, that is up to its higher-level
+code.  Using UTRACE starts out by attaching an engine to a thread.
+
+	struct utrace_attached_engine *
+	utrace_attach(struct task_struct *target, int flags,
+		      const struct utrace_engine_ops *ops, unsigned long data);
+
+Calling utrace_attach is what sets up a tracing engine to trace a
+thread.  Use UTRACE_ATTACH_CREATE in flags, and pass your engine's ops.
+Check the return value with IS_ERR.  If successful, it returns a
+struct pointer that is the handle used in all other utrace_* calls.
+The data argument is stored in the utrace_attached_engine structure,
+for your code to use however it wants.
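+
+For example (an illustrative sketch only; my_ops and my_data are
+assumptions standing in for an engine's own definitions):
+
+	struct utrace_attached_engine *engine;
+
+	engine = utrace_attach(task, UTRACE_ATTACH_CREATE,
+			       &my_ops, (unsigned long) my_data);
+	if (IS_ERR(engine))
+		return PTR_ERR(engine);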
+
+	int utrace_detach(struct task_struct *target,
+			  struct utrace_attached_engine *engine);
+
+The utrace_detach call removes an engine from a thread.
+No more callbacks will be made after this returns success.
+
+
+An attached engine does nothing by default.
+An engine makes something happen by setting its flags.
+
+	int utrace_set_flags(struct task_struct *target,
+			     struct utrace_attached_engine *engine,
+			     unsigned long flags);
+
+The synchronization issues related to these two calls
+are discussed further below in "Teardown Races".
+
+
+	Action Flags
+	------ -----
+
+There are two kinds of flags that an attached engine can set: event
+flags, and action flags.  Event flags register interest in particular
+events; when an event happens and an engine has the right event flag
+set, it gets a callback.  Action flags change the normal behavior of
+the thread.  The action flags available are:
+
+	UTRACE_ACTION_QUIESCE
+
+		The thread will stay quiescent (see below).  As long as
+		any engine asserts the QUIESCE action flag, the thread
+		will not resume running in user mode.  (Usually it will
+		be in TASK_TRACED state.)  Nothing will wake the thread
+		up except for SIGKILL (and implicit SIGKILLs such as a
+		core dump in another thread sharing the same address
+		space, or a group exit, fatal signal, or exec in another
+		thread in the same thread group).
+
+	UTRACE_ACTION_SINGLESTEP
+
+		When the thread runs, it will run one instruction and
+		then trap.  (Exiting a system call or entering a signal
+		handler is considered "an instruction" for this.)  This
+		is available on most machines.  This can be used only if
+		ARCH_HAS_SINGLE_STEP is #define'd by <asm/tracehook.h>
+		and evaluates to nonzero.
+
+	UTRACE_ACTION_BLOCKSTEP
+
+		When the thread runs, it will run until the next branch
+		taken, and then trap.  (Exiting a system call or
+		entering a signal handler is considered taking a branch
+		for this.)  When the SINGLESTEP flag is set, BLOCKSTEP
+		has no effect.  This is only available on some machines.
+		This can be used only if ARCH_HAS_BLOCK_STEP is
+		#define'd by <asm/tracehook.h> and evaluates to nonzero.
+
+	UTRACE_ACTION_NOREAP
+
+		When the thread exits or stops for job control, its
+		parent process will not receive a SIGCHLD and the
+		parent's wait calls will not wake up or report the child
+		as dead.  Even a self-reaping thread will remain a
+		zombie.  Note that this cannot prevent the reaping done
+		when an exec is done by another thread in the same
+		thread group; in that event, a REAP event (and callback
+		if requested) will happen regardless of this flag.
+		A well-behaved tracing engine does not want to interfere
+		with the parent's normal notifications.  This is
+		provided mainly for the ptrace compatibility code to
+		implement the traditional behavior.
+
+Event flags are specified using the macro UTRACE_EVENT(TYPE).
+Each event type is associated with a report_* callback in struct
+utrace_engine_ops.  A tracing engine can leave unused callbacks NULL.
+The only callbacks required are those used by the event flags it sets.
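+
+For example, to ask for exit reports and all signal reports (a sketch;
+task and engine come from a prior utrace_attach):
+
+	int ret = utrace_set_flags(task, engine,
+				   UTRACE_EVENT(EXIT)
+				   | UTRACE_EVENT_SIGNAL_ALL);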
+
+Many engines can be attached to each thread.  When a thread has an
+event, each engine gets a report_* callback if it has set the event flag
+for that event type.  Engines are called in the order they attached.
+
+Each callback takes arguments giving the details of the particular
+event.  The first two arguments to every callback are the struct
+utrace_attached_engine and struct task_struct pointers for the engine
+and the thread producing the event.  Usually this will be the current
+thread that is running the callback functions.
+
+The return value of report_* callbacks is a bitmask.  Some bits are
+common to all callbacks, and some are particular to that callback and
+event type.  The value zero (UTRACE_ACTION_RESUME) always means the
+simplest thing: do what would have happened with no tracing engine here.
+These are the flags that can be set in any report_* return value:
+
+	UTRACE_ACTION_NEWSTATE
+
+		Update the action state flags, described above.  Those
+		bits from the return value (UTRACE_ACTION_STATE_MASK)
+		replace those bits in the engine's flags.  This has the
+		same effect as calling utrace_set_flags, but is a more
+		efficient short-cut.  To change the event flags, you must
+		call utrace_set_flags.
+
+	UTRACE_ACTION_DETACH
+
+		Detach this engine.  This has the effect of calling
+		utrace_detach, but is a more efficient short-cut.
+
+	UTRACE_ACTION_HIDE
+
+		Hide this event from other tracing engines.  This is
+		only appropriate to do when the event was induced by
+		some action of this engine, such as a breakpoint trap.
+		Some events cannot be hidden, since every engine has to
+		know about them: exit, death, reap.
+
+The return value bits in UTRACE_ACTION_OP_MASK indicate a change to the
+normal behavior of the event taking place.  If zero, the thread does
+whatever that event normally means.  For report_signal, other values
+control the disposition of the signal.
+
+
+	Quiescence
+	----------
+
+To control another thread and access its state, it must be "quiescent".
+This means that it is stopped and won't start running again while we access
+it.  A quiescent thread is stopped in a place close to user mode, where the
+user state can be accessed safely; either it's about to return to user
+mode, or it's just entered the kernel from user mode, or it has already
+finished exiting (EXIT_ZOMBIE).  Setting the UTRACE_ACTION_QUIESCE action
+flag will force the attached thread to become quiescent soon.  After
+setting the flag, an engine must wait for an event callback when the thread
+becomes quiescent.  The thread may be running on another CPU, or may be in
+an uninterruptible wait.  When it is ready to be examined, it will make
+callbacks to engines that set the UTRACE_EVENT(QUIESCE) event flag.
+
+As long as some engine has UTRACE_ACTION_QUIESCE set, then the thread will
+remain stopped.  SIGKILL will wake it up, but it will not run user code.
+When the flag is cleared via utrace_set_flags or a callback return value,
+the thread starts running again.  (See also "Teardown Races", below.)
+
+During the event callbacks (report_*), the thread in question makes the
+callback from a safe place.  It is not quiescent, but it can safely access
+its own state.  Callbacks can access thread state directly without setting
+the QUIESCE action flag.  If a callback does want to prevent the thread
+from resuming normal execution, it *must* use the QUIESCE action state
+rather than simply blocking; see "Core Events & Callbacks", below.
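+
+For example, to ask asynchronously for a thread to quiesce (a sketch;
+note this replaces the engine's previous flags, and the engine's
+report_quiesce callback then fires once the thread is safe to examine):
+
+	ret = utrace_set_flags(task, engine,
+			       UTRACE_EVENT(QUIESCE)
+			       | UTRACE_ACTION_QUIESCE);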
+
+
+	Thread control
+	------ -------
+
+These calls must be made on a quiescent thread (or the current thread):
+
+	int utrace_inject_signal(struct task_struct *target,
+				 struct utrace_attached_engine *engine,
+				 u32 action, siginfo_t *info,
+				 const struct k_sigaction *ka);
+
+Cause a specified signal delivery in the target thread.  This is not
+like kill, which generates a signal to be dequeued and delivered later.
+Injection directs the thread to deliver a signal now, before it next
+resumes in user mode or dequeues any other pending signal.  It's as if
+the tracing engine intercepted a signal event and its report_signal
+callback returned the action argument as its value (see below).  The
+info and ka arguments serve the same purposes as their counterparts in
+a report_signal callback.
+
+	const struct utrace_regset *
+	utrace_regset(struct task_struct *target,
+		      struct utrace_attached_engine *engine,
+		      const struct utrace_regset_view *view,
+		      int which);
+
+Get access to machine state for the thread.  The struct utrace_regset_view
+indicates a view of machine state, corresponding to a user mode
+architecture personality (such as 32-bit or 64-bit versions of a machine).
+The which argument selects one of the register sets available in that view.
+The utrace_regset call must be made before accessing any machine state,
+each time the thread has been running and has then become quiescent.
+It ensures that the thread's state is ready to be accessed, and returns
+the struct utrace_regset giving its accessor functions.
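+
+For example, to fetch a quiescent thread's register data into a
+caller-supplied kernel buffer buf (a sketch; it assumes regset 0 of the
+native view is the general-purpose register set, as is conventional):
+
+	const struct utrace_regset *rs;
+
+	rs = utrace_regset(task, engine, utrace_native_view(task), 0);
+	if (rs == NULL || rs->get(task, rs, 0, rs->n * rs->size,
+				  buf, NULL))
+		/* error */;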
+
+XXX needs front ends for argument checks, export utrace_native_view
+
+
+	Core Events & Callbacks
+	---- ------ - ---------
+
+Event reporting callbacks have details particular to the event type, but
+are all called in similar environments and have the same constraints.
+Callbacks are made from safe spots, where no locks are held, no special
+resources are pinned, and the user-mode state of the thread is accessible.
+So, callback code has a pretty free hand.  But to be a good citizen,
+callback code should never block for long periods.  It is fine to block in
+kmalloc and the like, but never wait for i/o or for user mode to do
+something.  If you need the thread to wait, set UTRACE_ACTION_QUIESCE and
+return from the callback quickly.  When your i/o finishes or whatever, you
+can use utrace_set_flags to resume the thread.
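+
+For example, instead of blocking, a callback can end with (a sketch):
+
+	return UTRACE_ACTION_QUIESCE | UTRACE_ACTION_NEWSTATE;
+
+which leaves the thread quiescent until a later utrace_set_flags call
+clears UTRACE_ACTION_QUIESCE.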
+
+Well-behaved callbacks are important to maintain two essential properties
+of the interface.  The first of these is that unrelated tracing engines not
+interfere with each other.  If your engine's event callback does not return
+quickly, then another engine won't get the event notification in a timely
+manner.  The second important property is that tracing be as noninvasive as
+possible to the normal operation of the system overall and of the traced
+thread in particular.  That is, attached tracing engines should not perturb
+a thread's behavior, except to the extent that changing its user-visible
+state is explicitly what you want to do.  (Obviously some perturbation is
+unavoidable, primarily timing changes, ranging from small delays due to the
+overhead of tracing, to arbitrary pauses in user code execution when a user
+stops a thread with a debugger for examination.  When doing asynchronous
+utrace_attach to a thread doing a system call, more troublesome side
+effects are possible.)  Even when you explicitly want the perturbation of
+making the traced thread block, just blocking directly in your callback has
+more unwanted effects.  For example, the CLONE event callbacks are called
+when the new child thread has been created but not yet started running; the
+child can never be scheduled until the CLONE tracing callbacks return.
+(This allows engines tracing the parent to attach to the child.)  If a
+CLONE event callback blocks the parent thread, it also prevents the child
+thread from running (even to process a SIGKILL).  If what you want is to
+make both the parent and child block, then use utrace_attach on the child
+and then set the QUIESCE action state flag on both threads.  A more crucial
+problem with blocking in callbacks is that it can prevent SIGKILL from
+working.  A thread that is blocking due to UTRACE_ACTION_QUIESCE will still
+wake up and die immediately when sent a SIGKILL, as all threads should.
+Relying on the utrace infrastructure rather than on private synchronization
+calls in event callbacks is an important way to help keep tracing robustly
+noninvasive.
+
+
+UTRACE_EVENT(REAP)		Dead thread has been reaped
+Callback:
+	void (*report_reap)(struct utrace_attached_engine *engine,
+			    struct task_struct *tsk);
+
+This means the parent called wait, or else this was a detached thread or
+a process whose parent ignores SIGCHLD.  This cannot happen while the
+UTRACE_ACTION_NOREAP flag is set.  This is the only callback you are
+guaranteed to get (if you set the event flag; but see "Teardown Races", below).
+
+Unlike other callbacks, this can be called from the parent's context
+rather than from the traced thread itself--it must not delay the parent by
+blocking.  This callback is different from all others in that it returns void.
+Once you get this callback, your engine is automatically detached and you
+cannot access this thread or use this struct utrace_attached_engine handle
+any longer.  This is the place to clean up your data structures and
+synchronize with your code that might try to make utrace_* calls using this
+engine data structure.  The struct is still valid during this callback,
+but will be freed soon after it returns (via RCU).
+
+In all other callbacks, the return value is as described above.
+The common UTRACE_ACTION_* flags in the return value are always observed.
+Unless otherwise specified below, other bits in the return value are ignored.
+
+
+UTRACE_EVENT(QUIESCE)		Thread is quiescent
+Callback:
+	u32 (*report_quiesce)(struct utrace_attached_engine *engine,
+			      struct task_struct *tsk);
+
+This is the least interesting callback.  It happens at any safe spot,
+including after any other event callback.  This lets the tracing engine
+know that it is safe to access the thread's state, or to report to users
+that it has stopped running user code.
+
+UTRACE_EVENT(CLONE)		Thread is creating a child
+Callback:
+	u32 (*report_clone)(struct utrace_attached_engine *engine,
+			    struct task_struct *parent,
+			    unsigned long clone_flags,
+			    struct task_struct *child);
+
+A clone/clone2/fork/vfork system call has succeeded in creating a new
+thread or child process.  The new process is fully formed, but not yet
+running.  During this callback, other tracing engines are prevented from
+using utrace_attach asynchronously on the child, so that engines tracing
+the parent get the first opportunity to attach.  After this callback
+returns, the child will start and the parent's system call will return.
+If CLONE_VFORK is set, the parent will block before returning.
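+
+For example, an engine that follows children might attach in this
+callback (a sketch; it assumes the engine structure's ops and data
+fields, and omits error handling):
+
+	static u32 my_report_clone(struct utrace_attached_engine *engine,
+				   struct task_struct *parent,
+				   unsigned long clone_flags,
+				   struct task_struct *child)
+	{
+		(void) utrace_attach(child, UTRACE_ATTACH_CREATE,
+				     engine->ops, engine->data);
+		return UTRACE_ACTION_RESUME;
+	}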
+
+UTRACE_EVENT(VFORK_DONE)	Finished waiting for CLONE_VFORK child
+Callback:
+	u32 (*report_vfork_done)(struct utrace_attached_engine *engine,
+				 struct task_struct *parent, pid_t child_pid);
+
+Event reported for parent using CLONE_VFORK or vfork system call.
+The child has died or exec'd, so the vfork parent has unblocked
+and is about to return child_pid.
+
+UTRACE_EVENT(EXEC)		Completed exec
+Callback:
+	u32 (*report_exec)(struct utrace_attached_engine *engine,
+			   struct task_struct *tsk,
+			   const struct linux_binprm *bprm,
+			   struct pt_regs *regs);
+
+An execve system call has succeeded and the new program is about to
+start running.  The initial user register state is handy to be tweaked
+directly, or utrace_regset can be used for full machine state access.
+
+UTRACE_EVENT(EXIT)		Thread is exiting
+Callback:
+	u32 (*report_exit)(struct utrace_attached_engine *engine,
+			   struct task_struct *tsk,
+			   long orig_code, long *code);
+
+The thread is exiting and cannot be prevented from doing so, but all its
+state is still live.  The *code value will be the wait result seen by
+the parent, and can be changed by this engine or others.  The orig_code
+value is the real status, not changed by any tracing engine.
+
+UTRACE_EVENT(DEATH)		Thread has finished exiting
+Callback:
+	u32 (*report_death)(struct utrace_attached_engine *engine,
+			    struct task_struct *tsk);
+
+The thread is really dead now.  If the UTRACE_ACTION_NOREAP flag remains
+set after this callback, it remains an unreported zombie; if the flag was
+not set already, then it is too late to set it now--its parent has already
+been sent SIGCHLD.  Otherwise, it might be reaped by its parent, or
+self-reap immediately.  Though the actual reaping may happen in parallel, a
+report_reap callback will always be ordered after a report_death callback.
+
+UTRACE_EVENT(SYSCALL_ENTRY)	Thread has entered kernel for a system call
+Callback:
+	u32 (*report_syscall_entry)(struct utrace_attached_engine *engine,
+				    struct task_struct *tsk,
+				    struct pt_regs *regs);
+
+The system call number and arguments can be seen and modified in the
+registers.  The return value register has -ENOSYS, which will be
+returned for an invalid system call.  The macro tracehook_abort_syscall(regs)
+will abort the system call so that we go immediately to syscall exit,
+and return -ENOSYS (or whatever the register state is changed to).  If
+tracing engines keep the thread quiescent here, the system call will
+not be performed until it resumes.
+
+UTRACE_EVENT(SYSCALL_EXIT)	Thread is leaving kernel after a system call
+Callback:
+	u32 (*report_syscall_exit)(struct utrace_attached_engine *engine,
+				   struct task_struct *tsk,
+				   struct pt_regs *regs);
+
+The return value can be seen and modified in the registers.  If the
+thread is allowed to resume, it will see any pending signals and then
+return to user mode.
+
+UTRACE_EVENT(SIGNAL)		Signal caught by user handler
+UTRACE_EVENT(SIGNAL_IGN)		Signal with no effect (SIG_IGN or default)
+UTRACE_EVENT(SIGNAL_STOP)	Job control stop signal
+UTRACE_EVENT(SIGNAL_TERM)	Fatal termination signal
+UTRACE_EVENT(SIGNAL_CORE)	Fatal core-dump signal
+UTRACE_EVENT_SIGNAL_ALL		All of the above (bitmask)
+Callback:
+	u32 (*report_signal)(struct utrace_attached_engine *engine,
+			     struct task_struct *tsk,
+			     u32 action, siginfo_t *info,
+			     const struct k_sigaction *orig_ka,
+			     struct k_sigaction *return_ka);
+
+There are five types of signal events, but all use the same callback.
+These happen when a thread is dequeuing a signal to be delivered.
+(Not immediately when the signal is sent, and not when the signal is
+blocked.)  No signal event is reported for SIGKILL; no tracing engine
+can prevent it from killing the thread immediately.  The specific
+event types allow an engine to trace signals based on what they do.
+UTRACE_EVENT_SIGNAL_ALL is all of them OR'd together, to trace all
+signals (except SIGKILL).  A subset of these event flags can be used
+e.g. to catch only fatal signals, not handled ones, or to catch only
+core-dump signals, not normal termination signals.
+
+The action argument says what the signal's default disposition is:
+
+	UTRACE_SIGNAL_DELIVER	Run the user handler from sigaction.
+	UTRACE_SIGNAL_IGN	Do nothing, ignore the signal.
+	UTRACE_SIGNAL_TERM	Terminate the process.
+	UTRACE_SIGNAL_CORE	Terminate the process and write a core dump.
+	UTRACE_SIGNAL_STOP	Absolutely stop the process, a la SIGSTOP.
+	UTRACE_SIGNAL_TSTP	Job control stop (no stop if orphaned).
+
+This selection is made by consulting the process's sigaction and the
+default action for the signal number, but may already have been changed by
+an earlier tracing engine (in which case you see its override).  A return
+value of UTRACE_ACTION_RESUME means to carry out this action.  If instead
+UTRACE_SIGNAL_* bits are in the return value, that overrides the normal
+behavior of the signal.
+
+The signal number and other details of the signal are in info, and
+this data can be changed to make the thread see a different signal.
+A return value of UTRACE_SIGNAL_DELIVER says to follow the sigaction in
+return_ka, which can specify a user handler or SIG_IGN to ignore the
+signal or SIG_DFL to follow the default action for info->si_signo.
+The orig_ka parameter shows the process's sigaction at the time the
+signal was dequeued, and return_ka initially contains this.  Tracing
+engines can modify return_ka to change the effects of delivery.
+For other UTRACE_SIGNAL_* return values, return_ka is ignored.
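+
+For example, a report_signal callback that quietly discards SIGUSR1 and
+leaves every other signal alone might end with (a sketch):
+
+	if (info->si_signo == SIGUSR1)
+		return UTRACE_SIGNAL_IGN;
+	return UTRACE_ACTION_RESUME;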
+
+UTRACE_SIGNAL_HOLD is a flag bit that can be OR'd into the return
+value.  It says to push the signal back on the thread's queue, with
+the signal number and details possibly changed in info.  When the
+thread is allowed to resume, it will dequeue and report it again.
+
+
+	Teardown Races
+	-------- -----
+
+Ordinarily synchronization issues for tracing engines are kept fairly
+straightforward by using quiescence (see above): you make a thread
+quiescent and then once it makes the report_quiesce callback it cannot
+do anything else that would result in another callback, until you let
+it.  This simple arrangement avoids complex and error-prone code in
+each one of a tracing engine's event callbacks to keep them serialized
+with the engine's other operations done on that thread from another
+thread of control.  However, giving tracing engines complete power to
+keep a traced thread stuck in place runs afoul of a more important
+kind of simplicity that the kernel overall guarantees: nothing can
+prevent or delay SIGKILL from making a thread die and release its
+resources.  To preserve this important property of SIGKILL, it can, as
+a special case, break quiescence like nothing else normally can.
+This includes both explicit SIGKILL signals and the implicit SIGKILL
+sent to each other thread in the same thread group by a thread doing
+an exec, or processing a fatal signal, or making an exit_group system
+call.  A tracing engine can prevent a thread from beginning the exit
+or exec or dying by signal (other than SIGKILL) if it is attached to
+that thread, but once the operation begins, no tracing engine can
+prevent or delay all other threads in the same thread group dying.
+
+As described above, the report_reap callback is always the final event
+in the life cycle of a traced thread.  Tracing engines can use this as
+the trigger to clean up their own data structures.  The report_death
+callback is always the penultimate event a tracing engine might see,
+except when the thread was already in the midst of dying when the
+engine attached.  Many tracing engines will have no interest in when a
+parent reaps a dead process, and nothing they want to do with a zombie
+thread once it dies; for them, the report_death callback is the
+natural place to clean up data structures and detach.  To facilitate
+writing such engines robustly, given the asynchrony of SIGKILL, and
+without error-prone manual implementation of synchronization schemes,
+the utrace infrastructure provides some special guarantees about the
+report_death and report_reap callbacks.  It still takes some care to
+be sure your tracing engine is robust to teardown races, but these
+rules make it reasonably straightforward and concise to handle a lot
+of corner cases correctly.
+
+The first sort of guarantee concerns the core data structures
+themselves.  struct utrace_attached_engine is freed using RCU, as is
+task_struct.  If you call utrace_attach under rcu_read_lock, then
+the pointer it returns will always be valid while in the RCU critical
+section.  (Note that utrace_attach can block doing memory allocation,
+so you must consider the real critical section to start when
+utrace_attach returns.  utrace_attach can never block when not given
+the UTRACE_ATTACH_CREATE flag bit).  Conversely, you can call
+utrace_attach outside of rcu_read_lock and though the pointer can
+become stale asynchronously if the thread dies and is reaped, you can
+safely pass it to a subsequent utrace_set_flags or utrace_detach call
+and will just get an -ESRCH error return.  However, you must be sure
+the task_struct remains valid, either via get_task_struct or via RCU.
+The utrace infrastructure never holds task_struct references of its
+own.  Though neither rcu_read_lock nor any other lock is held while
+making a callback, it's always guaranteed that the task_struct and
+the struct utrace_attached_engine passed as arguments remain valid
+until the callback function returns.
+
+The second guarantee is the serialization of death and reap event
+callbacks for a given thread.  The actual reaping by the parent
+(release_task call) can occur simultaneously while the thread is
+still doing the final steps of dying, including the report_death
+callback.  If a tracing engine has requested both DEATH and REAP
+event reports, it's guaranteed that the report_reap callback will not
+be made until after the report_death callback has returned.  If the
+report_death callback itself detaches from the thread (with
+utrace_detach or with UTRACE_ACTION_DETACH in its return value), then
+the report_reap callback will never be made.  Thus it is safe for a
+report_death callback to clean up data structures and detach.
+
+The final sort of guarantee is that a tracing engine will know for
+sure whether or not the report_death and/or report_reap callbacks
+will be made for a certain thread.  These teardown races are
+disambiguated by the error return values of utrace_set_flags and
+utrace_detach.  Normally utrace_detach returns zero, and this means
+that no more callbacks will be made.  If the thread is in the midst
+of dying, utrace_detach returns -EALREADY to indicate that the
+report_death callback may already be in progress; when you get this
+error, you know that any cleanup your report_death callback does is
+about to happen or has just happened--note that if the report_death
+callback does not detach, the engine remains attached until the
+thread gets reaped.  If the thread is in the midst of being reaped,
+utrace_detach returns -ESRCH to indicate that the report_reap
+callback may already be in progress; this means the engine is
+implicitly detached when the callback completes.  This makes it
+possible for a tracing engine that has decided asynchronously to
+detach from a thread to safely clean up its data structures, knowing
+that no report_death or report_reap callback will try to do the
+same.  utrace_detach returns -ESRCH when the struct
+utrace_attached_engine has already been detached, but is still a
+valid pointer because of rcu_read_lock.  If RCU is used properly, a
+tracing engine can use this to safely synchronize its own
+independent multiple threads of control with each other and with its
+event callbacks that detach.
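+
+In code, an asynchronous detach might look like this (a sketch;
+my_cleanup is a hypothetical stand-in for the engine's own teardown):
+
+	switch (utrace_detach(task, engine)) {
+	case 0:			/* No more callbacks; clean up now.  */
+		my_cleanup();
+		break;
+	case -EALREADY:		/* report_death will do the cleanup.  */
+		break;
+	case -ESRCH:		/* Already detached or being reaped.  */
+		break;
+	}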
+
+In the same vein, utrace_set_flags normally returns zero; if the
+target thread was quiescent before the call, then after a successful
+call, no event callbacks not requested in the new flags will be made,
+and a report_quiesce callback will always be made if requested.  It
+fails with -EALREADY if you try to clear UTRACE_EVENT(DEATH) when the
+report_death callback may already have begun, if you try to clear
+UTRACE_EVENT(REAP) when the report_reap callback may already have
+begun, if you try to newly set UTRACE_ACTION_NOREAP when the target
+may already have sent its parent SIGCHLD, or if you try to newly set
+UTRACE_EVENT(DEATH), UTRACE_EVENT(QUIESCE), or UTRACE_ACTION_QUIESCE,
+when the target is already dead or dying.  Like utrace_detach, it
+returns -ESRCH when the thread has already been detached (including
+forcible detach on reaping).  This lets the tracing engine know for
+sure which event callbacks it will or won't see after utrace_set_flags
+has returned.  By checking for errors, it can know whether to clean up
+its data structures immediately or to let its callbacks do the work.
--- linux-2.6/security/selinux/include/objsec.h.utrace-ptrace-compat
+++ linux-2.6/security/selinux/include/objsec.h
@@ -34,7 +34,6 @@ struct task_security_struct {
 	u32 create_sid;      /* fscreate SID */
 	u32 keycreate_sid;   /* keycreate SID */
 	u32 sockcreate_sid;  /* fscreate SID */
-	u32 ptrace_sid;      /* SID of ptrace parent */
 };
 
 struct inode_security_struct {
--- linux-2.6/security/selinux/hooks.c.utrace-ptrace-compat
+++ linux-2.6/security/selinux/hooks.c
@@ -21,7 +21,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/security.h>
@@ -159,7 +159,7 @@ static int task_alloc_security(struct ta
 		return -ENOMEM;
 
 	tsec->task = task;
-	tsec->osid = tsec->sid = tsec->ptrace_sid = SECINITSID_UNLABELED;
+	tsec->osid = tsec->sid = SECINITSID_UNLABELED;
 	task->security = tsec;
 
 	return 0;
@@ -1387,19 +1387,13 @@ static int inode_security_set_sid(struct
 
 static int selinux_ptrace(struct task_struct *parent, struct task_struct *child)
 {
-	struct task_security_struct *psec = parent->security;
-	struct task_security_struct *csec = child->security;
 	int rc;
 
 	rc = secondary_ops->ptrace(parent,child);
 	if (rc)
 		return rc;
 
-	rc = task_has_perm(parent, child, PROCESS__PTRACE);
-	/* Save the SID of the tracing process for later use in apply_creds. */
-	if (!(child->ptrace & PT_PTRACED) && !rc)
-		csec->ptrace_sid = psec->sid;
-	return rc;
+	return task_has_perm(parent, child, PROCESS__PTRACE);
 }
 
 static int selinux_capget(struct task_struct *target, kernel_cap_t *effective,
@@ -1821,12 +1815,24 @@ static void selinux_bprm_apply_creds(str
 		/* Check for ptracing, and update the task SID if ok.
 		   Otherwise, leave SID unchanged and kill. */
 		if (unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
-			rc = avc_has_perm(tsec->ptrace_sid, sid,
-					  SECCLASS_PROCESS, PROCESS__PTRACE,
-					  NULL);
-			if (rc) {
-				bsec->unsafe = 1;
-				return;
+			struct task_struct *t;
+
+			rcu_read_lock();
+			t = tracehook_tracer_task(current);
+			if (unlikely(t == NULL))
+				rcu_read_unlock();
+			else {
+				struct task_security_struct *sec = t->security;
+				u32 ptsid = sec->sid;
+				rcu_read_unlock();
+
+				rc = avc_has_perm(ptsid, sid,
+						  SECCLASS_PROCESS,
+						  PROCESS__PTRACE, NULL);
+				if (rc) {
+					bsec->unsafe = 1;
+					return;
+				}
 			}
 		}
 		tsec->sid = sid;
@@ -2684,11 +2690,6 @@ static int selinux_task_alloc_security(s
 	tsec2->keycreate_sid = tsec1->keycreate_sid;
 	tsec2->sockcreate_sid = tsec1->sockcreate_sid;
 
-	/* Retain ptracer SID across fork, if any.
-	   This will be reset by the ptrace hook upon any
-	   subsequent ptrace_attach operations. */
-	tsec2->ptrace_sid = tsec1->ptrace_sid;
-
 	return 0;
 }
 
@@ -4293,6 +4294,7 @@ static int selinux_setprocattr(struct ta
 			       char *name, void *value, size_t size)
 {
 	struct task_security_struct *tsec;
+	struct task_struct *tracer;
 	u32 sid = 0;
 	int error;
 	char *str = value;
@@ -4381,18 +4383,24 @@ static int selinux_setprocattr(struct ta
 		/* Check for ptracing, and update the task SID if ok.
 		   Otherwise, leave SID unchanged and fail. */
 		task_lock(p);
-		if (p->ptrace & PT_PTRACED) {
-			error = avc_has_perm_noaudit(tsec->ptrace_sid, sid,
+		rcu_read_lock();
+		tracer = tracehook_tracer_task(p);
+		if (tracer != NULL) {
+			struct task_security_struct *ptsec = tracer->security;
+			u32 ptsid = ptsec->sid;
+			rcu_read_unlock();
+			error = avc_has_perm_noaudit(ptsid, sid,
 						     SECCLASS_PROCESS,
 						     PROCESS__PTRACE, &avd);
 			if (!error)
 				tsec->sid = sid;
 			task_unlock(p);
-			avc_audit(tsec->ptrace_sid, sid, SECCLASS_PROCESS,
+			avc_audit(ptsid, sid, SECCLASS_PROCESS,
 				  PROCESS__PTRACE, &avd, error, NULL);
 			if (error)
 				return error;
 		} else {
+			rcu_read_unlock();
 			tsec->sid = sid;
 			task_unlock(p);
 		}
--- linux-2.6/kernel/fork.c.utrace-ptrace-compat
+++ linux-2.6/kernel/fork.c
@@ -36,7 +36,7 @@
 #include <linux/jiffies.h>
 #include <linux/futex.h>
 #include <linux/rcupdate.h>
-#include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/mount.h>
 #include <linux/audit.h>
 #include <linux/profile.h>
@@ -908,8 +908,7 @@ static inline void copy_flags(unsigned l
 
 	new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
 	new_flags |= PF_FORKNOEXEC;
-	if (!(clone_flags & CLONE_PTRACE))
-		p->ptrace = 0;
+	new_flags |= PF_STARTING;
 	p->flags = new_flags;
 }
 
@@ -1018,6 +1017,9 @@ static struct task_struct *copy_process(
 	INIT_LIST_HEAD(&p->sibling);
 	p->vfork_done = NULL;
 	spin_lock_init(&p->alloc_lock);
+#ifdef CONFIG_PTRACE
+	INIT_LIST_HEAD(&p->ptracees);
+#endif
 
 	clear_tsk_thread_flag(p, TIF_SIGPENDING);
 	init_sigpending(&p->pending);
@@ -1153,8 +1155,6 @@ static struct task_struct *copy_process(
 	 */
 	p->group_leader = p;
 	INIT_LIST_HEAD(&p->thread_group);
-	INIT_LIST_HEAD(&p->ptrace_children);
-	INIT_LIST_HEAD(&p->ptrace_list);
 
 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	sched_fork(p, clone_flags);
@@ -1178,10 +1178,9 @@ static struct task_struct *copy_process(
 
 	/* CLONE_PARENT re-uses the old parent */
 	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
-		p->real_parent = current->real_parent;
+		p->parent = current->parent;
 	else
-		p->real_parent = current;
-	p->parent = p->real_parent;
+		p->parent = current;
 
 	spin_lock(&current->sighand->siglock);
 
@@ -1228,8 +1227,7 @@ static struct task_struct *copy_process(
 
 	if (likely(p->pid)) {
 		add_parent(p);
-		if (unlikely(p->ptrace & PT_PTRACED))
-			__ptrace_link(p, current->parent);
+		tracehook_init_task(p);
 
 		if (thread_group_leader(p)) {
 			p->signal->tty = current->signal->tty;
@@ -1313,22 +1311,6 @@ struct task_struct * __devinit fork_idle
 	return task;
 }
 
-static inline int fork_traceflag (unsigned clone_flags)
-{
-	if (clone_flags & CLONE_UNTRACED)
-		return 0;
-	else if (clone_flags & CLONE_VFORK) {
-		if (current->ptrace & PT_TRACE_VFORK)
-			return PTRACE_EVENT_VFORK;
-	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
-		if (current->ptrace & PT_TRACE_CLONE)
-			return PTRACE_EVENT_CLONE;
-	} else if (current->ptrace & PT_TRACE_FORK)
-		return PTRACE_EVENT_FORK;
-
-	return 0;
-}
-
 /*
  *  Ok, this is the main fork-routine.
  *
@@ -1343,18 +1325,12 @@ long do_fork(unsigned long clone_flags,
 	      int __user *child_tidptr)
 {
 	struct task_struct *p;
-	int trace = 0;
 	struct pid *pid = alloc_pid();
 	long nr;
 
 	if (!pid)
 		return -EAGAIN;
 	nr = pid->nr;
-	if (unlikely(current->ptrace)) {
-		trace = fork_traceflag (clone_flags);
-		if (trace)
-			clone_flags |= CLONE_PTRACE;
-	}
 
 	p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
 	/*
@@ -1369,30 +1345,26 @@ long do_fork(unsigned long clone_flags,
 			init_completion(&vfork);
 		}
 
-		if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
+		tracehook_report_clone(clone_flags, p);
+
+		p->flags &= ~PF_STARTING;
+
+		if (clone_flags & CLONE_STOPPED) {
 			/*
 			 * We'll start up with an immediate SIGSTOP.
 			 */
 			sigaddset(&p->pending.signal, SIGSTOP);
 			set_tsk_thread_flag(p, TIF_SIGPENDING);
+			p->state = TASK_STOPPED;
 		}
-
-		if (!(clone_flags & CLONE_STOPPED))
-			wake_up_new_task(p, clone_flags);
 		else
-			p->state = TASK_STOPPED;
+			wake_up_new_task(p, clone_flags);
 
-		if (unlikely (trace)) {
-			current->ptrace_message = nr;
-			ptrace_notify ((trace << 8) | SIGTRAP);
-		}
+		tracehook_report_clone_complete(clone_flags, nr, p);
 
 		if (clone_flags & CLONE_VFORK) {
 			wait_for_completion(&vfork);
-			if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) {
-				current->ptrace_message = nr;
-				ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
-			}
+			tracehook_report_vfork_done(p, nr);
 		}
 	} else {
 		free_pid(pid);
--- linux-2.6/kernel/signal.c.utrace-ptrace-compat
+++ linux-2.6/kernel/signal.c
@@ -20,7 +20,7 @@
 #include <linux/binfmts.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
-#include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/signal.h>
 #include <linux/capability.h>
 #include <asm/param.h>
@@ -160,12 +160,6 @@ static int sig_ignored(struct task_struc
 	void __user * handler;
 
 	/*
-	 * Tracers always want to know about signals..
-	 */
-	if (t->ptrace & PT_PTRACED)
-		return 0;
-
-	/*
 	 * Blocked signals are never ignored, since the
 	 * signal handler may change by the time it is
 	 * unblocked.
@@ -175,8 +169,12 @@ static int sig_ignored(struct task_struc
 
 	/* Is it explicitly or implicitly ignored? */
 	handler = t->sighand->action[sig-1].sa.sa_handler;
-	return   handler == SIG_IGN ||
-		(handler == SIG_DFL && sig_kernel_ignore(sig));
+	if (handler != SIG_IGN &&
+	    (handler != SIG_DFL || !sig_kernel_ignore(sig)))
+		return 0;
+
+	/* It's ignored; we can short-circuit unless a debugger wants it.  */
+	return !tracehook_consider_ignored_signal(t, sig, handler);
 }
 
 /*
@@ -216,7 +214,8 @@ fastcall void recalc_sigpending_tsk(stru
 	if (t->signal->group_stop_count > 0 ||
 	    (freezing(t)) ||
 	    PENDING(&t->pending, &t->blocked) ||
-	    PENDING(&t->signal->shared_pending, &t->blocked))
+	    PENDING(&t->signal->shared_pending, &t->blocked) ||
+	    tracehook_induce_sigpending(t))
 		set_tsk_thread_flag(t, TIF_SIGPENDING);
 	else
 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
@@ -589,8 +588,6 @@ static int check_kill_permission(int sig
 	return error;
 }
 
-/* forward decl */
-static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
 
 /*
  * Handle magic process-wide effects of stop/continue signals.
@@ -896,7 +893,7 @@ __group_complete_signal(int sig, struct 
 	 */
 	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
 	    !sigismember(&t->real_blocked, sig) &&
-	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
+	    (sig == SIGKILL || !tracehook_consider_fatal_signal(t, sig))) {
 		/*
 		 * This signal will be fatal to the whole group.
 		 */
@@ -1438,8 +1435,7 @@ void do_notify_parent(struct task_struct
  	/* do_notify_parent_cldstop should have been called instead.  */
  	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
 
-	BUG_ON(!tsk->ptrace &&
-	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
+	BUG_ON(tsk->group_leader != tsk || !thread_group_empty(tsk));
 
 	info.si_signo = sig;
 	info.si_errno = 0;
@@ -1464,7 +1460,7 @@ void do_notify_parent(struct task_struct
 
 	psig = tsk->parent->sighand;
 	spin_lock_irqsave(&psig->siglock, flags);
-	if (!tsk->ptrace && sig == SIGCHLD &&
+	if (sig == SIGCHLD &&
 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
 		/*
@@ -1492,20 +1488,13 @@ void do_notify_parent(struct task_struct
 	spin_unlock_irqrestore(&psig->siglock, flags);
 }
 
-static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
+void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 {
 	struct siginfo info;
 	unsigned long flags;
 	struct task_struct *parent;
 	struct sighand_struct *sighand;
 
-	if (tsk->ptrace & PT_PTRACED)
-		parent = tsk->parent;
-	else {
-		tsk = tsk->group_leader;
-		parent = tsk->real_parent;
-	}
-
 	info.si_signo = SIGCHLD;
 	info.si_errno = 0;
 	info.si_pid = tsk->pid;
@@ -1530,6 +1519,15 @@ static void do_notify_parent_cldstop(str
  		BUG();
  	}
 
+	/*
+	 * Tracing can decide that we should not do the normal notification.
+	 */
+	if (tracehook_notify_cldstop(tsk, &info))
+		return;
+
+	tsk = tsk->group_leader;
+	parent = tsk->parent;
+
 	sighand = parent->sighand;
 	spin_lock_irqsave(&sighand->siglock, flags);
 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
@@ -1542,110 +1540,6 @@ static void do_notify_parent_cldstop(str
 	spin_unlock_irqrestore(&sighand->siglock, flags);
 }
 
-static inline int may_ptrace_stop(void)
-{
-	if (!likely(current->ptrace & PT_PTRACED))
-		return 0;
-
-	if (unlikely(current->parent == current->real_parent &&
-		    (current->ptrace & PT_ATTACHED)))
-		return 0;
-
-	if (unlikely(current->signal == current->parent->signal) &&
-	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
-		return 0;
-
-	/*
-	 * Are we in the middle of do_coredump?
-	 * If so and our tracer is also part of the coredump stopping
-	 * is a deadlock situation, and pointless because our tracer
-	 * is dead so don't allow us to stop.
-	 * If SIGKILL was already sent before the caller unlocked
-	 * ->siglock we must see ->core_waiters != 0. Otherwise it
-	 * is safe to enter schedule().
-	 */
-	if (unlikely(current->mm->core_waiters) &&
-	    unlikely(current->mm == current->parent->mm))
-		return 0;
-
-	return 1;
-}
-
-/*
- * This must be called with current->sighand->siglock held.
- *
- * This should be the path for all ptrace stops.
- * We always set current->last_siginfo while stopped here.
- * That makes it a way to test a stopped process for
- * being ptrace-stopped vs being job-control-stopped.
- *
- * If we actually decide not to stop at all because the tracer is gone,
- * we leave nostop_code in current->exit_code.
- */
-static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
-{
-	/*
-	 * If there is a group stop in progress,
-	 * we must participate in the bookkeeping.
-	 */
-	if (current->signal->group_stop_count > 0)
-		--current->signal->group_stop_count;
-
-	current->last_siginfo = info;
-	current->exit_code = exit_code;
-
-	/* Let the debugger run.  */
-	set_current_state(TASK_TRACED);
-	spin_unlock_irq(&current->sighand->siglock);
-	try_to_freeze();
-	read_lock(&tasklist_lock);
-	if (may_ptrace_stop()) {
-		do_notify_parent_cldstop(current, CLD_TRAPPED);
-		read_unlock(&tasklist_lock);
-		schedule();
-	} else {
-		/*
-		 * By the time we got the lock, our tracer went away.
-		 * Don't stop here.
-		 */
-		read_unlock(&tasklist_lock);
-		set_current_state(TASK_RUNNING);
-		current->exit_code = nostop_code;
-	}
-
-	/*
-	 * We are back.  Now reacquire the siglock before touching
-	 * last_siginfo, so that we are sure to have synchronized with
-	 * any signal-sending on another CPU that wants to examine it.
-	 */
-	spin_lock_irq(&current->sighand->siglock);
-	current->last_siginfo = NULL;
-
-	/*
-	 * Queued signals ignored us while we were stopped for tracing.
-	 * So check for any that we should take before resuming user mode.
-	 */
-	recalc_sigpending();
-}
-
-void ptrace_notify(int exit_code)
-{
-	siginfo_t info;
-
-	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
-
-	memset(&info, 0, sizeof info);
-	info.si_signo = SIGTRAP;
-	info.si_code = exit_code;
-	info.si_pid = current->pid;
-	info.si_uid = current->uid;
-
-	/* Let the debugger run.  */
-	spin_lock_irq(&current->sighand->siglock);
-	ptrace_stop(exit_code, 0, &info);
-	spin_unlock_irq(&current->sighand->siglock);
-}
-
 static void
 finish_stop(int stop_count)
 {
@@ -1654,7 +1548,7 @@ finish_stop(int stop_count)
 	 * a group stop in progress and we are the last to stop,
 	 * report to the parent.  When ptraced, every thread reports itself.
 	 */
-	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
+	if (!tracehook_finish_stop(stop_count <= 0) && stop_count <= 0) {
 		read_lock(&tasklist_lock);
 		do_notify_parent_cldstop(current, CLD_STOPPED);
 		read_unlock(&tasklist_lock);
@@ -1779,44 +1673,24 @@ relock:
 		    handle_group_stop())
 			goto relock;
 
-		signr = dequeue_signal(current, mask, info);
-
-		if (!signr)
-			break; /* will return 0 */
-
-		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
-			ptrace_signal_deliver(regs, cookie);
-
-			/* Let the debugger run.  */
-			ptrace_stop(signr, signr, info);
-
-			/* We're back.  Did the debugger cancel the sig?  */
-			signr = current->exit_code;
-			if (signr == 0)
-				continue;
-
-			current->exit_code = 0;
-
-			/* Update the siginfo structure if the signal has
-			   changed.  If the debugger wanted something
-			   specific in the siginfo structure then it should
-			   have updated *info via PTRACE_SETSIGINFO.  */
-			if (signr != info->si_signo) {
-				info->si_signo = signr;
-				info->si_errno = 0;
-				info->si_code = SI_USER;
-				info->si_pid = current->parent->pid;
-				info->si_uid = current->parent->uid;
-			}
-
-			/* If the (new) signal is now blocked, requeue it.  */
-			if (sigismember(&current->blocked, signr)) {
-				specific_send_sig_info(signr, info, current);
-				continue;
-			}
+		/*
+		 * Tracing can induce an artificial signal and choose the
+		 * sigaction.
+		 * The return value in signr determines the default action,
+		 * but info->si_signo is the signal number we will report.
+		 */
+		signr = tracehook_get_signal(current, regs, info, return_ka);
+		if (unlikely(signr < 0))
+			goto relock;
+		if (unlikely(signr != 0))
+			ka = return_ka;
+		else {
+			signr = dequeue_signal(current, mask, info);
+
+			if (!signr)
+				break; /* will return 0 */
+			ka = &current->sighand->action[signr-1];
 		}
 
-		ka = &current->sighand->action[signr-1];
 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
 			continue;
 		if (ka->sa.sa_handler != SIG_DFL) {
@@ -1861,7 +1735,7 @@ relock:
 				spin_lock_irq(&current->sighand->siglock);
 			}
 
-			if (likely(do_signal_stop(signr))) {
+			if (likely(do_signal_stop(info->si_signo))) {
 				/* It released the siglock.  */
 				goto relock;
 			}
@@ -1888,13 +1762,13 @@ relock:
 			 * first and our do_group_exit call below will use
 			 * that value and ignore the one we pass it.
 			 */
-			do_coredump((long)signr, signr, regs);
+			do_coredump(info->si_signo, info->si_signo, regs);
 		}
 
 		/*
 		 * Death signals, no core dump.
 		 */
-		do_group_exit(signr);
+		do_group_exit(info->si_signo);
 		/* NOTREACHED */
 	}
 	spin_unlock_irq(&current->sighand->siglock);
@@ -1907,7 +1781,6 @@ EXPORT_SYMBOL(flush_signals);
 EXPORT_SYMBOL(force_sig);
 EXPORT_SYMBOL(kill_pg);
 EXPORT_SYMBOL(kill_proc);
-EXPORT_SYMBOL(ptrace_notify);
 EXPORT_SYMBOL(send_sig);
 EXPORT_SYMBOL(send_sig_info);
 EXPORT_SYMBOL(sigprocmask);
--- linux-2.6/kernel/utrace.c.utrace-ptrace-compat
+++ linux-2.6/kernel/utrace.c
@@ -0,0 +1,1859 @@
+#include <linux/utrace.h>
+#include <linux/tracehook.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/tracehook.h>
+
+
+static struct kmem_cache *utrace_cachep;
+static struct kmem_cache *utrace_engine_cachep;
+
+static int __init
+utrace_init(void)
+{
+	utrace_cachep =
+		kmem_cache_create("utrace_cache",
+				  sizeof(struct utrace), 0,
+				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+	utrace_engine_cachep =
+		kmem_cache_create("utrace_engine_cache",
+				  sizeof(struct utrace_attached_engine), 0,
+				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+	return 0;
+}
+subsys_initcall(utrace_init);
+
+
+/*
+ * Make sure target->utrace is allocated, and return with it locked on
+ * success.  This function mediates startup races.  The creating parent
+ * task has priority, and other callers will delay here to let its call
+ * succeed and take the new utrace lock first.
+ */
+static struct utrace *
+utrace_first_engine(struct task_struct *target,
+		    struct utrace_attached_engine *engine)
+{
+	struct utrace *utrace, *ret;
+
+	/*
+	 * If this is a newborn thread and we are not the creator,
+	 * we have to wait for it.  The creator gets the first chance
+	 * to attach.  The PF_STARTING flag is cleared after its
+	 * report_clone hook has had a chance to run.
+	 */
+	if ((target->flags & PF_STARTING)
+	    && (current->utrace == NULL
+		|| current->utrace->u.live.cloning != target)) {
+		yield();
+		return (signal_pending(current)
+			? ERR_PTR(-ERESTARTNOINTR) : NULL);
+	}
+
+	utrace = kmem_cache_alloc(utrace_cachep, GFP_KERNEL);
+	if (unlikely(utrace == NULL))
+		return ERR_PTR(-ENOMEM);
+
+	utrace->u.live.cloning = NULL;
+	utrace->u.live.signal = NULL;
+	INIT_LIST_HEAD(&utrace->engines);
+	list_add(&engine->entry, &utrace->engines);
+	spin_lock_init(&utrace->lock);
+
+	ret = utrace;
+	utrace_lock(utrace);
+	task_lock(target);
+	if (likely(target->utrace == NULL)) {
+		rcu_assign_pointer(target->utrace, utrace);
+		/*
+		 * The task_lock protects us against another thread doing
+		 * the same thing.  We might still be racing against
+		 * tracehook_release_task.  It's called with ->exit_state
+		 * set to EXIT_DEAD and then checks ->utrace with an
+		 * smp_mb() in between.  If EXIT_DEAD is set, then
+		 * release_task might have checked ->utrace already and saw
+		 * it NULL; we can't attach.  If we see EXIT_DEAD not yet
+		 * set after our barrier, then we know release_task will
+		 * see our target->utrace pointer.
+		 */
+		smp_mb();
+		if (target->exit_state == EXIT_DEAD) {
+			/*
+			 * The target has already been through release_task.
+			 */
+			target->utrace = NULL;
+			goto cannot_attach;
+		}
+		task_unlock(target);
+	}
+	else {
+		/*
+		 * Another engine attached first, so there is a struct already.
+		 * A null return says to restart looking for the existing one.
+		 */
+	cannot_attach:
+		ret = NULL;
+		task_unlock(target);
+		utrace_unlock(utrace);
+		kmem_cache_free(utrace_cachep, utrace);
+	}
+
+	return ret;
+}
+
+static void
+utrace_free(struct rcu_head *rhead)
+{
+	struct utrace *utrace = container_of(rhead, struct utrace, u.dead);
+	kmem_cache_free(utrace_cachep, utrace);
+}
+
+/*
+ * Called with utrace locked.  Clean it up and free it via RCU.
+ */
+static void
+rcu_utrace_free(struct utrace *utrace)
+{
+	utrace_unlock(utrace);
+	INIT_RCU_HEAD(&utrace->u.dead);
+	call_rcu(&utrace->u.dead, utrace_free);
+}
+
+static void
+utrace_engine_free(struct rcu_head *rhead)
+{
+	struct utrace_attached_engine *engine =
+		container_of(rhead, struct utrace_attached_engine, rhead);
+	kmem_cache_free(utrace_engine_cachep, engine);
+}
+
+/*
+ * Remove the utrace pointer from the task, unless there is a pending
+ * forced signal (or it's quiescent in utrace_get_signal).
+ */
+static inline void
+utrace_clear_tsk(struct task_struct *tsk, struct utrace *utrace)
+{
+	if (utrace->u.live.signal == NULL) {
+		task_lock(tsk);
+		if (likely(tsk->utrace != NULL)) {
+			rcu_assign_pointer(tsk->utrace, NULL);
+			tsk->utrace_flags &= UTRACE_ACTION_NOREAP;
+		}
+		task_unlock(tsk);
+	}
+}
+
+/*
+ * Called with utrace locked and the target quiescent (maybe current).
+ * If this was the last engine and there is no parting forced signal
+ * pending, utrace is left locked and not freed, but is removed from the task.
+ */
+static void
+remove_engine(struct utrace_attached_engine *engine,
+	      struct task_struct *tsk, struct utrace *utrace)
+{
+	list_del_rcu(&engine->entry);
+	if (list_empty(&utrace->engines))
+		utrace_clear_tsk(tsk, utrace);
+	call_rcu(&engine->rhead, utrace_engine_free);
+}
+
+
+/*
+ * Called with utrace locked, after remove_engine may have run.
+ * Passed the flags from all remaining engines, i.e. zero if none
+ * left.  Install the flags in tsk->utrace_flags and return with
+ * utrace unlocked.  If no engines are left and there is no parting
+ * forced signal pending, utrace is freed.
+ */
+static void
+check_dead_utrace(struct task_struct *tsk, struct utrace *utrace,
+		  unsigned long flags)
+{
+	long exit_state = 0;
+
+	if (!tsk->exit_state && utrace->u.live.signal != NULL)
+		/*
+		 * There is a pending forced signal.  It may have been
+		 * left by an engine now detached.  The empty utrace
+		 * remains attached until it can be processed.
+		 */
+		flags |= UTRACE_ACTION_QUIESCE;
+
+	/*
+	 * If tracing was preventing a SIGCHLD or self-reaping
+	 * and is no longer, we'll do that report or reaping now.
+	 */
+	if (((tsk->utrace_flags &~ flags) & UTRACE_ACTION_NOREAP)
+	    && tsk->exit_state) {
+		BUG_ON(tsk->exit_state != EXIT_ZOMBIE);
+		/*
+		 * While holding the utrace lock, mark that it's been done.
+		 * For self-reaping, we need to change tsk->exit_state
+		 * before clearing tsk->utrace_flags, so that the real
+		 * parent can't see it in EXIT_ZOMBIE momentarily and reap it.
+		 */
+		if (tsk->exit_signal == -1) {
+			exit_state = xchg(&tsk->exit_state, EXIT_DEAD);
+			BUG_ON(exit_state != EXIT_ZOMBIE);
+			exit_state = EXIT_DEAD;
+
+			/*
+			 * Now that we've changed its state to DEAD,
+			 * it's safe to install the new tsk->utrace_flags
+			 * value without the UTRACE_ACTION_NOREAP bit set.
+			 */
+		}
+		else if (thread_group_empty(tsk)) {
+			/*
+			 * We need to prevent the real parent from reaping
+			 * until after we've called do_notify_parent, below.
+			 * It can get into wait_task_zombie any time after
+			 * the UTRACE_ACTION_NOREAP bit is cleared.  It's
+			 * safe for that to do everything it does until its
+			 * release_task call starts tearing things down.
+			 * Holding tasklist_lock for reading prevents
+			 * release_task from proceeding until we've done
+			 * everything we need to do.
+			 */
+			exit_state = EXIT_ZOMBIE;
+			read_lock(&tasklist_lock);
+		}
+	}
+
+	tsk->utrace_flags = flags;
+	if (flags)
+		utrace_unlock(utrace);
+	else {
+		rcu_utrace_free(utrace);
+		utrace = NULL;
+	}
+
+	/*
+	 * Now we're finished updating the utrace state.
+	 * Do a pending self-reaping or parent notification.
+	 */
+	if (exit_state == EXIT_DEAD)
+		/*
+		 * Note this can wind up in utrace_reap and do more callbacks.
+		 * Our callers must be in places where that is OK.
+		 */
+		release_task(tsk);
+	else if (exit_state == EXIT_ZOMBIE) {
+		do_notify_parent(tsk, tsk->exit_signal);
+		read_unlock(&tasklist_lock); /* See comment above.  */
+	}
+}
+
+
+
+/*
+ * Get the target thread to quiesce.  Return nonzero if it's already quiescent.
+ * Return zero if it will report a QUIESCE event soon.
+ * If interrupt is nonzero, wake it like a signal would so it quiesces ASAP.
+ * If interrupt is zero, just make sure it quiesces before going to user mode.
+ */
+static int
+quiesce(struct task_struct *target, int interrupt)
+{
+	int quiescent;
+
+	target->utrace_flags |= UTRACE_ACTION_QUIESCE;
+	read_barrier_depends();
+
+	quiescent = (target->exit_state
+		     || target->state & (TASK_TRACED | TASK_STOPPED));
+
+	if (!quiescent) {
+		spin_lock_irq(&target->sighand->siglock);
+		quiescent = (unlikely(target->exit_state)
+			     || unlikely(target->state
+					 & (TASK_TRACED | TASK_STOPPED)));
+		if (!quiescent) {
+			if (interrupt)
+				signal_wake_up(target, 0);
+			else {
+				set_tsk_thread_flag(target, TIF_SIGPENDING);
+				kick_process(target);
+			}
+		}
+		spin_unlock_irq(&target->sighand->siglock);
+	}
+
+	return quiescent;
+}
+
+
+static struct utrace_attached_engine *
+matching_engine(struct utrace *utrace, int flags,
+		const struct utrace_engine_ops *ops, unsigned long data)
+{
+	struct utrace_attached_engine *engine;
+	list_for_each_entry_rcu(engine, &utrace->engines, entry) {
+		if ((flags & UTRACE_ATTACH_MATCH_OPS)
+		    && engine->ops != ops)
+			continue;
+		if ((flags & UTRACE_ATTACH_MATCH_DATA)
+		    && engine->data != data)
+			continue;
+		return engine;
+	}
+	return ERR_PTR(-ENOENT);
+}
+
+/*
+ * XXX: consider adding an option to stop the target on attach.  Matching
+ * an existing engine on ops and/or data is selected by the
+ * UTRACE_ATTACH_MATCH_* flags; without UTRACE_ATTACH_CREATE the call is
+ * a lookup only.
+ */
+struct utrace_attached_engine *
+utrace_attach(struct task_struct *target, int flags,
+	     const struct utrace_engine_ops *ops, unsigned long data)
+{
+	struct utrace *utrace;
+	struct utrace_attached_engine *engine;
+
+restart:
+	rcu_read_lock();
+	utrace = rcu_dereference(target->utrace);
+	smp_rmb();
+	if (utrace == NULL) {
+		rcu_read_unlock();
+
+		if (!(flags & UTRACE_ATTACH_CREATE)) {
+			return ERR_PTR(-ENOENT);
+		}
+
+		engine = kmem_cache_alloc(utrace_engine_cachep, GFP_KERNEL);
+		if (unlikely(engine == NULL))
+			return ERR_PTR(-ENOMEM);
+		engine->flags = 0;
+
+	first:
+		utrace = utrace_first_engine(target, engine);
+		if (IS_ERR(utrace)) {
+			kmem_cache_free(utrace_engine_cachep, engine);
+			return ERR_PTR(PTR_ERR(utrace));
+		}
+		if (unlikely(utrace == NULL)) /* Race condition.  */
+			goto restart;
+	}
+	else if (unlikely(target->exit_state == EXIT_DEAD)) {
+		/*
+		 * The target has already been reaped.
+		 */
+		rcu_read_unlock();
+		return ERR_PTR(-ESRCH);
+	}
+	else {
+		if (!(flags & UTRACE_ATTACH_CREATE)) {
+			engine = matching_engine(utrace, flags, ops, data);
+			rcu_read_unlock();
+			return engine;
+		}
+		rcu_read_unlock();
+
+		engine = kmem_cache_alloc(utrace_engine_cachep, GFP_KERNEL);
+		if (unlikely(engine == NULL))
+			return ERR_PTR(-ENOMEM);
+		engine->flags = 0;
+
+		rcu_read_lock();
+		utrace = rcu_dereference(target->utrace);
+		if (unlikely(utrace == NULL)) { /* Race with detach.  */
+			rcu_read_unlock();
+			goto first;
+		}
+		utrace_lock(utrace);
+
+		if (flags & UTRACE_ATTACH_EXCLUSIVE) {
+			struct utrace_attached_engine *old;
+			old = matching_engine(utrace, flags, ops, data);
+			if (!IS_ERR(old)) {
+				utrace_unlock(utrace);
+				rcu_read_unlock();
+				kmem_cache_free(utrace_engine_cachep, engine);
+				return ERR_PTR(-EEXIST);
+			}
+		}
+
+		if (unlikely(rcu_dereference(target->utrace) != utrace)) {
+			/*
+			 * We lost a race with other CPUs doing a sequence
+			 * of detach and attach before we got in.
+			 */
+			utrace_unlock(utrace);
+			rcu_read_unlock();
+			kmem_cache_free(utrace_engine_cachep, engine);
+			goto restart;
+		}
+		rcu_read_unlock();
+
+		list_add_tail_rcu(&engine->entry, &utrace->engines);
+	}
+
+	engine->ops = ops;
+	engine->data = data;
+
+	utrace_unlock(utrace);
+
+	return engine;
+}
+EXPORT_SYMBOL_GPL(utrace_attach);
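+
+/*
+ * Illustrative usage sketch (editorial example, not part of this patch):
+ * a tracing engine attaches to a task and asks for exit reports.  The
+ * function and callback names here are hypothetical; the calls are the
+ * utrace_attach/utrace_set_flags API defined in this file.
+ */
+#if 0
+static u32
+example_report_exit(struct utrace_attached_engine *engine,
+		    struct task_struct *tsk, long orig_code, long *code)
+{
+	printk(KERN_INFO "pid %d exiting with code %ld\n", tsk->pid, *code);
+	return UTRACE_ACTION_RESUME;
+}
+
+static const struct utrace_engine_ops example_ops = {
+	.report_exit = example_report_exit,
+};
+
+static int
+example_attach(struct task_struct *target)
+{
+	struct utrace_attached_engine *engine;
+
+	engine = utrace_attach(target, UTRACE_ATTACH_CREATE,
+			       &example_ops, 0);
+	if (IS_ERR(engine))
+		return PTR_ERR(engine);
+	/* Request report_exit callbacks from now on. */
+	return utrace_set_flags(target, engine, UTRACE_EVENT(EXIT));
+}
+#endif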
+
+/*
+ * When an engine is detached, the target thread may still see it and make
+ * callbacks until it quiesces.  We reset its event flags to just QUIESCE
+ * and install a special ops vector whose callback is dead_engine_delete.
+ * When the target thread quiesces, it can safely free the engine itself.
+ */
+static u32
+dead_engine_delete(struct utrace_attached_engine *engine,
+		   struct task_struct *tsk)
+{
+	return UTRACE_ACTION_DETACH;
+}
+
+static const struct utrace_engine_ops dead_engine_ops =
+{
+	.report_quiesce = &dead_engine_delete
+};
+
+
+/*
+ * Called with utrace locked.  Recompute the union of engines' flags.
+ */
+static inline unsigned long
+rescan_flags(struct utrace *utrace)
+{
+	struct utrace_attached_engine *engine;
+	unsigned long flags = 0;
+	list_for_each_entry(engine, &utrace->engines, entry)
+		flags |= engine->flags | UTRACE_EVENT(REAP);
+	return flags;
+}
+
+/*
+ * Only these flags matter any more for a dead task (exit_state set).
+ * We use this mask on flags installed in ->utrace_flags after
+ * exit_notify (and possibly utrace_report_death) has run.
+ * This ensures that utrace_release_task knows positively that
+ * utrace_report_death will not run later.
+ */
+#define DEAD_FLAGS_MASK	(UTRACE_EVENT(REAP) | UTRACE_ACTION_NOREAP)
+
+/*
+ * We may have been the one keeping the target thread quiescent.
+ * Check if it should wake up now.
+ * Called with utrace locked, and unlocks it on return.
+ * If we were keeping it stopped, resume it.
+ * If we were keeping its zombie from reporting/self-reap, do it now.
+ */
+static void
+wake_quiescent(unsigned long old_flags,
+	       struct utrace *utrace, struct task_struct *target)
+{
+	unsigned long flags;
+
+	/*
+	 * Update the set of events of interest from the union
+	 * of the interests of the remaining tracing engines.
+	 */
+	flags = rescan_flags(utrace);
+	if (target->exit_state) {
+		BUG_ON(utrace->u.exit.report_death);
+		flags &= DEAD_FLAGS_MASK;
+	}
+	check_dead_utrace(target, utrace, flags);
+
+	if (target->exit_state || (flags & UTRACE_ACTION_QUIESCE))
+		return;
+
+	read_lock(&tasklist_lock);
+	if (!unlikely(target->exit_state)) {
+		/*
+		 * The target is not dead and should not be in tracing stop
+		 * any more.  Wake it unless it's in job control stop.
+		 */
+		spin_lock_irq(&target->sighand->siglock);
+		if (target->signal->flags & SIGNAL_STOP_STOPPED) {
+			int stop_count = target->signal->group_stop_count;
+			target->state = TASK_STOPPED;
+			spin_unlock_irq(&target->sighand->siglock);
+
+			/*
+			 * If tracing was preventing a CLD_STOPPED report
+			 * and is no longer, do that report right now.
+			 */
+			if (stop_count == 0
+			    && ((old_flags &~ flags) & UTRACE_ACTION_NOREAP))
+				do_notify_parent_cldstop(target, CLD_STOPPED);
+		}
+		else {
+			/*
+			 * Wake the task up.
+			 */
+			recalc_sigpending_tsk(target);
+			wake_up_state(target, TASK_STOPPED | TASK_TRACED);
+			spin_unlock_irq(&target->sighand->siglock);
+		}
+	}
+	read_unlock(&tasklist_lock);
+}
+
+/*
+ * The engine is supposed to be attached.  The caller really needs
+ * rcu_read_lock if it wants to look at the engine struct
+ * (e.g. engine->data), to be sure it hasn't been freed by utrace_reap
+ * asynchronously--unless it has synchronized with its report_reap
+ * callback, which would have happened before then.  A simultaneous
+ * utrace_detach call or UTRACE_ACTION_DETACH return from a callback can
+ * also free the engine if rcu_read_lock is not held, but that is in the
+ * tracing engine's power to avoid.
+ *
+ * Get the utrace lock for the target task.
+ * Returns the struct if locked, or ERR_PTR(-errno).
+ *
+ * This has to be robust against races with:
+ *	utrace_detach calls
+ *	UTRACE_ACTION_DETACH after reports
+ *	utrace_report_death
+ *	utrace_release_task
+ */
+static struct utrace *
+get_utrace_lock_attached(struct task_struct *target,
+			 struct utrace_attached_engine *engine)
+{
+	struct utrace *utrace;
+
+	rcu_read_lock();
+	utrace = rcu_dereference(target->utrace);
+	smp_rmb();
+	if (unlikely(target->exit_state == EXIT_DEAD)) {
+		/*
+		 * Called after utrace_release_task might have started.
+		 * A call to this engine's report_reap callback might
+		 * already be in progress or engine might even have been
+		 * freed already.
+		 */
+		utrace = ERR_PTR(-ESRCH);
+	}
+	else {
+		utrace_lock(utrace);
+		if (unlikely(rcu_dereference(target->utrace) != utrace)
+		    || unlikely(rcu_dereference(engine->ops)
+				== &dead_engine_ops)) {
+			/*
+			 * By the time we got the utrace lock,
+			 * it had been reaped or detached already.
+			 */
+			utrace_unlock(utrace);
+			utrace = ERR_PTR(-ESRCH);
+		}
+	}
+	rcu_read_unlock();
+
+	return utrace;
+}
+
+int
+utrace_detach(struct task_struct *target,
+	      struct utrace_attached_engine *engine)
+{
+	struct utrace *utrace;
+	unsigned long flags;
+
+	utrace = get_utrace_lock_attached(target, engine);
+	if (unlikely(IS_ERR(utrace)))
+		return PTR_ERR(utrace);
+
+	if (target->exit_state
+	    && unlikely(utrace->u.exit.reap || utrace->u.exit.report_death)) {
+		/*
+		 * We have already started the death report, or
+		 * even entered release_task.  We can't prevent
+		 * the report_death and report_reap callbacks,
+		 * so tell the caller they will happen.
+		 */
+		int ret = utrace->u.exit.reap ? -ESRCH : -EALREADY;
+		utrace_unlock(utrace);
+		return ret;
+	}
+
+	flags = engine->flags;
+	engine->flags = UTRACE_EVENT(QUIESCE) | UTRACE_ACTION_QUIESCE;
+	rcu_assign_pointer(engine->ops, &dead_engine_ops);
+
+	if (quiesce(target, 1)) {
+		remove_engine(engine, target, utrace);
+		wake_quiescent(flags, utrace, target);
+	}
+	else
+		utrace_unlock(utrace);
+
+
+}
+EXPORT_SYMBOL_GPL(utrace_detach);
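+
+/*
+ * Illustrative sketch (editorial example, not part of this patch): a
+ * detach can race with death reporting, so a caller should handle the
+ * error returns described above.  The function name is hypothetical.
+ */
+#if 0
+static void
+example_detach(struct task_struct *target,
+	       struct utrace_attached_engine *engine)
+{
+	switch (utrace_detach(target, engine)) {
+	case 0:
+		/* Detached; the engine is freed via RCU once the
+		   target quiesces.  Don't touch it again. */
+		break;
+	case -EALREADY:
+		/* report_death (and maybe report_reap) will still run. */
+		break;
+	case -ESRCH:
+		/* Already reaped; report_reap has already started. */
+		break;
+	}
+}
+#endif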
+
+
+/*
+ * Called with utrace->lock held.
+ * Notify and clean up all engines, then free utrace.
+ */
+static void
+utrace_reap(struct task_struct *target, struct utrace *utrace)
+{
+	struct utrace_attached_engine *engine, *next;
+	const struct utrace_engine_ops *ops;
+
+restart:
+	list_for_each_entry_safe(engine, next, &utrace->engines, entry) {
+		list_del_rcu(&engine->entry);
+
+		/*
+		 * Now nothing else refers to this engine.
+		 */
+		if (engine->flags & UTRACE_EVENT(REAP)) {
+			ops = rcu_dereference(engine->ops);
+			if (ops != &dead_engine_ops) {
+				utrace_unlock(utrace);
+				(*ops->report_reap)(engine, target);
+				call_rcu(&engine->rhead, utrace_engine_free);
+				utrace_lock(utrace);
+				goto restart;
+			}
+		}
+		call_rcu(&engine->rhead, utrace_engine_free);
+	}
+
+	rcu_utrace_free(utrace);
+}
+
+/*
+ * Called by release_task.  After this, target->utrace must be cleared.
+ */
+void
+utrace_release_task(struct task_struct *target)
+{
+	struct utrace *utrace;
+
+	task_lock(target);
+	utrace = target->utrace;
+	rcu_assign_pointer(target->utrace, NULL);
+	task_unlock(target);
+
+	if (unlikely(utrace == NULL))
+		return;
+
+	utrace_lock(utrace);
+	utrace->u.exit.reap = 1;
+
+	if (target->utrace_flags & (UTRACE_EVENT(DEATH)
+				    | UTRACE_EVENT(QUIESCE)))
+		/*
+		 * The target will do some final callbacks but hasn't
+		 * finished them yet.  We know because it clears these
+		 * event bits after it's done.  Instead of cleaning up here
+		 * and requiring utrace_report_death to cope with it, we
+		 * delay the REAP report and the teardown until after the
+		 * target finishes its death reports.
+		 */
+		utrace_unlock(utrace);
+	else
+		utrace_reap(target, utrace); /* Unlocks and frees.  */
+}
+
+
+int
+utrace_set_flags(struct task_struct *target,
+		 struct utrace_attached_engine *engine,
+		 unsigned long flags)
+{
+	struct utrace *utrace;
+	int report;
+	unsigned long old_flags, old_utrace_flags;
+	int ret = -EALREADY;
+
+#ifdef ARCH_HAS_SINGLE_STEP
+	if (! ARCH_HAS_SINGLE_STEP)
+#endif
+		WARN_ON(flags & UTRACE_ACTION_SINGLESTEP);
+#ifdef ARCH_HAS_BLOCK_STEP
+	if (! ARCH_HAS_BLOCK_STEP)
+#endif
+		WARN_ON(flags & UTRACE_ACTION_BLOCKSTEP);
+
+	utrace = get_utrace_lock_attached(target, engine);
+	if (unlikely(IS_ERR(utrace)))
+		return PTR_ERR(utrace);
+
+restart:			/* See below. */
+
+	old_utrace_flags = target->utrace_flags;
+	old_flags = engine->flags;
+
+	if (target->exit_state
+	    && (((flags &~ old_flags) & (UTRACE_ACTION_QUIESCE
+					 | UTRACE_ACTION_NOREAP
+					 | UTRACE_EVENT(DEATH)
+					 | UTRACE_EVENT(QUIESCE)))
+		|| (utrace->u.exit.report_death
+		    && ((old_flags &~ flags) & (UTRACE_EVENT(DEATH) |
+						UTRACE_EVENT(QUIESCE))))
+		|| (utrace->u.exit.reap
+		    && ((old_flags &~ flags) & UTRACE_EVENT(REAP))))) {
+		utrace_unlock(utrace);
+		return ret;
+	}
+
+	/*
+	 * When setting these flags, it's essential that we really
+	 * synchronize with exit_notify.  They cannot be set after
+	 * exit_notify takes the tasklist_lock.  By holding the read
+	 * lock here while setting the flags, we ensure that the calls
+	 * to tracehook_notify_death and tracehook_report_death will
+	 * see the new flags.  This ensures that utrace_release_task
+	 * knows positively that utrace_report_death will be called or
+	 * that it won't.
+	 */
+	if ((flags &~ old_utrace_flags) & (UTRACE_ACTION_NOREAP
+					   | UTRACE_EVENT(DEATH)
+					   | UTRACE_EVENT(QUIESCE))) {
+		read_lock(&tasklist_lock);
+		if (unlikely(target->exit_state)) {
+			read_unlock(&tasklist_lock);
+			utrace_unlock(utrace);
+			return ret;
+		}
+		target->utrace_flags |= flags;
+		read_unlock(&tasklist_lock);
+	}
+
+	engine->flags = flags;
+	target->utrace_flags |= flags;
+	ret = 0;
+
+	report = 0;
+	if ((old_flags ^ flags) & UTRACE_ACTION_QUIESCE) {
+		if (flags & UTRACE_ACTION_QUIESCE) {
+			report = (quiesce(target, 1)
+				  && (flags & UTRACE_EVENT(QUIESCE)));
+			utrace_unlock(utrace);
+		}
+		else
+			wake_quiescent(old_flags, utrace, target);
+	}
+	else if (((old_flags &~ flags) & UTRACE_ACTION_NOREAP)
+		 && target->exit_state)
+			wake_quiescent(old_flags, utrace, target);
+	else {
+		/*
+		 * If we're asking for single-stepping or syscall tracing,
+		 * we need to pass through utrace_quiescent before resuming
+		 * in user mode to get those effects, even if the target is
+		 * not going to be quiescent right now.
+		 */
+		if (!(target->utrace_flags & UTRACE_ACTION_QUIESCE)
+		    && !target->exit_state
+		    && ((flags &~ old_utrace_flags)
+			& (UTRACE_ACTION_SINGLESTEP | UTRACE_ACTION_BLOCKSTEP
+			   | UTRACE_EVENT_SYSCALL)))
+			quiesce(target, 0);
+		utrace_unlock(utrace);
+	}
+
+	if (report) {	/* Already quiescent, won't report itself.  */
+		u32 action = (*engine->ops->report_quiesce)(engine, target);
+		if (action & UTRACE_ACTION_DETACH)
+			utrace_detach(target, engine);
+		else if (action & UTRACE_ACTION_NEWSTATE) {
+			/*
+			 * The callback has us changing the flags yet
+			 * again.  Since we released the lock, they
+			 * could have changed asynchronously just now.
+			 * We must refetch the current flags to change
+			 * the UTRACE_ACTION_STATE_MASK bits.  If the
+			 * target thread started dying, then there is
+			 * nothing we can do--but that failure is due
+			 * to the report_quiesce callback after the
+			 * original utrace_set_flags has already
+			 * succeeded, so we don't want to return
+			 * failure here (hence leave ret = 0).
+			 */
+			utrace = get_utrace_lock_attached(target, engine);
+			if (!unlikely(IS_ERR(utrace))) {
+				flags = action & UTRACE_ACTION_STATE_MASK;
+				flags |= (engine->flags
+					  &~ UTRACE_ACTION_STATE_MASK);
+				goto restart;
+			}
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(utrace_set_flags);
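+
+/*
+ * Illustrative sketch (editorial example, not part of this patch): using
+ * utrace_set_flags to request quiescence and later resume the target.
+ * Clearing UTRACE_ACTION_QUIESCE goes through wake_quiescent above, so
+ * the target resumes only if no other engine still requests quiescence.
+ * Function names are hypothetical.
+ */
+#if 0
+static int
+example_stop(struct task_struct *target,
+	     struct utrace_attached_engine *engine)
+{
+	/* Keep the engine's event bits, add the quiesce request. */
+	return utrace_set_flags(target, engine,
+				engine->flags | UTRACE_ACTION_QUIESCE);
+}
+
+static int
+example_resume(struct task_struct *target,
+	       struct utrace_attached_engine *engine)
+{
+	return utrace_set_flags(target, engine,
+				engine->flags & ~UTRACE_ACTION_QUIESCE);
+}
+#endif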
+
+/*
+ * While running an engine callback, no locks are held.
+ * If a callback updates its engine's action state, then
+ * we need to take the utrace lock to install the flags update.
+ */
+static inline u32
+update_action(struct task_struct *tsk, struct utrace *utrace,
+	      struct utrace_attached_engine *engine,
+	      u32 ret)
+{
+	if (ret & UTRACE_ACTION_DETACH)
+		rcu_assign_pointer(engine->ops, &dead_engine_ops);
+	else if ((ret & UTRACE_ACTION_NEWSTATE)
+		 && ((ret ^ engine->flags) & UTRACE_ACTION_STATE_MASK)) {
+#ifdef ARCH_HAS_SINGLE_STEP
+		if (! ARCH_HAS_SINGLE_STEP)
+#endif
+			WARN_ON(ret & UTRACE_ACTION_SINGLESTEP);
+#ifdef ARCH_HAS_BLOCK_STEP
+		if (! ARCH_HAS_BLOCK_STEP)
+#endif
+			WARN_ON(ret & UTRACE_ACTION_BLOCKSTEP);
+		utrace_lock(utrace);
+		/*
+		 * If we're changing something other than just QUIESCE,
+		 * make sure we pass through utrace_quiescent before
+		 * resuming even if we aren't going to stay quiescent.
+		 * That's where we get the correct union of all engines'
+		 * flags after they've finished changing, and apply changes.
+		 */
+		if (((ret ^ engine->flags) & (UTRACE_ACTION_STATE_MASK
+					      & ~UTRACE_ACTION_QUIESCE)))
+			tsk->utrace_flags |= UTRACE_ACTION_QUIESCE;
+		engine->flags &= ~UTRACE_ACTION_STATE_MASK;
+		engine->flags |= ret & UTRACE_ACTION_STATE_MASK;
+		tsk->utrace_flags |= engine->flags;
+		utrace_unlock(utrace);
+	}
+	else
+		ret |= engine->flags & UTRACE_ACTION_STATE_MASK;
+	return ret;
+}
+
+#define REPORT(callback, ...) do { \
+	u32 ret = (*rcu_dereference(engine->ops)->callback) \
+		(engine, tsk, ##__VA_ARGS__); \
+	action = update_action(tsk, utrace, engine, ret); \
+	} while (0)
+
+
+/*
+ * Called with utrace->lock held, returns with it released.
+ */
+static u32
+remove_detached(struct task_struct *tsk, struct utrace *utrace,
+		u32 action, unsigned long mask)
+{
+	struct utrace_attached_engine *engine, *next;
+	unsigned long flags = 0;
+
+	list_for_each_entry_safe(engine, next, &utrace->engines, entry) {
+		if (engine->ops == &dead_engine_ops)
+			remove_engine(engine, tsk, utrace);
+		else
+			flags |= engine->flags | UTRACE_EVENT(REAP);
+	}
+	check_dead_utrace(tsk, utrace, flags & mask);
+
+	flags &= UTRACE_ACTION_STATE_MASK;
+	return flags | (action & UTRACE_ACTION_OP_MASK);
+}
+
+/*
+ * Called after an event report loop.  Remove any engines marked for detach.
+ */
+static inline u32
+check_detach(struct task_struct *tsk, u32 action)
+{
+	if (action & UTRACE_ACTION_DETACH) {
+		/*
+		 * This must be current to be sure it's not possibly
+		 * getting into utrace_report_death.
+		 */
+		BUG_ON(tsk != current);
+		utrace_lock(tsk->utrace);
+		action = remove_detached(tsk, tsk->utrace, action, ~0UL);
+	}
+	return action;
+}
+
+static inline int
+check_quiescent(struct task_struct *tsk, u32 action)
+{
+	if (action & UTRACE_ACTION_STATE_MASK)
+		return utrace_quiescent(tsk, NULL);
+	return 0;
+}
+
+/*
+ * Called iff UTRACE_EVENT(CLONE) flag is set.
+ * This notification call blocks the wake_up_new_task call on the child.
+ * So we must not quiesce here.  tracehook_report_clone_complete will do
+ * a quiescence check momentarily.
+ */
+void
+utrace_report_clone(unsigned long clone_flags, struct task_struct *child)
+{
+	struct task_struct *tsk = current;
+	struct utrace *utrace = tsk->utrace;
+	struct list_head *pos, *next;
+	struct utrace_attached_engine *engine;
+	unsigned long action;
+
+	utrace->u.live.cloning = child;
+
+	/* XXX must change for sharing */
+	action = UTRACE_ACTION_RESUME;
+	list_for_each_safe_rcu(pos, next, &utrace->engines) {
+		engine = list_entry(pos, struct utrace_attached_engine, entry);
+		if (engine->flags & UTRACE_EVENT(CLONE))
+			REPORT(report_clone, clone_flags, child);
+		if (action & UTRACE_ACTION_HIDE)
+			break;
+	}
+
+	utrace->u.live.cloning = NULL;
+
+	check_detach(tsk, action);
+}
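+
+/*
+ * Illustrative sketch (editorial example, not part of this patch): a
+ * report_clone callback that follows the child.  Attaching here is safe
+ * because the parent's report_clone hook runs before the child's
+ * PF_STARTING flag is cleared, so utrace_first_engine gives this cloning
+ * parent first claim on the child.  The function name is hypothetical.
+ */
+#if 0
+static u32
+example_report_clone(struct utrace_attached_engine *engine,
+		     struct task_struct *parent,
+		     unsigned long clone_flags, struct task_struct *child)
+{
+	struct utrace_attached_engine *child_engine;
+
+	child_engine = utrace_attach(child, UTRACE_ATTACH_CREATE,
+				     engine->ops, engine->data);
+	if (!IS_ERR(child_engine))
+		utrace_set_flags(child, child_engine, engine->flags);
+	return UTRACE_ACTION_RESUME;
+}
+#endif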
+
+static unsigned long
+report_quiescent(struct task_struct *tsk, struct utrace *utrace, u32 action)
+{
+	struct list_head *pos, *next;
+	struct utrace_attached_engine *engine;
+
+	list_for_each_safe_rcu(pos, next, &utrace->engines) {
+		engine = list_entry(pos, struct utrace_attached_engine, entry);
+		if (engine->flags & UTRACE_EVENT(QUIESCE))
+			REPORT(report_quiesce);
+		action |= engine->flags & UTRACE_ACTION_STATE_MASK;
+	}
+
+	return check_detach(tsk, action);
+}
+
+/*
+ * Called iff UTRACE_EVENT(JCTL) flag is set.
+ */
+int
+utrace_report_jctl(int what)
+{
+	struct task_struct *tsk = current;
+	struct utrace *utrace = tsk->utrace;
+	struct list_head *pos, *next;
+	struct utrace_attached_engine *engine;
+	unsigned long action;
+
+	/* XXX must change for sharing */
+	action = UTRACE_ACTION_RESUME;
+	list_for_each_safe_rcu(pos, next, &utrace->engines) {
+		engine = list_entry(pos, struct utrace_attached_engine, entry);
+		if (engine->flags & UTRACE_EVENT(JCTL))
+			REPORT(report_jctl, what);
+		if (action & UTRACE_ACTION_HIDE)
+			break;
+	}
+
+	/*
+	 * We are becoming quiescent, so report it now.
+	 * We don't block in utrace_quiescent because we are stopping anyway.
+	 * We know that upon resuming we'll go through tracehook_induce_signal,
+	 * which will keep us quiescent or set us up to resume with tracing.
+	 */
+	action = report_quiescent(tsk, utrace, action);
+
+	if (what == CLD_STOPPED && tsk->state != TASK_STOPPED) {
+		/*
+		 * The event report hooks could have blocked, though
+		 * it should only have been briefly.  Make sure we're in
+		 * TASK_STOPPED state again to block properly, unless
+		 * we've just come back out of job control stop.
+		 */
+		spin_lock_irq(&tsk->sighand->siglock);
+		if (tsk->signal->flags & SIGNAL_STOP_STOPPED)
+			set_current_state(TASK_STOPPED);
+		spin_unlock_irq(&tsk->sighand->siglock);
+	}
+
+	return action & UTRACE_JCTL_NOSIGCHLD;
+}
+
+
+/*
+ * Return nonzero if there is a SIGKILL that should be waking us up.
+ * Called with the siglock held.
+ */
+static inline int
+sigkill_pending(struct task_struct *tsk)
+{
+	return ((sigismember(&tsk->pending.signal, SIGKILL)
+		 || sigismember(&tsk->signal->shared_pending.signal, SIGKILL))
+		&& !unlikely(sigismember(&tsk->blocked, SIGKILL)));
+}
+
+/*
+ * Called if UTRACE_EVENT(QUIESCE) or UTRACE_ACTION_QUIESCE flag is set.
+ * Also called after other event reports.
+ * It is a good time to block.
+ * Returns nonzero if we woke up prematurely due to SIGKILL.
+ *
+ * The signal pointer is nonzero when called from utrace_get_signal,
+ * where a pending forced signal can be processed right away.  Otherwise,
+ * we keep UTRACE_ACTION_QUIESCE set after resuming so that utrace_get_signal
+ * will be entered before user mode.
+ */
+int
+utrace_quiescent(struct task_struct *tsk, struct utrace_signal *signal)
+{
+	struct utrace *utrace = tsk->utrace;
+	unsigned long action;
+
+restart:
+	/* XXX must change for sharing */
+
+	action = report_quiescent(tsk, utrace, UTRACE_ACTION_RESUME);
+
+	/*
+	 * If some engines want us quiescent, we block here.
+	 */
+	if (action & UTRACE_ACTION_QUIESCE) {
+		int killed;
+
+		if (signal != NULL) {
+			BUG_ON(utrace->u.live.signal != NULL);
+			utrace->u.live.signal = signal;
+		}
+
+		spin_lock_irq(&tsk->sighand->siglock);
+		/*
+		 * If wake_quiescent is trying to wake us up now, it will
+		 * have cleared the QUIESCE flag before trying to take the
+		 * siglock.  Now we have the siglock, so either it has
+		 * already cleared the flag, or it will wake us up after we
+		 * release the siglock it's waiting for.
+		 * Never stop when there is a SIGKILL bringing us down.
+		 */
+		killed = sigkill_pending(tsk);
+		if (!killed && (tsk->utrace_flags & UTRACE_ACTION_QUIESCE)) {
+			set_current_state(TASK_TRACED);
+			/*
+			 * If there is a group stop in progress,
+			 * we must participate in the bookkeeping.
+			 */
+			if (tsk->signal->group_stop_count > 0)
+				--tsk->signal->group_stop_count;
+			spin_unlock_irq(&tsk->sighand->siglock);
+			schedule();
+		}
+		else
+			spin_unlock_irq(&tsk->sighand->siglock);
+
+		if (signal != NULL) {
+			/*
+			 * We know the struct stays in place when its
+			 * u.live.signal is set, see check_dead_utrace.
+			 * This makes it safe to clear its pointer here.
+			 */
+			BUG_ON(tsk->utrace != utrace);
+			BUG_ON(utrace->u.live.signal != signal);
+			utrace->u.live.signal = NULL;
+		}
+
+		if (killed)	/* Game over, man!  */
+			return 1;
+
+		/*
+		 * We've woken up.  One engine could be waking us up while
+		 * another has asked us to quiesce.  So check afresh.  We
+		 * could have been detached while quiescent.  Now we are no
+		 * longer quiescent, so we don't need to do any RCU locking.
+		 * But we do need to check our utrace pointer anew.
+		 */
+		utrace = tsk->utrace;
+		if (tsk->utrace_flags
+		    & (UTRACE_EVENT(QUIESCE) | UTRACE_ACTION_STATE_MASK))
+			goto restart;
+	}
+	else if (tsk->utrace_flags & UTRACE_ACTION_QUIESCE) {
+		/*
+		 * Our flags are out of date.
+		 * Update the set of events of interest from the union
+		 * of the interests of the remaining tracing engines.
+		 * This may notice that there are no engines left
+		 * and clean up the struct utrace.  It's left in place
+		 * and the QUIESCE flag set as long as utrace_get_signal
+		 * still needs to process a pending forced signal.
+		 */
+		unsigned long flags;
+		utrace = rcu_dereference(tsk->utrace);
+		utrace_lock(utrace);
+		flags = rescan_flags(utrace);
+		if (flags == 0)
+			utrace_clear_tsk(tsk, utrace);
+		check_dead_utrace(tsk, utrace, flags);
+	}
+
+	/*
+	 * We're resuming.  Update the machine layer tracing state and then go.
+	 */
+#ifdef ARCH_HAS_SINGLE_STEP
+	if (action & UTRACE_ACTION_SINGLESTEP)
+		tracehook_enable_single_step(tsk);
+	else
+		tracehook_disable_single_step(tsk);
+#endif
+#ifdef ARCH_HAS_BLOCK_STEP
+	if ((action & (UTRACE_ACTION_BLOCKSTEP|UTRACE_ACTION_SINGLESTEP))
+	    == UTRACE_ACTION_BLOCKSTEP)
+		tracehook_enable_block_step(tsk);
+	else
+		tracehook_disable_block_step(tsk);
+#endif
+	if (tsk->utrace_flags & UTRACE_EVENT_SYSCALL)
+		tracehook_enable_syscall_trace(tsk);
+	else
+		tracehook_disable_syscall_trace(tsk);
+
+	return 0;
+}
+
+
+/*
+ * Called iff UTRACE_EVENT(EXIT) flag is set.
+ */
+void
+utrace_report_exit(long *exit_code)
+{
+	struct task_struct *tsk = current;
+	struct utrace *utrace = tsk->utrace;
+	struct list_head *pos, *next;
+	struct utrace_attached_engine *engine;
+	unsigned long action;
+	long orig_code = *exit_code;
+
+	/* XXX must change for sharing */
+	action = UTRACE_ACTION_RESUME;
+	list_for_each_safe_rcu(pos, next, &utrace->engines) {
+		engine = list_entry(pos, struct utrace_attached_engine, entry);
+		if (engine->flags & UTRACE_EVENT(EXIT))
+			REPORT(report_exit, orig_code, exit_code);
+	}
+	action = check_detach(tsk, action);
+	check_quiescent(tsk, action);
+}
+
+/*
+ * Called iff UTRACE_EVENT(DEATH) or UTRACE_ACTION_QUIESCE flag is set.
+ *
+ * It is always possible that we are racing with utrace_release_task here,
+ * if UTRACE_ACTION_NOREAP is not set, or in the case of non-leader exec
+ * where the old leader will get released regardless of NOREAP.  For this
+ * reason, utrace_release_task checks for the event bits that get us here,
+ * and delays its cleanup for us to do.
+ */
+void
+utrace_report_death(struct task_struct *tsk, struct utrace *utrace)
+{
+	struct list_head *pos, *next;
+	struct utrace_attached_engine *engine;
+	u32 action;
+
+	BUG_ON(!tsk->exit_state);
+
+	/*
+	 * We are presently considered "quiescent"--which is accurate
+	 * inasmuch as we won't run any more user instructions ever again.
+	 * But for utrace_detach and utrace_set_flags to be robust, they
+	 * must be sure whether or not we will run any more callbacks.  If
+	 * a call comes in before we do, taking the lock here synchronizes
+	 * us so we don't run any callbacks that were just disabled.  Calls
+	 * that come in while we're running the callbacks will see the
+	 * report_death flag and know that we are not yet fully quiescent
+	 * for purposes of detach bookkeeping.
+	 */
+	utrace_lock(utrace);
+	BUG_ON(utrace->u.exit.report_death);
+	utrace->u.exit.report_death = 1;
+	utrace_unlock(utrace);
+
+	/* XXX must change for sharing */
+	list_for_each_safe_rcu(pos, next, &utrace->engines) {
+		engine = list_entry(pos, struct utrace_attached_engine, entry);
+		if (engine->flags & UTRACE_EVENT(DEATH))
+			REPORT(report_death);
+		if (engine->flags & UTRACE_EVENT(QUIESCE))
+			REPORT(report_quiesce);
+	}
+
+	/*
+	 * Unconditionally lock and recompute the flags.
+	 * This may notice that there are no engines left and
+	 * free the utrace struct.
+	 */
+	utrace_lock(utrace);
+
+	/*
+	 * After we unlock (possibly inside utrace_reap for callbacks) with
+	 * this flag clear, competing utrace_detach/utrace_set_flags calls
+	 * know that we've finished our callbacks and any detach bookkeeping.
+	 */
+	utrace->u.exit.report_death = 0;
+
+	if (utrace->u.exit.reap)
+		/*
+		 * utrace_release_task was already called in parallel.
+		 * We must complete its work now.
+		 */
+		utrace_reap(tsk, utrace);
+	else
+		/*
+		 * Clear out any detached engines and in the process
+		 * recompute the flags.  Mask off event bits we can't
+		 * see any more.  This tells utrace_release_task we
+		 * have already finished, if it comes along later.
+		 * Note this all happens on the already-locked utrace,
+		 * which might already be removed from the task.
+		 */
+		remove_detached(tsk, utrace, 0, DEAD_FLAGS_MASK);
+}
+
+/*
+ * Called iff UTRACE_EVENT(VFORK_DONE) flag is set.
+ */
+void
+utrace_report_vfork_done(pid_t child_pid)
+{
+	struct task_struct *tsk = current;
+	struct utrace *utrace = tsk->utrace;
+	struct list_head *pos, *next;
+	struct utrace_attached_engine *engine;
+	unsigned long action;
+
+	/* XXX must change for sharing */
+	action = UTRACE_ACTION_RESUME;
+	list_for_each_safe_rcu(pos, next, &utrace->engines) {
+		engine = list_entry(pos, struct utrace_attached_engine, entry);
+		if (engine->flags & UTRACE_EVENT(VFORK_DONE))
+			REPORT(report_vfork_done, child_pid);
+		if (action & UTRACE_ACTION_HIDE)
+			break;
+	}
+	action = check_detach(tsk, action);
+	check_quiescent(tsk, action);
+}
+
+/*
+ * Called iff UTRACE_EVENT(EXEC) flag is set.
+ */
+void
+utrace_report_exec(struct linux_binprm *bprm, struct pt_regs *regs)
+{
+	struct task_struct *tsk = current;
+	struct utrace *utrace = tsk->utrace;
+	struct list_head *pos, *next;
+	struct utrace_attached_engine *engine;
+	unsigned long action;
+
+	/* XXX must change for sharing */
+	action = UTRACE_ACTION_RESUME;
+	list_for_each_safe_rcu(pos, next, &utrace->engines) {
+		engine = list_entry(pos, struct utrace_attached_engine, entry);
+		if (engine->flags & UTRACE_EVENT(EXEC))
+			REPORT(report_exec, bprm, regs);
+		if (action & UTRACE_ACTION_HIDE)
+			break;
+	}
+	action = check_detach(tsk, action);
+	check_quiescent(tsk, action);
+}
+
+/*
+ * Called iff UTRACE_EVENT(SYSCALL_{ENTRY,EXIT}) flag is set.
+ */
+void
+utrace_report_syscall(struct pt_regs *regs, int is_exit)
+{
+	struct task_struct *tsk = current;
+	struct utrace *utrace = tsk->utrace;
+	struct list_head *pos, *next;
+	struct utrace_attached_engine *engine;
+	unsigned long action, ev;
+
+/*
+ * XXX: pass the syscall number to the engine hook directly, and let it
+ * return an inhibit action that resets the number to -1, e.g.:
+ *	long syscall = tracehook_syscall_number(regs, is_exit);
+ */
+
+	ev = is_exit ? UTRACE_EVENT(SYSCALL_EXIT) : UTRACE_EVENT(SYSCALL_ENTRY);
+
+	/* XXX must change for sharing */
+	action = UTRACE_ACTION_RESUME;
+	list_for_each_safe_rcu(pos, next, &utrace->engines) {
+		engine = list_entry(pos, struct utrace_attached_engine, entry);
+		if (engine->flags & ev) {
+			if (is_exit)
+				REPORT(report_syscall_exit, regs);
+			else
+				REPORT(report_syscall_entry, regs);
+		}
+		if (action & UTRACE_ACTION_HIDE)
+			break;
+	}
+	action = check_detach(tsk, action);
+	if (unlikely(check_quiescent(tsk, action)) && !is_exit)
+		/*
+		 * We are continuing despite QUIESCE because of a SIGKILL.
+		 * Don't let the system call actually proceed.
+		 */
+		tracehook_abort_syscall(regs);
+}
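+
+/*
+ * Illustrative sketch (editorial example, not part of this patch): the
+ * shape of a syscall-entry callback.  Callbacks run with no locks held
+ * and may inspect or modify the registers before the call proceeds.
+ * The function name is hypothetical.
+ */
+#if 0
+static u32
+example_report_syscall_entry(struct utrace_attached_engine *engine,
+			     struct task_struct *tsk, struct pt_regs *regs)
+{
+	return UTRACE_ACTION_RESUME;
+}
+#endif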
+
+
+/*
+ * This is pointed to by the utrace struct, but it's really a private
+ * structure between utrace_get_signal and utrace_inject_signal.
+ */
+struct utrace_signal
+{
+	siginfo_t *const info;
+	struct k_sigaction *return_ka;
+	int signr;
+};
+
+
+/* XXX: copied from kernel/signal.c */
+#ifdef SIGEMT
+#define M_SIGEMT	M(SIGEMT)
+#else
+#define M_SIGEMT	0
+#endif
+
+#if SIGRTMIN > BITS_PER_LONG
+#define M(sig) (1ULL << ((sig)-1))
+#else
+#define M(sig) (1UL << ((sig)-1))
+#endif
+#define T(sig, mask) (M(sig) & (mask))
+
+#define SIG_KERNEL_ONLY_MASK (\
+	M(SIGKILL)   |  M(SIGSTOP)                                   )
+
+#define SIG_KERNEL_STOP_MASK (\
+	M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )
+
+#define SIG_KERNEL_COREDUMP_MASK (\
+        M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
+        M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
+        M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )
+
+#define SIG_KERNEL_IGNORE_MASK (\
+        M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )
+
+#define sig_kernel_only(sig) \
+		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
+#define sig_kernel_coredump(sig) \
+		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
+#define sig_kernel_ignore(sig) \
+		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
+#define sig_kernel_stop(sig) \
+		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
+
+
+/*
+ * Call each interested tracing engine's report_signal callback.
+ */
+static u32
+report_signal(struct task_struct *tsk, struct pt_regs *regs,
+	      struct utrace *utrace, u32 action,
+	      unsigned long flags1, unsigned long flags2, siginfo_t *info,
+	      const struct k_sigaction *ka, struct k_sigaction *return_ka)
+{
+	struct list_head *pos, *next;
+	struct utrace_attached_engine *engine;
+
+	/* XXX must change for sharing */
+	list_for_each_safe_rcu(pos, next, &utrace->engines) {
+		engine = list_entry(pos, struct utrace_attached_engine, entry);
+		if ((engine->flags & flags1) && (engine->flags & flags2)) {
+			u32 disp = action & UTRACE_ACTION_OP_MASK;
+			action &= ~UTRACE_ACTION_OP_MASK;
+			REPORT(report_signal, regs, disp, info, ka, return_ka);
+			if ((action & UTRACE_ACTION_OP_MASK) == 0)
+				action |= disp;
+			if (action & UTRACE_ACTION_HIDE)
+				break;
+		}
+	}
+
+	return action;
+}
+
+void
+utrace_signal_handler_singlestep(struct task_struct *tsk, struct pt_regs *regs)
+{
+	u32 action;
+	action = report_signal(tsk, regs, tsk->utrace, UTRACE_SIGNAL_HANDLER,
+			       UTRACE_EVENT_SIGNAL_ALL,
+			       UTRACE_ACTION_SINGLESTEP|UTRACE_ACTION_BLOCKSTEP,
+			       NULL, NULL, NULL);
+	action = check_detach(tsk, action);
+	check_quiescent(tsk, action);
+}
+
+
+/*
+ * This is the hook from the signals code, called with the siglock held.
+ * Here is the ideal place to quiesce.  We also dequeue and intercept signals.
+ */
+int
+utrace_get_signal(struct task_struct *tsk, struct pt_regs *regs,
+		  siginfo_t *info, struct k_sigaction *return_ka)
+{
+	struct utrace *utrace = tsk->utrace;
+	struct utrace_signal signal = { info, return_ka, 0 };
+	struct k_sigaction *ka;
+	unsigned long action, event;
+
+	/*
+	 * If a signal was injected previously, it could not use our
+	 * stack space directly.  It had to allocate a data structure,
+	 * which we can now copy out of and free.
+	 *
+	 * We don't have to lock access to u.live.signal because it's only
+	 * touched by utrace_inject_signal when we're quiescent.
+	 */
+	if (utrace->u.live.signal != NULL) {
+		signal.signr = utrace->u.live.signal->signr;
+		copy_siginfo(info, utrace->u.live.signal->info);
+		if (utrace->u.live.signal->return_ka)
+			*return_ka = *utrace->u.live.signal->return_ka;
+		else
+			signal.return_ka = NULL;
+		kfree(utrace->u.live.signal);
+		utrace->u.live.signal = NULL;
+	}
+
+	/*
+	 * If we should quiesce, now is the time.
+	 * First stash a pointer to the state on our stack,
+	 * so that utrace_inject_signal can tell us what to do.
+	 */
+	if (tsk->utrace_flags & UTRACE_ACTION_QUIESCE) {
+		int killed = sigkill_pending(tsk);
+		if (!killed) {
+			spin_unlock_irq(&tsk->sighand->siglock);
+
+			killed = utrace_quiescent(tsk, &signal);
+
+			/*
+			 * No one wants us quiescent any more, so we can take
+			 * signals.  Unless we have a forced signal to take,
+			 * back out to the signal code to resynchronize after
+			 * releasing the siglock.
+			 */
+			if (signal.signr == 0 && !killed)
+				/*
+				 * This return value says to reacquire the
+				 * siglock and check again.  This will check
+				 * for a pending group stop and process it
+				 * before coming back here.
+				 */
+				return -1;
+
+			spin_lock_irq(&tsk->sighand->siglock);
+		}
+		if (killed) {
+			/*
+			 * The only reason we woke up now was because of a
+			 * SIGKILL.  Don't do normal dequeuing in case it
+			 * might get a signal other than SIGKILL.  That would
+			 * perturb the death state so it might differ from
+			 * what the debugger would have allowed to happen.
+			 * Instead, pluck out just the SIGKILL to be sure
+			 * we'll die immediately with nothing else different
+			 * from the quiescent state the debugger wanted us in.
+			 */
+			sigset_t sigkill_only;
+			sigfillset(&sigkill_only);
+			sigdelset(&sigkill_only, SIGKILL);
+			killed = dequeue_signal(tsk, &sigkill_only, info);
+			BUG_ON(killed != SIGKILL);
+			*return_ka = tsk->sighand->action[killed - 1];
+			return killed;
+		}
+	}
+
+	/*
+	 * If a signal was injected, everything is in place now.  Go do it.
+	 */
+	if (signal.signr != 0) {
+		if (signal.return_ka == NULL) {
+			ka = &tsk->sighand->action[signal.signr - 1];
+			if (ka->sa.sa_flags & SA_ONESHOT)
+				ka->sa.sa_handler = SIG_DFL;
+			*return_ka = *ka;
+		}
+		else
+			BUG_ON(signal.return_ka != return_ka);
+		return signal.signr;
+	}
+
+	/*
+	 * If no one is interested in intercepting signals, let the caller
+	 * just dequeue them normally.
+	 */
+	if ((tsk->utrace_flags & UTRACE_EVENT_SIGNAL_ALL) == 0)
+		return 0;
+
+	/*
+	 * Steal the next signal so we can let tracing engines examine it.
+	 * From the signal number and sigaction, determine what normal
+	 * delivery would do.  If no engine perturbs it, we'll do that
+	 * by returning the signal number after setting *return_ka.
+	 */
+	signal.signr = dequeue_signal(tsk, &tsk->blocked, info);
+	if (signal.signr == 0)
+		return 0;
+
+	BUG_ON(signal.signr != info->si_signo);
+
+	ka = &tsk->sighand->action[signal.signr - 1];
+	*return_ka = *ka;
+
+	/*
+	 * We are never allowed to interfere with SIGKILL,
+	 * just punt after filling in *return_ka for our caller.
+	 */
+	if (signal.signr == SIGKILL)
+		return signal.signr;
+
+	if (ka->sa.sa_handler == SIG_IGN) {
+		event = UTRACE_EVENT(SIGNAL_IGN);
+		action = UTRACE_SIGNAL_IGN;
+	}
+	else if (ka->sa.sa_handler != SIG_DFL) {
+		event = UTRACE_EVENT(SIGNAL);
+		action = UTRACE_ACTION_RESUME;
+	}
+	else if (sig_kernel_coredump(signal.signr)) {
+		event = UTRACE_EVENT(SIGNAL_CORE);
+		action = UTRACE_SIGNAL_CORE;
+	}
+	else if (sig_kernel_ignore(signal.signr)) {
+		event = UTRACE_EVENT(SIGNAL_IGN);
+		action = UTRACE_SIGNAL_IGN;
+	}
+	else if (sig_kernel_stop(signal.signr)) {
+		event = UTRACE_EVENT(SIGNAL_STOP);
+		action = (signal.signr == SIGSTOP
+			  ? UTRACE_SIGNAL_STOP : UTRACE_SIGNAL_TSTP);
+	}
+	else {
+		event = UTRACE_EVENT(SIGNAL_TERM);
+		action = UTRACE_SIGNAL_TERM;
+	}
+
+	if (tsk->utrace_flags & event) {
+		/*
+		 * We have some interested engines, so tell them about the
+		 * signal and let them change its disposition.
+		 */
+
+		spin_unlock_irq(&tsk->sighand->siglock);
+
+		action = report_signal(tsk, regs, utrace, action, event, event,
+				       info, ka, return_ka);
+		action &= UTRACE_ACTION_OP_MASK;
+
+		if (action & UTRACE_SIGNAL_HOLD) {
+			struct sigqueue *q = sigqueue_alloc();
+			if (likely(q != NULL)) {
+				q->flags = 0;
+				copy_siginfo(&q->info, info);
+			}
+			action &= ~UTRACE_SIGNAL_HOLD;
+			spin_lock_irq(&tsk->sighand->siglock);
+			sigaddset(&tsk->pending.signal, info->si_signo);
+			if (likely(q != NULL))
+				list_add(&q->list, &tsk->pending.list);
+		}
+		else
+			spin_lock_irq(&tsk->sighand->siglock);
+
+		recalc_sigpending_tsk(tsk);
+	}
+
+	/*
+	 * We express the chosen action to the signals code in terms
+	 * of a representative signal whose default action does it.
+	 */
+	switch (action) {
+	case UTRACE_SIGNAL_IGN:
+		/*
+		 * We've eaten the signal.  That's all we do.
+		 * Tell the caller to restart.
+		 */
+		spin_unlock_irq(&tsk->sighand->siglock);
+		return -1;
+
+	case UTRACE_ACTION_RESUME:
+	case UTRACE_SIGNAL_DELIVER:
+		/*
+		 * The handler will run.  We do the SA_ONESHOT work here
+		 * since the normal path will only touch *return_ka now.
+		 */
+		if (return_ka->sa.sa_flags & SA_ONESHOT)
+			ka->sa.sa_handler = SIG_DFL;
+		break;
+
+	case UTRACE_SIGNAL_TSTP:
+		signal.signr = SIGTSTP;
+		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+		return_ka->sa.sa_handler = SIG_DFL;
+		break;
+
+	case UTRACE_SIGNAL_STOP:
+		signal.signr = SIGSTOP;
+		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+		return_ka->sa.sa_handler = SIG_DFL;
+		break;
+
+	case UTRACE_SIGNAL_TERM:
+		signal.signr = SIGTERM;
+		return_ka->sa.sa_handler = SIG_DFL;
+		break;
+
+	case UTRACE_SIGNAL_CORE:
+		signal.signr = SIGQUIT;
+		return_ka->sa.sa_handler = SIG_DFL;
+		break;
+
+	default:
+		BUG();
+	}
+
+	return signal.signr;
+}
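+
+/*
+ * Illustrative sketch of the calling convention above (hypothetical
+ * caller, not part of this patch).  The signal-dequeuing loop is
+ * assumed to hold tsk->sighand->siglock and to act on our return
+ * value: -1 means the siglock was released, so reacquire it and
+ * re-check for group stops; 0 means dequeue normally; a positive
+ * value is the signal to deliver using *return_ka:
+ *
+ *	relock:
+ *		spin_lock_irq(&tsk->sighand->siglock);
+ *		...
+ *		signr = utrace_get_signal(tsk, regs, &info, &ka);
+ *		if (signr < 0)
+ *			goto relock;
+ *		if (signr == 0)
+ *			signr = dequeue_signal(tsk, &tsk->blocked, &info);
+ *		... deliver signr according to ka ...
+ */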
+
+
+/*
+ * Cause a specified signal delivery in the target thread,
+ * which must be quiescent.  The action has UTRACE_SIGNAL_* bits
+ * as returned from a report_signal callback.  If ka is non-null,
+ * it gives the sigaction to follow for UTRACE_SIGNAL_DELIVER;
+ * otherwise, the installed sigaction at the time of delivery is used.
+ */
+int
+utrace_inject_signal(struct task_struct *target,
+		     struct utrace_attached_engine *engine,
+		     u32 action, siginfo_t *info,
+		     const struct k_sigaction *ka)
+{
+	struct utrace *utrace;
+	struct utrace_signal *signal;
+	int ret;
+
+	if (info->si_signo == 0 || !valid_signal(info->si_signo))
+		return -EINVAL;
+
+	utrace = get_utrace_lock_attached(target, engine);
+	if (unlikely(IS_ERR(utrace)))
+		return PTR_ERR(utrace);
+
+	ret = 0;
+	signal = utrace->u.live.signal;
+	if (unlikely(target->exit_state))
+		ret = -ESRCH;
+	else if (signal == NULL) {
+		ret = -ENOSYS;	/* XXX */
+	}
+	else if (signal->signr != 0)
+		ret = -EAGAIN;
+	else {
+		if (info != signal->info)
+			copy_siginfo(signal->info, info);
+
+		switch (action) {
+		default:
+			ret = -EINVAL;
+			break;
+
+		case UTRACE_SIGNAL_IGN:
+			break;
+
+		case UTRACE_ACTION_RESUME:
+		case UTRACE_SIGNAL_DELIVER:
+			/*
+			 * The handler will run.  We do the SA_ONESHOT work
+			 * here since the normal path will not touch the
+			 * real sigaction when using an injected signal.
+			 */
+			if (ka == NULL)
+				signal->return_ka = NULL;
+			else if (ka != signal->return_ka)
+				*signal->return_ka = *ka;
+			if (ka && ka->sa.sa_flags & SA_ONESHOT) {
+				struct k_sigaction *a;
+				a = &target->sighand->action[info->si_signo-1];
+				spin_lock_irq(&target->sighand->siglock);
+				a->sa.sa_handler = SIG_DFL;
+				spin_unlock_irq(&target->sighand->siglock);
+			}
+			signal->signr = info->si_signo;
+			break;
+
+		case UTRACE_SIGNAL_TSTP:
+			signal->signr = SIGTSTP;
+			spin_lock_irq(&target->sighand->siglock);
+			target->signal->flags |= SIGNAL_STOP_DEQUEUED;
+			spin_unlock_irq(&target->sighand->siglock);
+			signal->return_ka->sa.sa_handler = SIG_DFL;
+			break;
+
+		case UTRACE_SIGNAL_STOP:
+			signal->signr = SIGSTOP;
+			spin_lock_irq(&target->sighand->siglock);
+			target->signal->flags |= SIGNAL_STOP_DEQUEUED;
+			spin_unlock_irq(&target->sighand->siglock);
+			signal->return_ka->sa.sa_handler = SIG_DFL;
+			break;
+
+		case UTRACE_SIGNAL_TERM:
+			signal->signr = SIGTERM;
+			signal->return_ka->sa.sa_handler = SIG_DFL;
+			break;
+
+		case UTRACE_SIGNAL_CORE:
+			signal->signr = SIGQUIT;
+			signal->return_ka->sa.sa_handler = SIG_DFL;
+			break;
+		}
+	}
+
+	utrace_unlock(utrace);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(utrace_inject_signal);
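+
+/*
+ * Illustrative sketch (hypothetical engine code, not part of this
+ * patch): an engine that has brought its target to quiescence could
+ * force a fatal termination like so, where engine came from an
+ * earlier utrace_attach call:
+ *
+ *	siginfo_t info;
+ *	memset(&info, 0, sizeof info);
+ *	info.si_signo = SIGTERM;
+ *	info.si_code = SI_KERNEL;
+ *	err = utrace_inject_signal(target, engine, UTRACE_SIGNAL_TERM,
+ *				   &info, NULL);
+ *
+ * A return of -EAGAIN means another injected signal is still pending;
+ * -ESRCH means the target is already exiting.
+ */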
+
+
+const struct utrace_regset *
+utrace_regset(struct task_struct *target,
+	      struct utrace_attached_engine *engine,
+	      const struct utrace_regset_view *view, int which)
+{
+	if (unlikely((unsigned) which >= view->n))
+		return NULL;
+
+	if (target != current)
+		wait_task_inactive(target);
+
+	return &view->regsets[which];
+}
+EXPORT_SYMBOL_GPL(utrace_regset);
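+
+/*
+ * Illustrative sketch (assumptions noted, not part of this patch):
+ * fetching regset 0, conventionally the general registers, into a
+ * kernel buffer.  The (kbuf, NULL) form mirrors the (NULL, ubuf)
+ * calls made by the ptrace compatibility code later in this patch:
+ *
+ *	const struct utrace_regset *rs;
+ *	rs = utrace_regset(task, engine, utrace_native_view(task), 0);
+ *	if (rs == NULL)
+ *		return -EIO;
+ *	err = (*rs->get)(task, rs, 0, rs->n * rs->size, kbuf, NULL);
+ */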
+
+
+/*
+ * Return the task_struct for the task using ptrace on this one, or NULL.
+ * Must be called with rcu_read_lock held to keep the returned struct alive.
+ *
+ * At exec time, this may be called with task_lock(p) still held from when
+ * tracehook_unsafe_exec was just called.  In that case it must give
+ * results consistent with those unsafe_exec results, i.e. non-NULL if
+ * any LSM_UNSAFE_PTRACE_* bits were set.
+ *
+ * The value is also used to display after "TracerPid:" in /proc/PID/status,
+ * where it is called with only rcu_read_lock held.
+ */
+struct task_struct *
+utrace_tracer_task(struct task_struct *target)
+{
+	struct utrace *utrace;
+	struct task_struct *tracer = NULL;
+
+	utrace = rcu_dereference(target->utrace);
+	if (utrace != NULL) {
+		struct list_head *pos, *next;
+		struct utrace_attached_engine *engine;
+		const struct utrace_engine_ops *ops;
+		list_for_each_safe_rcu(pos, next, &utrace->engines) {
+			engine = list_entry(pos, struct utrace_attached_engine,
+					    entry);
+			ops = rcu_dereference(engine->ops);
+			if (ops->tracer_task) {
+				tracer = (*ops->tracer_task)(engine, target);
+				if (tracer != NULL)
+					break;
+			}
+		}
+	}
+
+	return tracer;
+}
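+
+/*
+ * Illustrative sketch of a caller (hypothetical, not part of this
+ * patch), e.g. the /proc code computing the "TracerPid:" field,
+ * which must hold rcu_read_lock as required above:
+ *
+ *	rcu_read_lock();
+ *	tracer = utrace_tracer_task(p);
+ *	tracer_pid = tracer ? tracer->pid : 0;
+ *	rcu_read_unlock();
+ */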
+
+int
+utrace_allow_access_process_vm(struct task_struct *target)
+{
+	struct utrace *utrace;
+	int ret = 0;
+
+	rcu_read_lock();
+	utrace = rcu_dereference(target->utrace);
+	if (utrace != NULL) {
+		struct list_head *pos, *next;
+		struct utrace_attached_engine *engine;
+		const struct utrace_engine_ops *ops;
+		list_for_each_safe_rcu(pos, next, &utrace->engines) {
+			engine = list_entry(pos, struct utrace_attached_engine,
+					    entry);
+			ops = rcu_dereference(engine->ops);
+			if (ops->allow_access_process_vm) {
+				ret = (*ops->allow_access_process_vm)(engine,
+								      target,
+								      current);
+				if (ret)
+					break;
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+/*
+ * Called on the current task to return LSM_UNSAFE_* bits implied by tracing.
+ * Called with task_lock held.
+ */
+int
+utrace_unsafe_exec(struct task_struct *tsk)
+{
+	struct utrace *utrace = tsk->utrace;
+	struct list_head *pos, *next;
+	struct utrace_attached_engine *engine;
+	const struct utrace_engine_ops *ops;
+	int unsafe = 0;
+
+	/* XXX must change for sharing */
+	list_for_each_safe_rcu(pos, next, &utrace->engines) {
+		engine = list_entry(pos, struct utrace_attached_engine, entry);
+		ops = rcu_dereference(engine->ops);
+		if (ops->unsafe_exec)
+			unsafe |= (*ops->unsafe_exec)(engine, tsk);
+	}
+
+	return unsafe;
+}
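+
+/*
+ * Illustrative sketch (hypothetical engine, not part of this patch):
+ * an engine's unsafe_exec hook returns LSM_UNSAFE_* bits, as a
+ * ptrace-style engine would for a traced exec:
+ *
+ *	static int my_unsafe_exec(struct utrace_attached_engine *engine,
+ *				  struct task_struct *tsk)
+ *	{
+ *		return LSM_UNSAFE_PTRACE;
+ *	}
+ */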
--- linux-2.6/kernel/exit.c.utrace-ptrace-compat
+++ linux-2.6/kernel/exit.c
@@ -21,6 +21,7 @@
 #include <linux/file.h>
 #include <linux/binfmts.h>
 #include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/profile.h>
 #include <linux/mount.h>
 #include <linux/proc_fs.h>
@@ -139,10 +140,10 @@ void release_task(struct task_struct * p
 	struct task_struct *leader;
 	int zap_leader;
 repeat:
+	tracehook_release_task(p);
 	atomic_dec(&p->user->processes);
 	write_lock_irq(&tasklist_lock);
-	ptrace_unlink(p);
-	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
+	BUG_ON(tracehook_check_released(p));
 	__exit_signal(p);
 
 	/*
@@ -219,10 +220,10 @@ static int will_become_orphaned_pgrp(int
 	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
 		if (p == ignored_task
 				|| p->exit_state
-				|| p->real_parent->pid == 1)
+				|| p->parent->pid == 1)
 			continue;
-		if (process_group(p->real_parent) != pgrp
-			    && p->real_parent->signal->session == p->signal->session) {
+		if (process_group(p->parent) != pgrp
+			    && p->parent->signal->session == p->signal->session) {
 			ret = 0;
 			break;
 		}
@@ -250,16 +251,6 @@ static int has_stopped_jobs(int pgrp)
 		if (p->state != TASK_STOPPED)
 			continue;
 
-		/* If p is stopped by a debugger on a signal that won't
-		   stop it, then don't count p as stopped.  This isn't
-		   perfect but it's a good approximation.  */
-		if (unlikely (p->ptrace)
-		    && p->exit_code != SIGSTOP
-		    && p->exit_code != SIGTSTP
-		    && p->exit_code != SIGTTOU
-		    && p->exit_code != SIGTTIN)
-			continue;
-
 		retval = 1;
 		break;
 	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
@@ -282,11 +273,9 @@ static void reparent_to_init(void)
 {
 	write_lock_irq(&tasklist_lock);
 
-	ptrace_unlink(current);
 	/* Reparent to init */
 	remove_parent(current);
 	current->parent = child_reaper;
-	current->real_parent = child_reaper;
 	add_parent(current);
 
 	/* Set the exit signal to SIGCHLD so we signal init on exit */
@@ -592,11 +581,11 @@ choose_new_parent(struct task_struct *p,
 	 * the parent is not a zombie.
 	 */
 	BUG_ON(p == reaper || reaper->exit_state);
-	p->real_parent = reaper;
+	p->parent = reaper;
 }
 
 static void
-reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
+reparent_thread(struct task_struct *p, struct task_struct *father)
 {
 	/* We don't want people slaying init.  */
 	if (p->exit_signal != -1)
@@ -607,35 +596,14 @@ reparent_thread(struct task_struct *p, s
 		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
 
 	/* Move the child from its dying parent to the new one.  */
-	if (unlikely(traced)) {
-		/* Preserve ptrace links if someone else is tracing this child.  */
-		list_del_init(&p->ptrace_list);
-		if (p->parent != p->real_parent)
-			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
-	} else {
-		/* If this child is being traced, then we're the one tracing it
-		 * anyway, so let go of it.
-		 */
-		p->ptrace = 0;
-		remove_parent(p);
-		p->parent = p->real_parent;
-		add_parent(p);
+	list_move_tail(&p->sibling, &p->parent->children);
 
-		/* If we'd notified the old parent about this child's death,
-		 * also notify the new parent.
-		 */
-		if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
-		    thread_group_empty(p))
-			do_notify_parent(p, p->exit_signal);
-		else if (p->state == TASK_TRACED) {
-			/*
-			 * If it was at a trace stop, turn it into
-			 * a normal stop since it's no longer being
-			 * traced.
-			 */
-			ptrace_untrace(p);
-		}
-	}
+	/* If we'd notified the old parent about this child's death,
+	 * also notify the new parent.
+	 */
+	if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
+	    thread_group_empty(p))
+		do_notify_parent(p, p->exit_signal);
 
 	/*
 	 * process group orphan check
@@ -661,7 +629,7 @@ reparent_thread(struct task_struct *p, s
  * the global child reaper process (ie "init")
  */
 static void
-forget_original_parent(struct task_struct *father, struct list_head *to_release)
+forget_original_parent(struct task_struct *father)
 {
 	struct task_struct *p, *reaper = father;
 	struct list_head *_p, *_n;
@@ -674,48 +642,10 @@ forget_original_parent(struct task_struc
 		}
 	} while (reaper->exit_state);
 
-	/*
-	 * There are only two places where our children can be:
-	 *
-	 * - in our child list
-	 * - in our ptraced child list
-	 *
-	 * Search them and reparent children.
-	 */
 	list_for_each_safe(_p, _n, &father->children) {
-		int ptrace;
 		p = list_entry(_p, struct task_struct, sibling);
-
-		ptrace = p->ptrace;
-
-		/* if father isn't the real parent, then ptrace must be enabled */
-		BUG_ON(father != p->real_parent && !ptrace);
-
-		if (father == p->real_parent) {
-			/* reparent with a reaper, real father it's us */
-			choose_new_parent(p, reaper);
-			reparent_thread(p, father, 0);
-		} else {
-			/* reparent ptraced task to its real parent */
-			__ptrace_unlink (p);
-			if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
-			    thread_group_empty(p))
-				do_notify_parent(p, p->exit_signal);
-		}
-
-		/*
-		 * if the ptraced child is a zombie with exit_signal == -1
-		 * we must collect it before we exit, or it will remain
-		 * zombie forever since we prevented it from self-reap itself
-		 * while it was being traced by us, to be able to see it in wait4.
-		 */
-		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
-			list_add(&p->ptrace_list, to_release);
-	}
-	list_for_each_safe(_p, _n, &father->ptrace_children) {
-		p = list_entry(_p, struct task_struct, ptrace_list);
 		choose_new_parent(p, reaper);
-		reparent_thread(p, father, 1);
+		reparent_thread(p, father);
 	}
 }
 
@@ -727,7 +657,8 @@ static void exit_notify(struct task_stru
 {
 	int state;
 	struct task_struct *t;
-	struct list_head ptrace_dead, *_p, *_n;
+	int noreap;
+	void *cookie;
 
 	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
 	    && !thread_group_empty(tsk)) {
@@ -763,10 +694,8 @@ static void exit_notify(struct task_stru
 	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 	 */
 
-	INIT_LIST_HEAD(&ptrace_dead);
-	forget_original_parent(tsk, &ptrace_dead);
+	forget_original_parent(tsk);
 	BUG_ON(!list_empty(&tsk->children));
-	BUG_ON(!list_empty(&tsk->ptrace_children));
 
 	/*
 	 * Check to see if any process groups have become orphaned
@@ -778,7 +707,7 @@ static void exit_notify(struct task_stru
 	 * is about to become orphaned.
 	 */
 	 
-	t = tsk->real_parent;
+	t = tsk->parent;
 	
 	if ((process_group(t) != process_group(tsk)) &&
 	    (t->signal->session == tsk->signal->session) &&
@@ -810,32 +739,18 @@ static void exit_notify(struct task_stru
 	    && !capable(CAP_KILL))
 		tsk->exit_signal = SIGCHLD;
 
-
-	/* If something other than our normal parent is ptracing us, then
-	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
-	 * only has special meaning to our real parent.
-	 */
-	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
-		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
-		do_notify_parent(tsk, signal);
-	} else if (tsk->ptrace) {
-		do_notify_parent(tsk, SIGCHLD);
-	}
+	if (!tracehook_notify_death(tsk, &noreap, &cookie)
+	    && tsk->exit_signal != -1 && thread_group_empty(tsk))
+		do_notify_parent(tsk, tsk->exit_signal);
 
 	state = EXIT_ZOMBIE;
-	if (tsk->exit_signal == -1 &&
-	    (likely(tsk->ptrace == 0) ||
-	     unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
+	if (tsk->exit_signal == -1 && !noreap)
 		state = EXIT_DEAD;
 	tsk->exit_state = state;
 
 	write_unlock_irq(&tasklist_lock);
 
-	list_for_each_safe(_p, _n, &ptrace_dead) {
-		list_del_init(_p);
-		t = list_entry(_p, struct task_struct, ptrace_list);
-		release_task(t);
-	}
+	tracehook_report_death(tsk, state, cookie);
 
 	/* If the process is dead, release it - nobody will wait for it */
 	if (state == EXIT_DEAD)
@@ -860,10 +775,7 @@ fastcall NORET_TYPE void do_exit(long co
 	if (unlikely(tsk == child_reaper))
 		panic("Attempted to kill init!");
 
-	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
-		current->ptrace_message = code;
-		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
-	}
+	tracehook_report_exit(&code);
 
 	/*
 	 * We're taking recursive faults here in do_exit. Safest is to just
@@ -880,6 +792,8 @@ fastcall NORET_TYPE void do_exit(long co
 
 	tsk->flags |= PF_EXITING;
 
+	ptrace_exit(tsk);
+
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 				current->comm, current->pid,
@@ -1035,10 +949,9 @@ static int eligible_child(pid_t pid, int
 	}
 
 	/*
-	 * Do not consider detached threads that are
-	 * not ptraced:
+	 * Do not consider detached threads.
 	 */
-	if (p->exit_signal == -1 && !p->ptrace)
+	if (p->exit_signal == -1)
 		return 0;
 
 	/* Wait for all children (clone and not) if __WALL is set;
@@ -1109,7 +1022,7 @@ static int wait_task_zombie(struct task_
 
 		if (unlikely(p->exit_state != EXIT_ZOMBIE))
 			return 0;
-		if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
+		if (unlikely(p->exit_signal == -1))
 			return 0;
 		get_task_struct(p);
 		read_unlock(&tasklist_lock);
@@ -1133,15 +1046,9 @@ static int wait_task_zombie(struct task_
 		BUG_ON(state != EXIT_DEAD);
 		return 0;
 	}
-	if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
-		/*
-		 * This can only happen in a race with a ptraced thread
-		 * dying on another processor.
-		 */
-		return 0;
-	}
+	BUG_ON(p->exit_signal == -1);
 
-	if (likely(p->real_parent == p->parent) && likely(p->signal)) {
+	if (likely(p->signal)) {
 		struct signal_struct *psig;
 		struct signal_struct *sig;
 
@@ -1223,28 +1130,8 @@ static int wait_task_zombie(struct task_
 		return retval;
 	}
 	retval = p->pid;
-	if (p->real_parent != p->parent) {
-		write_lock_irq(&tasklist_lock);
-		/* Double-check with lock held.  */
-		if (p->real_parent != p->parent) {
-			__ptrace_unlink(p);
-			// TODO: is this safe?
-			p->exit_state = EXIT_ZOMBIE;
-			/*
-			 * If this is not a detached task, notify the parent.
-			 * If it's still not detached after that, don't release
-			 * it now.
-			 */
-			if (p->exit_signal != -1) {
-				do_notify_parent(p, p->exit_signal);
-				if (p->exit_signal != -1)
-					p = NULL;
-			}
-		}
-		write_unlock_irq(&tasklist_lock);
-	}
-	if (p != NULL)
-		release_task(p);
+	release_task(p);
+
 	BUG_ON(!retval);
 	return retval;
 }
@@ -1263,7 +1150,7 @@ static int wait_task_stopped(struct task
 
 	if (!p->exit_code)
 		return 0;
-	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
+	if (delayed_group_leader &&
 	    p->signal && p->signal->group_stop_count > 0)
 		/*
 		 * A group stop is in progress and this is the group leader.
@@ -1284,14 +1171,13 @@ static int wait_task_stopped(struct task
 	if (unlikely(noreap)) {
 		pid_t pid = p->pid;
 		uid_t uid = p->uid;
-		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
 
 		exit_code = p->exit_code;
 		if (unlikely(!exit_code) ||
 		    unlikely(p->state & TASK_TRACED))
 			goto bail_ref;
-		return wait_noreap_copyout(p, pid, uid,
-					   why, (exit_code << 8) | 0x7f,
+		return wait_noreap_copyout(p, pid, uid, CLD_STOPPED,
+					   (exit_code << 8) | 0x7f,
 					   infop, ru);
 	}
 
@@ -1347,9 +1233,7 @@ bail_ref:
 	if (!retval && infop)
 		retval = put_user(0, &infop->si_errno);
 	if (!retval && infop)
-		retval = put_user((short)((p->ptrace & PT_PTRACED)
-					  ? CLD_TRAPPED : CLD_STOPPED),
-				  &infop->si_code);
+		retval = put_user((short)CLD_STOPPED, &infop->si_code);
 	if (!retval && infop)
 		retval = put_user(exit_code, &infop->si_status);
 	if (!retval && infop)
@@ -1417,22 +1301,6 @@ static int wait_task_continued(struct ta
 }
 
 
-static inline int my_ptrace_child(struct task_struct *p)
-{
-	if (!(p->ptrace & PT_PTRACED))
-		return 0;
-	if (!(p->ptrace & PT_ATTACHED))
-		return 1;
-	/*
-	 * This child was PTRACE_ATTACH'd.  We should be seeing it only if
-	 * we are the attacher.  If we are the real parent, this is a race
-	 * inside ptrace_attach.  It is waiting for the tasklist_lock,
-	 * which we have to switch the parent links, but has already set
-	 * the flags in p->ptrace.
-	 */
-	return (p->parent != p->real_parent);
-}
-
 static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
 		    int __user *stat_addr, struct rusage __user *ru)
 {
@@ -1464,26 +1332,17 @@ repeat:
 
 			switch (p->state) {
 			case TASK_TRACED:
-				/*
-				 * When we hit the race with PTRACE_ATTACH,
-				 * we will not report this child.  But the
-				 * race means it has not yet been moved to
-				 * our ptrace_children list, so we need to
-				 * set the flag here to avoid a spurious ECHILD
-				 * when the race happens with the only child.
-				 */
 				flag = 1;
-				if (!my_ptrace_child(p))
-					continue;
-				/*FALLTHROUGH*/
+				continue;
 			case TASK_STOPPED:
 				/*
 				 * It's stopped now, so it might later
 				 * continue, exit, or stop again.
 				 */
 				flag = 1;
-				if (!(options & WUNTRACED) &&
-				    !my_ptrace_child(p))
+				if (!(options & WUNTRACED))
+					continue;
+				if (tracehook_inhibit_wait_stopped(p))
 					continue;
 				retval = wait_task_stopped(p, ret == 2,
 							   (options & WNOWAIT),
@@ -1508,6 +1367,10 @@ repeat:
 						goto check_continued;
 					if (!likely(options & WEXITED))
 						continue;
+					if (tracehook_inhibit_wait_zombie(p)) {
+						flag = 1;
+						continue;
+					}
 					retval = wait_task_zombie(
 						p, (options & WNOWAIT),
 						infop, stat_addr, ru);
@@ -1524,6 +1387,8 @@ check_continued:
 				flag = 1;
 				if (!unlikely(options & WCONTINUED))
 					continue;
+				if (tracehook_inhibit_wait_continued(p))
+					continue;
 				retval = wait_task_continued(
 					p, (options & WNOWAIT),
 					infop, stat_addr, ru);
@@ -1532,16 +1397,15 @@ check_continued:
 				break;
 			}
 		}
-		if (!flag) {
-			list_for_each(_p, &tsk->ptrace_children) {
-				p = list_entry(_p, struct task_struct,
-						ptrace_list);
-				if (!eligible_child(pid, options, p))
-					continue;
-				flag = 1;
-				break;
-			}
+
+		retval = ptrace_do_wait(tsk, pid, options,
+					infop, stat_addr, ru);
+		if (retval != -ECHILD) {
+			flag = 1;
+			if (retval != 0) /* It released tasklist_lock.  */
+				goto end;
 		}
+
 		if (options & __WNOTHREAD)
 			break;
 		tsk = next_thread(tsk);
@@ -1565,7 +1429,7 @@ end:
 	remove_wait_queue(&current->signal->wait_chldexit,&wait);
 	if (infop) {
 		if (retval > 0)
-		retval = 0;
+			retval = 0;
 		else {
 			/*
 			 * For a WNOHANG return, clear out all the fields
--- linux-2.6/kernel/sys.c.utrace-ptrace-compat
+++ linux-2.6/kernel/sys.c
@@ -1274,7 +1274,7 @@ asmlinkage long sys_setpgid(pid_t pid, p
 	if (!thread_group_leader(p))
 		goto out;
 
-	if (p->real_parent == group_leader) {
+	if (p->parent == group_leader) {
 		err = -EPERM;
 		if (p->signal->session != group_leader->signal->session)
 			goto out;
--- linux-2.6/kernel/timer.c.utrace-ptrace-compat
+++ linux-2.6/kernel/timer.c
@@ -1324,7 +1324,7 @@ asmlinkage long sys_getpid(void)
 }
 
 /*
- * Accessing ->real_parent is not SMP-safe, it could
+ * Accessing ->parent is not SMP-safe, it could
  * change from under us. However, we can use a stale
  * value of ->real_parent under rcu_read_lock(), see
  * release_task()->call_rcu(delayed_put_task_struct).
@@ -1334,7 +1334,7 @@ asmlinkage long sys_getppid(void)
 	int pid;
 
 	rcu_read_lock();
-	pid = rcu_dereference(current->real_parent)->tgid;
+	pid = rcu_dereference(current->parent)->tgid;
 	rcu_read_unlock();
 
 	return pid;
--- linux-2.6/kernel/ptrace.c.utrace-ptrace-compat
+++ linux-2.6/kernel/ptrace.c
@@ -22,101 +22,15 @@
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
 
-/*
- * ptrace a task: make the debugger its new parent and
- * move it to the ptrace list.
- *
- * Must be called with the tasklist lock write-held.
- */
-void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
-{
-	BUG_ON(!list_empty(&child->ptrace_list));
-	if (child->parent == new_parent)
-		return;
-	list_add(&child->ptrace_list, &child->parent->ptrace_children);
-	remove_parent(child);
-	child->parent = new_parent;
-	add_parent(child);
-}
- 
-/*
- * Turn a tracing stop into a normal stop now, since with no tracer there
- * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
- * signal sent that would resume the child, but didn't because it was in
- * TASK_TRACED, resume it now.
- * Requires that irqs be disabled.
- */
-void ptrace_untrace(struct task_struct *child)
-{
-	spin_lock(&child->sighand->siglock);
-	if (child->state == TASK_TRACED) {
-		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
-			child->state = TASK_STOPPED;
-		} else {
-			signal_wake_up(child, 1);
-		}
-	}
-	spin_unlock(&child->sighand->siglock);
-}
-
-/*
- * unptrace a task: move it back to its original parent and
- * remove it from the ptrace list.
- *
- * Must be called with the tasklist lock write-held.
- */
-void __ptrace_unlink(struct task_struct *child)
-{
-	BUG_ON(!child->ptrace);
-
-	child->ptrace = 0;
-	if (!list_empty(&child->ptrace_list)) {
-		list_del_init(&child->ptrace_list);
-		remove_parent(child);
-		child->parent = child->real_parent;
-		add_parent(child);
-	}
-
-	if (child->state == TASK_TRACED)
-		ptrace_untrace(child);
-}
-
-/*
- * Check that we have indeed attached to the thing..
- */
-int ptrace_check_attach(struct task_struct *child, int kill)
-{
-	int ret = -ESRCH;
-
-	/*
-	 * We take the read lock around doing both checks to close a
-	 * possible race where someone else was tracing our child and
-	 * detached between these two checks.  After this locked check,
-	 * we are sure that this is our traced child and that can only
-	 * be changed by us so it's not changing right after this.
-	 */
-	read_lock(&tasklist_lock);
-	if ((child->ptrace & PT_PTRACED) && child->parent == current &&
-	    (!(child->ptrace & PT_ATTACHED) || child->real_parent != current)
-	    && child->signal != NULL) {
-		ret = 0;
-		spin_lock_irq(&child->sighand->siglock);
-		if (child->state == TASK_STOPPED) {
-			child->state = TASK_TRACED;
-		} else if (child->state != TASK_TRACED && !kill) {
-			ret = -ESRCH;
-		}
-		spin_unlock_irq(&child->sighand->siglock);
-	}
-	read_unlock(&tasklist_lock);
+#ifdef CONFIG_PTRACE
+#include <linux/utrace.h>
+#include <linux/tracehook.h>
+#include <asm/tracehook.h>
+#endif
 
-	if (!ret && !kill) {
-		wait_task_inactive(child);
-	}
+int getrusage(struct task_struct *, int, struct rusage __user *);
 
-	/* All systems go.. */
-	return ret;
-}
+/* #define PTRACE_DEBUG */
 
 static int may_attach(struct task_struct *task)
 {
@@ -157,90 +71,6 @@ int ptrace_may_attach(struct task_struct
 	return !err;
 }
 
-int ptrace_attach(struct task_struct *task)
-{
-	int retval;
-
-	retval = -EPERM;
-	if (task->pid <= 1)
-		goto out;
-	if (task->tgid == current->tgid)
-		goto out;
-
-repeat:
-	/*
-	 * Nasty, nasty.
-	 *
-	 * We want to hold both the task-lock and the
-	 * tasklist_lock for writing at the same time.
-	 * But that's against the rules (tasklist_lock
-	 * is taken for reading by interrupts on other
-	 * cpu's that may have task_lock).
-	 */
-	task_lock(task);
-	local_irq_disable();
-	if (!write_trylock(&tasklist_lock)) {
-		local_irq_enable();
-		task_unlock(task);
-		do {
-			cpu_relax();
-		} while (!write_can_lock(&tasklist_lock));
-		goto repeat;
-	}
-
-	if (!task->mm)
-		goto bad;
-	/* the same process cannot be attached many times */
-	if (task->ptrace & PT_PTRACED)
-		goto bad;
-	retval = may_attach(task);
-	if (retval)
-		goto bad;
-
-	/* Go */
-	task->ptrace |= PT_PTRACED | ((task->real_parent != current)
-				      ? PT_ATTACHED : 0);
-	if (capable(CAP_SYS_PTRACE))
-		task->ptrace |= PT_PTRACE_CAP;
-
-	__ptrace_link(task, current);
-
-	force_sig_specific(SIGSTOP, task);
-
-bad:
-	write_unlock_irq(&tasklist_lock);
-	task_unlock(task);
-out:
-	return retval;
-}
-
-static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
-{
-	child->exit_code = data;
-	/* .. re-parent .. */
-	__ptrace_unlink(child);
-	/* .. and wake it up. */
-	if (child->exit_state != EXIT_ZOMBIE)
-		wake_up_process(child);
-}
-
-int ptrace_detach(struct task_struct *child, unsigned int data)
-{
-	if (!valid_signal(data))
-		return -EIO;
-
-	/* Architecture-specific hardware disable .. */
-	ptrace_disable(child);
-
-	write_lock_irq(&tasklist_lock);
-	/* protect against de_thread()->release_task() */
-	if (child->ptrace)
-		__ptrace_detach(child, data);
-	write_unlock_irq(&tasklist_lock);
-
-	return 0;
-}
-
 /*
  * Access another process' address space.
  * Source/target buffer must be kernel space, 
@@ -295,249 +125,1567 @@ int access_process_vm(struct task_struct
 	return buf - old_buf;
 }
 
-int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
+
+#ifndef CONFIG_PTRACE
+
+asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+{
+	return -ENOSYS;
+}
+
+#else
+
+struct ptrace_state
 {
-	int copied = 0;
+	/*
+	 * These elements are always available, even when the struct is
+	 * awaiting destruction at the next RCU callback point.
+	 */
+	struct utrace_attached_engine *engine;
+	struct task_struct *task; /* Target task.  */
+	struct task_struct *parent; /* Whom we report to.  */
+	struct list_head entry;	/* Entry on parent->ptracees list.  */
+
+	union {
+		struct rcu_head dead;
+		struct {
+			u8 options; /* PTRACE_SETOPTIONS bits.  */
+			unsigned int syscall:1;	/* Reporting for syscall.  */
+#ifdef PTRACE_SYSEMU
+			unsigned int sysemu:1; /* PTRACE_SYSEMU in progress. */
+#endif
+			unsigned int have_eventmsg:1; /* u.eventmsg valid. */
+			unsigned int cap_sys_ptrace:1; /* Tracer capable.  */
 
-	while (len > 0) {
-		char buf[128];
-		int this_len, retval;
+			union
+			{
+				unsigned long eventmsg;
+				siginfo_t *siginfo;
+			} u;
+		} live;
+	} u;
+};
 
-		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
-		retval = access_process_vm(tsk, src, buf, this_len, 0);
-		if (!retval) {
-			if (copied)
-				break;
-			return -EIO;
+static const struct utrace_engine_ops ptrace_utrace_ops; /* Initialized below. */
+
+static void
+ptrace_state_unlink(struct ptrace_state *state)
+{
+	task_lock(state->parent);
+	list_del_rcu(&state->entry);
+	task_unlock(state->parent);
+}
+
+static struct ptrace_state *
+ptrace_setup(struct task_struct *target, struct utrace_attached_engine *engine,
+	     struct task_struct *parent, u8 options, int cap_sys_ptrace,
+	     struct ptrace_state *state)
+{
+	if (state == NULL) {
+		state = kzalloc(sizeof *state, GFP_USER);
+		if (unlikely(state == NULL))
+			return ERR_PTR(-ENOMEM);
+	}
+
+	state->engine = engine;
+	state->task = target;
+	state->parent = parent;
+	state->u.live.options = options;
+	state->u.live.cap_sys_ptrace = cap_sys_ptrace;
+
+	task_lock(parent);
+	if (unlikely(parent->flags & PF_EXITING)) {
+		task_unlock(parent);
+		kfree(state);
+		return ERR_PTR(-EALREADY);
+	}
+	list_add_rcu(&state->entry, &state->parent->ptracees);
+	task_unlock(state->parent);
+
+	BUG_ON(engine->data != 0);
+	rcu_assign_pointer(engine->data, (unsigned long) state);
+
+	return state;
+}
+
+static void
+ptrace_state_free(struct rcu_head *rhead)
+{
+	struct ptrace_state *state = container_of(rhead,
+						  struct ptrace_state, u.dead);
+	kfree(state);
+}
+
+static void
+ptrace_done(struct ptrace_state *state)
+{
+	INIT_RCU_HEAD(&state->u.dead);
+	call_rcu(&state->u.dead, ptrace_state_free);
+}
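+
+/*
+ * Note on the call_rcu above: readers such as ptrace_do_wait walk the
+ * parent->ptracees list under rcu_read_lock only, so a state unlinked
+ * by ptrace_state_unlink must remain valid until a grace period has
+ * elapsed.  A sketch of the reader side (see ptrace_do_wait below):
+ *
+ *	rcu_read_lock();
+ *	list_for_each_entry_rcu(state, &tsk->ptracees, entry) {
+ *		... state->task may be examined here ...
+ *	}
+ *	rcu_read_unlock();
+ */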
+
+/*
+ * Update the tracing engine state to match the new ptrace state.
+ */
+static int __must_check
+ptrace_update(struct task_struct *target,
+	      struct utrace_attached_engine *engine,
+	      unsigned long flags)
+{
+	struct ptrace_state *state = (struct ptrace_state *) engine->data;
+
+	/*
+	 * These events are always reported.
+	 */
+	flags |= (UTRACE_EVENT(DEATH) | UTRACE_EVENT(EXEC)
+		  | UTRACE_EVENT_SIGNAL_ALL | UTRACE_EVENT(JCTL));
+
+	/*
+	 * We always have to examine clone events to check for CLONE_PTRACE.
+	 */
+	flags |= UTRACE_EVENT(CLONE);
+
+	/*
+	 * PTRACE_SETOPTIONS can request more events.
+	 */
+	if (state->u.live.options & PTRACE_O_TRACEEXIT)
+		flags |= UTRACE_EVENT(EXIT);
+	if (state->u.live.options & PTRACE_O_TRACEVFORKDONE)
+		flags |= UTRACE_EVENT(VFORK_DONE);
+
+	/*
+	 * ptrace always inhibits normal parent reaping.
+	 * But for a corner case we sometimes see the REAP event anyway.
+	 */
+	flags |= UTRACE_ACTION_NOREAP | UTRACE_EVENT(REAP);
+
+	if (!(flags & UTRACE_ACTION_QUIESCE)) {
+		/*
+		 * We're letting the thread resume from ptrace stop.
+		 * If SIGKILL is waking it up, it can be racing with us here
+		 * to set its own exit_code in do_exit.  Though we clobber
+		 * it here, we check for the case in ptrace_report_death.
+		 */
+		if (!unlikely(target->flags & PF_SIGNALED))
+			target->exit_code = 0;
+
+		if (!state->u.live.have_eventmsg)
+			state->u.live.u.siginfo = NULL;
+
+		if (target->state == TASK_STOPPED) {
+			spin_lock_irq(&target->sighand->siglock);
+			if (target->state == TASK_STOPPED)
+				target->signal->flags &= ~SIGNAL_STOP_STOPPED;
+			spin_unlock_irq(&target->sighand->siglock);
 		}
-		if (copy_to_user(dst, buf, retval))
-			return -EFAULT;
-		copied += retval;
-		src += retval;
-		dst += retval;
-		len -= retval;			
 	}
-	return copied;
+
+	return utrace_set_flags(target, engine, flags);
 }
 
-int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
+static int ptrace_traceme(void)
 {
-	int copied = 0;
+	struct utrace_attached_engine *engine;
+	struct ptrace_state *state;
+	struct task_struct *parent;
+	int retval;
+
+	engine = utrace_attach(current, (UTRACE_ATTACH_CREATE
+					 | UTRACE_ATTACH_EXCLUSIVE
+					 | UTRACE_ATTACH_MATCH_OPS),
+			       &ptrace_utrace_ops, 0UL);
+
+	if (IS_ERR(engine)) {
+		retval = PTR_ERR(engine);
+		if (retval == -EEXIST)
+			retval = -EPERM;
+	}
+	else {
+		/*
+		 * We preallocate here so that we can hold rcu_read_lock
+		 * across the whole span from extracting ->parent to
+		 * handing it to ptrace_setup.
+		 */
+		state = kzalloc(sizeof *state, GFP_USER);
+		if (unlikely(state == NULL)) {
+			(void) utrace_detach(current, engine);
+			printk(KERN_ERR
+			       "ptrace out of memory, lost child %d of %d\n",
+			       current->pid, current->parent->pid);
+			return -ENOMEM;
+		}
 
-	while (len > 0) {
-		char buf[128];
-		int this_len, retval;
+		rcu_read_lock();
+		parent = rcu_dereference(current->parent);
+
+		task_lock(current);
+		retval = security_ptrace(parent, current);
+		task_unlock(current);
+
+		if (retval) {
+			kfree(state);
+			(void) utrace_detach(current, engine);
+		}
+		else {
+			state = ptrace_setup(current, engine, parent, 0, 0,
+					     state);
+			if (IS_ERR(state))
+				retval = PTR_ERR(state);
+		}
+		rcu_read_unlock();
 
-		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
-		if (copy_from_user(buf, src, this_len))
-			return -EFAULT;
-		retval = access_process_vm(tsk, dst, buf, this_len, 1);
 		if (!retval) {
-			if (copied)
-				break;
-			return -EIO;
+			/*
+			 * This can't fail because we can't die while we
+			 * are here doing this.
+			 */
+			retval = ptrace_update(current, engine, 0);
+			BUG_ON(retval);
+		}
+		else if (unlikely(retval == -EALREADY))
+			/*
+			 * We raced with our parent's exit, which would
+			 * have detached us just after our attach if
+			 * we'd won the race.  Pretend we got attached
+			 * and then detached immediately, no error.
+			 */
+			retval = 0;
+	}
+
+	return retval;
+}
+
+static int ptrace_attach(struct task_struct *task)
+{
+	struct utrace_attached_engine *engine;
+	struct ptrace_state *state;
+	int retval;
+
+	retval = -EPERM;
+	if (task->pid <= 1)
+		goto bad;
+	if (task->tgid == current->tgid)
+		goto bad;
+	if (!task->mm)		/* kernel threads */
+		goto bad;
+
+	engine = utrace_attach(task, (UTRACE_ATTACH_CREATE
+				      | UTRACE_ATTACH_EXCLUSIVE
+				      | UTRACE_ATTACH_MATCH_OPS),
+			       &ptrace_utrace_ops, 0);
+	if (IS_ERR(engine)) {
+		retval = PTR_ERR(engine);
+		if (retval == -EEXIST)
+			retval = -EPERM;
+		goto bad;
+	}
+
+	if (ptrace_may_attach(task)) {
+		state = ptrace_setup(task, engine, current, 0,
+				     capable(CAP_SYS_PTRACE), NULL);
+		if (IS_ERR(state))
+			retval = PTR_ERR(state);
+		else {
+			retval = ptrace_update(task, engine, 0);
+			if (retval) {
+				/*
+				 * It died before we enabled any callbacks.
+				 */
+				if (retval == -EALREADY)
+					retval = -ESRCH;
+				BUG_ON(retval != -ESRCH);
+				ptrace_state_unlink(state);
+				ptrace_done(state);
+			}
+		}
+	}
+	if (retval)
+		(void) utrace_detach(task, engine);
+	else {
+		int stopped;
+
+		force_sig_specific(SIGSTOP, task);
+
+		spin_lock_irq(&task->sighand->siglock);
+		stopped = (task->state == TASK_STOPPED);
+		spin_unlock_irq(&task->sighand->siglock);
+
+		if (stopped) {
+			/*
+			 * Do now the regset 0 writeback that we do on every
+			 * stop, since it's never been done.  On register
+			 * window machines, this makes sure the user memory
+			 * backing the register data is up to date.
+			 */
+			const struct utrace_regset *regset;
+			regset = utrace_regset(task, engine,
+					       utrace_native_view(task), 0);
+			if (regset->writeback)
+				(*regset->writeback)(task, regset, 1);
 		}
-		copied += retval;
-		src += retval;
-		dst += retval;
-		len -= retval;			
 	}
-	return copied;
+
+bad:
+	return retval;
 }
 
-static int ptrace_setoptions(struct task_struct *child, long data)
+static int ptrace_detach(struct task_struct *task,
+			 struct utrace_attached_engine *engine)
 {
-	child->ptrace &= ~PT_TRACE_MASK;
+	struct ptrace_state *state = (struct ptrace_state *) engine->data;
+	int error = utrace_detach(task, engine);
+	if (!error) {
+		/*
+		 * We can only get here from the ptracer itself or via
+		 * detach_zombie from another thread in its group.
+		 */
+		BUG_ON(state->parent->tgid != current->tgid);
+		ptrace_state_unlink(state);
+		ptrace_done(state);
 
-	if (data & PTRACE_O_TRACESYSGOOD)
-		child->ptrace |= PT_TRACESYSGOOD;
+		/*
+		 * Wake up any other threads that might be blocked in
+		 * wait.  Though traditional ptrace does not guarantee
+		 * this wakeup on PTRACE_DETACH, it does prevent
+		 * erroneous blocking in wait when another racing
+		 * thread's wait call reap-detaches the last child.
+		 * Without this wakeup, another thread might stay
+		 * blocked when it should return -ECHILD.
+		 */
+		spin_lock_irq(&current->sighand->siglock);
+		wake_up_interruptible(&current->signal->wait_chldexit);
+		spin_unlock_irq(&current->sighand->siglock);
+	}
+	return error;
+}
 
-	if (data & PTRACE_O_TRACEFORK)
-		child->ptrace |= PT_TRACE_FORK;
 
-	if (data & PTRACE_O_TRACEVFORK)
-		child->ptrace |= PT_TRACE_VFORK;
+/*
+ * This is called when we are exiting.  We must stop all our ptracing.
+ */
+void
+ptrace_exit(struct task_struct *tsk)
+{
+	struct list_head *pos, *n;
 
-	if (data & PTRACE_O_TRACECLONE)
-		child->ptrace |= PT_TRACE_CLONE;
+	/*
+	 * Taking the task_lock after PF_EXITING is set ensures that a
+	 * child in ptrace_traceme will not put itself on our list when
+	 * we might already be tearing it down.
+	 */
+	task_lock(tsk);
+	if (likely(list_empty(&tsk->ptracees))) {
+		task_unlock(tsk);
+		return;
+	}
+	task_unlock(tsk);
 
-	if (data & PTRACE_O_TRACEEXEC)
-		child->ptrace |= PT_TRACE_EXEC;
+restart:
+	rcu_read_lock();
 
-	if (data & PTRACE_O_TRACEVFORKDONE)
-		child->ptrace |= PT_TRACE_VFORK_DONE;
+	list_for_each_safe_rcu(pos, n, &tsk->ptracees) {
+		struct ptrace_state *state = list_entry(pos,
+							struct ptrace_state,
+							entry);
+		int error = utrace_detach(state->task, state->engine);
+		BUG_ON(state->parent != tsk);
+		if (likely(error == 0)) {
+			ptrace_state_unlink(state);
+			ptrace_done(state);
+		}
+		else if (unlikely(error == -EALREADY)) {
+			/*
+			 * It's still doing report_death callbacks.
+			 * Just wait for it to settle down.
+			 * Since wait_task_inactive might yield,
+			 * we must go out of rcu_read_lock and restart.
+			 */
+			struct task_struct *p = state->task;
+			get_task_struct(p);
+			rcu_read_unlock();
+			wait_task_inactive(p);
+			put_task_struct(p);
+			goto restart;
+		}
+		else
+			BUG_ON(error != -ESRCH);
+	}
 
-	if (data & PTRACE_O_TRACEEXIT)
-		child->ptrace |= PT_TRACE_EXIT;
+	rcu_read_unlock();
 
-	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
+	BUG_ON(!list_empty(&tsk->ptracees));
 }
 
-static int ptrace_getsiginfo(struct task_struct *child, siginfo_t __user * data)
+static int
+ptrace_induce_signal(struct task_struct *target,
+		     struct utrace_attached_engine *engine,
+		     long signr)
 {
-	siginfo_t lastinfo;
-	int error = -ESRCH;
+	struct ptrace_state *state = (struct ptrace_state *) engine->data;
 
-	read_lock(&tasklist_lock);
-	if (likely(child->sighand != NULL)) {
-		error = -EINVAL;
-		spin_lock_irq(&child->sighand->siglock);
-		if (likely(child->last_siginfo != NULL)) {
-			lastinfo = *child->last_siginfo;
-			error = 0;
+	if (signr == 0)
+		return 0;
+
+	if (!valid_signal(signr))
+		return -EIO;
+
+	if (state->u.live.syscall) {
+		/*
+		 * This is the traditional ptrace behavior when given
+		 * a signal to resume from a syscall tracing stop.
+		 */
+		send_sig(signr, target, 1);
+	}
+	else if (!state->u.live.have_eventmsg && state->u.live.u.siginfo) {
+		siginfo_t *info = state->u.live.u.siginfo;
+
+		/* Update the siginfo structure if the signal has
+		   changed.  If the debugger wanted something
+		   specific in the siginfo structure then it should
+		   have updated *info via PTRACE_SETSIGINFO.  */
+		if (signr != info->si_signo) {
+			info->si_signo = signr;
+			info->si_errno = 0;
+			info->si_code = SI_USER;
+			info->si_pid = current->pid;
+			info->si_uid = current->uid;
 		}
-		spin_unlock_irq(&child->sighand->siglock);
+
+		return utrace_inject_signal(target, engine,
+					    UTRACE_ACTION_RESUME, info, NULL);
 	}
-	read_unlock(&tasklist_lock);
-	if (!error)
-		return copy_siginfo_to_user(data, &lastinfo);
-	return error;
+
+	return 0;
 }
 
-static int ptrace_setsiginfo(struct task_struct *child, siginfo_t __user * data)
+fastcall int
+ptrace_regset_access(struct task_struct *target,
+		     struct utrace_attached_engine *engine,
+		     const struct utrace_regset_view *view,
+		     int setno, unsigned long offset, unsigned int size,
+		     void __user *data, int write)
 {
-	siginfo_t newinfo;
-	int error = -ESRCH;
+	const struct utrace_regset *regset = utrace_regset(target, engine,
+							   view, setno);
+	int ret;
+
+	if (unlikely(regset == NULL))
+		return -EIO;
 
-	if (copy_from_user(&newinfo, data, sizeof (siginfo_t)))
-		return -EFAULT;
+	if (size == (unsigned int) -1)
+		size = regset->size * regset->n;
 
-	read_lock(&tasklist_lock);
-	if (likely(child->sighand != NULL)) {
-		error = -EINVAL;
-		spin_lock_irq(&child->sighand->siglock);
-		if (likely(child->last_siginfo != NULL)) {
-			*child->last_siginfo = newinfo;
-			error = 0;
-		}
-		spin_unlock_irq(&child->sighand->siglock);
+	if (write) {
+		if (!access_ok(VERIFY_READ, data, size))
+			ret = -EIO;
+		else
+			ret = (*regset->set)(target, regset,
+					     offset, size, NULL, data);
+	}
+	else {
+		if (!access_ok(VERIFY_WRITE, data, size))
+			ret = -EIO;
+		else
+			ret = (*regset->get)(target, regset,
+					     offset, size, NULL, data);
+	}
+
+	return ret;
+}
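+
+/*
+ * Illustrative sketch (hypothetical arch code, not part of this
+ * patch): an arch_ptrace implementation can map a whole-regset
+ * request onto this helper, using size -1 to mean the full regset
+ * as handled above:
+ *
+ *	case PTRACE_GETREGS:
+ *		return ptrace_regset_access(child, engine,
+ *					    utrace_native_view(child),
+ *					    0, 0, (unsigned int) -1,
+ *					    datap, 0);
+ */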
+
+fastcall int
+ptrace_onereg_access(struct task_struct *target,
+		     struct utrace_attached_engine *engine,
+		     const struct utrace_regset_view *view,
+		     int setno, unsigned long regno,
+		     void __user *data, int write)
+{
+	const struct utrace_regset *regset = utrace_regset(target, engine,
+							   view, setno);
+	unsigned int pos;
+	int ret;
+
+	if (unlikely(regset == NULL))
+		return -EIO;
+
+	if (regno < regset->bias || regno >= regset->bias + regset->n)
+		return -EINVAL;
+
+	pos = (regno - regset->bias) * regset->size;
+
+	if (write) {
+		if (!access_ok(VERIFY_READ, data, regset->size))
+			ret = -EIO;
+		else
+			ret = (*regset->set)(target, regset, pos, regset->size,
+					     NULL, data);
+	}
+	else {
+		if (!access_ok(VERIFY_WRITE, data, regset->size))
+			ret = -EIO;
+		else
+			ret = (*regset->get)(target, regset, pos, regset->size,
+					     NULL, data);
 	}
+
+	return ret;
+}
+
+fastcall int
+ptrace_layout_access(struct task_struct *target,
+		     struct utrace_attached_engine *engine,
+		     const struct utrace_regset_view *view,
+		     const struct ptrace_layout_segment layout[],
+		     unsigned long addr, unsigned int size,
+		     void __user *udata, void *kdata, int write)
+{
+	const struct ptrace_layout_segment *seg;
+	int ret = -EIO;
+
+	if (kdata == NULL &&
+	    !access_ok(write ? VERIFY_READ : VERIFY_WRITE, udata, size))
+		return -EIO;
+
+	seg = layout;
+	do {
+		unsigned int pos, n;
+
+		while (addr >= seg->end && seg->end != 0)
+			++seg;
+
+		if (addr < seg->start || addr >= seg->end)
+			return -EIO;
+
+		pos = addr - seg->start + seg->offset;
+		n = min(size, seg->end - (unsigned int) addr);
+
+		if (unlikely(seg->regset == (unsigned int) -1)) {
+			/*
+			 * This is a no-op/zero-fill portion of struct user.
+			 */
+			ret = 0;
+			if (!write) {
+				if (kdata)
+					memset(kdata, 0, n);
+				else if (clear_user(udata, n))
+					ret = -EFAULT;
+			}
+		}
+		else {
+			unsigned int align;
+			const struct utrace_regset *regset = utrace_regset(
+				target, engine, view, seg->regset);
+			if (unlikely(regset == NULL))
+				return -EIO;
+
+			/*
+			 * A ptrace compatibility layout can do a misaligned
+			 * regset access, e.g. word access to larger data.
+			 * An arch's compat layout can be this way only if
+			 * it is actually ok with the regset code despite the
+			 * regset->align setting.
+			 */
+			align = min(regset->align, size);
+			if ((pos & (align - 1))
+			    || pos >= regset->n * regset->size)
+				return -EIO;
+
+			if (write)
+				ret = (*regset->set)(target, regset,
+						     pos, n, kdata, udata);
+			else
+				ret = (*regset->get)(target, regset,
+						     pos, n, kdata, udata);
+		}
+
+		if (kdata)
+			kdata += n;
+		else
+			udata += n;
+		addr += n;
+		size -= n;
+	} while (ret == 0 && size > 0);
+
+	return ret;
+}
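+
+/*
+ * Illustrative sketch (hypothetical layout, not part of this patch):
+ * an arch describes its struct user area as segments mapping offsets
+ * to regsets, with regset -1 marking a zero-filled hole and an
+ * end == 0 entry terminating the table:
+ *
+ *	static const struct ptrace_layout_segment my_uarea[] = {
+ *		{ .start = 0, .end = 68, .regset = 0, .offset = 0 },
+ *		{ .start = 68, .end = 284, .regset = (unsigned int) -1 },
+ *		{ .start = 0, .end = 0 }
+ *	};
+ */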
+
+
+static int
+ptrace_start(long pid, long request,
+	     struct task_struct **childp,
+	     struct utrace_attached_engine **enginep,
+	     struct ptrace_state **statep)
+
+{
+	struct task_struct *child;
+	struct utrace_attached_engine *engine;
+	struct ptrace_state *state;
+	int ret;
+
+	if (request == PTRACE_TRACEME)
+		return ptrace_traceme();
+
+	ret = -ESRCH;
+	read_lock(&tasklist_lock);
+	child = find_task_by_pid(pid);
+	if (child)
+		get_task_struct(child);
 	read_unlock(&tasklist_lock);
-	return error;
+#ifdef PTRACE_DEBUG
+	printk("ptrace pid %ld => %p\n", pid, child);
+#endif
+	if (!child)
+		goto out;
+
+	ret = -EPERM;
+	if (pid == 1)		/* you may not mess with init */
+		goto out_tsk;
+
+	if (request == PTRACE_ATTACH) {
+		ret = ptrace_attach(child);
+		goto out_tsk;
+	}
+
+	rcu_read_lock();
+	engine = utrace_attach(child, UTRACE_ATTACH_MATCH_OPS,
+			       &ptrace_utrace_ops, 0);
+	ret = -ESRCH;
+	if (IS_ERR(engine) || engine == NULL)
+		goto out_tsk_rcu;
+	state = rcu_dereference((struct ptrace_state *) engine->data);
+	if (state == NULL || state->parent != current)
+		goto out_tsk_rcu;
+	rcu_read_unlock();
+
+	/*
+	 * Traditional ptrace behavior demands that the target already be
+	 * quiescent, but not dead.
+	 */
+	if (request != PTRACE_KILL
+	    && !(engine->flags & UTRACE_ACTION_QUIESCE)) {
+#ifdef PTRACE_DEBUG
+		printk("%d not stopped (%lx)\n", child->pid, child->state);
+#endif
+		goto out_tsk;
+	}
+
+	/*
+	 * We do this for all requests to match traditional ptrace behavior.
+	 * If the machine state synchronization done at context switch time
+	 * includes e.g. writing back to user memory, we want to make sure
+	 * that has finished before a PTRACE_PEEKDATA can fetch the results.
+	 * On most machines, only regset data is affected by context switch
+	 * and calling utrace_regset later on will take care of that, so
+	 * this is superfluous.
+	 *
+	 * To do this purely in utrace terms, we could do:
+	 *  (void) utrace_regset(child, engine, utrace_native_view(child), 0);
+	 */
+	wait_task_inactive(child);
+
+	if (child->exit_state)
+		goto out_tsk;
+
+	*childp = child;
+	*enginep = engine;
+	*statep = state;
+	return -EIO; /* Sentinel: not an error; caller proceeds.  */
+
+out_tsk_rcu:
+	rcu_read_unlock();
+out_tsk:
+	put_task_struct(child);
+out:
+	return ret;
 }
 
-int ptrace_request(struct task_struct *child, long request,
-		   long addr, long data)
+static int
+ptrace_common(long request, struct task_struct *child,
+	      struct utrace_attached_engine *engine,
+	      struct ptrace_state *state,
+	      unsigned long addr, long data)
 {
+	unsigned long flags;
 	int ret = -EIO;
 
 	switch (request) {
+	case PTRACE_DETACH:
+		/*
+		 * Detach a process that was attached.
+		 */
+		ret = ptrace_induce_signal(child, engine, data);
+		if (!ret) {
+			ret = ptrace_detach(child, engine);
+			if (ret == -EALREADY) /* Already a zombie.  */
+				ret = -ESRCH;
+			if (ret)
+				BUG_ON(ret != -ESRCH);
+		}
+		break;
+
+		/*
+		 * These are the operations that resume the child running.
+		 */
+	case PTRACE_KILL:
+		data = SIGKILL;
+	case PTRACE_CONT:
+	case PTRACE_SYSCALL:
+#ifdef PTRACE_SYSEMU
+	case PTRACE_SYSEMU:
+	case PTRACE_SYSEMU_SINGLESTEP:
+#endif
+#ifdef PTRACE_SINGLEBLOCK
+	case PTRACE_SINGLEBLOCK:
+# ifdef ARCH_HAS_BLOCK_STEP
+		if (! ARCH_HAS_BLOCK_STEP)
+# endif
+			if (request == PTRACE_SINGLEBLOCK)
+				break;
+#endif
+	case PTRACE_SINGLESTEP:
+#ifdef ARCH_HAS_SINGLE_STEP
+		if (! ARCH_HAS_SINGLE_STEP)
+#endif
+			if (request == PTRACE_SINGLESTEP
+#ifdef PTRACE_SYSEMU_SINGLESTEP
+			    || request == PTRACE_SYSEMU_SINGLESTEP
+#endif
+				)
+				break;
+
+		ret = ptrace_induce_signal(child, engine, data);
+		if (ret)
+			break;
+
+		/*
+		 * Reset the action flags without QUIESCE, so it resumes.
+		 */
+		flags = 0;
+#ifdef PTRACE_SYSEMU
+		state->u.live.sysemu = (request == PTRACE_SYSEMU_SINGLESTEP
+					|| request == PTRACE_SYSEMU);
+#endif
+		if (request == PTRACE_SINGLESTEP
+#ifdef PTRACE_SYSEMU
+		    || request == PTRACE_SYSEMU_SINGLESTEP
+#endif
+			)
+			flags |= UTRACE_ACTION_SINGLESTEP;
+#ifdef PTRACE_SINGLEBLOCK
+		else if (request == PTRACE_SINGLEBLOCK)
+			flags |= UTRACE_ACTION_BLOCKSTEP;
+#endif
+		if (request == PTRACE_SYSCALL)
+			flags |= UTRACE_EVENT_SYSCALL;
+#ifdef PTRACE_SYSEMU
+		else if (request == PTRACE_SYSEMU
+			 || request == PTRACE_SYSEMU_SINGLESTEP)
+			flags |= UTRACE_EVENT(SYSCALL_ENTRY);
+#endif
+		ret = ptrace_update(child, engine, flags);
+		if (ret)
+			BUG_ON(ret != -ESRCH);
+		ret = 0;
+		break;
+
 #ifdef PTRACE_OLDSETOPTIONS
 	case PTRACE_OLDSETOPTIONS:
 #endif
 	case PTRACE_SETOPTIONS:
-		ret = ptrace_setoptions(child, data);
+		ret = -EINVAL;
+		if (data & ~PTRACE_O_MASK)
+			break;
+		state->u.live.options = data;
+		ret = ptrace_update(child, engine, UTRACE_ACTION_QUIESCE);
+		if (ret)
+			BUG_ON(ret != -ESRCH);
+		ret = 0;
+		break;
+	}
+
+	return ret;
+}
+
+
+asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+{
+	struct task_struct *child;
+	struct utrace_attached_engine *engine;
+	struct ptrace_state *state;
+	long ret, val;
+
+#ifdef PTRACE_DEBUG
+	printk("%d sys_ptrace(%ld, %ld, %lx, %lx)\n",
+	       current->pid, request, pid, addr, data);
+#endif
+
+	ret = ptrace_start(pid, request, &child, &engine, &state);
+	if (ret != -EIO)
+		goto out;
+
+	val = 0;
+	ret = arch_ptrace(&request, child, engine, addr, data, &val);
+	if (ret != -ENOSYS) {
+		if (ret == 0) {
+			ret = val;
+			force_successful_syscall_return();
+		}
+		goto out_tsk;
+	}
+
+	switch (request) {
+	default:
+		ret = ptrace_common(request, child, engine, state, addr, data);
+		break;
+
+	case PTRACE_PEEKTEXT: /* read word at location addr. */
+	case PTRACE_PEEKDATA: {
+		unsigned long tmp;
+		int copied;
+
+		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+		ret = -EIO;
+		if (copied != sizeof(tmp))
+			break;
+		ret = put_user(tmp, (unsigned long __user *) data);
+		break;
+	}
+
+	case PTRACE_POKETEXT: /* write the word at location addr. */
+	case PTRACE_POKEDATA:
+		ret = 0;
+		if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
+			break;
+		ret = -EIO;
 		break;
+
 	case PTRACE_GETEVENTMSG:
-		ret = put_user(child->ptrace_message, (unsigned long __user *) data);
+		ret = put_user(state->u.live.have_eventmsg
+			       ? state->u.live.u.eventmsg : 0L,
+			       (unsigned long __user *) data);
 		break;
 	case PTRACE_GETSIGINFO:
-		ret = ptrace_getsiginfo(child, (siginfo_t __user *) data);
+		ret = -EINVAL;
+		if (!state->u.live.have_eventmsg && state->u.live.u.siginfo)
+			ret = copy_siginfo_to_user((siginfo_t __user *) data,
+						   state->u.live.u.siginfo);
 		break;
 	case PTRACE_SETSIGINFO:
-		ret = ptrace_setsiginfo(child, (siginfo_t __user *) data);
+		ret = -EINVAL;
+		if (!state->u.live.have_eventmsg && state->u.live.u.siginfo) {
+			ret = 0;
+			if (copy_from_user(state->u.live.u.siginfo,
+					   (siginfo_t __user *) data,
+					   sizeof(siginfo_t)))
+				ret = -EFAULT;
+		}
 		break;
+	}
+
+out_tsk:
+	put_task_struct(child);
+out:
+#ifdef PTRACE_DEBUG
+	printk("%d ptrace -> %lx\n", current->pid, ret);
+#endif
+	return ret;
+}
+
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+
+asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+				  compat_ulong_t addr, compat_long_t cdata)
+{
+	const unsigned long data = (unsigned long) (compat_ulong_t) cdata;
+	struct task_struct *child;
+	struct utrace_attached_engine *engine;
+	struct ptrace_state *state;
+	compat_long_t ret, val;
+
+#ifdef PTRACE_DEBUG
+	printk("%d compat_sys_ptrace(%d, %d, %x, %x)\n",
+	       current->pid, request, pid, addr, cdata);
+#endif
+	ret = ptrace_start(pid, request, &child, &engine, &state);
+	if (ret != -EIO)
+		goto out;
+
+	val = 0;
+	ret = arch_compat_ptrace(&request, child, engine, addr, cdata, &val);
+	if (ret != -ENOSYS) {
+		if (ret == 0) {
+			ret = val;
+			force_successful_syscall_return();
+		}
+		goto out_tsk;
+	}
+
+	switch (request) {
 	default:
+		ret = ptrace_common(request, child, engine, state, addr, data);
+		break;
+
+	case PTRACE_PEEKTEXT: /* read word at location addr. */
+	case PTRACE_PEEKDATA: {
+		compat_ulong_t tmp;
+		int copied;
+
+		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+		ret = -EIO;
+		if (copied != sizeof(tmp))
+			break;
+		ret = put_user(tmp, (compat_ulong_t __user *) data);
+		break;
+	}
+
+	case PTRACE_POKETEXT: /* write the word at location addr. */
+	case PTRACE_POKEDATA:
+		ret = 0;
+		if (access_process_vm(child, addr, &cdata, sizeof(cdata), 1) == sizeof(cdata))
+			break;
+		ret = -EIO;
+		break;
+
+	case PTRACE_GETEVENTMSG:
+		ret = put_user(state->u.live.have_eventmsg
+			       ? state->u.live.u.eventmsg : 0L,
+			       (compat_long_t __user *) data);
+		break;
+	case PTRACE_GETSIGINFO:
+		ret = -EINVAL;
+		if (!state->u.live.have_eventmsg && state->u.live.u.siginfo)
+			ret = copy_siginfo_to_user32(
+				(struct compat_siginfo __user *) data,
+				state->u.live.u.siginfo);
+		break;
+	case PTRACE_SETSIGINFO:
+		ret = -EINVAL;
+		if (!state->u.live.have_eventmsg && state->u.live.u.siginfo
+		    && copy_siginfo_from_user32(
+			    state->u.live.u.siginfo,
+			    (struct compat_siginfo __user *) data))
+			ret = -EFAULT;
 		break;
 	}
 
+out_tsk:
+	put_task_struct(child);
+out:
+#ifdef PTRACE_DEBUG
+	printk("%d ptrace -> %x\n", current->pid, ret);
+#endif
 	return ret;
 }
+#endif
 
-/**
- * ptrace_traceme  --  helper for PTRACE_TRACEME
- *
- * Performs checks and sets PT_PTRACED.
- * Should be used by all ptrace implementations for PTRACE_TRACEME.
+
+/*
+ * Detach the zombie being reported for wait.
  */
-int ptrace_traceme(void)
+static inline void
+detach_zombie(struct task_struct *tsk,
+	      struct task_struct *p, struct ptrace_state *state)
 {
-	int ret = -EPERM;
-
-	/*
-	 * Are we already being traced?
-	 */
-	task_lock(current);
-	if (!(current->ptrace & PT_PTRACED)) {
-		ret = security_ptrace(current->parent, current);
+	int detach_error;
+restart:
+	detach_error = 0;
+	rcu_read_lock();
+	if (tsk != current) {
 		/*
-		 * Set the ptrace bit in the process ptrace flags.
+		 * We've excluded other ptrace_do_wait calls.  But the
+		 * ptracer itself might have done ptrace_detach while we
+		 * did not have rcu_read_lock.  So double-check that state
+		 * is still valid.
 		 */
-		if (!ret)
-			current->ptrace |= PT_PTRACED;
+		struct utrace_attached_engine *engine;
+		engine = utrace_attach(
+			p, (UTRACE_ATTACH_MATCH_OPS
+			    | UTRACE_ATTACH_MATCH_DATA),
+			&ptrace_utrace_ops,
+			(unsigned long) state);
+		if (IS_ERR(engine) || state->parent != tsk)
+			detach_error = -ESRCH;
+		else
+			BUG_ON(state->engine != engine);
 	}
-	task_unlock(current);
-	return ret;
+	if (likely(!detach_error))
+		detach_error = ptrace_detach(p, state->engine);
+	if (unlikely(detach_error == -EALREADY)) {
+		/*
+		 * It's still doing report_death callbacks.
+		 * Just wait for it to settle down.
+		 */
+		rcu_read_unlock();
+		wait_task_inactive(p); /* Might block.  */
+		goto restart;
+	}
+	/*
+	 * A failure with -ESRCH means that report_reap is
+	 * already running and will do the cleanup, or that
+	 * we lost a race with ptrace_detach in another
+	 * thread or with the automatic detach in
+	 * report_death.
+	 */
+	if (detach_error)
+		BUG_ON(detach_error != -ESRCH);
+	rcu_read_unlock();
 }
 
-/**
- * ptrace_get_task_struct  --  grab a task struct reference for ptrace
- * @pid:       process id to grab a task_struct reference of
- *
- * This function is a helper for ptrace implementations.  It checks
- * permissions and then grabs a task struct for use of the actual
- * ptrace implementation.
- *
- * Returns the task_struct for @pid or an ERR_PTR() on failure.
+/*
+ * We're called with tasklist_lock held for reading.
+ * If we return -ECHILD or zero, next_thread(tsk) must still be valid to use.
+ * If we return another error code, or a successful PID value, we
+ * release tasklist_lock first.
  */
-struct task_struct *ptrace_get_task_struct(pid_t pid)
+int
+ptrace_do_wait(struct task_struct *tsk,
+	       pid_t pid, int options, struct siginfo __user *infop,
+	       int __user *stat_addr, struct rusage __user *rusagep)
 {
-	struct task_struct *child;
+	struct ptrace_state *state;
+	struct task_struct *p;
+	int err = -ECHILD;
+	int exit_code, why, status;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(state, &tsk->ptracees, entry) {
+		p = state->task;
+
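+		/*
+		 * Apply the wait4 pid-matching rules: pid > 0 names one
+		 * specific child, pid == 0 means our own process group,
+		 * pid < -1 means process group -pid, and -1 matches any.
+		 */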
+		if (pid > 0) {
+			if (p->pid != pid)
+				continue;
+		} else if (!pid) {
+			if (process_group(p) != process_group(current))
+				continue;
+		} else if (pid != -1) {
+			if (process_group(p) != -pid)
+				continue;
+		}
+		if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
+		    && !(options & __WALL))
+			continue;
+		if (security_task_wait(p))
+			continue;
+
+		/*
+		 * This is a matching child.  If we don't win now, tell
+		 * our caller to block and repeat.  From this point we
+		 * must ensure that wait_chldexit will get a wakeup for
+		 * any tracee stopping, dying, or being detached.
+		 * For death, tasklist_lock guarantees this already.
+		 */
+		err = 0;
+
+		switch (p->exit_state) {
+		case EXIT_ZOMBIE:
+			if (!likely(options & WEXITED))
+				continue;
+			if (delay_group_leader(p))
+				continue;
+			exit_code = p->exit_code;
+			goto found;
+		case EXIT_DEAD:
+			continue;
+		default:
+			/*
+			 * tasklist_lock holds up any transitions to
+			 * EXIT_ZOMBIE.  After releasing it we are
+			 * guaranteed a wakeup on wait_chldexit after
+			 * any new deaths.
+			 */
+			break;
+		}
+
+		/*
+		 * This xchg atomically ensures that only one do_wait
+		 * call can report this thread.  Because exit_code is
+		 * always set before do_notify wakes us up, after this
+		 * check fails we are sure to get a wakeup if it stops.
+		 */
+		exit_code = xchg(&p->exit_code, 0);
+		if (exit_code)
+			goto found;
+
+		// XXX should handle WCONTINUED
+	}
+	rcu_read_unlock();
+	return err;
+
+found:
+	BUG_ON(state->parent != tsk);
+	rcu_read_unlock();
+
+#ifdef PTRACE_DEBUG
+	printk("%d ptrace_do_wait (%d) found %d code %x (%lu)\n",
+	       current->pid, tsk->pid, p->pid, exit_code, p->exit_state);
+#endif
+
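+	/*
+	 * A dead tracee's exit_code decodes like a raw exit status:
+	 * low seven bits clear means a normal exit with the status in
+	 * bits 8-15, bit 7 flags a core dump, and otherwise it is the
+	 * killing signal.  A live tracee is in a ptrace stop, reported
+	 * as CLD_TRAPPED with wait status (signal << 8) | 0x7f.
+	 */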
+	if (p->exit_state) {
+		if (unlikely(p->parent == tsk))
+			/*
+			 * This is our natural child we were ptracing.
+			 * When it dies it detaches (see ptrace_report_death).
+			 * So we're seeing it here in a race.  When it
+			 * finishes detaching it will become reapable in
+			 * the normal wait_task_zombie path instead.
+			 */
+			return 0;
+		if ((exit_code & 0x7f) == 0) {
+			why = CLD_EXITED;
+			status = exit_code >> 8;
+		}
+		else {
+			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
+			status = exit_code & 0x7f;
+		}
+	}
+	else {
+		why = CLD_TRAPPED;
+		status = exit_code;
+		exit_code = (status << 8) | 0x7f;
+	}
 
 	/*
-	 * Tracing init is not allowed.
+	 * At this point we are committed to a successful return
+	 * or a user error return.  Release the tasklist_lock.
 	 */
-	if (pid == 1)
-		return ERR_PTR(-EPERM);
-
-	read_lock(&tasklist_lock);
-	child = find_task_by_pid(pid);
-	if (child)
-		get_task_struct(child);
+	get_task_struct(p);
 	read_unlock(&tasklist_lock);
-	if (!child)
-		return ERR_PTR(-ESRCH);
-	return child;
+
+	if (rusagep)
+		err = getrusage(p, RUSAGE_BOTH, rusagep);
+	if (infop) {
+		if (!err)
+			err = put_user(SIGCHLD, &infop->si_signo);
+		if (!err)
+			err = put_user(0, &infop->si_errno);
+		if (!err)
+			err = put_user((short)why, &infop->si_code);
+		if (!err)
+			err = put_user(p->pid, &infop->si_pid);
+		if (!err)
+			err = put_user(p->uid, &infop->si_uid);
+		if (!err)
+			err = put_user(status, &infop->si_status);
+	}
+	if (!err && stat_addr)
+		err = put_user(exit_code, stat_addr);
+
+	if (!err) {
+		if (why != CLD_TRAPPED)
+			/*
+			 * This was a death report.  The ptracer's wait
+			 * does an implicit detach, so the zombie reports
+			 * to its real parent now.
+			 */
+			detach_zombie(tsk, p, state);
+		err = p->pid;
+	}
+
+	put_task_struct(p);
+
+	return err;
 }
 
-#ifndef __ARCH_SYS_PTRACE
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+static void
+do_notify(struct task_struct *tsk, struct task_struct *parent, int why)
 {
-	struct task_struct *child;
-	long ret;
+	struct siginfo info;
+	unsigned long flags;
+	struct sighand_struct *sighand;
+	int sa_mask;
+
+	info.si_signo = SIGCHLD;
+	info.si_errno = 0;
+	info.si_pid = tsk->pid;
+	info.si_uid = tsk->uid;
+
+	/* FIXME: find out whether or not this is supposed to be c*time. */
+	info.si_utime = cputime_to_jiffies(tsk->utime);
+	info.si_stime = cputime_to_jiffies(tsk->stime);
+
+	sa_mask = SA_NOCLDSTOP;
+	info.si_code = why;
+	info.si_status = tsk->exit_code & 0x7f;
+	if (why == CLD_CONTINUED)
+		info.si_status = SIGCONT;
+	else if (why == CLD_STOPPED)
+		info.si_status = tsk->signal->group_exit_code & 0x7f;
+	else if (why == CLD_EXITED) {
+		sa_mask = SA_NOCLDWAIT;
+		if (tsk->exit_code & 0x80)
+			info.si_code = CLD_DUMPED;
+		else if (tsk->exit_code & 0x7f)
+			info.si_code = CLD_KILLED;
+		else {
+			info.si_code = CLD_EXITED;
+			info.si_status = tsk->exit_code >> 8;
+		}
+	}
+
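+	/*
+	 * SIGCHLD is suppressed if the parent's handler is SIG_IGN or
+	 * if it set the matching SA_NOCLD* flag chosen above.
+	 */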
+	sighand = parent->sighand;
+	spin_lock_irqsave(&sighand->siglock, flags);
+	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
+	    !(sighand->action[SIGCHLD-1].sa.sa_flags & sa_mask))
+		__group_send_sig_info(SIGCHLD, &info, parent);
+	/*
+	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
+	 */
+	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
+	spin_unlock_irqrestore(&sighand->siglock, flags);
+}
+
+static u32
+ptrace_report(struct utrace_attached_engine *engine, struct task_struct *tsk,
+	      int code)
+{
+	struct ptrace_state *state = (struct ptrace_state *) engine->data;
+	const struct utrace_regset *regset;
+
+#ifdef PTRACE_DEBUG
+	printk("%d ptrace_report %d engine %p state %p code %x parent %d (%p)\n",
+	       current->pid, tsk->pid, engine, state, code,
+	       state->parent->pid, state->parent);
+	if (!state->u.live.have_eventmsg && state->u.live.u.siginfo) {
+		const siginfo_t *si = state->u.live.u.siginfo;
+		printk("  si %d code %x errno %d addr %p\n",
+		       si->si_signo, si->si_code, si->si_errno,
+		       si->si_addr);
+	}
+#endif
 
 	/*
-	 * This lock_kernel fixes a subtle race with suid exec
+	 * Set our QUIESCE flag right now, before notifying the tracer.
+	 * We do this before setting tsk->exit_code rather than
+	 * by using UTRACE_ACTION_NEWSTATE in our return value, to
+	 * ensure that the tracer can't get the notification and then
+	 * try to resume us with PTRACE_CONT before we set the flag.
 	 */
-	lock_kernel();
-	if (request == PTRACE_TRACEME) {
-		ret = ptrace_traceme();
+	utrace_set_flags(tsk, engine, engine->flags | UTRACE_ACTION_QUIESCE);
+
+	/*
+	 * If regset 0 has a writeback call, do it now.  On register window
+	 * machines, this makes sure the user memory backing the register
+	 * data is up to date by the time wait_task_inactive returns to
+	 * ptrace_start in our tracer doing a PTRACE_PEEKDATA or the like.
+	 */
+	regset = utrace_regset(tsk, engine, utrace_native_view(tsk), 0);
+	if (regset->writeback)
+		(*regset->writeback)(tsk, regset, 0);
+
+	BUG_ON(code == 0);
+	tsk->exit_code = code;
+	do_notify(tsk, state->parent, CLD_TRAPPED);
+
+#ifdef PTRACE_DEBUG
+	printk("%d ptrace_report quiescing exit_code %x\n",
+	       current->pid, current->exit_code);
+#endif
+
+	return UTRACE_ACTION_RESUME;
+}
+
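+/*
+ * Report an extended ptrace event.  The tracer's wait sees a stop
+ * status of ((event << 8) | SIGTRAP), the PTRACE_EVENT_* encoding.
+ */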
+static inline u32
+ptrace_event(struct utrace_attached_engine *engine, struct task_struct *tsk,
+	     int event)
+{
+	struct ptrace_state *state = (struct ptrace_state *) engine->data;
+	state->u.live.syscall = 0;
+	return ptrace_report(engine, tsk, (event << 8) | SIGTRAP);
+}
+
+
+static u32
+ptrace_report_death(struct utrace_attached_engine *engine,
+		    struct task_struct *tsk)
+{
+	struct ptrace_state *state = (struct ptrace_state *) engine->data;
+
+	if (tsk->exit_code == 0 && unlikely(tsk->flags & PF_SIGNALED))
+		/*
+		 * This can only mean that tsk->exit_code was clobbered
+		 * by ptrace_update or ptrace_do_wait in a race with
+		 * an asynchronous wakeup and exit for SIGKILL.
+		 */
+		tsk->exit_code = SIGKILL;
+
+	if (tsk->parent == state->parent) {
+		/*
+		 * This is a natural child, so we detach and let the normal
+		 * reporting happen once our NOREAP action is gone.  But
+		 * first, generate a SIGCHLD for those cases where normal
+		 * behavior won't.  A ptrace'd child always generates SIGCHLD.
+		 */
+		if (tsk->exit_signal == -1 || !thread_group_empty(tsk))
+			do_notify(tsk, state->parent, CLD_EXITED);
+		ptrace_state_unlink(state);
+		rcu_assign_pointer(engine->data, 0UL);
+		ptrace_done(state);
+		return UTRACE_ACTION_DETACH;
+	}
+
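+	/*
+	 * Not our natural child: just notify the ptracer.  The implicit
+	 * detach happens when it collects the report (see detach_zombie).
+	 */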
+	do_notify(tsk, state->parent, CLD_EXITED);
+	return UTRACE_ACTION_RESUME;
+}
+
+/*
+ * We get this only in the case where our UTRACE_ACTION_NOREAP was ignored.
+ * That happens solely when a non-leader exec reaps the old leader.
+ */
+static void
+ptrace_report_reap(struct utrace_attached_engine *engine,
+		   struct task_struct *tsk)
+{
+	struct ptrace_state *state;
+	rcu_read_lock();
+	state = rcu_dereference((struct ptrace_state *) engine->data);
+	if (state != NULL) {
+		ptrace_state_unlink(state);
+		rcu_assign_pointer(engine->data, 0UL);
+		ptrace_done(state);
+	}
+	rcu_read_unlock();
+}
+
+
+static u32
+ptrace_report_clone(struct utrace_attached_engine *engine,
+		    struct task_struct *parent,
+		    unsigned long clone_flags, struct task_struct *child)
+{
+	struct ptrace_state *state = (struct ptrace_state *) engine->data;
+	struct utrace_attached_engine *child_engine;
+	int event = PTRACE_EVENT_FORK;
+	int option = PTRACE_O_TRACEFORK;
+
+#ifdef PTRACE_DEBUG
+	printk("%d (%p) engine %p ptrace_report_clone child %d (%p) fl %lx\n",
+	       parent->pid, parent, engine, child->pid, child, clone_flags);
+#endif
+
+	if (clone_flags & CLONE_UNTRACED)
 		goto out;
+
+	if (clone_flags & CLONE_VFORK) {
+		event = PTRACE_EVENT_VFORK;
+		option = PTRACE_O_TRACEVFORK;
+	}
+	else if ((clone_flags & CSIGNAL) != SIGCHLD) {
+		event = PTRACE_EVENT_CLONE;
+		option = PTRACE_O_TRACECLONE;
 	}
 
-	child = ptrace_get_task_struct(pid);
-	if (IS_ERR(child)) {
-		ret = PTR_ERR(child);
+	if (!(clone_flags & CLONE_PTRACE) && !(state->u.live.options & option))
 		goto out;
+
+	child_engine = utrace_attach(child, (UTRACE_ATTACH_CREATE
+					     | UTRACE_ATTACH_EXCLUSIVE
+					     | UTRACE_ATTACH_MATCH_OPS),
+				     &ptrace_utrace_ops, 0UL);
+	if (unlikely(IS_ERR(child_engine))) {
+		BUG_ON(PTR_ERR(child_engine) != -ENOMEM);
+		printk(KERN_ERR
+		       "ptrace out of memory, lost child %d of %d\n",
+		       child->pid, parent->pid);
+	}
+	else {
+		struct ptrace_state *child_state;
+		child_state = ptrace_setup(child, child_engine,
+					   state->parent,
+					   state->u.live.options,
+					   state->u.live.cap_sys_ptrace,
+					   NULL);
+		if (unlikely(IS_ERR(child_state))) {
+			BUG_ON(PTR_ERR(child_state) != -ENOMEM);
+			(void) utrace_detach(child, child_engine);
+			printk(KERN_ERR
+			       "ptrace out of memory, lost child %d of %d\n",
+			       child->pid, parent->pid);
+		}
+		else {
+			int ret;
+			sigaddset(&child->pending.signal, SIGSTOP);
+			set_tsk_thread_flag(child, TIF_SIGPENDING);
+			ret = ptrace_update(child, child_engine, 0);
+			/*
+			 * The child hasn't run yet,
+			 * it can't have died already.
+			 */
+			BUG_ON(ret);
+		}
 	}
 
-	if (request == PTRACE_ATTACH) {
-		ret = ptrace_attach(child);
-		goto out_put_task_struct;
+	if (state->u.live.options & option) {
+		state->u.live.have_eventmsg = 1;
+		state->u.live.u.eventmsg = child->pid;
+		return ptrace_event(engine, parent, event);
 	}
 
-	ret = ptrace_check_attach(child, request == PTRACE_KILL);
-	if (ret < 0)
-		goto out_put_task_struct;
+out:
+	return UTRACE_ACTION_RESUME;
+}
 
-	ret = arch_ptrace(child, request, addr, data);
-	if (ret < 0)
-		goto out_put_task_struct;
 
- out_put_task_struct:
-	put_task_struct(child);
- out:
-	unlock_kernel();
-	return ret;
+static u32
+ptrace_report_vfork_done(struct utrace_attached_engine *engine,
+			 struct task_struct *parent, pid_t child_pid)
+{
+	struct ptrace_state *state = (struct ptrace_state *) engine->data;
+	state->u.live.have_eventmsg = 1;
+	state->u.live.u.eventmsg = child_pid;
+	return ptrace_event(engine, parent, PTRACE_EVENT_VFORK_DONE);
+}
+
+
+static u32
+ptrace_report_signal(struct utrace_attached_engine *engine,
+		     struct task_struct *tsk, struct pt_regs *regs,
+		     u32 action, siginfo_t *info,
+		     const struct k_sigaction *orig_ka,
+		     struct k_sigaction *return_ka)
+{
+	struct ptrace_state *state = (struct ptrace_state *) engine->data;
+	int signo = info == NULL ? SIGTRAP : info->si_signo;
+	state->u.live.syscall = 0;
+	state->u.live.have_eventmsg = 0;
+	state->u.live.u.siginfo = info;
+	return ptrace_report(engine, tsk, signo) | UTRACE_SIGNAL_IGN;
+}
+
+static u32
+ptrace_report_jctl(struct utrace_attached_engine *engine,
+		   struct task_struct *tsk, int type)
+{
+	struct ptrace_state *state = (struct ptrace_state *) engine->data;
+#ifdef PTRACE_DEBUG
+	printk("ptrace %d jctl notify %d type %x exit_code %x\n",
+	       tsk->pid, state->parent->pid, type, tsk->exit_code);
+#endif
+	do_notify(tsk, state->parent, type);
+	return UTRACE_JCTL_NOSIGCHLD;
+}
+
+static u32
+ptrace_report_exec(struct utrace_attached_engine *engine,
+		   struct task_struct *tsk,
+		   const struct linux_binprm *bprm,
+		   struct pt_regs *regs)
+{
+	struct ptrace_state *state = (struct ptrace_state *) engine->data;
+	if (state->u.live.options & PTRACE_O_TRACEEXEC)
+		return ptrace_event(engine, tsk, PTRACE_EVENT_EXEC);
+	state->u.live.syscall = 0;
+	return ptrace_report(engine, tsk, SIGTRAP);
+}
+
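+/*
+ * Syscall tracing stops report SIGTRAP.  When PTRACE_O_TRACESYSGOOD
+ * is set we OR in 0x80 so the tracer can distinguish syscall stops
+ * from real SIGTRAPs.
+ */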
+static u32
+ptrace_report_syscall(struct utrace_attached_engine *engine,
+		      struct task_struct *tsk, struct pt_regs *regs,
+		      int entry)
+{
+	struct ptrace_state *state = (struct ptrace_state *) engine->data;
+#ifdef PTRACE_SYSEMU
+	if (entry && state->u.live.sysemu)
+		tracehook_abort_syscall(regs);
+#endif
+	state->u.live.syscall = 1;
+	return ptrace_report(engine, tsk,
+			     ((state->u.live.options & PTRACE_O_TRACESYSGOOD)
+			      ? 0x80 : 0) | SIGTRAP);
 }
-#endif /* __ARCH_SYS_PTRACE */
+
+static u32
+ptrace_report_syscall_entry(struct utrace_attached_engine *engine,
+			    struct task_struct *tsk, struct pt_regs *regs)
+{
+	return ptrace_report_syscall(engine, tsk, regs, 1);
+}
+
+static u32
+ptrace_report_syscall_exit(struct utrace_attached_engine *engine,
+			    struct task_struct *tsk, struct pt_regs *regs)
+{
+	return ptrace_report_syscall(engine, tsk, regs, 0);
+}
+
+static u32
+ptrace_report_exit(struct utrace_attached_engine *engine,
+		   struct task_struct *tsk, long orig_code, long *code)
+{
+	struct ptrace_state *state = (struct ptrace_state *) engine->data;
+	state->u.live.have_eventmsg = 1;
+	state->u.live.u.eventmsg = *code;
+	return ptrace_event(engine, tsk, PTRACE_EVENT_EXIT);
+}
+
+static int
+ptrace_unsafe_exec(struct utrace_attached_engine *engine,
+		   struct task_struct *tsk)
+{
+	struct ptrace_state *state = (struct ptrace_state *) engine->data;
+	int unsafe = LSM_UNSAFE_PTRACE;
+	if (state->u.live.cap_sys_ptrace)
+		unsafe = LSM_UNSAFE_PTRACE_CAP;
+	return unsafe;
+}
+
+static struct task_struct *
+ptrace_tracer_task(struct utrace_attached_engine *engine,
+		   struct task_struct *target)
+{
+	struct ptrace_state *state;
+
+	/*
+	 * This call is not necessarily made by the target task,
+	 * so ptrace might be getting detached while we run here.
+	 * The state pointer will be NULL if that happens.
+	 */
+	state = rcu_dereference((struct ptrace_state *) engine->data);
+
+	return state == NULL ? NULL : state->parent;
+}
+
+static int
+ptrace_allow_access_process_vm(struct utrace_attached_engine *engine,
+			       struct task_struct *target,
+			       struct task_struct *caller)
+{
+	struct ptrace_state *state;
+	int ours;
+
+	/*
+	 * This call is not necessarily made by the target task,
+	 * so ptrace might be getting detached while we run here.
+	 * The state pointer will be NULL if that happens.
+	 */
+	rcu_read_lock();
+	state = rcu_dereference((struct ptrace_state *) engine->data);
+	ours = (state != NULL
+		&& ((engine->flags & UTRACE_ACTION_QUIESCE)
+		    || (target->state == TASK_STOPPED))
+		&& state->parent == caller);
+	rcu_read_unlock();
+
+	return ours && security_ptrace(caller, target) == 0;
+}
+
+
+static const struct utrace_engine_ops ptrace_utrace_ops =
+{
+	.report_syscall_entry = ptrace_report_syscall_entry,
+	.report_syscall_exit = ptrace_report_syscall_exit,
+	.report_exec = ptrace_report_exec,
+	.report_jctl = ptrace_report_jctl,
+	.report_signal = ptrace_report_signal,
+	.report_vfork_done = ptrace_report_vfork_done,
+	.report_clone = ptrace_report_clone,
+	.report_exit = ptrace_report_exit,
+	.report_death = ptrace_report_death,
+	.report_reap = ptrace_report_reap,
+	.unsafe_exec = ptrace_unsafe_exec,
+	.tracer_task = ptrace_tracer_task,
+	.allow_access_process_vm = ptrace_allow_access_process_vm,
+};
+
+#endif
--- linux-2.6/kernel/Makefile.utrace-ptrace-compat
+++ linux-2.6/kernel/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_RCU_TORTURE_TEST) += rcutor
 obj-$(CONFIG_RELAY) += relay.o
 obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
 obj-$(CONFIG_TASKSTATS) += taskstats.o
+obj-$(CONFIG_UTRACE) += utrace.o
 
 ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
--- linux-2.6/fs/binfmt_elf_fdpic.c.utrace-ptrace-compat
+++ linux-2.6/fs/binfmt_elf_fdpic.c
@@ -421,13 +421,6 @@ static int load_elf_fdpic_binary(struct 
 	entryaddr = interp_params.entry_addr ?: exec_params.entry_addr;
 	start_thread(regs, entryaddr, current->mm->start_stack);
 
-	if (unlikely(current->ptrace & PT_PTRACED)) {
-		if (current->ptrace & PT_TRACE_EXEC)
-			ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
-		else
-			send_sig(SIGTRAP, current, 0);
-	}
-
 	retval = 0;
 
 error:
--- linux-2.6/fs/binfmt_som.c.utrace-ptrace-compat
+++ linux-2.6/fs/binfmt_som.c
@@ -271,8 +271,6 @@ load_som_binary(struct linux_binprm * bp
 	map_hpux_gateway_page(current,current->mm);
 
 	start_thread_som(regs, som_entry, bprm->p);
-	if (current->ptrace & PT_PTRACED)
-		send_sig(SIGTRAP, current, 0);
 	return 0;
 
 	/* error cleanup */
--- linux-2.6/fs/proc/base.c.utrace-ptrace-compat
+++ linux-2.6/fs/proc/base.c
@@ -67,6 +67,7 @@
 #include <linux/mount.h>
 #include <linux/security.h>
 #include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/seccomp.h>
 #include <linux/cpuset.h>
 #include <linux/audit.h>
@@ -405,13 +406,6 @@ static int proc_root_link(struct inode *
 	return result;
 }
 
-#define MAY_PTRACE(task) \
-	(task == current || \
-	(task->parent == current && \
-	(task->ptrace & PT_PTRACED) && \
-	 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
-	 security_ptrace(current,task) == 0))
-
 static int proc_pid_environ(struct task_struct *task, char * buffer)
 {
 	int res = 0;
@@ -736,7 +730,8 @@ static ssize_t mem_read(struct file * fi
 	if (!task)
 		goto out_no_task;
 
-	if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
+	if (!tracehook_allow_access_process_vm(task)
+	    || !ptrace_may_attach(task))
 		goto out;
 
 	ret = -ENOMEM;
@@ -762,7 +757,8 @@ static ssize_t mem_read(struct file * fi
 
 		this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
 		retval = access_process_vm(task, src, page, this_len, 0);
-		if (!retval || !MAY_PTRACE(task) || !ptrace_may_attach(task)) {
+		if (!retval || !tracehook_allow_access_process_vm(task)
+		    || !ptrace_may_attach(task)) {
 			if (!ret)
 				ret = -EIO;
 			break;
@@ -806,7 +802,8 @@ static ssize_t mem_write(struct file * f
 	if (!task)
 		goto out_no_task;
 
-	if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
+	if (!tracehook_allow_access_process_vm(task)
+	    || !ptrace_may_attach(task))
 		goto out;
 
 	copied = -ENOMEM;
--- linux-2.6/fs/proc/array.c.utrace-ptrace-compat
+++ linux-2.6/fs/proc/array.c
@@ -73,6 +73,7 @@
 #include <linux/file.h>
 #include <linux/times.h>
 #include <linux/cpuset.h>
+#include <linux/tracehook.h>
 #include <linux/rcupdate.h>
 #include <linux/delayacct.h>
 
@@ -158,10 +159,17 @@ static inline const char * get_task_stat
 
 static inline char * task_state(struct task_struct *p, char *buffer)
 {
+	struct task_struct *tracer;
+	pid_t tracer_pid;
 	struct group_info *group_info;
 	int g;
 	struct fdtable *fdt = NULL;
 
+	rcu_read_lock();
+	tracer = tracehook_tracer_task(p);
+	tracer_pid = tracer == NULL ? 0 : tracer->pid;
+	rcu_read_unlock();
+
 	read_lock(&tasklist_lock);
 	buffer += sprintf(buffer,
 		"State:\t%s\n"
@@ -175,8 +183,8 @@ static inline char * task_state(struct t
 		get_task_state(p),
 		(p->sleep_avg/1024)*100/(1020000000/1024),
 	       	p->tgid,
-		p->pid, pid_alive(p) ? p->group_leader->real_parent->tgid : 0,
-		pid_alive(p) && p->ptrace ? p->parent->pid : 0,
+		p->pid, pid_alive(p) ? p->group_leader->parent->tgid : 0,
+		tracer_pid,
 		p->uid, p->euid, p->suid, p->fsuid,
 		p->gid, p->egid, p->sgid, p->fsgid);
 	read_unlock(&tasklist_lock);
@@ -386,7 +394,7 @@ static int do_task_stat(struct task_stru
 			stime = cputime_add(stime, task->signal->stime);
 		}
 	}
-	ppid = pid_alive(task) ? task->group_leader->real_parent->tgid : 0;
+	ppid = pid_alive(task) ? task->group_leader->parent->tgid : 0;
 	read_unlock(&tasklist_lock);
 
 	if (!whole || num_threads<2)
--- linux-2.6/fs/binfmt_aout.c.utrace-ptrace-compat
+++ linux-2.6/fs/binfmt_aout.c
@@ -445,12 +445,6 @@ beyond_if:
 	regs->gp = ex.a_gpvalue;
 #endif
 	start_thread(regs, ex.a_entry, current->mm->start_stack);
-	if (unlikely(current->ptrace & PT_PTRACED)) {
-		if (current->ptrace & PT_TRACE_EXEC)
-			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
-		else
-			send_sig(SIGTRAP, current, 0);
-	}
 	return 0;
 }
 
--- linux-2.6/fs/binfmt_flat.c.utrace-ptrace-compat
+++ linux-2.6/fs/binfmt_flat.c
@@ -897,9 +897,6 @@ static int load_flat_binary(struct linux
 	
 	start_thread(regs, start_addr, current->mm->start_stack);
 
-	if (current->ptrace & PT_PTRACED)
-		send_sig(SIGTRAP, current, 0);
-
 	return 0;
 }
 
--- linux-2.6/fs/binfmt_elf.c.utrace-ptrace-compat
+++ linux-2.6/fs/binfmt_elf.c
@@ -1015,12 +1015,6 @@ static int load_elf_binary(struct linux_
 #endif
 
 	start_thread(regs, elf_entry, bprm->p);
-	if (unlikely(current->ptrace & PT_PTRACED)) {
-		if (current->ptrace & PT_TRACE_EXEC)
-			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
-		else
-			send_sig(SIGTRAP, current, 0);
-	}
 	retval = 0;
 out:
 	kfree(loc);
--- linux-2.6/fs/exec.c.utrace-ptrace-compat
+++ linux-2.6/fs/exec.c
@@ -41,7 +41,7 @@
 #include <linux/module.h>
 #include <linux/namei.h>
 #include <linux/proc_fs.h>
-#include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/mount.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
@@ -958,13 +958,7 @@ EXPORT_SYMBOL(prepare_binprm);
 
 static int unsafe_exec(struct task_struct *p)
 {
-	int unsafe = 0;
-	if (p->ptrace & PT_PTRACED) {
-		if (p->ptrace & PT_PTRACE_CAP)
-			unsafe |= LSM_UNSAFE_PTRACE_CAP;
-		else
-			unsafe |= LSM_UNSAFE_PTRACE;
-	}
+	int unsafe = tracehook_unsafe_exec(p);
 	if (atomic_read(&p->fs->count) > 1 ||
 	    atomic_read(&p->files->count) > 1 ||
 	    atomic_read(&p->sighand->count) > 1)
@@ -1089,6 +1083,7 @@ int search_binary_handler(struct linux_b
 				bprm->file = NULL;
 				current->did_exec = 1;
 				proc_exec_connector(current);
+				tracehook_report_exec(bprm, regs);
 				return retval;
 			}
 			read_lock(&binfmt_lock);
--- linux-2.6/drivers/connector/cn_proc.c.utrace-ptrace-compat
+++ linux-2.6/drivers/connector/cn_proc.c
@@ -62,8 +62,8 @@ void proc_fork_connector(struct task_str
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	ev->timestamp_ns = timespec_to_ns(&ts);
 	ev->what = PROC_EVENT_FORK;
-	ev->event_data.fork.parent_pid = task->real_parent->pid;
-	ev->event_data.fork.parent_tgid = task->real_parent->tgid;
+	ev->event_data.fork.parent_pid = task->parent->pid;
+	ev->event_data.fork.parent_tgid = task->parent->tgid;
 	ev->event_data.fork.child_pid = task->pid;
 	ev->event_data.fork.child_tgid = task->tgid;
 
--- linux-2.6/init/Kconfig.utrace-ptrace-compat
+++ linux-2.6/init/Kconfig
@@ -512,6 +512,35 @@ config STOP_MACHINE
 	  Need stop_machine() primitive.
 endmenu
 
+menu "Process debugging support"
+
+config UTRACE
+	bool "Infrastructure for tracing and debugging user processes"
+	default y
+	help
+	  Enable the utrace process tracing interface.
+	  This is an internal kernel interface to track events in user
+	  threads, extract and change user thread state.  This interface
+	  is exported to kernel modules, and is also used to implement ptrace.
+	  If you disable this, no facilities for debugging user processes
+	  will be available, nor will the facilities used by UML and other
+	  applications.  Unless you are making a specially stripped-down
+	  kernel and are very sure you don't need these facilities,
+	  say Y.
+
+config PTRACE
+	bool "Legacy ptrace system call interface"
+	default y
+	depends on UTRACE
+	help
+	  Enable the ptrace system call.
+	  This is traditionally used by debuggers like GDB,
+	  and is used by UML and some other applications.
+	  Unless you are very sure you won't run anything that needs it,
+	  say Y.
+
+endmenu
+
 menu "Block layer"
 source "block/Kconfig"
 endmenu
--- linux-2.6/arch/alpha/kernel/entry.S.utrace-ptrace-compat
+++ linux-2.6/arch/alpha/kernel/entry.S
@@ -879,14 +879,14 @@ sys_getxpid:
 	/* See linux/kernel/timer.c sys_getppid for discussion
 	   about this loop.  */
 	ldq	$3, TASK_GROUP_LEADER($2)
-	ldq	$4, TASK_REAL_PARENT($3)
+	ldq	$4, TASK_PARENT($3)
 	ldl	$0, TASK_TGID($2)
 1:	ldl	$1, TASK_TGID($4)
 #ifdef CONFIG_SMP
 	mov	$4, $5
 	mb
 	ldq	$3, TASK_GROUP_LEADER($2)
-	ldq	$4, TASK_REAL_PARENT($3)
+	ldq	$4, TASK_PARENT($3)
 	cmpeq	$4, $5, $5
 	beq	$5, 1b
 #endif
--- linux-2.6/arch/alpha/kernel/asm-offsets.c.utrace-ptrace-compat
+++ linux-2.6/arch/alpha/kernel/asm-offsets.c
@@ -27,7 +27,7 @@ void foo(void)
         DEFINE(TASK_EUID, offsetof(struct task_struct, euid));
         DEFINE(TASK_GID, offsetof(struct task_struct, gid));
         DEFINE(TASK_EGID, offsetof(struct task_struct, egid));
-        DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent));
+        DEFINE(TASK_PARENT, offsetof(struct task_struct, parent));
         DEFINE(TASK_GROUP_LEADER, offsetof(struct task_struct, group_leader));
         DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
         BLANK();
--- linux-2.6/arch/i386/kernel/i387.c.utrace-ptrace-compat
+++ linux-2.6/arch/i386/kernel/i387.c
@@ -222,14 +222,10 @@ void set_fpu_twd( struct task_struct *ts
  * FXSR floating point environment conversions.
  */
 
-static int convert_fxsr_to_user( struct _fpstate __user *buf,
-					struct i387_fxsave_struct *fxsave )
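+/*
+ * Extract the 7-word i387 environment (cwd, swd, twd, fip, fcs|fop,
+ * foo, fos) from an fxsave image.
+ */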
+static inline void
+convert_fxsr_env_to_i387(unsigned long env[7],
+			 struct i387_fxsave_struct *fxsave)
 {
-	unsigned long env[7];
-	struct _fpreg __user *to;
-	struct _fpxreg *from;
-	int i;
-
 	env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
 	env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
 	env[2] = twd_fxsr_to_i387(fxsave);
@@ -237,7 +233,17 @@ static int convert_fxsr_to_user( struct 
 	env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
 	env[5] = fxsave->foo;
 	env[6] = fxsave->fos;
+}
+
+static int convert_fxsr_to_user(struct _fpstate __user *buf,
+				struct i387_fxsave_struct *fxsave)
+{
+	unsigned long env[7];
+	struct _fpreg __user *to;
+	struct _fpxreg *from;
+	int i;
 
+	convert_fxsr_env_to_i387(env, fxsave);
 	if ( __copy_to_user( buf, env, 7 * sizeof(unsigned long) ) )
 		return 1;
 
@@ -255,6 +261,20 @@ static int convert_fxsr_to_user( struct 
 	return 0;
 }
 
+static inline void
+convert_fxsr_env_from_i387(struct i387_fxsave_struct *fxsave,
+			   const unsigned long env[7])
+{
+	fxsave->cwd = (unsigned short)(env[0] & 0xffff);
+	fxsave->swd = (unsigned short)(env[1] & 0xffff);
+	fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
+	fxsave->fip = env[3];
+	fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
+	fxsave->fcs = (env[4] & 0xffff);
+	fxsave->foo = env[5];
+	fxsave->fos = env[6];
+}
+
 static int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave,
 					  struct _fpstate __user *buf )
 {
@@ -266,14 +286,7 @@ static int convert_fxsr_from_user( struc
 	if ( __copy_from_user( env, buf, 7 * sizeof(long) ) )
 		return 1;
 
-	fxsave->cwd = (unsigned short)(env[0] & 0xffff);
-	fxsave->swd = (unsigned short)(env[1] & 0xffff);
-	fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
-	fxsave->fip = env[3];
-	fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
-	fxsave->fcs = (env[4] & 0xffff);
-	fxsave->foo = env[5];
-	fxsave->fos = env[6];
+	convert_fxsr_env_from_i387(fxsave, env);
 
 	to = (struct _fpxreg *) &fxsave->st_space[0];
 	from = &buf->_st[0];
@@ -388,88 +401,82 @@ int restore_i387( struct _fpstate __user
  * ptrace request handlers.
  */
 
-static inline int get_fpregs_fsave( struct user_i387_struct __user *buf,
-				    struct task_struct *tsk )
+static inline void get_fpregs_fsave(struct user_i387_struct *buf,
+				    struct task_struct *tsk)
 {
-	return __copy_to_user( buf, &tsk->thread.i387.fsave,
-			       sizeof(struct user_i387_struct) );
+	memcpy(buf, &tsk->thread.i387.fsave, sizeof(struct user_i387_struct));
 }
 
-static inline int get_fpregs_fxsave( struct user_i387_struct __user *buf,
-				     struct task_struct *tsk )
+static inline void get_fpregs_fxsave(struct user_i387_struct *buf,
+				     struct task_struct *tsk)
 {
-	return convert_fxsr_to_user( (struct _fpstate __user *)buf,
-				     &tsk->thread.i387.fxsave );
+	struct _fpreg *to;
+	const struct _fpxreg *from;
+	unsigned int i;
+
+	convert_fxsr_env_to_i387((unsigned long *) buf,
+				 &tsk->thread.i387.fxsave);
+
+	to = (struct _fpreg *) buf->st_space;
+	from = (const struct _fpxreg *) &tsk->thread.i387.fxsave.st_space[0];
+	for (i = 0; i < 8; i++, to++, from++)
+		*to = *(const struct _fpreg *) from;
 }
 
-int get_fpregs( struct user_i387_struct __user *buf, struct task_struct *tsk )
+int get_fpregs(struct user_i387_struct *buf, struct task_struct *tsk)
 {
 	if ( HAVE_HWFP ) {
-		if ( cpu_has_fxsr ) {
-			return get_fpregs_fxsave( buf, tsk );
-		} else {
-			return get_fpregs_fsave( buf, tsk );
-		}
+		if (cpu_has_fxsr)
+			get_fpregs_fxsave(buf, tsk);
+		else
+			get_fpregs_fsave(buf, tsk);
+		return 0;
 	} else {
 		return save_i387_soft( &tsk->thread.i387.soft,
 				       (struct _fpstate __user *)buf );
 	}
 }
 
-static inline int set_fpregs_fsave( struct task_struct *tsk,
-				    struct user_i387_struct __user *buf )
+static inline void set_fpregs_fsave(struct task_struct *tsk,
+				    const struct user_i387_struct *buf)
 {
-	return __copy_from_user( &tsk->thread.i387.fsave, buf,
-				 sizeof(struct user_i387_struct) );
+	memcpy(&tsk->thread.i387.fsave, buf, sizeof(struct user_i387_struct));
 }
 
-static inline int set_fpregs_fxsave( struct task_struct *tsk,
-				     struct user_i387_struct __user *buf )
+static inline void set_fpregs_fxsave(struct task_struct *tsk,
+				     const struct user_i387_struct *buf)
 {
-	return convert_fxsr_from_user( &tsk->thread.i387.fxsave,
-				       (struct _fpstate __user *)buf );
+	struct _fpxreg *to;
+	const struct _fpreg *from;
+	unsigned int i;
+
+	convert_fxsr_env_from_i387(&tsk->thread.i387.fxsave,
+				   (unsigned long *) buf);
+
+	to = (struct _fpxreg *) &tsk->thread.i387.fxsave.st_space[0];
+	from = (const struct _fpreg *) buf->st_space;
+	for (i = 0; i < 8; i++, to++, from++)
+		*(struct _fpreg *) to = *from;
 }
 
-int set_fpregs( struct task_struct *tsk, struct user_i387_struct __user *buf )
+int set_fpregs(struct task_struct *tsk, const struct user_i387_struct *buf)
 {
 	if ( HAVE_HWFP ) {
-		if ( cpu_has_fxsr ) {
-			return set_fpregs_fxsave( tsk, buf );
-		} else {
-			return set_fpregs_fsave( tsk, buf );
-		}
+		if (cpu_has_fxsr)
+			set_fpregs_fxsave(tsk, buf);
+		else
+			set_fpregs_fsave(tsk, buf);
+		return 0;
 	} else {
 		return restore_i387_soft( &tsk->thread.i387.soft,
 					  (struct _fpstate __user *)buf );
 	}
 }
 
-int get_fpxregs( struct user_fxsr_struct __user *buf, struct task_struct *tsk )
+void updated_fpxregs(struct task_struct *tsk)
 {
-	if ( cpu_has_fxsr ) {
-		if (__copy_to_user( buf, &tsk->thread.i387.fxsave,
-				    sizeof(struct user_fxsr_struct) ))
-			return -EFAULT;
-		return 0;
-	} else {
-		return -EIO;
-	}
-}
-
-int set_fpxregs( struct task_struct *tsk, struct user_fxsr_struct __user *buf )
-{
-	int ret = 0;
-
-	if ( cpu_has_fxsr ) {
-		if (__copy_from_user( &tsk->thread.i387.fxsave, buf,
-				  sizeof(struct user_fxsr_struct) ))
-			ret = -EFAULT;
-		/* mxcsr reserved bits must be masked to zero for security reasons */
-		tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
-	} else {
-		ret = -EIO;
-	}
-	return ret;
+	/* mxcsr reserved bits must be masked to zero for security reasons */
+	tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
 }
 
 /*
--- linux-2.6/arch/i386/kernel/signal.c.utrace-ptrace-compat
+++ linux-2.6/arch/i386/kernel/signal.c
@@ -19,7 +19,7 @@
 #include <linux/stddef.h>
 #include <linux/personality.h>
 #include <linux/suspend.h>
-#include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/elf.h>
 #include <asm/processor.h>
 #include <asm/ucontext.h>
@@ -385,16 +385,6 @@ static int setup_frame(int sig, struct k
 	regs->xss = __USER_DS;
 	regs->xcs = __USER_CS;
 
-	/*
-	 * Clear TF when entering the signal handler, but
-	 * notify any tracer that was single-stepping it.
-	 * The tracer may want to single-step inside the
-	 * handler too.
-	 */
-	regs->eflags &= ~TF_MASK;
-	if (test_thread_flag(TIF_SINGLESTEP))
-		ptrace_notify(SIGTRAP);
-
 #if DEBUG_SIG
 	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
 		current->comm, current->pid, frame, regs->eip, frame->pretcode);
@@ -479,16 +469,6 @@ static int setup_rt_frame(int sig, struc
 	regs->xss = __USER_DS;
 	regs->xcs = __USER_CS;
 
-	/*
-	 * Clear TF when entering the signal handler, but
-	 * notify any tracer that was single-stepping it.
-	 * The tracer may want to single-step inside the
-	 * handler too.
-	 */
-	regs->eflags &= ~TF_MASK;
-	if (test_thread_flag(TIF_SINGLESTEP))
-		ptrace_notify(SIGTRAP);
-
 #if DEBUG_SIG
 	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
 		current->comm, current->pid, frame, regs->eip, frame->pretcode);
@@ -533,14 +513,12 @@ handle_signal(unsigned long sig, siginfo
 	}
 
 	/*
-	 * If TF is set due to a debugger (PT_DTRACE), clear the TF flag so
+	 * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF flag so
 	 * that register information in the sigcontext is correct.
 	 */
 	if (unlikely(regs->eflags & TF_MASK)
-	    && likely(current->ptrace & PT_DTRACE)) {
-		current->ptrace &= ~PT_DTRACE;
+	    && likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
 		regs->eflags &= ~TF_MASK;
-	}
 
 	/* Set up the stack frame */
 	if (ka->sa.sa_flags & SA_SIGINFO)
@@ -555,6 +533,15 @@ handle_signal(unsigned long sig, siginfo
 			sigaddset(&current->blocked,sig);
 		recalc_sigpending();
 		spin_unlock_irq(&current->sighand->siglock);
+
+		/*
+		 * Clear TF when entering the signal handler, but
+		 * notify any tracer that was single-stepping it.
+		 * The tracer may want to single-step inside the
+		 * handler too.
+		 */
+		regs->eflags &= ~TF_MASK;
+		tracehook_report_handle_signal(sig, ka, oldset, regs);
 	}
 
 	return ret;
--- linux-2.6/arch/i386/kernel/vm86.c.utrace-ptrace-compat
+++ linux-2.6/arch/i386/kernel/vm86.c
@@ -529,13 +529,6 @@ int handle_vm86_trap(struct kernel_vm86_
 	}
 	if (trapno !=1)
 		return 1; /* we let this handle by the calling routine */
-	if (current->ptrace & PT_PTRACED) {
-		unsigned long flags;
-		spin_lock_irqsave(&current->sighand->siglock, flags);
-		sigdelset(&current->blocked, SIGTRAP);
-		recalc_sigpending();
-		spin_unlock_irqrestore(&current->sighand->siglock, flags);
-	}
 	send_sig(SIGTRAP, current, 1);
 	current->thread.trap_no = trapno;
 	current->thread.error_code = error_code;
--- linux-2.6/arch/i386/kernel/process.c.utrace-ptrace-compat
+++ linux-2.6/arch/i386/kernel/process.c
@@ -751,9 +751,6 @@ asmlinkage int sys_execve(struct pt_regs
 			(char __user * __user *) regs.edx,
 			&regs);
 	if (error == 0) {
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
 		/* Make sure we don't return using sysenter.. */
 		set_thread_flag(TIF_IRET);
 	}
--- linux-2.6/arch/i386/kernel/ptrace.c.utrace-ptrace-compat
+++ linux-2.6/arch/i386/kernel/ptrace.c
@@ -12,12 +12,15 @@
 #include <linux/smp_lock.h>
 #include <linux/errno.h>
 #include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/user.h>
 #include <linux/security.h>
 #include <linux/audit.h>
 #include <linux/seccomp.h>
 #include <linux/signal.h>
+#include <linux/module.h>
 
+#include <asm/tracehook.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -27,10 +30,6 @@
 #include <asm/ldt.h>
 #include <asm/desc.h>
 
-/*
- * does not yet catch signals sent when the child dies.
- * in exit.c or in signal.c.
- */
 
 /*
  * Determines which flags the user has access to [1 = access, 0 = no access].
@@ -39,9 +38,6 @@
  */
 #define FLAG_MASK 0x00050dd5
 
-/* set's the trap flag. */
-#define TRAP_FLAG 0x100
-
 /*
  * Offset of eflags on child stack..
  */
@@ -114,6 +110,7 @@ static int putreg(struct task_struct *ch
 		case EFL:
 			value &= FLAG_MASK;
 			value |= get_stack_long(child, EFL_OFFSET) & ~FLAG_MASK;
+			clear_tsk_thread_flag(child, TIF_FORCED_TF);
 			break;
 	}
 	if (regno > GS*4)
@@ -134,6 +131,10 @@ static unsigned long getreg(struct task_
 		case GS:
 			retval = child->thread.gs;
 			break;
+		case EFL:
+			if (test_tsk_thread_flag(child, TIF_FORCED_TF))
+				retval &= ~X86_EFLAGS_TF;
+			goto fetch;
 		case DS:
 		case ES:
 		case SS:
@@ -141,10 +142,12 @@ static unsigned long getreg(struct task_
 			retval = 0xffff;
 			/* fall through */
 		default:
+		fetch:
 			if (regno > GS*4)
 				regno -= 2*4;
 			regno = regno - sizeof(struct pt_regs);
 			retval &= get_stack_long(child, regno);
+			break;
 	}
 	return retval;
 }
@@ -222,7 +225,7 @@ static inline int is_at_popf(struct task
 	return 0;
 }
 
-static void set_singlestep(struct task_struct *child)
+void tracehook_enable_single_step(struct task_struct *child)
 {
 	struct pt_regs *regs = get_child_regs(child);
 
@@ -236,11 +239,11 @@ static void set_singlestep(struct task_s
 	/*
 	 * If TF was already set, don't do anything else
 	 */
-	if (regs->eflags & TRAP_FLAG)
+	if (regs->eflags & X86_EFLAGS_TF)
 		return;
 
 	/* Set TF on the kernel stack.. */
-	regs->eflags |= TRAP_FLAG;
+	regs->eflags |= X86_EFLAGS_TF;
 
 	/*
 	 * ..but if TF is changed by the instruction we will trace,
@@ -250,43 +253,323 @@ static void set_singlestep(struct task_s
 	if (is_at_popf(child, regs))
 		return;
 	
-	child->ptrace |= PT_DTRACE;
+	set_tsk_thread_flag(child, TIF_FORCED_TF);
 }
 
-static void clear_singlestep(struct task_struct *child)
+void tracehook_disable_single_step(struct task_struct *child)
 {
 	/* Always clear TIF_SINGLESTEP... */
 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 
 	/* But touch TF only if it was set by us.. */
-	if (child->ptrace & PT_DTRACE) {
+	if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF)) {
 		struct pt_regs *regs = get_child_regs(child);
-		regs->eflags &= ~TRAP_FLAG;
-		child->ptrace &= ~PT_DTRACE;
+		regs->eflags &= ~X86_EFLAGS_TF;
 	}
 }
 
-/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure the single step bit is not set.
- */
-void ptrace_disable(struct task_struct *child)
+
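+/*
+ * utrace regset accessors for the general registers.  pos and count
+ * are byte offsets into the register frame, handled one 32-bit word
+ * at a time via getreg/putreg.
+ */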
+static int
+genregs_get(struct task_struct *target,
+	    const struct utrace_regset *regset,
+	    unsigned int pos, unsigned int count,
+	    void *kbuf, void __user *ubuf)
+{
+	if (kbuf) {
+		unsigned long *kp = kbuf;
+		while (count > 0) {
+			*kp++ = getreg(target, pos);
+			pos += 4;
+			count -= 4;
+		}
+	}
+	else {
+		unsigned long __user *up = ubuf;
+		while (count > 0) {
+			if (__put_user(getreg(target, pos), up++))
+				return -EFAULT;
+			pos += 4;
+			count -= 4;
+		}
+	}
+
+	return 0;
+}
+
+static int
+genregs_set(struct task_struct *target,
+	    const struct utrace_regset *regset,
+	    unsigned int pos, unsigned int count,
+	    const void *kbuf, const void __user *ubuf)
+{
+	int ret = 0;
+
+	if (kbuf) {
+		const unsigned long *kp = kbuf;
+		while (!ret && count > 0) {
+			ret = putreg(target, pos, *kp++);
+			pos += 4;
+			count -= 4;
+		}
+	}
+	else {
+		const unsigned long __user *up = ubuf;
+		while (!ret && count > 0) {
+			unsigned long val;
+			ret = __get_user(val, up++);
+			if (!ret)
+				ret = putreg(target, pos, val);
+			pos += 4;
+			count -= 4;
+		}
+	}
+
+	return ret;
+}
+
+static int
+fpregs_active(struct task_struct *target, const struct utrace_regset *regset)
+{
+	return tsk_used_math(target) ? regset->n : 0;
+}
+
+static int
+fpregs_get(struct task_struct *target,
+	   const struct utrace_regset *regset,
+	   unsigned int pos, unsigned int count,
+	   void *kbuf, void __user *ubuf)
+{
+	struct user_i387_struct fp;
+	int ret;
+
+	if (tsk_used_math(target)) {
+		if (target == current)
+			unlazy_fpu(target);
+	}
+	else
+		init_fpu(target);
+
+	ret = get_fpregs(&fp, target);
+	if (ret == 0)
+		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					    &fp, 0, -1);
+
+	return ret;
+}
+
+static int
+fpregs_set(struct task_struct *target,
+	   const struct utrace_regset *regset,
+	   unsigned int pos, unsigned int count,
+	   const void *kbuf, const void __user *ubuf)
+{
+	struct user_i387_struct fp;
+	int ret;
+
+	if (tsk_used_math(target)) {
+		if (target == current)
+			unlazy_fpu(target);
+	}
+	else if (pos == 0 && count == sizeof(fp))
+		set_stopped_child_used_math(target);
+	else
+		init_fpu(target);
+
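+	/*
+	 * For a partial write, fetch the current image first so the
+	 * bytes not being written are preserved.
+	 */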
+	if (pos > 0 || count < sizeof(fp)) {
+		ret = get_fpregs(&fp, target);
+		if (ret == 0)
+			ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+						   &fp, 0, -1);
+		if (ret)
+			return ret;
+		kbuf = &fp;
+	}
+	else if (kbuf == NULL) {
+		if (__copy_from_user(&fp, ubuf, sizeof(fp)))
+			return -EFAULT;
+		kbuf = &fp;
+	}
+
+	return set_fpregs(target, kbuf);
+}
+
+static int
+fpxregs_active(struct task_struct *target, const struct utrace_regset *regset)
+{
+	return !cpu_has_fxsr ? -ENODEV : tsk_used_math(target) ? regset->n : 0;
+}
+
+static int
+fpxregs_get(struct task_struct *target,
+	    const struct utrace_regset *regset,
+	    unsigned int pos, unsigned int count,
+	    void *kbuf, void __user *ubuf)
+{
+	if (!cpu_has_fxsr)
+		return -ENODEV;
+
+	if (tsk_used_math(target))
+		unlazy_fpu(target);
+	else
+		init_fpu(target);
+
+	return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				     &target->thread.i387.fxsave, 0, -1);
+}
+
+static int
+fpxregs_set(struct task_struct *target,
+	   const struct utrace_regset *regset,
+	   unsigned int pos, unsigned int count,
+	   const void *kbuf, const void __user *ubuf)
+{
+	int ret;
+
+	if (!cpu_has_fxsr)
+		return -ENODEV;
+
+	if (tsk_used_math(target))
+		unlazy_fpu(target);
+	else if (pos == 0 && count == sizeof(target->thread.i387.fxsave))
+		set_stopped_child_used_math(target);
+	else
+		init_fpu(target);
+
+	ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				   &target->thread.i387.fxsave, 0, -1);
+
+	updated_fpxregs(target);
+
+	return ret;
+}
+
+
+static int
+dbregs_active(struct task_struct *tsk, const struct utrace_regset *regset)
+{
+	if (tsk->thread.debugreg[DR_CONTROL] | tsk->thread.debugreg[DR_STATUS])
+		return 8;
+	return 0;
+}
+
+static int
+dbregs_get(struct task_struct *target,
+	   const struct utrace_regset *regset,
+	   unsigned int pos, unsigned int count,
+	   void *kbuf, void __user *ubuf)
 { 
-	clear_singlestep(child);
-	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
+	/*
+	 * The hardware updates the status register on a debug trap,
+	 * but do_debug (traps.c) saves it for us when that happens.
+	 * So whether the target is current or not, thread.debugreg is good.
+	 */
+
+	return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				     target->thread.debugreg, 0, -1);
+}
+
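+/*
+ * Write the debug registers.  Slots 0-3 hold addresses and must point
+ * into user space, slots 4-5 are reserved and must be written as zero,
+ * slot 6 is the status register, and slot 7 is the control register,
+ * validated below.
+ */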
+static int
+dbregs_set(struct task_struct *target,
+	   const struct utrace_regset *regset,
+	   unsigned int pos, unsigned int count,
+	   const void *kbuf, const void __user *ubuf)
+{
+	for (pos >>= 2, count >>= 2; count > 0; --count, ++pos) {
+		unsigned long val;
+		unsigned int i;
+
+		if (kbuf) {
+			val = *(const unsigned long *) kbuf;
+			kbuf += sizeof(unsigned long);
+		}
+		else {
+			if (__get_user(val, (unsigned long __user *) ubuf))
+				return -EFAULT;
+			ubuf += sizeof(unsigned long);
+		}
+
+		if (pos < 4) {
+			if (val >= TASK_SIZE - 3)
+				return -EIO;
+			goto set;
+		}
+		else if (pos < 6) {
+			if (val != 0)
+				return -EIO;
+			continue;
+		}
+		else if (pos < 7)
+			goto set;
+
+		/* Sanity-check data. Take one half-byte at once with
+		 * check = (val >> (16 + 4*i)) & 0xf. It contains the
+		 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
+		 * 2 and 3 are LENi. Given a list of invalid values,
+		 * we do mask |= 1 << invalid_value, so that
+		 * (mask >> check) & 1 is a correct test for invalid
+		 * values.
+		 *
+		 * R/Wi contains the type of the breakpoint /
+		 * watchpoint, LENi contains the length of the watched
+		 * data in the watchpoint case.
+		 *
+		 * The invalid values are:
+		 * - LENi == 0x10 (undefined), so mask |= 0x0f00.
+		 * - R/Wi == 0x10 (break on I/O reads or writes), so
+		 *   mask |= 0x4444.
+		 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
+		 *   0x1110.
+		 *
+		 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
+		 *
+		 * See the Intel Manual "System Programming Guide",
+		 * 15.2.4
+		 *
+		 * Note that LENi == 0x10 is defined on x86_64 in long
+		 * mode (i.e. even for 32-bit userspace software, but
+		 * 64-bit kernel), so the x86_64 mask value is 0x5454.
+		 * See the AMD manual no. 24593 (AMD64 System
+		 * Programming)*/
+		val &= ~DR_CONTROL_RESERVED;
+		for (i = 0; i < 4; i++)
+			if ((0x5f54 >> ((val >> (16 + 4*i)) & 0xf)) & 1)
+				return -EIO;
+		if (val)
+			set_tsk_thread_flag(target, TIF_DEBUG);
+		else
+			clear_tsk_thread_flag(target, TIF_DEBUG);
+
+	set:
+		target->thread.debugreg[pos] = val;
+		if (target == current)
+			switch (pos) {
+#define DBREG(n) case n: set_debugreg(target->thread.debugreg[n], n); break
+				DBREG(0);
+				DBREG(1);
+				DBREG(2);
+				DBREG(3);
+				DBREG(6);
+				DBREG(7);
+#undef	DBREG
+			}
+	}
+
+	return 0;
 }
 
+
 /*
  * Perform get_thread_area on behalf of the traced child.
  */
 static int
-ptrace_get_thread_area(struct task_struct *child,
-		       int idx, struct user_desc __user *user_desc)
+tls_get(struct task_struct *target,
+	const struct utrace_regset *regset,
+	unsigned int pos, unsigned int count,
+	void *kbuf,  void __user *ubuf)
 {
-	struct user_desc info;
-	struct desc_struct *desc;
+	struct user_desc info, *ip;
+	const struct desc_struct *desc;
 
 /*
  * Get the current Thread-Local Storage area:
@@ -308,23 +591,29 @@ ptrace_get_thread_area(struct task_struc
 #define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
 #define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
 
-	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-		return -EINVAL;
-
-	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-
-	info.entry_number = idx;
-	info.base_addr = GET_BASE(desc);
-	info.limit = GET_LIMIT(desc);
-	info.seg_32bit = GET_32BIT(desc);
-	info.contents = GET_CONTENTS(desc);
-	info.read_exec_only = !GET_WRITABLE(desc);
-	info.limit_in_pages = GET_LIMIT_PAGES(desc);
-	info.seg_not_present = !GET_PRESENT(desc);
-	info.useable = GET_USEABLE(desc);
-
-	if (copy_to_user(user_desc, &info, sizeof(info)))
-		return -EFAULT;
+	desc = &target->thread.tls_array[pos / sizeof(struct user_desc)];
+	ip = kbuf ?: &info;
+	memset(ip, 0, sizeof *ip);
+	for (; count > 0; count -= sizeof(struct user_desc), ++desc) {
+		ip->entry_number = (desc - &target->thread.tls_array[0]
+				    + GDT_ENTRY_TLS_MIN);
+		ip->base_addr = GET_BASE(desc);
+		ip->limit = GET_LIMIT(desc);
+		ip->seg_32bit = GET_32BIT(desc);
+		ip->contents = GET_CONTENTS(desc);
+		ip->read_exec_only = !GET_WRITABLE(desc);
+		ip->limit_in_pages = GET_LIMIT_PAGES(desc);
+		ip->seg_not_present = !GET_PRESENT(desc);
+		ip->useable = GET_USEABLE(desc);
+
+		if (kbuf)
+			++ip;
+		else {
+			if (__copy_to_user(ubuf, &info, sizeof(info)))
+				return -EFAULT;
+			ubuf += sizeof(info);
+		}
+	}
 
 	return 0;
 }
@@ -333,308 +622,154 @@ ptrace_get_thread_area(struct task_struc
  * Perform set_thread_area on behalf of the traced child.
  */
 static int
-ptrace_set_thread_area(struct task_struct *child,
-		       int idx, struct user_desc __user *user_desc)
+tls_set(struct task_struct *target,
+	const struct utrace_regset *regset,
+	unsigned int pos, unsigned int count,
+	const void *kbuf, const void __user *ubuf)
 {
 	struct user_desc info;
 	struct desc_struct *desc;
-
-	if (copy_from_user(&info, user_desc, sizeof(info)))
-		return -EFAULT;
-
-	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-		return -EINVAL;
-
-	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-	if (LDT_empty(&info)) {
-		desc->a = 0;
-		desc->b = 0;
-	} else {
-		desc->a = LDT_entry_a(&info);
-		desc->b = LDT_entry_b(&info);
+	struct desc_struct newtls[GDT_ENTRY_TLS_ENTRIES];
+	unsigned int i;
+	int cpu;
+
+	pos /= sizeof(struct user_desc);
+	count /= sizeof(struct user_desc);
+
+	desc = newtls;
+	for (i = 0; i < count; ++i, ++desc) {
+		const struct user_desc *ip;
+		if (kbuf) {
+			ip = kbuf;
+			kbuf += sizeof(struct user_desc);
+		}
+		else {
+			ip = &info;
+			if (__copy_from_user(&info, ubuf, sizeof(info)))
+				return -EFAULT;
+			ubuf += sizeof(struct user_desc);
+		}
+
+		if (LDT_empty(ip)) {
+			desc->a = 0;
+			desc->b = 0;
+		} else {
+			desc->a = LDT_entry_a(ip);
+			desc->b = LDT_entry_b(ip);
+		}
 	}
 
+	/*
+	 * We must not get preempted while modifying the TLS.
+	 */
+	cpu = get_cpu();
+	memcpy(&target->thread.tls_array[pos], newtls,
+	       count * sizeof(newtls[0]));
+	if (target == current)
+		load_TLS(&target->thread, cpu);
+	put_cpu();
+
 	return 0;
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
-{
-	struct user * dummy = NULL;
-	int i, ret;
-	unsigned long __user *datap = (unsigned long __user *)data;
-
-	switch (request) {
-	/* when I and D space are separate, these will need to be fixed. */
-	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
-	case PTRACE_PEEKDATA: {
-		unsigned long tmp;
-		int copied;
-
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		ret = -EIO;
-		if (copied != sizeof(tmp))
-			break;
-		ret = put_user(tmp, datap);
-		break;
-	}
-
-	/* read the word at location addr in the USER area. */
-	case PTRACE_PEEKUSR: {
-		unsigned long tmp;
-
-		ret = -EIO;
-		if ((addr & 3) || addr < 0 || 
-		    addr > sizeof(struct user) - 3)
-			break;
-
-		tmp = 0;  /* Default return condition */
-		if(addr < FRAME_SIZE*sizeof(long))
-			tmp = getreg(child, addr);
-		if(addr >= (long) &dummy->u_debugreg[0] &&
-		   addr <= (long) &dummy->u_debugreg[7]){
-			addr -= (long) &dummy->u_debugreg[0];
-			addr = addr >> 2;
-			tmp = child->thread.debugreg[addr];
-		}
-		ret = put_user(tmp, datap);
-		break;
-	}
-
-	/* when I and D space are separate, this will have to be fixed. */
-	case PTRACE_POKETEXT: /* write the word at location addr. */
-	case PTRACE_POKEDATA:
-		ret = 0;
-		if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
-			break;
-		ret = -EIO;
-		break;
-
-	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
-		ret = -EIO;
-		if ((addr & 3) || addr < 0 || 
-		    addr > sizeof(struct user) - 3)
-			break;
-
-		if (addr < FRAME_SIZE*sizeof(long)) {
-			ret = putreg(child, addr, data);
-			break;
-		}
-		/* We need to be very careful here.  We implicitly
-		   want to modify a portion of the task_struct, and we
-		   have to be selective about what portions we allow someone
-		   to modify. */
-
-		  ret = -EIO;
-		  if(addr >= (long) &dummy->u_debugreg[0] &&
-		     addr <= (long) &dummy->u_debugreg[7]){
-
-			  if(addr == (long) &dummy->u_debugreg[4]) break;
-			  if(addr == (long) &dummy->u_debugreg[5]) break;
-			  if(addr < (long) &dummy->u_debugreg[4] &&
-			     ((unsigned long) data) >= TASK_SIZE-3) break;
-			  
-			  /* Sanity-check data. Take one half-byte at once with
-			   * check = (val >> (16 + 4*i)) & 0xf. It contains the
-			   * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
-			   * 2 and 3 are LENi. Given a list of invalid values,
-			   * we do mask |= 1 << invalid_value, so that
-			   * (mask >> check) & 1 is a correct test for invalid
-			   * values.
-			   *
-			   * R/Wi contains the type of the breakpoint /
-			   * watchpoint, LENi contains the length of the watched
-			   * data in the watchpoint case.
-			   *
-			   * The invalid values are:
-			   * - LENi == 0x10 (undefined), so mask |= 0x0f00.
-			   * - R/Wi == 0x10 (break on I/O reads or writes), so
-			   *   mask |= 0x4444.
-			   * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
-			   *   0x1110.
-			   *
-			   * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
-			   *
-			   * See the Intel Manual "System Programming Guide",
-			   * 15.2.4
-			   *
-			   * Note that LENi == 0x10 is defined on x86_64 in long
-			   * mode (i.e. even for 32-bit userspace software, but
-			   * 64-bit kernel), so the x86_64 mask value is 0x5454.
-			   * See the AMD manual no. 24593 (AMD64 System
-			   * Programming)*/
-
-			  if(addr == (long) &dummy->u_debugreg[7]) {
-				  data &= ~DR_CONTROL_RESERVED;
-				  for(i=0; i<4; i++)
-					  if ((0x5f54 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
-						  goto out_tsk;
-				  if (data)
-					  set_tsk_thread_flag(child, TIF_DEBUG);
-				  else
-					  clear_tsk_thread_flag(child, TIF_DEBUG);
-			  }
-			  addr -= (long) &dummy->u_debugreg;
-			  addr = addr >> 2;
-			  child->thread.debugreg[addr] = data;
-			  ret = 0;
-		  }
-		  break;
-
-	case PTRACE_SYSEMU: /* continue and stop at next syscall, which will not be executed */
-	case PTRACE_SYSCALL:	/* continue and stop at next (return from) syscall */
-	case PTRACE_CONT:	/* restart after signal. */
-		ret = -EIO;
-		if (!valid_signal(data))
-			break;
-		if (request == PTRACE_SYSEMU) {
-			set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		} else if (request == PTRACE_SYSCALL) {
-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-			clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
-		} else {
-			clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		}
-		child->exit_code = data;
-		/* make sure the single step bit is not set. */
-		clear_singlestep(child);
-		wake_up_process(child);
-		ret = 0;
-		break;
 
 /*
- * make the child exit.  Best I can do is send it a sigkill. 
- * perhaps it should be put in the status that it wants to 
- * exit.
+ * Determine how many TLS slots are in use.
  */
-	case PTRACE_KILL:
-		ret = 0;
-		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
-			break;
-		child->exit_code = SIGKILL;
-		/* make sure the single step bit is not set. */
-		clear_singlestep(child);
-		wake_up_process(child);
-		break;
-
-	case PTRACE_SYSEMU_SINGLESTEP: /* Same as SYSEMU, but singlestep if not syscall */
-	case PTRACE_SINGLESTEP:	/* set the trap flag. */
-		ret = -EIO;
-		if (!valid_signal(data))
-			break;
-
-		if (request == PTRACE_SYSEMU_SINGLESTEP)
-			set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
-		else
-			clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
-
-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		set_singlestep(child);
-		child->exit_code = data;
-		/* give it a chance to run. */
-		wake_up_process(child);
-		ret = 0;
-		break;
-
-	case PTRACE_DETACH:
-		/* detach a process that was attached. */
-		ret = ptrace_detach(child, data);
-		break;
-
-	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
-	  	if (!access_ok(VERIFY_WRITE, datap, FRAME_SIZE*sizeof(long))) {
-			ret = -EIO;
-			break;
-		}
-		for ( i = 0; i < FRAME_SIZE*sizeof(long); i += sizeof(long) ) {
-			__put_user(getreg(child, i), datap);
-			datap++;
-		}
-		ret = 0;
-		break;
-	}
-
-	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
-		unsigned long tmp;
-	  	if (!access_ok(VERIFY_READ, datap, FRAME_SIZE*sizeof(long))) {
-			ret = -EIO;
-			break;
-		}
-		for ( i = 0; i < FRAME_SIZE*sizeof(long); i += sizeof(long) ) {
-			__get_user(tmp, datap);
-			putreg(child, i, tmp);
-			datap++;
-		}
-		ret = 0;
-		break;
-	}
-
-	case PTRACE_GETFPREGS: { /* Get the child FPU state. */
-		if (!access_ok(VERIFY_WRITE, datap,
-			       sizeof(struct user_i387_struct))) {
-			ret = -EIO;
-			break;
-		}
-		ret = 0;
-		if (!tsk_used_math(child))
-			init_fpu(child);
-		get_fpregs((struct user_i387_struct __user *)data, child);
-		break;
-	}
-
-	case PTRACE_SETFPREGS: { /* Set the child FPU state. */
-		if (!access_ok(VERIFY_READ, datap,
-			       sizeof(struct user_i387_struct))) {
-			ret = -EIO;
-			break;
-		}
-		set_stopped_child_used_math(child);
-		set_fpregs(child, (struct user_i387_struct __user *)data);
-		ret = 0;
-		break;
-	}
-
-	case PTRACE_GETFPXREGS: { /* Get the child extended FPU state. */
-		if (!access_ok(VERIFY_WRITE, datap,
-			       sizeof(struct user_fxsr_struct))) {
-			ret = -EIO;
+static int
+tls_active(struct task_struct *target, const struct utrace_regset *regset)
+{
+	int i;
+	for (i = GDT_ENTRY_TLS_ENTRIES; i > 0; --i) {
+		struct desc_struct *desc = &target->thread.tls_array[i - 1];
+		if ((desc->a | desc->b) != 0)
 			break;
-		}
-		if (!tsk_used_math(child))
-			init_fpu(child);
-		ret = get_fpxregs((struct user_fxsr_struct __user *)data, child);
-		break;
 	}
+	return i;
+}
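
tls_active() reports how many TLS regset slots are "active" by
scanning for the highest non-null descriptor; counting down from the
top lets the loop index double as the count.  A minimal userspace
sketch (the two-word struct here is a mock standing in for the
kernel's desc_struct):

	#include <stdio.h>

	#define GDT_ENTRY_TLS_ENTRIES 3	/* i386 value */

	struct desc { unsigned int a, b; };	/* mock descriptor */

	static int tls_slots_used(const struct desc *tls)
	{
		int i;
		for (i = GDT_ENTRY_TLS_ENTRIES; i > 0; --i)
			if (tls[i - 1].a | tls[i - 1].b)
				break;		/* slot i-1 is in use */
		return i;
	}

	int main(void)
	{
		struct desc tls[GDT_ENTRY_TLS_ENTRIES] = {
			{ 0x0000ffff, 0x00cff300 },	/* slot 0 in use */
			{ 0, 0 }, { 0, 0 },		/* 1 and 2 empty */
		};
		printf("%d\n", tls_slots_used(tls));	/* prints 1 */
		return 0;
	}
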
 
-	case PTRACE_SETFPXREGS: { /* Set the child extended FPU state. */
-		if (!access_ok(VERIFY_READ, datap,
-			       sizeof(struct user_fxsr_struct))) {
-			ret = -EIO;
-			break;
-		}
-		set_stopped_child_used_math(child);
-		ret = set_fpxregs(child, (struct user_fxsr_struct __user *)data);
-		break;
-	}
 
+/*
+ * These are our native regset flavors.
+ * XXX ioperm? vm86?
+ */
+static const struct utrace_regset native_regsets[] = {
+	{
+		.n = FRAME_SIZE, .size = sizeof(long), .align = sizeof(long),
+		.get = genregs_get, .set = genregs_set
+	},
+	{
+		.n = sizeof(struct user_i387_struct) / sizeof(long),
+		.size = sizeof(long), .align = sizeof(long),
+		.active = fpregs_active,
+		.get = fpregs_get, .set = fpregs_set
+	},
+	{
+		.n = sizeof(struct user_fxsr_struct) / sizeof(long),
+		.size = sizeof(long), .align = sizeof(long),
+		.active = fpxregs_active,
+		.get = fpxregs_get, .set = fpxregs_set
+	},
+	{
+		.n = GDT_ENTRY_TLS_ENTRIES,
+		.bias = GDT_ENTRY_TLS_MIN,
+		.size = sizeof(struct user_desc),
+		.align = sizeof(struct user_desc),
+		.active = tls_active, .get = tls_get, .set = tls_set
+	},
+	{
+		.n = 8, .size = sizeof(long), .align = sizeof(long),
+		.active = dbregs_active,
+		.get = dbregs_get, .set = dbregs_set
+	},
+};
+
+const struct utrace_regset_view utrace_i386_native = {
+	.name = "i386", .e_machine = EM_386,
+	.regsets = native_regsets,
+	.n = sizeof native_regsets / sizeof native_regsets[0],
+};
+EXPORT_SYMBOL_GPL(utrace_i386_native);
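
The view's .n uses the classic sizeof-array idiom, and the regset
numbers that arch_ptrace() passes below are simply positions in
native_regsets[]: 0 = general, 1 = i387, 2 = fxsr, 3 = TLS,
4 = debug.  A trivial sketch making both explicit (the name strings
are descriptive labels for illustration, not kernel identifiers):

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	static const char *const i386_regset_names[] = {
		"general", "i387", "fxsr", "tls", "debug",
	};

	int main(void)
	{
		size_t i;
		for (i = 0; i < ARRAY_SIZE(i386_regset_names); i++)
			printf("regset %zu: %s\n", i, i386_regset_names[i]);
		return 0;
	}
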
+
+#ifdef CONFIG_PTRACE
+static const struct ptrace_layout_segment i386_uarea[] = {
+	{0, FRAME_SIZE*4, 0, 0},
+	{offsetof(struct user, u_debugreg[0]),
+	 offsetof(struct user, u_debugreg[8]), 4, 0},
+	{0, 0, -1, 0}
+};
+
+fastcall int arch_ptrace(long *req, struct task_struct *child,
+			 struct utrace_attached_engine *engine,
+			 unsigned long addr, unsigned long data, long *val)
+{
+	switch (*req) {
+	case PTRACE_PEEKUSR:
+		return ptrace_peekusr(child, engine, i386_uarea, addr, data);
+	case PTRACE_POKEUSR:
+		return ptrace_pokeusr(child, engine, i386_uarea, addr, data);
+	case PTRACE_GETREGS:
+		return ptrace_whole_regset(child, engine, data, 0, 0);
+	case PTRACE_SETREGS:
+		return ptrace_whole_regset(child, engine, data, 0, 1);
+	case PTRACE_GETFPREGS:
+		return ptrace_whole_regset(child, engine, data, 1, 0);
+	case PTRACE_SETFPREGS:
+		return ptrace_whole_regset(child, engine, data, 1, 1);
+	case PTRACE_GETFPXREGS:
+		return ptrace_whole_regset(child, engine, data, 2, 0);
+	case PTRACE_SETFPXREGS:
+		return ptrace_whole_regset(child, engine, data, 2, 1);
 	case PTRACE_GET_THREAD_AREA:
-		ret = ptrace_get_thread_area(child, addr,
-					(struct user_desc __user *) data);
-		break;
-
 	case PTRACE_SET_THREAD_AREA:
-		ret = ptrace_set_thread_area(child, addr,
-					(struct user_desc __user *) data);
-		break;
-
-	default:
-		ret = ptrace_request(child, request, addr, data);
-		break;
+		return ptrace_onereg_access(child, engine,
+					    utrace_native_view(current), 3,
+					    addr, (void __user *)data,
+					    *req == PTRACE_SET_THREAD_AREA);
 	}
- out_tsk:
-	return ret;
+	return -ENOSYS;
 }
+#endif
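
The i386_uarea table above drives PEEKUSR/POKEUSR: each segment maps
a byte range of the traditional struct user onto a (regset, offset)
pair.  A hedged sketch of the kind of walk ptrace_peekusr() presumably
performs (the segment fields follow the initializers above: start,
end, regset index with -1 as terminator, regset offset; the numeric
offsets below are illustrative, not the real offsetof values):

	#include <stdio.h>

	struct seg { unsigned int start, end; int regset; unsigned int offset; };

	static const struct seg uarea[] = {
		{ 0, 17 * 4, 0, 0 },		/* GPR frame -> regset 0 */
		{ 256, 256 + 8 * 4, 4, 0 },	/* u_debugreg[0..7] -> regset 4 */
		{ 0, 0, -1, 0 }
	};

	static int map_uarea(unsigned int addr, int *regset, unsigned int *pos)
	{
		const struct seg *s;
		for (s = uarea; s->regset != -1; ++s)
			if (addr >= s->start && addr < s->end) {
				*regset = s->regset;
				*pos = s->offset + (addr - s->start);
				return 0;
			}
		return -1;			/* -EIO in the kernel */
	}

	int main(void)
	{
		int rs;
		unsigned int pos;
		if (map_uarea(256 + 7 * 4, &rs, &pos) == 0)
			printf("regset %d, pos %u\n", rs, pos); /* 4, 28 */
		return 0;
	}
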
 
 void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
 {
@@ -658,78 +793,24 @@ void send_sigtrap(struct task_struct *ts
  * - triggered by current->work.syscall_trace
  */
 __attribute__((regparm(3)))
-int do_syscall_trace(struct pt_regs *regs, int entryexit)
+void do_syscall_trace(struct pt_regs *regs, int entryexit)
 {
-	int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);
-	/*
-	 * With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP for syscall
-	 * interception
-	 */
-	int is_singlestep = !is_sysemu && test_thread_flag(TIF_SINGLESTEP);
-	int ret = 0;
-
 	/* do the secure computing check first */
 	if (!entryexit)
 		secure_computing(regs->orig_eax);
 
-	if (unlikely(current->audit_context)) {
-		if (entryexit)
-			audit_syscall_exit(AUDITSC_RESULT(regs->eax),
-						regs->eax);
-		/* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
-		 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
-		 * not used, entry.S will call us only on syscall exit, not
-		 * entry; so when TIF_SYSCALL_AUDIT is used we must avoid
-		 * calling send_sigtrap() on syscall entry.
-		 *
-		 * Note that when PTRACE_SYSEMU_SINGLESTEP is used,
-	 * is_singlestep is false, despite its name, so we will still do
-		 * the correct thing.
-		 */
-		else if (is_singlestep)
-			goto out;
-	}
-
-	if (!(current->ptrace & PT_PTRACED))
-		goto out;
+	if (unlikely(current->audit_context) && entryexit)
+		audit_syscall_exit(AUDITSC_RESULT(regs->eax), regs->eax);
 
-	/* If a process stops on the 1st tracepoint with SYSCALL_TRACE
-	 * and then is resumed with SYSEMU_SINGLESTEP, it will come in
-	 * here. We have to check this and return */
-	if (is_sysemu && entryexit)
-		return 0;
-
-	/* Fake a debug trap */
-	if (is_singlestep)
-		send_sigtrap(current, regs, 0);
-
- 	if (!test_thread_flag(TIF_SYSCALL_TRACE) && !is_sysemu)
-		goto out;
-
-	/* the 0x80 provides a way for the tracing parent to distinguish
-	   between a syscall stop and SIGTRAP delivery */
-	/* Note that the debugger could change the result of test_thread_flag!*/
-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80:0));
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(regs, entryexit);
 
-	/*
-	 * this isn't the same as continuing with a signal, but it will do
-	 * for normal use.  strace only continues with a signal if the
-	 * stopping signal is not SIGTRAP.  -brl
-	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
+	if (test_thread_flag(TIF_SINGLESTEP) && entryexit) {
+		send_sigtrap(current, regs, 0);	/* XXX */
+		tracehook_report_syscall_step(regs);
 	}
-	ret = is_sysemu;
-out:
+
 	if (unlikely(current->audit_context) && !entryexit)
 		audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_eax,
 				    regs->ebx, regs->ecx, regs->edx, regs->esi);
-	if (ret == 0)
-		return 0;
-
-	regs->orig_eax = -1; /* force skip of syscall restarting */
-	if (unlikely(current->audit_context))
-		audit_syscall_exit(AUDITSC_RESULT(regs->eax), regs->eax);
-	return 1;
 }
--- linux-2.6/arch/i386/kernel/entry.S.utrace-ptrace-compat
+++ linux-2.6/arch/i386/kernel/entry.S
@@ -314,7 +314,7 @@ sysenter_past_esp:
 	GET_THREAD_INFO(%ebp)
 
 	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+	testw $(_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
 	jnz syscall_trace_entry
 	cmpl $(nr_syscalls), %eax
 	jae syscall_badsys
@@ -348,7 +348,7 @@ ENTRY(system_call)
 no_singlestep:
 					# system call tracing in operation / emulation
 	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+	testw $(_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
 	jnz syscall_trace_entry
 	cmpl $(nr_syscalls), %eax
 	jae syscall_badsys
@@ -476,9 +476,6 @@ syscall_trace_entry:
 	movl %esp, %eax
 	xorl %edx,%edx
 	call do_syscall_trace
-	cmpl $0, %eax
-	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
-					# so must skip actual syscall
 	movl ORIG_EAX(%esp), %eax
 	cmpl $(nr_syscalls), %eax
 	jnae syscall_call
--- linux-2.6/arch/arm26/kernel/ptrace.c.utrace-ptrace-compat
+++ linux-2.6/arch/arm26/kernel/ptrace.c
@@ -653,30 +653,16 @@ asmlinkage void syscall_trace(int why, s
 {
 	unsigned long ip;
 
-	if (!test_thread_flag(TIF_SYSCALL_TRACE))
-		return;
-	if (!(current->ptrace & PT_PTRACED))
-		return;
+	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+		/*
+		 * Save IP.  IP is used to denote syscall entry/exit:
+		 *  IP = 0 -> entry, = 1 -> exit
+		 */
+		ip = regs->ARM_ip;
+		regs->ARM_ip = why;
 
-	/*
-	 * Save IP.  IP is used to denote syscall entry/exit:
-	 *  IP = 0 -> entry, = 1 -> exit
-	 */
-	ip = regs->ARM_ip;
-	regs->ARM_ip = why;
+		tracehook_report_syscall(regs, why);
 
-	/* the 0x80 provides a way for the tracing parent to distinguish
-	   between a syscall stop and SIGTRAP delivery */
-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
-				 ? 0x80 : 0));
-	/*
-	 * this isn't the same as continuing with a signal, but it will do
-	 * for normal use.  strace only continues with a signal if the
-	 * stopping signal is not SIGTRAP.  -brl
-	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
+		regs->ARM_ip = ip;
 	}
-	regs->ARM_ip = ip;
 }
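
The arm26 hook above borrows a register slot to carry the entry/exit
flag: ARM_ip is saved, overwritten with "why" for the duration of the
report, then restored.  The pattern in isolation (mock regs struct,
nothing ARM-specific about it):

	#include <stdio.h>

	struct fake_regs { unsigned long ip; };

	static void report(const struct fake_regs *regs)
	{
		printf("stop: %s\n", regs->ip ? "exit" : "entry");
	}

	static void syscall_trace(int why, struct fake_regs *regs)
	{
		unsigned long ip = regs->ip;	/* save the real value */
		regs->ip = why;			/* 0 = entry, 1 = exit */
		report(regs);
		regs->ip = ip;			/* restore */
	}

	int main(void)
	{
		struct fake_regs regs = { .ip = 0xdeadbeef };
		syscall_trace(0, &regs);	/* "stop: entry" */
		syscall_trace(1, &regs);	/* "stop: exit" */
		return 0;
	}
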
--- linux-2.6/arch/mips/kernel/ptrace.c.utrace-ptrace-compat
+++ linux-2.6/arch/mips/kernel/ptrace.c
@@ -475,26 +475,9 @@ asmlinkage void do_syscall_trace(struct 
 		audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
 		                   regs->regs[2]);
 
-	if (!(current->ptrace & PT_PTRACED))
-		goto out;
-	if (!test_thread_flag(TIF_SYSCALL_TRACE))
-		goto out;
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(regs, entryexit);
 
-	/* The 0x80 provides a way for the tracing parent to distinguish
-	   between a syscall stop and SIGTRAP delivery */
-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
-	                         0x80 : 0));
-
-	/*
-	 * this isn't the same as continuing with a signal, but it will do
-	 * for normal use.  strace only continues with a signal if the
-	 * stopping signal is not SIGTRAP.  -brl
-	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
-	}
- out:
 	if (unlikely(current->audit_context) && !entryexit)
 		audit_syscall_entry(audit_arch(), regs->regs[2],
 				    regs->regs[4], regs->regs[5],
--- linux-2.6/arch/mips/kernel/sysirix.c.utrace-ptrace-compat
+++ linux-2.6/arch/mips/kernel/sysirix.c
@@ -582,7 +582,7 @@ out:
 
 asmlinkage int irix_getpid(struct pt_regs *regs)
 {
-	regs->regs[3] = current->real_parent->pid;
+	regs->regs[3] = current->parent->pid;
 	return current->pid;
 }
 
--- linux-2.6/arch/powerpc/lib/sstep.c.utrace-ptrace-compat
+++ linux-2.6/arch/powerpc/lib/sstep.c
@@ -12,6 +12,9 @@
 #include <linux/ptrace.h>
 #include <asm/sstep.h>
 #include <asm/processor.h>
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#endif
 
 extern char system_call_common[];
 
--- linux-2.6/arch/powerpc/platforms/cell/spufs/run.c.utrace-ptrace-compat
+++ linux-2.6/arch/powerpc/platforms/cell/spufs/run.c
@@ -51,6 +51,7 @@ static inline int spu_run_fini(struct sp
 
 	if (signal_pending(current))
 		ret = -ERESTARTSYS;
+#if 0 /* XXX */
 	if (unlikely(current->ptrace & PT_PTRACED)) {
 		if ((*status & SPU_STATUS_STOPPED_BY_STOP)
 		    && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
@@ -58,6 +59,7 @@ static inline int spu_run_fini(struct sp
 			ret = -ERESTARTSYS;
 		}
 	}
+#endif
 	return ret;
 }
 
--- linux-2.6/arch/powerpc/kernel/ptrace32.c.utrace-ptrace-compat
+++ linux-2.6/arch/powerpc/kernel/ptrace32.c
@@ -1,436 +0,0 @@
-/*
- * ptrace for 32-bit processes running on a 64-bit kernel.
- *
- *  PowerPC version
- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- *  Derived from "arch/m68k/kernel/ptrace.c"
- *  Copyright (C) 1994 by Hamish Macdonald
- *  Taken from linux/kernel/ptrace.c and modified for M680x0.
- *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
- *
- * Modified by Cort Dougan (cort@hq.fsmlabs.com)
- * and Paul Mackerras (paulus@samba.org).
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file COPYING in the main directory of
- * this archive for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/user.h>
-#include <linux/security.h>
-#include <linux/signal.h>
-
-#include <asm/uaccess.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-
-#include "ptrace-common.h"
-
-/*
- * does not yet catch signals sent when the child dies.
- * in exit.c or in signal.c.
- */
-
-long compat_sys_ptrace(int request, int pid, unsigned long addr,
-		       unsigned long data)
-{
-	struct task_struct *child;
-	int ret;
-
-	lock_kernel();
-	if (request == PTRACE_TRACEME) {
-		ret = ptrace_traceme();
-		goto out;
-	}
-
-	child = ptrace_get_task_struct(pid);
-	if (IS_ERR(child)) {
-		ret = PTR_ERR(child);
-		goto out;
-	}
-
-	if (request == PTRACE_ATTACH) {
-		ret = ptrace_attach(child);
-		goto out_tsk;
-	}
-
-	ret = ptrace_check_attach(child, request == PTRACE_KILL);
-	if (ret < 0)
-		goto out_tsk;
-
-	switch (request) {
-	/* when I and D space are separate, these will need to be fixed. */
-	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
-	case PTRACE_PEEKDATA: {
-		unsigned int tmp;
-		int copied;
-
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		ret = -EIO;
-		if (copied != sizeof(tmp))
-			break;
-		ret = put_user(tmp, (u32 __user *)data);
-		break;
-	}
-
-	/*
-	 * Read 4 bytes of the other process' storage
-	 *  data is a pointer specifying where the user wants the
-	 *	4 bytes copied into
-	 *  addr is a pointer in the user's storage that contains an 8 byte
-	 *	address in the other process of the 4 bytes that are to be read
-	 * (this is run in a 32-bit process looking at a 64-bit process)
-	 * when I and D space are separate, these will need to be fixed.
-	 */
-	case PPC_PTRACE_PEEKTEXT_3264:
-	case PPC_PTRACE_PEEKDATA_3264: {
-		u32 tmp;
-		int copied;
-		u32 __user * addrOthers;
-
-		ret = -EIO;
-
-		/* Get the addr in the other process that we want to read */
-		if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
-			break;
-
-		copied = access_process_vm(child, (u64)addrOthers, &tmp,
-				sizeof(tmp), 0);
-		if (copied != sizeof(tmp))
-			break;
-		ret = put_user(tmp, (u32 __user *)data);
-		break;
-	}
-
-	/* Read a register (specified by ADDR) out of the "user area" */
-	case PTRACE_PEEKUSR: {
-		int index;
-		unsigned long tmp;
-
-		ret = -EIO;
-		/* convert to index and check */
-		index = (unsigned long) addr >> 2;
-		if ((addr & 3) || (index > PT_FPSCR32))
-			break;
-
-		if (index < PT_FPR0) {
-			tmp = get_reg(child, index);
-		} else {
-			flush_fp_to_thread(child);
-			/*
-			 * the user space code considers the floating point
-			 * to be an array of unsigned int (32 bits) - the
-			 * index passed in is based on this assumption.
-			 */
-			tmp = ((unsigned int *)child->thread.fpr)[index - PT_FPR0];
-		}
-		ret = put_user((unsigned int)tmp, (u32 __user *)data);
-		break;
-	}
-  
-	/*
-	 * Read 4 bytes out of the other process' pt_regs area
-	 *  data is a pointer specifying where the user wants the
-	 *	4 bytes copied into
-	 *  addr is the offset into the other process' pt_regs structure
-	 *	that is to be read
-	 * (this is run in a 32-bit process looking at a 64-bit process)
-	 */
-	case PPC_PTRACE_PEEKUSR_3264: {
-		u32 index;
-		u32 reg32bits;
-		u64 tmp;
-		u32 numReg;
-		u32 part;
-
-		ret = -EIO;
-		/* Determine which register the user wants */
-		index = (u64)addr >> 2;
-		numReg = index / 2;
-		/* Determine which part of the register the user wants */
-		if (index % 2)
-			part = 1;  /* want the 2nd half of the register (right-most). */
-		else
-			part = 0;  /* want the 1st half of the register (left-most). */
-
-		/* Validate the input - check to see if address is on the wrong boundary or beyond the end of the user area */
-		if ((addr & 3) || numReg > PT_FPSCR)
-			break;
-
-		if (numReg >= PT_FPR0) {
-			flush_fp_to_thread(child);
-			tmp = ((unsigned long int *)child->thread.fpr)[numReg - PT_FPR0];
-		} else { /* register within PT_REGS struct */
-			tmp = get_reg(child, numReg);
-		} 
-		reg32bits = ((u32*)&tmp)[part];
-		ret = put_user(reg32bits, (u32 __user *)data);
-		break;
-	}
-
-	/* If I and D space are separate, this will have to be fixed. */
-	case PTRACE_POKETEXT: /* write the word at location addr. */
-	case PTRACE_POKEDATA: {
-		unsigned int tmp;
-		tmp = data;
-		ret = 0;
-		if (access_process_vm(child, addr, &tmp, sizeof(tmp), 1)
-				== sizeof(tmp))
-			break;
-		ret = -EIO;
-		break;
-	}
-
-	/*
-	 * Write 4 bytes into the other process' storage
-	 *  data is the 4 bytes that the user wants written
-	 *  addr is a pointer in the user's storage that contains an
-	 *	8 byte address in the other process where the 4 bytes
-	 *	are to be written
-	 * (this is run in a 32-bit process looking at a 64-bit process)
-	 * when I and D space are separate, these will need to be fixed.
-	 */
-	case PPC_PTRACE_POKETEXT_3264:
-	case PPC_PTRACE_POKEDATA_3264: {
-		u32 tmp = data;
-		u32 __user * addrOthers;
-
-		/* Get the addr in the other process that we want to write into */
-		ret = -EIO;
-		if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
-			break;
-		ret = 0;
-		if (access_process_vm(child, (u64)addrOthers, &tmp,
-					sizeof(tmp), 1) == sizeof(tmp))
-			break;
-		ret = -EIO;
-		break;
-	}
-
-	/* write the word at location addr in the USER area */
-	case PTRACE_POKEUSR: {
-		unsigned long index;
-
-		ret = -EIO;
-		/* convert to index and check */
-		index = (unsigned long) addr >> 2;
-		if ((addr & 3) || (index > PT_FPSCR32))
-			break;
-
-		if (index == PT_ORIG_R3)
-			break;
-		if (index < PT_FPR0) {
-			ret = put_reg(child, index, data);
-		} else {
-			flush_fp_to_thread(child);
-			/*
-			 * the user space code considers the floating point
-			 * to be an array of unsigned int (32 bits) - the
-			 * index passed in is based on this assumption.
-			 */
-			((unsigned int *)child->thread.fpr)[index - PT_FPR0] = data;
-			ret = 0;
-		}
-		break;
-	}
-
-	/*
-	 * Write 4 bytes into the other process' pt_regs area
-	 *  data is the 4 bytes that the user wants written
-	 *  addr is the offset into the other process' pt_regs structure
-	 *	that is to be written into
-	 * (this is run in a 32-bit process looking at a 64-bit process)
-	 */
-	case PPC_PTRACE_POKEUSR_3264: {
-		u32 index;
-		u32 numReg;
-
-		ret = -EIO;
-		/* Determine which register the user wants */
-		index = (u64)addr >> 2;
-		numReg = index / 2;
-		/*
-		 * Validate the input - check to see if address is on the
-		 * wrong boundary or beyond the end of the user area
-		 */
-		if ((addr & 3) || (numReg > PT_FPSCR))
-			break;
-		/* Ensure it is a register we let them change */
-		if ((numReg == PT_ORIG_R3)
-				|| ((numReg > PT_CCR) && (numReg < PT_FPR0)))
-			break;
-		if (numReg >= PT_FPR0) {
-			flush_fp_to_thread(child);
-		}
-		if (numReg == PT_MSR)
-			data = (data & MSR_DEBUGCHANGE)
-				| (child->thread.regs->msr & ~MSR_DEBUGCHANGE);
-		((u32*)child->thread.regs)[index] = data;
-		ret = 0;
-		break;
-	}
-
-	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
-	case PTRACE_CONT: { /* restart after signal. */
-		ret = -EIO;
-		if (!valid_signal(data))
-			break;
-		if (request == PTRACE_SYSCALL)
-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		else
-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		child->exit_code = data;
-		/* make sure the single step bit is not set. */
-		clear_single_step(child);
-		wake_up_process(child);
-		ret = 0;
-		break;
-	}
-
-	/*
-	 * make the child exit.  Best I can do is send it a sigkill.
-	 * perhaps it should be put in the status that it wants to
-	 * exit.
-	 */
-	case PTRACE_KILL: {
-		ret = 0;
-		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
-			break;
-		child->exit_code = SIGKILL;
-		/* make sure the single step bit is not set. */
-		clear_single_step(child);
-		wake_up_process(child);
-		break;
-	}
-
-	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
-		ret = -EIO;
-		if (!valid_signal(data))
-			break;
-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		set_single_step(child);
-		child->exit_code = data;
-		/* give it a chance to run. */
-		wake_up_process(child);
-		ret = 0;
-		break;
-	}
-
-	case PTRACE_GET_DEBUGREG: {
-		ret = -EINVAL;
-		/* We only support one DABR and no IABRS at the moment */
-		if (addr > 0)
-			break;
-		ret = put_user(child->thread.dabr, (u32 __user *)data);
-		break;
-	}
-
-	case PTRACE_SET_DEBUGREG:
-		ret = ptrace_set_debugreg(child, addr, data);
-		break;
-
-	case PTRACE_DETACH:
-		ret = ptrace_detach(child, data);
-		break;
-
-	case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
-		int i;
-		unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
-		unsigned int __user *tmp = (unsigned int __user *)addr;
-
-		for (i = 0; i < 32; i++) {
-			ret = put_user(*reg, tmp);
-			if (ret)
-				break;
-			reg++;
-			tmp++;
-		}
-		break;
-	}
-
-	case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
-		int i;
-		unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
-		unsigned int __user *tmp = (unsigned int __user *)addr;
-
-		for (i = 0; i < 32; i++) {
-			ret = get_user(*reg, tmp);
-			if (ret)
-				break;
-			reg++;
-			tmp++;
-		}
-		break;
-	}
-
-	case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
-		int i;
-		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
-		unsigned int __user *tmp = (unsigned int __user *)addr;
-
-		flush_fp_to_thread(child);
-
-		for (i = 0; i < 32; i++) {
-			ret = put_user(*reg, tmp);
-			if (ret)
-				break;
-			reg++;
-			tmp++;
-		}
-		break;
-	}
-
-	case PPC_PTRACE_SETFPREGS: { /* Set FPRs 0 - 31. */
-		int i;
-		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
-		unsigned int __user *tmp = (unsigned int __user *)addr;
-
-		flush_fp_to_thread(child);
-
-		for (i = 0; i < 32; i++) {
-			ret = get_user(*reg, tmp);
-			if (ret)
-				break;
-			reg++;
-			tmp++;
-		}
-		break;
-	}
-
-	case PTRACE_GETEVENTMSG:
-		ret = put_user(child->ptrace_message, (unsigned int __user *) data);
-		break;
-
-#ifdef CONFIG_ALTIVEC
-	case PTRACE_GETVRREGS:
-		/* Get the child altivec register state. */
-		flush_altivec_to_thread(child);
-		ret = get_vrregs((unsigned long __user *)data, child);
-		break;
-
-	case PTRACE_SETVRREGS:
-		/* Set the child altivec register state. */
-		flush_altivec_to_thread(child);
-		ret = set_vrregs(child, (unsigned long __user *)data);
-		break;
-#endif
-
-	default:
-		ret = ptrace_request(child, request, addr, data);
-		break;
-	}
-out_tsk:
-	put_task_struct(child);
-out:
-	unlock_kernel();
-	return ret;
-}
--- linux-2.6/arch/powerpc/kernel/ptrace-common.h.utrace-ptrace-compat
+++ linux-2.6/arch/powerpc/kernel/ptrace-common.h
@@ -1,161 +0,0 @@
-/*
- *    Copyright (c) 2002 Stephen Rothwell, IBM Corporation
- *    Extracted from ptrace.c and ptrace32.c
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file README.legal in the main directory of
- * this archive for more details.
- */
-
-#ifndef _PPC64_PTRACE_COMMON_H
-#define _PPC64_PTRACE_COMMON_H
-
-#include <asm/system.h>
-
-/*
- * Set of msr bits that gdb can change on behalf of a process.
- */
-#define MSR_DEBUGCHANGE	(MSR_FE0 | MSR_SE | MSR_BE | MSR_FE1)
-
-/*
- * Get contents of register REGNO in task TASK.
- */
-static inline unsigned long get_reg(struct task_struct *task, int regno)
-{
-	unsigned long tmp = 0;
-
-	/*
-	 * Put the correct FP bits in, they might be wrong as a result
-	 * of our lazy FP restore.
-	 */
-	if (regno == PT_MSR) {
-		tmp = ((unsigned long *)task->thread.regs)[PT_MSR];
-		tmp |= task->thread.fpexc_mode;
-	} else if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
-		tmp = ((unsigned long *)task->thread.regs)[regno];
-	}
-
-	return tmp;
-}
-
-/*
- * Write contents of register REGNO in task TASK.
- */
-static inline int put_reg(struct task_struct *task, int regno,
-			  unsigned long data)
-{
-	if (regno < PT_SOFTE) {
-		if (regno == PT_MSR)
-			data = (data & MSR_DEBUGCHANGE)
-				| (task->thread.regs->msr & ~MSR_DEBUGCHANGE);
-		((unsigned long *)task->thread.regs)[regno] = data;
-		return 0;
-	}
-	return -EIO;
-}
-
-static inline void set_single_step(struct task_struct *task)
-{
-	struct pt_regs *regs = task->thread.regs;
-	if (regs != NULL)
-		regs->msr |= MSR_SE;
-	set_tsk_thread_flag(task, TIF_SINGLESTEP);
-}
-
-static inline void clear_single_step(struct task_struct *task)
-{
-	struct pt_regs *regs = task->thread.regs;
-	if (regs != NULL)
-		regs->msr &= ~MSR_SE;
-	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
-}
-
-#ifdef CONFIG_ALTIVEC
-/*
- * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
- * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
- * corresponding vector registers.  Quadword 32 contains the vscr as the
- * last word (offset 12) within that quadword.  Quadword 33 contains the
- * vrsave as the first word (offset 0) within the quadword.
- *
- * This definition of the VMX state is compatible with the current PPC32
- * ptrace interface.  This allows signal handling and ptrace to use the
- * same structures.  This also simplifies the implementation of a bi-arch
- * (combined 32- and 64-bit) gdb.
- */
-
-/*
- * Get contents of AltiVec register state in task TASK
- */
-static inline int get_vrregs(unsigned long __user *data,
-			     struct task_struct *task)
-{
-	unsigned long regsize;
-
-	/* copy AltiVec registers VR[0] .. VR[31] */
-	regsize = 32 * sizeof(vector128);
-	if (copy_to_user(data, task->thread.vr, regsize))
-		return -EFAULT;
-	data += (regsize / sizeof(unsigned long));
-
-	/* copy VSCR */
-	regsize = 1 * sizeof(vector128);
-	if (copy_to_user(data, &task->thread.vscr, regsize))
-		return -EFAULT;
-	data += (regsize / sizeof(unsigned long));
-
-	/* copy VRSAVE */
-	if (put_user(task->thread.vrsave, (u32 __user *)data))
-		return -EFAULT;
-
-	return 0;
-}
-
-/*
- * Write contents of AltiVec register state into task TASK.
- */
-static inline int set_vrregs(struct task_struct *task,
-			     unsigned long __user *data)
-{
-	unsigned long regsize;
-
-	/* copy AltiVec registers VR[0] .. VR[31] */
-	regsize = 32 * sizeof(vector128);
-	if (copy_from_user(task->thread.vr, data, regsize))
-		return -EFAULT;
-	data += (regsize / sizeof(unsigned long));
-
-	/* copy VSCR */
-	regsize = 1 * sizeof(vector128);
-	if (copy_from_user(&task->thread.vscr, data, regsize))
-		return -EFAULT;
-	data += (regsize / sizeof(unsigned long));
-
-	/* copy VRSAVE */
-	if (get_user(task->thread.vrsave, (u32 __user *)data))
-		return -EFAULT;
-
-	return 0;
-}
-#endif
-
-static inline int ptrace_set_debugreg(struct task_struct *task,
-				      unsigned long addr, unsigned long data)
-{
-	/* We only support one DABR and no IABRS at the moment */
-	if (addr > 0)
-		return -EINVAL;
-
-	/* The bottom 3 bits are flags */
-	if ((data & ~0x7UL) >= TASK_SIZE)
-		return -EIO;
-
-	/* Ensure translation is on */
-	if (data && !(data & DABR_TRANSLATION))
-		return -EIO;
-
-	task->thread.dabr = data;
-	return 0;
-}
-
-#endif /* _PPC64_PTRACE_COMMON_H */
--- linux-2.6/arch/powerpc/kernel/sys_ppc32.c.utrace-ptrace-compat
+++ linux-2.6/arch/powerpc/kernel/sys_ppc32.c
@@ -429,11 +429,6 @@ long compat_sys_execve(unsigned long a0,
 
 	error = compat_do_execve(filename, compat_ptr(a1), compat_ptr(a2), regs);
 
-	if (error == 0) {
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
-	}
 	putname(filename);
 
 out:
--- linux-2.6/arch/powerpc/kernel/signal_64.c.utrace-ptrace-compat
+++ linux-2.6/arch/powerpc/kernel/signal_64.c
@@ -24,6 +24,7 @@
 #include <linux/stddef.h>
 #include <linux/elf.h>
 #include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/module.h>
 
 #include <asm/sigcontext.h>
@@ -459,6 +460,8 @@ static int handle_signal(unsigned long s
 			sigaddset(&current->blocked,sig);
 		recalc_sigpending();
 		spin_unlock_irq(&current->sighand->siglock);
+
+		tracehook_report_handle_signal(sig, ka, oldset, regs);
 	}
 
 	return ret;
--- linux-2.6/arch/powerpc/kernel/process.c.utrace-ptrace-compat
+++ linux-2.6/arch/powerpc/kernel/process.c
@@ -814,11 +814,6 @@ int sys_execve(unsigned long a0, unsigne
 	flush_spe_to_thread(current);
 	error = do_execve(filename, (char __user * __user *) a1,
 			  (char __user * __user *) a2, regs);
-	if (error == 0) {
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
-	}
 	putname(filename);
 out:
 	return error;
--- linux-2.6/arch/powerpc/kernel/signal_32.c.utrace-ptrace-compat
+++ linux-2.6/arch/powerpc/kernel/signal_32.c
@@ -25,6 +25,7 @@
 #include <linux/signal.h>
 #include <linux/errno.h>
 #include <linux/elf.h>
+#include <linux/tracehook.h>
 #ifdef CONFIG_PPC64
 #include <linux/syscalls.h>
 #include <linux/compat.h>
@@ -631,6 +632,58 @@ int copy_siginfo_to_user32(struct compat
 
 #define copy_siginfo_to_user	copy_siginfo_to_user32
 
+/* mostly stolen from arch/s390/kernel/compat_signal.c */
+int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+{
+	int err;
+	u32 tmp;
+
+	if (!access_ok (VERIFY_READ, from, sizeof(compat_siginfo_t)))
+		return -EFAULT;
+
+	err = __get_user(to->si_signo, &from->si_signo);
+	err |= __get_user(to->si_errno, &from->si_errno);
+	err |= __get_user(to->si_code, &from->si_code);
+
+	if (to->si_code < 0)
+		err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
+	else {
+		switch (to->si_code >> 16) {
+		case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
+		case __SI_MESGQ >> 16:
+			err |= __get_user(to->si_int, &from->si_int);
+			/* fallthrough */
+		case __SI_KILL >> 16:
+			err |= __get_user(to->si_pid, &from->si_pid);
+			err |= __get_user(to->si_uid, &from->si_uid);
+			break;
+		case __SI_CHLD >> 16:
+			err |= __get_user(to->si_pid, &from->si_pid);
+			err |= __get_user(to->si_uid, &from->si_uid);
+			err |= __get_user(to->si_utime, &from->si_utime);
+			err |= __get_user(to->si_stime, &from->si_stime);
+			err |= __get_user(to->si_status, &from->si_status);
+			break;
+		case __SI_FAULT >> 16:
+			err |= __get_user(tmp, &from->si_addr);
+			to->si_addr = (void __user *)(u64) tmp;
+			break;
+		case __SI_POLL >> 16:
+			err |= __get_user(to->si_band, &from->si_band);
+			err |= __get_user(to->si_fd, &from->si_fd);
+			break;
+		case __SI_TIMER >> 16:
+			err |= __get_user(to->si_tid, &from->si_tid);
+			err |= __get_user(to->si_overrun, &from->si_overrun);
+			err |= __get_user(to->si_int, &from->si_int);
+			break;
+		default:
+			break;
+		}
+	}
+	return err;
+}
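
The si_code dispatch above works because the kernel packs a class tag
into the high 16 bits of si_code, so "si_code >> 16" selects which
members of the siginfo union are live.  A sketch of just that
selection (class values follow the historical (n << 16) spacing and
should be treated as illustrative):

	#include <stdio.h>

	#define SI_CLASS_KILL	0	/* __SI_KILL >> 16 */
	#define SI_CLASS_TIMER	1	/* __SI_TIMER >> 16 */
	#define SI_CLASS_CHLD	4	/* __SI_CHLD >> 16 */

	static const char *si_class(int si_code)
	{
		switch (si_code >> 16) {
		case SI_CLASS_KILL:	return "kill";
		case SI_CLASS_TIMER:	return "timer";
		case SI_CLASS_CHLD:	return "child status";
		default:		return "other";
		}
	}

	int main(void)
	{
		printf("%s\n", si_class((4 << 16) | 1)); /* "child status" */
		return 0;
	}
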
+
 /*
  * Note: it is necessary to treat pid and sig as unsigned ints, with the
 * corresponding cast to a signed int to ensure that the proper conversion
@@ -1216,6 +1269,8 @@ no_signal:
 		   its frame, and we can clear the TIF_RESTORE_SIGMASK flag */
 		if (test_thread_flag(TIF_RESTORE_SIGMASK))
 			clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+		tracehook_report_handle_signal(signr, &ka, oldset, regs);
 	}
 
 	return ret;
--- linux-2.6/arch/powerpc/kernel/ptrace.c.utrace-ptrace-compat
+++ linux-2.6/arch/powerpc/kernel/ptrace.c
@@ -22,128 +22,239 @@
 #include <linux/smp_lock.h>
 #include <linux/errno.h>
 #include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/user.h>
 #include <linux/security.h>
 #include <linux/signal.h>
 #include <linux/seccomp.h>
 #include <linux/audit.h>
-#ifdef CONFIG_PPC32
+#include <linux/elf.h>
 #include <linux/module.h>
-#endif
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
+#include <asm/tracehook.h>
 
-#ifdef CONFIG_PPC64
-#include "ptrace-common.h"
-#endif
-
-#ifdef CONFIG_PPC32
 /*
  * Set of msr bits that gdb can change on behalf of a process.
  */
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_PPC64
+#define MSR_DEBUGCHANGE	(MSR_FE0 | MSR_SE | MSR_BE | MSR_FE1)
+#elif defined(CONFIG_40x) || defined(CONFIG_BOOKE)
 #define MSR_DEBUGCHANGE	0
-#else
+#else  /* CONFIG_PPC32 */
 #define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
-#endif
-#endif /* CONFIG_PPC32 */
+#endif /* CONFIG_PPC64 */
 
 /*
- * does not yet catch signals sent when the child dies.
- * in exit.c or in signal.c.
+ * Last register that can be changed via ptrace.
  */
+#ifdef CONFIG_PPC64
+#define PT_LAST	PT_SOFTE
+#else
+#define PT_LAST	PT_MQ
+#endif
+
+static int
+genregs_get(struct task_struct *target,
+	    const struct utrace_regset *regset,
+	    unsigned int pos, unsigned int count,
+	    void *kbuf, void __user *ubuf)
+{
+	if (target->thread.regs == NULL)
+		return -EIO;
 
 #ifdef CONFIG_PPC32
-/*
- * Get contents of register REGNO in task TASK.
- */
-static inline unsigned long get_reg(struct task_struct *task, int regno)
+	CHECK_FULL_REGS(target->thread.regs);
+#endif
+
+	return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				     target->thread.regs, 0, -1);
+}
+
+static int
+genregs_set(struct task_struct *target,
+	    const struct utrace_regset *regset,
+	    unsigned int pos, unsigned int count,
+	    const void *kbuf, const void __user *ubuf)
+{
+	unsigned long msr_save;
+	int ret = 0;
+
+	if (target->thread.regs == NULL)
+		return -EIO;
+
+#ifdef CONFIG_PPC32
+	CHECK_FULL_REGS(target->thread.regs);
+#endif
+
+	/*
+	 * Just ignore attempts to set the registers beyond PT_LAST.
+	 * They are read-only.
+	 */
+
+	msr_save = target->thread.regs->msr &~ MSR_DEBUGCHANGE;
+
+	ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				   target->thread.regs, 0,
+				   (PT_LAST + 1) * sizeof(long));
+
+	target->thread.regs->msr &= MSR_DEBUGCHANGE;
+	target->thread.regs->msr |= msr_save;
+
+	return ret;
+}
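
genregs_set() lets the debugger write the whole register frame but
splices the protected msr bits back afterwards, so only the bits in
MSR_DEBUGCHANGE can actually change.  The splice in isolation (the
bit values below are illustrative, not taken from the headers):

	#include <stdio.h>

	#define MSR_SE		0x400UL
	#define MSR_BE		0x200UL
	#define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)

	int main(void)
	{
		unsigned long msr = 0x9032UL;		/* current value */
		unsigned long user_msr = 0xffffffffUL;	/* debugger's write */

		unsigned long save = msr & ~MSR_DEBUGCHANGE;
		msr = user_msr;				/* raw copy-in */
		msr = (msr & MSR_DEBUGCHANGE) | save;	/* splice back */

		printf("%#lx\n", msr);	/* 0x9632: only SE/BE came from user */
		return 0;
	}
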
+
+static int
+fpregs_get(struct task_struct *target,
+	   const struct utrace_regset *regset,
+	   unsigned int pos, unsigned int count,
+	   void *kbuf, void __user *ubuf)
 {
-	if (regno < sizeof(struct pt_regs) / sizeof(unsigned long)
-	    && task->thread.regs != NULL)
-		return ((unsigned long *)task->thread.regs)[regno];
-	return (0);
+	BUILD_BUG_ON(offsetof(struct thread_struct, fpscr)
+		     != offsetof(struct thread_struct, fpr[32]));
+
+	flush_fp_to_thread(target);
+
+	return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				     &target->thread.fpr, 0, -1);
 }
 
-/*
- * Write contents of register REGNO in task TASK.
- */
-static inline int put_reg(struct task_struct *task, int regno,
-			  unsigned long data)
+static int
+fpregs_set(struct task_struct *target,
+	    const struct utrace_regset *regset,
+	    unsigned int pos, unsigned int count,
+	    const void *kbuf, const void __user *ubuf)
 {
-	if (regno <= PT_MQ && task->thread.regs != NULL) {
-		if (regno == PT_MSR)
-			data = (data & MSR_DEBUGCHANGE)
-				| (task->thread.regs->msr & ~MSR_DEBUGCHANGE);
-		((unsigned long *)task->thread.regs)[regno] = data;
-		return 0;
-	}
-	return -EIO;
+	return utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				    &target->thread.fpr, 0, -1);
 }
 
 #ifdef CONFIG_ALTIVEC
 /*
- * Get contents of AltiVec register state in task TASK
+ * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
+ * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
+ * corresponding vector registers.  Quadword 32 contains the vscr as the
+ * last word (offset 12) within that quadword.  Quadword 33 contains the
+ * vrsave as the first word (offset 0) within the quadword.
+ *
+ * This definition of the VMX state is compatible with the current PPC32
+ * ptrace interface.  This allows signal handling and ptrace to use the
+ * same structures.  This also simplifies the implementation of a bi-arch
+ * (combined 32- and 64-bit) gdb.
  */
-static inline int get_vrregs(unsigned long __user *data, struct task_struct *task)
-{
-	int i, j;
 
-	if (!access_ok(VERIFY_WRITE, data, 133 * sizeof(unsigned long)))
-		return -EFAULT;
+static int
+vrregs_active(struct task_struct *target, const struct utrace_regset *regset)
+{
+	flush_altivec_to_thread(target);
+	return target->thread.used_vr ? regset->n : 0;
+}
 
-	/* copy AltiVec registers VR[0] .. VR[31] */
-	for (i = 0; i < 32; i++)
-		for (j = 0; j < 4; j++, data++)
-			if (__put_user(task->thread.vr[i].u[j], data))
-				return -EFAULT;
+static int
+vrregs_get(struct task_struct *target,
+	   const struct utrace_regset *regset,
+	   unsigned int pos, unsigned int count,
+	   void *kbuf, void __user *ubuf)
+{
+	BUILD_BUG_ON(offsetof(struct thread_struct, vscr)
+		     != offsetof(struct thread_struct, vr[32]));
+	BUILD_BUG_ON(offsetof(struct thread_struct, vscr) + sizeof(vector128)
+		     != offsetof(struct thread_struct, vrsave));
 
-	/* copy VSCR */
-	for (i = 0; i < 4; i++, data++)
-		if (__put_user(task->thread.vscr.u[i], data))
-			return -EFAULT;
-
-        /* copy VRSAVE */
-	if (__put_user(task->thread.vrsave, data))
-		return -EFAULT;
+	flush_altivec_to_thread(target);
 
-	return 0;
+	return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				     &target->thread.vr, 0, -1);
 }
 
-/*
- * Write contents of AltiVec register state into task TASK.
- */
-static inline int set_vrregs(struct task_struct *task, unsigned long __user *data)
+static int
+vrregs_set(struct task_struct *target,
+	    const struct utrace_regset *regset,
+	    unsigned int pos, unsigned int count,
+	    const void *kbuf, const void __user *ubuf)
 {
-	int i, j;
+	flush_altivec_to_thread(target);
 
-	if (!access_ok(VERIFY_READ, data, 133 * sizeof(unsigned long)))
-		return -EFAULT;
+	return utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				    &target->thread.vr, 0, -1);
+}
+#endif	/* CONFIG_ALTIVEC */
 
-	/* copy AltiVec registers VR[0] .. VR[31] */
-	for (i = 0; i < 32; i++)
-		for (j = 0; j < 4; j++, data++)
-			if (__get_user(task->thread.vr[i].u[j], data))
-				return -EFAULT;
+#ifdef CONFIG_PPC64
+/* We only support one DABR and no IABRS at the moment */
+
+static int
+set_thread_dabr(struct task_struct *tsk, unsigned long dabr)
+{
+	/* The bottom 3 bits are flags */
+	if ((dabr & ~0x7UL) >= TASK_SIZE)
+		return -EIO;
 
-	/* copy VSCR */
-	for (i = 0; i < 4; i++, data++)
-		if (__get_user(task->thread.vscr.u[i], data))
-			return -EFAULT;
-
-	/* copy VRSAVE */
-	if (__get_user(task->thread.vrsave, data))
-		return -EFAULT;
+	/* Ensure translation is on */
+	if (dabr && !(dabr & DABR_TRANSLATION))
+		return -EIO;
 
+	tsk->thread.dabr = dabr;
 	return 0;
 }
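
set_thread_dabr() accepts a value whose low three bits are flags and
whose remainder must be a user-space address with the translation
flag set.  A userspace restatement of those checks (TASK_SIZE and the
flag bit are illustrative stand-ins for the kernel constants):

	#include <stdio.h>

	#define TASK_SIZE	  0x0000400000000000UL	/* illustrative */
	#define DABR_TRANSLATION  0x4UL			/* illustrative */

	static int dabr_valid(unsigned long dabr)
	{
		if ((dabr & ~0x7UL) >= TASK_SIZE)
			return 0;	/* address part not in user range */
		if (dabr && !(dabr & DABR_TRANSLATION))
			return 0;	/* translation must be on */
		return 1;
	}

	int main(void)
	{
		printf("%d\n", dabr_valid(0x10000004UL)); /* 1: addr + T bit */
		printf("%d\n", dabr_valid(0x10000000UL)); /* 0: no T bit */
		return 0;
	}
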
-#endif
 
-#ifdef CONFIG_SPE
+static int
+debugreg_get(struct task_struct *target,
+	     const struct utrace_regset *regset,
+	     unsigned int pos, unsigned int count,
+	     void *kbuf, void __user *ubuf)
+{
+	return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				     &target->thread.dabr, 0, -1);
+}
+
+static int
+debugreg_set(struct task_struct *target,
+	     const struct utrace_regset *regset,
+	     unsigned int pos, unsigned int count,
+	     const void *kbuf, const void __user *ubuf)
+{
+	unsigned long dabr;
+	int ret;
+
+	ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, &dabr, 0, -1);
+	if (ret == 0)
+		ret = set_thread_dabr(target, dabr);
+
+	return ret;
+}
+
+static int
+ppc32_dabr_get(struct task_struct *target,
+	       const struct utrace_regset *regset,
+	       unsigned int pos, unsigned int count,
+	       void *kbuf, void __user *ubuf)
+{
+	u32 dabr = target->thread.dabr;
+	return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, &dabr, 0, -1);
+}
+
+static int
+ppc32_dabr_set(struct task_struct *target,
+	       const struct utrace_regset *regset,
+	       unsigned int pos, unsigned int count,
+	       const void *kbuf, const void __user *ubuf)
+{
+	u32 dabr;
+	int ret;
+
+	ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, &dabr, 0, -1);
+	if (ret == 0)
+		ret = set_thread_dabr(target, dabr);
+
+	return ret;
+}
+#endif	/* CONFIG_PPC64 */
 
+#ifdef CONFIG_SPE
 /*
  * For get_evrregs/set_evrregs functions 'data' has the following layout:
  *
@@ -154,375 +265,447 @@ static inline int set_vrregs(struct task
  * }
  */
 
-/*
- * Get contents of SPE register state in task TASK.
- */
-static inline int get_evrregs(unsigned long *data, struct task_struct *task)
+static int
+evrregs_active(struct task_struct *target, const struct utrace_regset *regset)
 {
-	int i;
+	if (target->thread.regs->msr & MSR_SPE)
+		giveup_spe(target);
+	return target->thread.used_spe ? regset->n : 0;
+}
 
-	if (!access_ok(VERIFY_WRITE, data, 35 * sizeof(unsigned long)))
-		return -EFAULT;
+static int
+evrregs_get(struct task_struct *target,
+	    const struct utrace_regset *regset,
+	    unsigned int pos, unsigned int count,
+	    void *kbuf, void __user *ubuf)
+{
+	BUILD_BUG_ON(offsetof(struct thread_struct, acc)
+		     != offsetof(struct thread_struct, evr[32]));
+	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64)
+		     != offsetof(struct thread_struct, spefscr));
 
-	/* copy SPEFSCR */
-	if (__put_user(task->thread.spefscr, &data[34]))
-		return -EFAULT;
+	if (target->thread.regs->msr & MSR_SPE)
+		giveup_spe(target);
 
-	/* copy SPE registers EVR[0] .. EVR[31] */
-	for (i = 0; i < 32; i++, data++)
-		if (__put_user(task->thread.evr[i], data))
-			return -EFAULT;
+	return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				     &target->thread.evr, 0, -1);
+}
 
-	/* copy ACC */
-	if (__put_user64(task->thread.acc, (unsigned long long *)data))
-		return -EFAULT;
+static int
+evrregs_set(struct task_struct *target,
+	    const struct utrace_regset *regset,
+	    unsigned int pos, unsigned int count,
+	    const void *kbuf, const void __user *ubuf)
+{
+	/* this is to clear the MSR_SPE bit to force a reload
+	 * of register state from memory */
+	if (target->thread.regs->msr & MSR_SPE)
+		giveup_spe(target);
 
-	return 0;
+	return utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				    &target->thread.evr, 0, -1);
 }
+#endif /* CONFIG_SPE */
+
 
 /*
- * Write contents of SPE register state into task TASK.
+ * These are our native regset flavors.
  */
-static inline int set_evrregs(struct task_struct *task, unsigned long *data)
-{
-	int i;
+static const struct utrace_regset native_regsets[] = {
+	{
+		.n = ELF_NGREG, .size = sizeof(long), .align = sizeof(long),
+		.get = genregs_get, .set = genregs_set
+	},
+	{
+		.n = ELF_NFPREG,
+		.size = sizeof(double), .align = sizeof(double),
+		.get = fpregs_get, .set = fpregs_set
+	},
+#ifdef CONFIG_ALTIVEC
+	{
+		.n = 33*4+1, .size = sizeof(u32), .align = sizeof(u32),
+		.active = vrregs_active, .get = vrregs_get, .set = vrregs_set
+	},
+#endif
+#ifdef CONFIG_SPE
+	{
+		.n = 35, .size = sizeof(long), .align = sizeof(long),
+		.active = evrregs_active,
+		.get = evrregs_get, .set = evrregs_set
+	},
+#endif
+#ifdef CONFIG_PPC64
+	{
+		.n = 1, .size = sizeof(long), .align = sizeof(long),
+		.get = debugreg_get, .set = debugreg_set
+	},
+#endif
+};
+
+const struct utrace_regset_view utrace_ppc_native_view = {
+	.name = UTS_MACHINE, .e_machine = ELF_ARCH,
+	.regsets = native_regsets,
+	.n = sizeof native_regsets / sizeof native_regsets[0],
+};
+EXPORT_SYMBOL_GPL(utrace_ppc_native_view);
+
 
-	if (!access_ok(VERIFY_READ, data, 35 * sizeof(unsigned long)))
-		return -EFAULT;
+#ifdef CONFIG_PPC64
+#include <linux/compat.h>
 
-	/* copy SPEFSCR */
-	if (__get_user(task->thread.spefscr, &data[34]))
-		return -EFAULT;
-
-	/* copy SPE registers EVR[0] .. EVR[31] */
-	for (i = 0; i < 32; i++, data++)
-		if (__get_user(task->thread.evr[i], data))
-			return -EFAULT;
-	/* copy ACC */
-	if (__get_user64(task->thread.acc, (unsigned long long*)data))
-		return -EFAULT;
+static int
+ppc32_gpr_get(struct task_struct *target,
+	      const struct utrace_regset *regset,
+	      unsigned int pos, unsigned int count,
+	      void *kbuf, void __user *ubuf)
+{
+	unsigned long *regs = (unsigned long *) target->thread.regs;
+
+	if (regs == NULL)
+		return -EIO;
+
+	regs += pos / sizeof(u32);
+
+	if (kbuf) {
+		u32 *out = kbuf;
+		for (; count > 0; count -= sizeof(u32))
+			*out++ = *regs++;
+	}
+	else {
+		u32 __user *out = ubuf;
+		for (; count > 0; count -= sizeof(u32))
+			if (put_user((u32) *regs++, out++))
+				return -EFAULT;
+	}
 
 	return 0;
 }
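
ppc32_gpr_get() gives the 32-bit debugger a narrowed view: each
64-bit GPR is truncated to its low 32 bits by the u32 assignment.
The narrowing step by itself:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t regs[2] = { 0x1122334455667788ULL, 42 };
		uint32_t out[2];
		int i;

		for (i = 0; i < 2; i++)
			out[i] = (uint32_t) regs[i];	/* low half only */

		printf("%#x %u\n", out[0], out[1]);	/* 0x55667788 42 */
		return 0;
	}
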
-#endif /* CONFIG_SPE */
 
-static inline void
-set_single_step(struct task_struct *task)
+static int
+ppc32_gpr_set(struct task_struct *target,
+	      const struct utrace_regset *regset,
+	      unsigned int pos, unsigned int count,
+	      const void *kbuf, const void __user *ubuf)
 {
-	struct pt_regs *regs = task->thread.regs;
+	unsigned long *regs = (unsigned long *) target->thread.regs;
 
-	if (regs != NULL) {
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
-		task->thread.dbcr0 = DBCR0_IDM | DBCR0_IC;
-		regs->msr |= MSR_DE;
-#else
-		regs->msr |= MSR_SE;
-#endif
-	}
-}
+	if (regs == NULL)
+		return -EIO;
 
-static inline void
-clear_single_step(struct task_struct *task)
-{
-	struct pt_regs *regs = task->thread.regs;
+	/*
+	 * Just ignore attempts to set the registers beyond PT_LAST.
+	 * They are read-only.
+	 */
+	if (count > (PT_LAST + 1) * sizeof(u32) - pos)
+		count = (PT_LAST + 1) * sizeof(u32) - pos;
 
-	if (regs != NULL) {
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
-		task->thread.dbcr0 = 0;
-		regs->msr &= ~MSR_DE;
-#else
-		regs->msr &= ~MSR_SE;
-#endif
+	pos /= sizeof(u32);
+
+	if (kbuf) {
+		const u32 *in = kbuf;
+		for (; count > 0; count -= sizeof(u32), ++pos, ++in) {
+			if (pos == PT_MSR)
+				regs[pos] = ((regs[pos] &~ MSR_DEBUGCHANGE)
+					     | (*in & MSR_DEBUGCHANGE));
+			else
+				regs[pos] = *in;
+		}
 	}
+	else {
+		const u32 __user *in = ubuf;
+		for (; count > 0; count -= sizeof(u32), ++pos) {
+			u32 val;
+			if (get_user(val, in++))
+				return -EFAULT;
+			else if (pos == PT_MSR)
+				regs[pos] = ((regs[pos] &~ MSR_DEBUGCHANGE)
+					     | (val & MSR_DEBUGCHANGE));
+			else
+				regs[pos] = val;
+		}
+	}
+
+	return 0;
 }
-#endif /* CONFIG_PPC32 */
 
 /*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure single step bits etc are not set.
+ * These are the regset flavors matching the CONFIG_PPC32 native set.
  */
-void ptrace_disable(struct task_struct *child)
-{
-	/* make sure the single step bit is not set. */
-	clear_single_step(child);
-}
-
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
-{
-	int ret = -EPERM;
-
-	switch (request) {
-	/* when I and D space are separate, these will need to be fixed. */
-	case PTRACE_PEEKTEXT: /* read word at location addr. */
-	case PTRACE_PEEKDATA: {
-		unsigned long tmp;
-		int copied;
-
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		ret = -EIO;
-		if (copied != sizeof(tmp))
-			break;
-		ret = put_user(tmp,(unsigned long __user *) data);
-		break;
-	}
+static const struct utrace_regset ppc32_regsets[] = {
+	{
+		.n = ELF_NGREG,
+		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
+		.get = ppc32_gpr_get, .set = ppc32_gpr_set
+	},
+	{
+		.n = ELF_NFPREG,
+		.size = sizeof(double), .align = sizeof(double),
+		.get = fpregs_get, .set = fpregs_set
+	},
+#ifdef CONFIG_ALTIVEC
+	{
+		.n = 33*4+1, .size = sizeof(u32), .align = sizeof(u32),
+		.active = vrregs_active, .get = vrregs_get, .set = vrregs_set
+	},
+#endif
+	{
+		.n = 1,
+		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
+		.get = ppc32_dabr_get, .set = ppc32_dabr_set
+	},
+};
+
+const struct utrace_regset_view utrace_ppc32_view = {
+	.name = "ppc", .e_machine = EM_PPC,
+	.regsets = ppc32_regsets,
+	.n = sizeof ppc32_regsets / sizeof ppc32_regsets[0],
+};
+EXPORT_SYMBOL_GPL(utrace_ppc32_view);
+#endif
 
-	/* read the word at location addr in the USER area. */
-	case PTRACE_PEEKUSR: {
-		unsigned long index, tmp;
 
-		ret = -EIO;
-		/* convert to index and check */
-#ifdef CONFIG_PPC32
-		index = (unsigned long) addr >> 2;
-		if ((addr & 3) || (index > PT_FPSCR)
-		    || (child->thread.regs == NULL))
+#ifdef CONFIG_PTRACE
+static const struct ptrace_layout_segment ppc_uarea[] = {
+	{0, PT_FPR0 * sizeof(long), 0, 0},
+	{PT_FPR0 * sizeof(long), (PT_FPSCR + 1) * sizeof(long), 1, 0},
+	{0, 0, -1, 0}
+};
+
+fastcall int arch_ptrace(long *request, struct task_struct *child,
+			 struct utrace_attached_engine *engine,
+			 unsigned long addr, unsigned long data, long *val)
+{
+	switch (*request) {
+	case PTRACE_PEEKUSR:
+		return ptrace_peekusr(child, engine, ppc_uarea, addr, data);
+	case PTRACE_POKEUSR:
+		return ptrace_pokeusr(child, engine, ppc_uarea, addr, data);
+	case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
+	case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
+		return ptrace_regset_access(child, engine,
+					    utrace_native_view(current), 0,
+					    0, 32 * sizeof(long),
+					    (void __user *)addr,
+					    *request == PPC_PTRACE_SETREGS);
+	case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
+	case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
+		return ptrace_regset_access(child, engine,
+					    utrace_native_view(current), 1,
+					    0, 32 * sizeof(double),
+					    (void __user *)addr,
+					    *request == PPC_PTRACE_SETFPREGS);
+#ifdef CONFIG_PPC64
+	case PTRACE_GET_DEBUGREG:
+	case PTRACE_SET_DEBUGREG:
+		return ptrace_onereg_access(child, engine,
+					    utrace_native_view(current), 3,
+					    addr, (unsigned long __user *)data,
+					    *request == PTRACE_SET_DEBUGREG);
+#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_ALTIVEC
+	case PTRACE_GETVRREGS:
+		return ptrace_whole_regset(child, engine, data, 2, 0);
+	case PTRACE_SETVRREGS:
+		return ptrace_whole_regset(child, engine, data, 2, 1);
+#endif
+#ifdef CONFIG_SPE
+#ifdef CONFIG_ALTIVEC
+#define REGSET_EVR 3
 #else
-		index = (unsigned long) addr >> 3;
-		if ((addr & 7) || (index > PT_FPSCR))
+#define REGSET_EVR 2
 #endif
-			break;
-
-#ifdef CONFIG_PPC32
-		CHECK_FULL_REGS(child->thread.regs);
+	case PTRACE_GETEVRREGS:
+		return ptrace_whole_regset(child, engine, data, REGSET_EVR, 0);
+	case PTRACE_SETEVRREGS:
+		return ptrace_whole_regset(child, engine, data, REGSET_EVR, 1);
 #endif
-		if (index < PT_FPR0) {
-			tmp = get_reg(child, (int) index);
-		} else {
-			flush_fp_to_thread(child);
-			tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
-		}
-		ret = put_user(tmp,(unsigned long __user *) data);
-		break;
 	}
+	return -ENOSYS;
+}
 
-	/* If I and D space are separate, this will have to be fixed. */
-	case PTRACE_POKETEXT: /* write the word at location addr. */
-	case PTRACE_POKEDATA:
-		ret = 0;
-		if (access_process_vm(child, addr, &data, sizeof(data), 1)
-				== sizeof(data))
-			break;
-		ret = -EIO;
-		break;
-
-	/* write the word at location addr in the USER area */
-	case PTRACE_POKEUSR: {
-		unsigned long index;
+#ifdef CONFIG_COMPAT
+#include <linux/mm.h>
+#include <asm/uaccess.h>
 
-		ret = -EIO;
-		/* convert to index and check */
-#ifdef CONFIG_PPC32
-		index = (unsigned long) addr >> 2;
-		if ((addr & 3) || (index > PT_FPSCR)
-		    || (child->thread.regs == NULL))
-#else
-		index = (unsigned long) addr >> 3;
-		if ((addr & 7) || (index > PT_FPSCR))
+static const struct ptrace_layout_segment ppc32_uarea[] = {
+	{0, PT_FPR0 * sizeof(u32), 0, 0},
+	{PT_FPR0 * sizeof(u32), (PT_FPSCR32 + 1) * sizeof(u32), 1, 0},
+	{0, 0, -1, 0}
+};
+
+fastcall int arch_compat_ptrace(compat_long_t *request,
+				struct task_struct *child,
+				struct utrace_attached_engine *engine,
+				compat_ulong_t addr, compat_ulong_t data,
+				compat_long_t *val)
+{
+	void __user *uaddr = (void __user *) (unsigned long) addr;
+	int ret = -ENOSYS;
+
+	switch (*request) {
+	case PTRACE_PEEKUSR:
+		return ptrace_compat_peekusr(child, engine, ppc32_uarea,
+					     addr, data);
+	case PTRACE_POKEUSR:
+		return ptrace_compat_pokeusr(child, engine, ppc32_uarea,
+					     addr, data);
+
+	case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
+	case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
+		return ptrace_regset_access(child, engine,
+					    utrace_native_view(current), 0,
+					    0, 32 * sizeof(compat_long_t),
+					    uaddr,
+					    *request == PPC_PTRACE_SETREGS);
+	case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
+	case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
+		return ptrace_regset_access(child, engine,
+					    utrace_native_view(current), 1,
+					    0, 32 * sizeof(double),
+					    uaddr,
+					    *request == PPC_PTRACE_SETFPREGS);
+#ifdef CONFIG_ALTIVEC
+	case PTRACE_GETVRREGS:
+		return ptrace_whole_regset(child, engine, data, 2, 0);
+	case PTRACE_SETVRREGS:
+		return ptrace_whole_regset(child, engine, data, 2, 1);
 #endif
-			break;
+	case PTRACE_GET_DEBUGREG:
+	case PTRACE_SET_DEBUGREG:
+		return ptrace_onereg_access(child, engine,
+					    utrace_native_view(current), 3,
+					    addr,
+					    (unsigned long __user *)
+					    (unsigned long) data,
+					    *request == PTRACE_SET_DEBUGREG);
 
-#ifdef CONFIG_PPC32
-		CHECK_FULL_REGS(child->thread.regs);
-#endif
-		if (index == PT_ORIG_R3)
-			break;
-		if (index < PT_FPR0) {
-			ret = put_reg(child, index, data);
-		} else {
-			flush_fp_to_thread(child);
-			((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
-			ret = 0;
-		}
-		break;
-	}
+	/*
+	 * Read 4 bytes of the other process' storage
+	 *  data is a pointer specifying where the user wants the
+	 *	4 bytes copied into
+	 *  addr is a pointer in the user's storage that contains an 8 byte
+	 *	address in the other process of the 4 bytes that are to be read
+	 * (this is run in a 32-bit process looking at a 64-bit process)
+	 * when I and D space are separate, these will need to be fixed.
+	 */
+	case PPC_PTRACE_PEEKTEXT_3264:
+	case PPC_PTRACE_PEEKDATA_3264: {
+		u32 tmp;
+		int copied;
+		u32 __user * addrOthers;
 
-	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
-	case PTRACE_CONT: { /* restart after signal. */
 		ret = -EIO;
-		if (!valid_signal(data))
+
+		/* Get the addr in the other process that we want to read */
+		if (get_user(addrOthers, ((u32 __user * __user *)
+					  (unsigned long) addr)) != 0)
 			break;
-		if (request == PTRACE_SYSCALL)
-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		else
-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		child->exit_code = data;
-		/* make sure the single step bit is not set. */
-		clear_single_step(child);
-		wake_up_process(child);
-		ret = 0;
-		break;
-	}
 
-/*
- * make the child exit.  Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
-	case PTRACE_KILL: {
-		ret = 0;
-		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
+		copied = access_process_vm(child, (u64)addrOthers, &tmp,
+				sizeof(tmp), 0);
+		if (copied != sizeof(tmp))
 			break;
-		child->exit_code = SIGKILL;
-		/* make sure the single step bit is not set. */
-		clear_single_step(child);
-		wake_up_process(child);
+		ret = put_user(tmp, (u32 __user *)(unsigned long)data);
 		break;
 	}
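
From the tracer's side, these *_3264 requests take an extra indirection: addr names an 8-byte tracee address stored in the 32-bit tracer's own memory. A hedged userspace sketch (the constant comes from the powerpc asm/ptrace.h; the fallback value and the wrapper itself are illustrative only):

    #include <errno.h>
    #include <stdint.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    #ifndef PPC_PTRACE_PEEKDATA_3264	/* from the powerpc asm/ptrace.h */
    #define PPC_PTRACE_PEEKDATA_3264 0x94	/* assumed; verify against headers */
    #endif

    /* Read one 32-bit word of a 64-bit child from a 32-bit tracer. */
    static int peek64(pid_t pid, uint64_t where, uint32_t *out)
    {
    	/* addr = &where: an 8-byte tracee address kept in our own
    	 * 32-bit address space; data = where the word should land. */
    	errno = 0;
    	if (ptrace((enum __ptrace_request) PPC_PTRACE_PEEKDATA_3264,
    		   pid, &where, out) == -1 && errno)
    		return -1;
    	return 0;
    }
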
 
-	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
+	/*
+	 * Write 4 bytes into the other process' storage.
+	 *  data is the 4 bytes that the user wants written.
+	 *  addr is a pointer in the user's storage that contains an
+	 *	8-byte address in the other process where the 4 bytes
+	 *	are to be written.
+	 * (This is run in a 32-bit process looking at a 64-bit process.)
+	 * When I and D space are separate, these will need to be fixed.
+	 */
+	case PPC_PTRACE_POKETEXT_3264:
+	case PPC_PTRACE_POKEDATA_3264: {
+		u32 tmp = data;
+		u32 __user * addrOthers;
+
+		/* Get the addr in the other process that we want to write into */
 		ret = -EIO;
-		if (!valid_signal(data))
+		if (get_user(addrOthers, ((u32 __user * __user *)
+					  (unsigned long) addr)) != 0)
 			break;
-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		set_single_step(child);
-		child->exit_code = data;
-		/* give it a chance to run. */
-		wake_up_process(child);
 		ret = 0;
-		break;
-	}
-
-#ifdef CONFIG_PPC64
-	case PTRACE_GET_DEBUGREG: {
-		ret = -EINVAL;
-		/* We only support one DABR and no IABRS at the moment */
-		if (addr > 0)
+		if (access_process_vm(child, (u64)addrOthers, &tmp,
+					sizeof(tmp), 1) == sizeof(tmp))
 			break;
-		ret = put_user(child->thread.dabr,
-			       (unsigned long __user *)data);
+		ret = -EIO;
 		break;
 	}
 
-	case PTRACE_SET_DEBUGREG:
-		ret = ptrace_set_debugreg(child, addr, data);
-		break;
-#endif
+	/*
+	 * This is like PTRACE_PEEKUSR on a 64-bit process,
+	 * but here we access only 4 bytes at a time.
+	 */
+	case PPC_PTRACE_PEEKUSR_3264: {
+		union {
+			u64 whole;
+			u32 half[2];
+		} reg;
+		int setno;
+		const struct utrace_regset *regset;
 
-	case PTRACE_DETACH:
-		ret = ptrace_detach(child, data);
-		break;
+		ret = -EIO;
+		if ((addr & 3) || addr > PT_FPSCR*8)
+			break;
 
-	case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
-		int i;
-		unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
-		unsigned long __user *tmp = (unsigned long __user *)addr;
-
-		for (i = 0; i < 32; i++) {
-			ret = put_user(*reg, tmp);
-			if (ret)
-				break;
-			reg++;
-			tmp++;
+		setno = 0;
+		if (addr >= PT_FPR0*8) {
+			setno = 1;
+			addr -= PT_FPR0*8;
 		}
+		regset = utrace_regset(child, NULL,
+				       &utrace_ppc_native_view, setno);
+		ret = (*regset->get)(child, regset, addr &~ 7,
+				     sizeof(reg.whole), &reg.whole, NULL);
+		if (ret == 0)
+			ret = put_user(reg.half[(addr >> 2) & 1],
+				       (u32 __user *)(unsigned long)data);
 		break;
 	}
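
The union trick above fetches the aligned 64-bit register whole and returns whichever 32-bit half bit 2 of the offset selects; half[0] is the most-significant word on the big-endian ppc64 this code serves. A small standalone illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	union { uint64_t whole; uint32_t half[2]; } reg;
    	uint32_t addr = 0x1c;		/* a 4-byte-aligned u-area offset */

    	reg.whole = 0x1122334455667788ULL;

    	/* addr & ~7 names the containing 64-bit register; bit 2 of
    	 * addr picks which 32-bit half of it the caller asked for. */
    	printf("register %u, half %u -> %#x\n",
    	       (unsigned) (addr >> 3), (unsigned) ((addr >> 2) & 1),
    	       (unsigned) reg.half[(addr >> 2) & 1]);
    	return 0;
    }
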
 
-	case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
-		int i;
-		unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
-		unsigned long __user *tmp = (unsigned long __user *)addr;
-
-		for (i = 0; i < 32; i++) {
-			ret = get_user(*reg, tmp);
-			if (ret)
-				break;
-			reg++;
-			tmp++;
-		}
-		break;
-	}
+	/*
+	 * This is like PTRACE_POKEUSR on a 64-bit process,
+	 * but here we access only 4 bytes at a time.
+	 */
+	case PPC_PTRACE_POKEUSR_3264: {
+		union {
+			u64 whole;
+			u32 half[2];
+		} reg;
+		int setno;
+		const struct utrace_regset *regset;
 
-	case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
-		int i;
-		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
-		unsigned long __user *tmp = (unsigned long __user *)addr;
-
-		flush_fp_to_thread(child);
-
-		for (i = 0; i < 32; i++) {
-			ret = put_user(*reg, tmp);
-			if (ret)
-				break;
-			reg++;
-			tmp++;
-		}
-		break;
-	}
+		ret = -EIO;
+		if ((addr & 3) || addr > PT_FPSCR*8)
+			break;
 
-	case PPC_PTRACE_SETFPREGS: { /* Get FPRs 0 - 31. */
-		int i;
-		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
-		unsigned long __user *tmp = (unsigned long __user *)addr;
-
-		flush_fp_to_thread(child);
-
-		for (i = 0; i < 32; i++) {
-			ret = get_user(*reg, tmp);
-			if (ret)
-				break;
-			reg++;
-			tmp++;
+		setno = 0;
+		if (addr >= PT_FPR0*8) {
+			setno = 1;
+			addr -= PT_FPR0*8;
 		}
+		regset = utrace_regset(child, NULL,
+				       &utrace_ppc_native_view, setno);
+		ret = (*regset->get)(child, regset, addr &~ 7,
+				     sizeof(reg.whole), &reg.whole, NULL);
+		BUG_ON(ret);
+		reg.half[(addr >> 2) & 1] = data;
+		ret = (*regset->set)(child, regset, addr &~ 7,
+				     sizeof(reg.whole), &reg.whole, NULL);
 		break;
 	}
-
-#ifdef CONFIG_ALTIVEC
-	case PTRACE_GETVRREGS:
-		/* Get the child altivec register state. */
-		flush_altivec_to_thread(child);
-		ret = get_vrregs((unsigned long __user *)data, child);
-		break;
-
-	case PTRACE_SETVRREGS:
-		/* Set the child altivec register state. */
-		flush_altivec_to_thread(child);
-		ret = set_vrregs(child, (unsigned long __user *)data);
-		break;
-#endif
-#ifdef CONFIG_SPE
-	case PTRACE_GETEVRREGS:
-		/* Get the child spe register state. */
-		if (child->thread.regs->msr & MSR_SPE)
-			giveup_spe(child);
-		ret = get_evrregs((unsigned long __user *)data, child);
-		break;
-
-	case PTRACE_SETEVRREGS:
-		/* Set the child spe register state. */
-		/* this is to clear the MSR_SPE bit to force a reload
-		 * of register state from memory */
-		if (child->thread.regs->msr & MSR_SPE)
-			giveup_spe(child);
-		ret = set_evrregs(child, (unsigned long __user *)data);
-		break;
-#endif
-
-	default:
-		ret = ptrace_request(child, request, addr, data);
-		break;
 	}
-
 	return ret;
 }
+#endif	/* CONFIG_COMPAT */
+#endif	/* CONFIG_PTRACE */
 
-static void do_syscall_trace(void)
-{
-	/* the 0x80 provides a way for the tracing parent to distinguish
-	   between a syscall stop and SIGTRAP delivery */
-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
-				 ? 0x80 : 0));
-
-	/*
-	 * this isn't the same as continuing with a signal, but it will do
-	 * for normal use.  strace only continues with a signal if the
-	 * stopping signal is not SIGTRAP.  -brl
-	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
-	}
-}
 
 void do_syscall_trace_enter(struct pt_regs *regs)
 {
@@ -530,9 +713,8 @@ void do_syscall_trace_enter(struct pt_re
 	secure_computing(regs->gpr[0]);
 #endif
 
-	if (test_thread_flag(TIF_SYSCALL_TRACE)
-	    && (current->ptrace & PT_PTRACED))
-		do_syscall_trace();
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(regs, 0);
 
 	if (unlikely(current->audit_context))
 		audit_syscall_entry(
@@ -556,10 +738,13 @@ void do_syscall_trace_leave(struct pt_re
 		audit_syscall_exit((regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
 				   regs->result);
 
-	if ((test_thread_flag(TIF_SYSCALL_TRACE)
-	     || test_thread_flag(TIF_SINGLESTEP))
-	    && (current->ptrace & PT_PTRACED))
-		do_syscall_trace();
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(regs, 1);
+
+	if (test_thread_flag(TIF_SINGLESTEP)) {
+		force_sig(SIGTRAP, current); /* XXX */
+		tracehook_report_syscall_step(regs);
+	}
 }
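
tracehook_report_syscall() delivers the same stop a tracer has always seen; with PTRACE_O_TRACESYSGOOD the stop signal carries the 0x80 bit that the deleted do_syscall_trace() used to add by hand. A sketch of the tracer-side loop this feeds, using only the standard ptrace API (assumptions noted in comments):

    #include <signal.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    /* Assumes PTRACE_O_TRACESYSGOOD was set at attach time; signal
     * re-injection on non-syscall stops is elided for brevity. */
    static int wait_syscall_stop(pid_t pid)
    {
    	int status;

    	for (;;) {
    		if (ptrace(PTRACE_SYSCALL, pid, 0, 0) == -1)
    			return -1;
    		if (waitpid(pid, &status, 0) == -1)
    			return -1;
    		if (WIFEXITED(status) || WIFSIGNALED(status))
    			return -1;		/* tracee is gone */
    		if (WIFSTOPPED(status) &&
    		    WSTOPSIG(status) == (SIGTRAP | 0x80))
    			return 0;		/* entry or exit stop */
    	}
    }
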
 
 #ifdef CONFIG_PPC32
--- linux-2.6/arch/powerpc/kernel/Makefile.utrace-ptrace-compat
+++ linux-2.6/arch/powerpc/kernel/Makefile
@@ -10,12 +10,14 @@ CFLAGS_prom_init.o      += -fPIC
 CFLAGS_btext.o		+= -fPIC
 endif
 
+CFLAGS_ptrace.o		+= -DUTS_MACHINE='"$(UTS_MACHINE)"'
+
 obj-y				:= semaphore.o cputable.o ptrace.o syscalls.o \
 				   irq.o align.o signal_32.o pmc.o vdso.o \
 				   init_task.o process.o systbl.o idle.o
 obj-y				+= vdso32/
 obj-$(CONFIG_PPC64)		+= setup_64.o binfmt_elf32.o sys_ppc32.o \
-				   signal_64.o ptrace32.o \
+				   signal_64.o \
 				   paca.o cpu_setup_power4.o \
 				   firmware.o sysfs.o
 obj-$(CONFIG_PPC64)		+= vdso64/
--- linux-2.6/arch/powerpc/kernel/asm-offsets.c.utrace-ptrace-compat
+++ linux-2.6/arch/powerpc/kernel/asm-offsets.c
@@ -58,7 +58,6 @@ int main(void)
 	DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
 #else
 	DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
-	DEFINE(PTRACE, offsetof(struct task_struct, ptrace));
 #endif /* CONFIG_PPC64 */
 
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
@@ -79,7 +78,6 @@ int main(void)
 	DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 	DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
-	DEFINE(PT_PTRACED, PT_PTRACED);
 #endif
 #ifdef CONFIG_SPE
 	DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
--- linux-2.6/arch/sparc64/kernel/signal.c.utrace-ptrace-compat
+++ linux-2.6/arch/sparc64/kernel/signal.c
@@ -23,6 +23,7 @@
 #include <linux/smp_lock.h>
 #include <linux/binfmts.h>
 #include <linux/bitops.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
@@ -491,6 +492,7 @@ static inline void handle_signal(unsigne
 		sigaddset(&current->blocked,signr);
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
+	tracehook_report_handle_signal(signr, ka, oldset, regs);
 }
 
 static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
--- linux-2.6/arch/sparc64/kernel/systbls.S.utrace-ptrace-compat
+++ linux-2.6/arch/sparc64/kernel/systbls.S
@@ -24,7 +24,7 @@ sys_call_table32:
 /*10*/  .word sys_unlink, sunos_execv, sys_chdir, sys32_chown16, sys32_mknod
 /*15*/	.word sys_chmod, sys32_lchown16, sparc_brk, sys32_perfctr, sys32_lseek
 /*20*/	.word sys_getpid, sys_capget, sys_capset, sys32_setuid16, sys32_getuid16
-/*25*/	.word sys32_vmsplice, sys_ptrace, sys_alarm, sys32_sigaltstack, sys32_pause
+/*25*/	.word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys32_pause
 /*30*/	.word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice
 	.word sys_chown, sys_sync, sys32_kill, compat_sys_newstat, sys32_sendfile
 /*40*/	.word compat_sys_newlstat, sys_dup, sys_pipe, compat_sys_times, sys_getuid
@@ -166,7 +166,7 @@ sunos_sys_table:
 	.word sys_chmod, sys32_lchown16, sunos_brk
 	.word sunos_nosys, sys32_lseek, sunos_getpid
 	.word sunos_nosys, sunos_nosys, sunos_nosys
-	.word sunos_getuid, sunos_nosys, sys_ptrace
+	.word sunos_getuid, sunos_nosys, compat_sys_ptrace
 	.word sunos_nosys, sunos_nosys, sunos_nosys
 	.word sunos_nosys, sunos_nosys, sunos_nosys
 	.word sys_access, sunos_nosys, sunos_nosys
--- linux-2.6/arch/sparc64/kernel/signal32.c.utrace-ptrace-compat
+++ linux-2.6/arch/sparc64/kernel/signal32.c
@@ -21,6 +21,7 @@
 #include <linux/binfmts.h>
 #include <linux/compat.h>
 #include <linux/bitops.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
@@ -1236,6 +1237,7 @@ static inline void handle_signal32(unsig
 		sigaddset(&current->blocked,signr);
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
+	tracehook_report_handle_signal(signr, ka, oldset, regs);
 }
 
 static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
--- linux-2.6/arch/sparc64/kernel/sys_sparc32.c.utrace-ptrace-compat
+++ linux-2.6/arch/sparc64/kernel/sys_sparc32.c
@@ -740,9 +740,6 @@ asmlinkage long sparc32_execve(struct pt
 		current_thread_info()->xfsr[0] = 0;
 		current_thread_info()->fpsaved[0] = 0;
 		regs->tstate &= ~TSTATE_PEF;
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
 	}
 out:
 	return error;
--- linux-2.6/arch/sparc64/kernel/process.c.utrace-ptrace-compat
+++ linux-2.6/arch/sparc64/kernel/process.c
@@ -808,9 +808,6 @@ asmlinkage int sparc_execve(struct pt_re
 		current_thread_info()->xfsr[0] = 0;
 		current_thread_info()->fpsaved[0] = 0;
 		regs->tstate &= ~TSTATE_PEF;
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
 	}
 out:
 	return error;
--- linux-2.6/arch/sparc64/kernel/ptrace.c.utrace-ptrace-compat
+++ linux-2.6/arch/sparc64/kernel/ptrace.c
@@ -1,6 +1,6 @@
-/* ptrace.c: Sparc process tracing support.
+/* ptrace.c: Sparc64 process tracing support.
  *
- * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
+ * Copyright (C) 1996, 2006 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  *
  * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
@@ -11,103 +11,597 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/user.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/security.h>
 #include <linux/seccomp.h>
 #include <linux/audit.h>
-#include <linux/signal.h>
+#include <linux/tracehook.h>
+#include <linux/elf.h>
+#include <linux/ptrace.h>
 
 #include <asm/asi.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/psrcompat.h>
-#include <asm/visasm.h>
 #include <asm/spitfire.h>
 #include <asm/page.h>
 #include <asm/cpudata.h>
+#include <asm/psrcompat.h>
+
+#define GENREG_G0	0
+#define GENREG_O0	8
+#define GENREG_L0	16
+#define GENREG_I0	24
+#define GENREG_TSTATE	32
+#define GENREG_TPC	33
+#define GENREG_TNPC	34
+#define GENREG_Y	35
+
+#define SPARC64_NGREGS	36
+
+static int genregs_get(struct task_struct *target,
+		       const struct utrace_regset *regset,
+		       unsigned int pos, unsigned int count,
+		       void *kbuf, void __user *ubuf)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+	int err;
+
+	err = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, regs->u_regs,
+				    GENREG_G0 * 8, GENREG_L0 * 8);
 
-/* Returning from ptrace is a bit tricky because the syscall return
- * low level code assumes any value returned which is negative and
- * is a valid errno will mean setting the condition codes to indicate
- * an error return.  This doesn't work, so we have this hook.
+	if (err == 0 && count > 0 && pos < (GENREG_TSTATE * 8)) {
+		struct thread_info *t = task_thread_info(target);
+		unsigned long rwindow[16], fp, *win;
+		int wsaved;
+
+		if (target == current)
+			flushw_user();
+
+		wsaved = __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_WSAVED];
+		fp = regs->u_regs[UREG_FP] + STACK_BIAS;
+		if (wsaved && t->rwbuf_stkptrs[wsaved - 1] == fp)
+			win = &t->reg_window[wsaved - 1].locals[0];
+		else {
+			if (target == current) {
+				if (copy_from_user(rwindow,
+						   (void __user *) fp,
+						   16 * sizeof(long)))
+					err = -EFAULT;
+			} else if (access_process_vm(target, fp, rwindow,
+						     16 * sizeof(long), 0)
+				   != 16 * sizeof(long))
+				/* partial or failed read */
+				err = -EFAULT;
+			if (err)
+				return err;
+			win = rwindow;
+		}
+
+		err = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					    win, GENREG_L0 * 8,
+					    GENREG_TSTATE * 8);
+	}
+
+	if (err == 0)
+		err = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					    &regs->tstate, GENREG_TSTATE * 8,
+					    GENREG_Y * 8);
+	if (err == 0 && count > 0) {
+		if (kbuf)
+			*(unsigned long *) kbuf = regs->y;
+		else if (put_user(regs->y, (unsigned long __user *) ubuf))
+			return -EFAULT;
+	}
+
+	return err;
+}
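
The fallback path in genregs_get() reads %l0-%l7/%i0-%i7 out of the register window the ABI spills at %fp + STACK_BIAS when the window is not cached in thread_info. A debugger restricted to classic requests could do the equivalent fetch itself; a sketch, assuming a 64-bit tracer:

    #include <errno.h>
    #include <stdint.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    #define STACK_BIAS 2047			/* sparc64 stack bias */

    static int read_window(pid_t pid, uint64_t fp, uint64_t win[16])
    {
    	int i;

    	for (i = 0; i < 16; i++) {
    		errno = 0;
    		win[i] = ptrace(PTRACE_PEEKDATA, pid,
    				(void *) (uintptr_t)
    				(fp + STACK_BIAS + i * 8), 0);
    		if (win[i] == (uint64_t) -1 && errno)
    			return -1;
    	}
    	return 0;
    }
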
+
+/* Consistent with signal handling, we only allow userspace to
+ * modify the %asi, %icc, and %xcc fields of the %tstate register.
  */
-static inline void pt_error_return(struct pt_regs *regs, unsigned long error)
+#define TSTATE_DEBUGCHANGE	(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC)
+
+static int genregs_set(struct task_struct *target,
+		       const struct utrace_regset *regset,
+		       unsigned int pos, unsigned int count,
+		       const void *kbuf, const void __user *ubuf)
 {
-	regs->u_regs[UREG_I0] = error;
-	regs->tstate |= (TSTATE_ICARRY | TSTATE_XCARRY);
-	regs->tpc = regs->tnpc;
-	regs->tnpc += 4;
+	struct pt_regs *regs = task_pt_regs(target);
+	unsigned long tstate_save;
+	int err;
+
+	err = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, regs->u_regs,
+				   GENREG_G0 * 8, GENREG_L0 * 8);
+
+	if (err == 0 && count > 0 && pos < (GENREG_TSTATE * 8)) {
+		unsigned long fp = regs->u_regs[UREG_FP] + STACK_BIAS;
+		unsigned long rwindow[16], *winbuf;
+		unsigned int copy = (GENREG_TSTATE * 8) - pos;
+		unsigned int off;
+
+		if (target == current)
+			flushw_user();
+
+		if (count < copy)
+			copy = count;
+		off = pos - (GENREG_L0 * 8);
+
+		if (kbuf) {
+			winbuf = (unsigned long *) kbuf;
+			kbuf += copy;
+		}
+		else {
+			winbuf = rwindow;
+			if (copy_from_user(winbuf, ubuf, copy))
+				return -EFAULT;
+			ubuf += copy;
+		}
+		count -= copy;
+		pos += copy;
+
+		if (target == current) {
+			if (copy_to_user((void __user *) fp + off,
+					 winbuf, copy))
+				err = -EFAULT;
+		} else if (access_process_vm(target, fp + off,
+					     winbuf, copy, 1) != copy)
+			err = -EFAULT;
+	}
+
+	tstate_save = regs->tstate &~ TSTATE_DEBUGCHANGE;
+
+	if (err == 0)
+		err = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					    &regs->tstate, GENREG_TSTATE * 8,
+					    GENREG_Y * 8);
+
+	regs->tstate &= TSTATE_DEBUGCHANGE;
+	regs->tstate |= tstate_save;
+
+	if (err == 0 && count > 0) {
+		if (kbuf)
+			regs->y = *(unsigned long *) kbuf;
+		else if (get_user(regs->y, (unsigned long __user *) ubuf))
+			return -EFAULT;
+	}
+
+	return err;
 }
 
-static inline void pt_succ_return(struct pt_regs *regs, unsigned long value)
+#define FPREG_F0	0
+#define FPREG_FSR	32
+#define FPREG_GSR	33
+#define FPREG_FPRS	34
+
+#define SPARC64_NFPREGS	35
+
+static int fpregs_get(struct task_struct *target,
+		      const struct utrace_regset *regset,
+		      unsigned int pos, unsigned int count,
+		      void *kbuf, void __user *ubuf)
 {
-	regs->u_regs[UREG_I0] = value;
-	regs->tstate &= ~(TSTATE_ICARRY | TSTATE_XCARRY);
-	regs->tpc = regs->tnpc;
-	regs->tnpc += 4;
+	struct thread_info *t = task_thread_info(target);
+	int err;
+
+	err = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				    t->fpregs, FPREG_F0 * 8, FPREG_FSR * 8);
+
+	if (err == 0)
+		err = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					    &t->xfsr[0], FPREG_FSR * 8,
+					    FPREG_GSR * 8);
+
+	if (err == 0)
+		err = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					    &t->gsr[0], FPREG_GSR * 8,
+					    FPREG_FPRS * 8);
+
+	if (err == 0 && count > 0) {
+		struct pt_regs *regs = task_pt_regs(target);
+
+		if (kbuf)
+			*(unsigned long *) kbuf = regs->fprs;
+		else if (put_user(regs->fprs, (unsigned long __user *) ubuf))
+			return -EFAULT;
+	}
+
+	return err;
 }
 
-static inline void
-pt_succ_return_linux(struct pt_regs *regs, unsigned long value, void __user *addr)
+static int fpregs_set(struct task_struct *target,
+		      const struct utrace_regset *regset,
+		      unsigned int pos, unsigned int count,
+		      const void *kbuf, const void __user *ubuf)
 {
-	if (test_thread_flag(TIF_32BIT)) {
-		if (put_user(value, (unsigned int __user *) addr)) {
-			pt_error_return(regs, EFAULT);
-			return;
-		}
-	} else {
-		if (put_user(value, (long __user *) addr)) {
-			pt_error_return(regs, EFAULT);
-			return;
-		}
+	struct thread_info *t = task_thread_info(target);
+	int err;
+
+	err = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				   t->fpregs, FPREG_F0 * 8, FPREG_FSR * 8);
+
+	if (err == 0)
+		err = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					   &t->xfsr[0], FPREG_FSR * 8,
+					   FPREG_GSR * 8);
+
+	if (err == 0)
+		err = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					   &t->gsr[0], FPREG_GSR * 8,
+					   FPREG_FPRS * 8);
+
+	if (err == 0 && count > 0) {
+		struct pt_regs *regs = task_pt_regs(target);
+
+		if (kbuf)
+			regs->fprs = *(unsigned long *) kbuf;
+		else if (get_user(regs->fprs, (unsigned long __user *) ubuf))
+			return -EFAULT;
 	}
-	regs->u_regs[UREG_I0] = 0;
-	regs->tstate &= ~(TSTATE_ICARRY | TSTATE_XCARRY);
-	regs->tpc = regs->tnpc;
-	regs->tnpc += 4;
-}
-
-static void
-pt_os_succ_return (struct pt_regs *regs, unsigned long val, void __user *addr)
-{
-	if (current->personality == PER_SUNOS)
-		pt_succ_return (regs, val);
-	else
-		pt_succ_return_linux (regs, val, addr);
-}
-
-/* #define ALLOW_INIT_TRACING */
-/* #define DEBUG_PTRACE */
-
-#ifdef DEBUG_PTRACE
-char *pt_rq [] = {
-	/* 0  */ "TRACEME", "PEEKTEXT", "PEEKDATA", "PEEKUSR",
-	/* 4  */ "POKETEXT", "POKEDATA", "POKEUSR", "CONT",
-	/* 8  */ "KILL", "SINGLESTEP", "SUNATTACH", "SUNDETACH",
-	/* 12 */ "GETREGS", "SETREGS", "GETFPREGS", "SETFPREGS",
-	/* 16 */ "READDATA", "WRITEDATA", "READTEXT", "WRITETEXT",
-	/* 20 */ "GETFPAREGS", "SETFPAREGS", "unknown", "unknown",
-	/* 24 */ "SYSCALL", ""
+
+	return err;
+}
+
+static const struct utrace_regset native_regsets[] = {
+	{
+		.n = SPARC64_NGREGS,
+		.size = sizeof(long), .align = sizeof(long),
+		.get = genregs_get, .set = genregs_set
+	},
+	{
+		.n = SPARC64_NFPREGS,
+		.size = sizeof(long), .align = sizeof(long),
+		.get = fpregs_get, .set = fpregs_set
+	},
 };
-#endif
 
-/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure single step bits etc are not set.
- */
-void ptrace_disable(struct task_struct *child)
+const struct utrace_regset_view utrace_sparc64_native_view = {
+	.name = UTS_MACHINE, .e_machine = ELF_ARCH,
+	.regsets = native_regsets,
+	.n = sizeof native_regsets / sizeof native_regsets[0],
+};
+EXPORT_SYMBOL_GPL(utrace_sparc64_native_view);
+
+#ifdef CONFIG_COMPAT
+
+#define GENREG32_G0	0
+#define GENREG32_O0	8
+#define GENREG32_L0	16
+#define GENREG32_I0	24
+#define GENREG32_PSR	32
+#define GENREG32_PC	33
+#define GENREG32_NPC	34
+#define GENREG32_Y	35
+#define GENREG32_WIM	36
+#define GENREG32_TBR	37
+
+#define SPARC32_NGREGS	38
+
+static int genregs32_get(struct task_struct *target,
+			 const struct utrace_regset *regset,
+			 unsigned int pos, unsigned int count,
+			 void *kbuf, void __user *ubuf)
 {
-	/* nothing to do */
+	struct pt_regs *regs = task_pt_regs(target);
+
+	while (count > 0 && pos < (GENREG32_L0 * 4)) {
+		u32 val = regs->u_regs[(pos - (GENREG32_G0*4))/sizeof(u32)];
+		if (kbuf) {
+			*(u32 *) kbuf = val;
+			kbuf += sizeof(u32);
+		} else if (put_user(val, (u32 __user *) ubuf))
+			return -EFAULT;
+		else
+			ubuf += sizeof(u32);
+		pos += sizeof(u32);
+		count -= sizeof(u32);
+	}
+
+	if (count > 0 && pos < (GENREG32_PSR * 4)) {
+		struct thread_info *t = task_thread_info(target);
+		unsigned long fp;
+		u32 rwindow[16];
+		int wsaved;
+
+		if (target == current)
+			flushw_user();
+
+		wsaved = __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_WSAVED];
+		fp = regs->u_regs[UREG_FP] & 0xffffffffUL;
+		if (wsaved && t->rwbuf_stkptrs[wsaved - 1] == fp) {
+			int i;
+			for (i = 0; i < 8; i++)
+				rwindow[i + 0] =
+					t->reg_window[wsaved-1].locals[i];
+			for (i = 0; i < 8; i++)
+				rwindow[i + 8] =
+					t->reg_window[wsaved-1].ins[i];
+		} else {
+			int err = 0;
+
+			if (target == current) {
+				if (copy_from_user(rwindow, (void __user *) fp,
+						   16 * sizeof(u32)))
+					err = -EFAULT;
+			} else if (access_process_vm(target, fp, rwindow,
+						     16 * sizeof(u32), 0)
+				   != 16 * sizeof(u32))
+				err = -EFAULT;
+			if (err)
+				return err;
+		}
+
+		while (count > 0 && pos < (GENREG32_PSR * 4)) {
+			u32 val = rwindow[(pos - (GENREG32_L0*4))/sizeof(u32)];
+
+			if (kbuf) {
+				*(u32 *) kbuf = val;
+				kbuf += sizeof(u32);
+			} else if (put_user(val, (u32 __user *) ubuf))
+				return -EFAULT;
+			else
+				ubuf += sizeof(u32);
+			pos += sizeof(u32);
+			count -= sizeof(u32);
+		}
+	}
+
+	if (count > 0 && pos == (GENREG32_PSR * 4)) {
+		u32 psr = tstate_to_psr(regs->tstate);
+
+		if (kbuf) {
+			*(u32 *) kbuf = psr;
+			kbuf += sizeof(u32);
+		} else if (put_user(psr, (u32 __user *) ubuf))
+			return -EFAULT;
+		else
+			ubuf += sizeof(u32);
+		pos += sizeof(u32);
+		count -= sizeof(u32);
+	}
+
+	if (count > 0 && pos == (GENREG32_PC * 4)) {
+		u32 val = regs->tpc;
+
+		if (kbuf) {
+			*(u32 *) kbuf = val;
+			kbuf += sizeof(u32);
+		} else if (put_user(val, (u32 __user *) ubuf))
+			return -EFAULT;
+		else
+			ubuf += sizeof(u32);
+		pos += sizeof(u32);
+		count -= sizeof(u32);
+	}
+
+	if (count > 0 && pos == (GENREG32_NPC * 4)) {
+		u32 val = regs->tnpc;
+
+		if (kbuf) {
+			*(u32 *) kbuf = val;
+			kbuf += sizeof(u32);
+		} else if (put_user(val, (u32 __user *) ubuf))
+			return -EFAULT;
+		else
+			ubuf += sizeof(u32);
+		pos += sizeof(u32);
+		count -= sizeof(u32);
+	}
+
+	if (count > 0 && pos == (GENREG32_Y * 4)) {
+		if (kbuf) {
+			*(u32 *) kbuf = regs->y;
+			kbuf += sizeof(u32);
+		} else if (put_user(regs->y, (u32 __user *) ubuf))
+			return -EFAULT;
+		else
+			ubuf += sizeof(u32);
+		pos += sizeof(u32);
+		count -= sizeof(u32);
+	}
+
+	if (count > 0) {
+		if (kbuf)
+			memset(kbuf, 0, count);
+		else if (clear_user(ubuf, count))
+			return -EFAULT;
+	}
+
+	return 0;
+}
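
Each stanza in genregs32_get() repeats the same four-step copy-out: store to kbuf or put_user to ubuf, then advance pos and count. A helper of this shape (a sketch, not an API from this patch) would collapse them:

    /* Kernel-context sketch: relies on <asm/uaccess.h> and the gcc
     * void-pointer arithmetic the surrounding file already uses. */
    static int copyout_u32(void **kbuf, void __user **ubuf,
    		       unsigned int *pos, unsigned int *count, u32 val)
    {
    	if (*kbuf) {
    		*(u32 *) *kbuf = val;
    		*kbuf += sizeof(u32);
    	} else if (put_user(val, (u32 __user *) *ubuf))
    		return -EFAULT;
    	else
    		*ubuf += sizeof(u32);
    	*pos += sizeof(u32);
    	*count -= sizeof(u32);
    	return 0;
    }
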
+
+static int genregs32_set(struct task_struct *target,
+			 const struct utrace_regset *regset,
+			 unsigned int pos, unsigned int count,
+			 const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+
+	while (count > 0 && pos < (GENREG32_L0 * 4)) {
+		unsigned long *loc;
+		loc = &regs->u_regs[(pos - (GENREG32_G0*4))/sizeof(u32)];
+		if (kbuf) {
+			*loc = *(u32 *) kbuf;
+			kbuf += sizeof(u32);
+		} else if (get_user(*loc, (u32 __user *) ubuf))
+			return -EFAULT;
+		else
+			ubuf += sizeof(u32);
+		pos += sizeof(u32);
+		count -= sizeof(u32);
+	}
+
+	if (count > 0 && pos < (GENREG32_PSR * 4)) {
+		unsigned long fp;
+		u32 regbuf[16];
+		unsigned int off, copy;
+		int err;
+
+		if (target == current)
+			flushw_user();
+
+		copy = (GENREG32_PSR * 4) - pos;
+		if (count < copy)
+			copy = count;
+		BUG_ON(copy > 16 * sizeof(u32));
+
+		fp = regs->u_regs[UREG_FP] & 0xffffffffUL;
+		off = pos - (GENREG32_L0 * 4);
+		if (kbuf) {
+			memcpy(regbuf, kbuf, copy);
+			kbuf += copy;
+		} else if (copy_from_user(regbuf, ubuf, copy))
+			return -EFAULT;
+		else
+			ubuf += copy;
+		pos += copy;
+		count -= copy;
+
+		/* Write back only the 'copy' bytes consumed above,
+		 * not the remaining 'count'. */
+		err = 0;
+		if (target == current) {
+			if (copy_to_user((void __user *) fp + off,
+					 regbuf, copy))
+				err = -EFAULT;
+		} else if (access_process_vm(target, fp + off,
+					     regbuf, copy, 1) != copy)
+			err = -EFAULT;
+		if (err)
+			return err;
+	}
+
+	if (count > 0 && pos == (GENREG32_PSR * 4)) {
+		unsigned long tstate, tstate_save;
+		u32 psr;
+
+		tstate_save = regs->tstate & ~(TSTATE_ICC | TSTATE_XCC);
+
+		if (kbuf) {
+			psr = *(u32 *) kbuf;
+			kbuf += sizeof(u32);
+		} else if (get_user(psr, (u32 __user *) ubuf))
+			return -EFAULT;
+		else
+			ubuf += sizeof(u32);
+		pos += sizeof(u32);
+		count -= sizeof(u32);
+
+		tstate = psr_to_tstate_icc(psr);
+		regs->tstate = tstate_save | tstate;
+	}
+
+	if (count > 0 && pos == (GENREG32_PC * 4)) {
+		if (kbuf) {
+			regs->tpc = *(u32 *) kbuf;
+			kbuf += sizeof(u32);
+		} else if (get_user(regs->tpc, (u32 __user *) ubuf))
+			return -EFAULT;
+		else
+			ubuf += sizeof(u32);
+		pos += sizeof(u32);
+		count -= sizeof(u32);
+	}
+
+	if (count > 0 && pos == (GENREG32_NPC * 4)) {
+		if (kbuf) {
+			regs->tnpc = *(u32 *) kbuf;
+			kbuf += sizeof(u32);
+		} else if (get_user(regs->tnpc, (u32 __user *) ubuf))
+			return -EFAULT;
+		else
+			ubuf += sizeof(u32);
+		pos += sizeof(u32);
+		count -= sizeof(u32);
+	}
+
+	if (count > 0 && pos == (GENREG32_Y * 4)) {
+		if (kbuf) {
+			regs->y = *(u32 *) kbuf;
+			kbuf += sizeof(u32);
+		} else if (get_user(regs->y, (u32 __user *) ubuf))
+			return -EFAULT;
+		else
+			ubuf += sizeof(u32);
+		pos += sizeof(u32);
+		count -= sizeof(u32);
+	}
+
+	/* Ignore WIM and TBR */
+
+	return 0;
 }
 
+#define FPREG32_F0	0
+#define FPREG32_FSR	32
+
+#define SPARC32_NFPREGS	33
+
+static int fpregs32_get(struct task_struct *target,
+			const struct utrace_regset *regset,
+			unsigned int pos, unsigned int count,
+			void *kbuf, void __user *ubuf)
+{
+	struct thread_info *t = task_thread_info(target);
+	int err;
+
+	err = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				    t->fpregs, FPREG32_F0 * 4,
+				    FPREG32_FSR * 4);
+
+	if (err == 0 && count > 0) {
+		if (kbuf) {
+			*(u32 *) kbuf = t->xfsr[0];
+		} else if (put_user(t->xfsr[0], (u32 __user *) ubuf))
+			return -EFAULT;
+	}
+
+	return err;
+}
+
+static int fpregs32_set(struct task_struct *target,
+			const struct utrace_regset *regset,
+			unsigned int pos, unsigned int count,
+			const void *kbuf, const void __user *ubuf)
+{
+	struct thread_info *t = task_thread_info(target);
+	int err;
+
+	err = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				   t->fpregs, FPREG32_F0 * 4,
+				   FPREG32_FSR * 4);
+
+	if (err == 0 && count > 0) {
+		u32 fsr;
+		if (kbuf) {
+			fsr = *(u32 *) kbuf;
+		} else if (get_user(fsr, (u32 __user *) ubuf))
+			return -EFAULT;
+		t->xfsr[0] = (t->xfsr[0] & 0xffffffff00000000UL) | fsr;
+	}
+
+	return err;
+}
+
+static const struct utrace_regset sparc32_regsets[] = {
+	{
+		.n = SPARC32_NGREGS,
+		.size = sizeof(u32), .align = sizeof(u32),
+		.get = genregs32_get, .set = genregs32_set
+	},
+	{
+		.n = SPARC32_NFPREGS,
+		.size = sizeof(u32), .align = sizeof(u32),
+		.get = fpregs32_get, .set = fpregs32_set
+	},
+};
+
+const struct utrace_regset_view utrace_sparc32_view = {
+	.name = "sparc", .e_machine = EM_SPARC,
+	.regsets = sparc32_regsets,
+	.n = sizeof sparc32_regsets / sizeof sparc32_regsets[0],
+};
+EXPORT_SYMBOL_GPL(utrace_sparc32_view);
+
+#endif	/* CONFIG_COMPAT */
+
 /* To get the necessary page struct, access_process_vm() first calls
  * get_user_pages().  This has done a flush_dcache_page() on the
  * accessed page.  Then our caller (copy_{to,from}_user_page()) did
@@ -167,484 +661,124 @@ void flush_ptrace_access(struct vm_area_
 	}
 }
 
-asmlinkage void do_ptrace(struct pt_regs *regs)
-{
-	int request = regs->u_regs[UREG_I0];
-	pid_t pid = regs->u_regs[UREG_I1];
-	unsigned long addr = regs->u_regs[UREG_I2];
-	unsigned long data = regs->u_regs[UREG_I3];
-	unsigned long addr2 = regs->u_regs[UREG_I4];
-	struct task_struct *child;
-	int ret;
-
-	if (test_thread_flag(TIF_32BIT)) {
-		addr &= 0xffffffffUL;
-		data &= 0xffffffffUL;
-		addr2 &= 0xffffffffUL;
-	}
-	lock_kernel();
-#ifdef DEBUG_PTRACE
-	{
-		char *s;
-
-		if ((request >= 0) && (request <= 24))
-			s = pt_rq [request];
-		else
-			s = "unknown";
-
-		if (request == PTRACE_POKEDATA && data == 0x91d02001){
-			printk ("do_ptrace: breakpoint pid=%d, addr=%016lx addr2=%016lx\n",
-				pid, addr, addr2);
-		} else 
-			printk("do_ptrace: rq=%s(%d) pid=%d addr=%016lx data=%016lx addr2=%016lx\n",
-			       s, request, pid, addr, data, addr2);
-	}
-#endif
-	if (request == PTRACE_TRACEME) {
-		ret = ptrace_traceme();
-		pt_succ_return(regs, 0);
-		goto out;
-	}
-
-	child = ptrace_get_task_struct(pid);
-	if (IS_ERR(child)) {
-		ret = PTR_ERR(child);
-		pt_error_return(regs, -ret);
-		goto out;
-	}
-
-	if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
-	    || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
-		if (ptrace_attach(child)) {
-			pt_error_return(regs, EPERM);
-			goto out_tsk;
-		}
-		pt_succ_return(regs, 0);
-		goto out_tsk;
-	}
-
-	ret = ptrace_check_attach(child, request == PTRACE_KILL);
-	if (ret < 0) {
-		pt_error_return(regs, -ret);
-		goto out_tsk;
-	}
-
-	if (!(test_thread_flag(TIF_32BIT))	&&
-	    ((request == PTRACE_READDATA64)		||
-	     (request == PTRACE_WRITEDATA64)		||
-	     (request == PTRACE_READTEXT64)		||
-	     (request == PTRACE_WRITETEXT64)		||
-	     (request == PTRACE_PEEKTEXT64)		||
-	     (request == PTRACE_POKETEXT64)		||
-	     (request == PTRACE_PEEKDATA64)		||
-	     (request == PTRACE_POKEDATA64))) {
-		addr = regs->u_regs[UREG_G2];
-		addr2 = regs->u_regs[UREG_G3];
-		request -= 30; /* wheee... */
-	}
-
-	switch(request) {
-	case PTRACE_PEEKUSR:
-		if (addr != 0)
-			pt_error_return(regs, EIO);
-		else
-			pt_succ_return(regs, 0);
-		goto out_tsk;
-
-	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
-	case PTRACE_PEEKDATA: {
-		unsigned long tmp64;
-		unsigned int tmp32;
-		int res, copied;
-
-		res = -EIO;
-		if (test_thread_flag(TIF_32BIT)) {
-			copied = access_process_vm(child, addr,
-						   &tmp32, sizeof(tmp32), 0);
-			tmp64 = (unsigned long) tmp32;
-			if (copied == sizeof(tmp32))
-				res = 0;
-		} else {
-			copied = access_process_vm(child, addr,
-						   &tmp64, sizeof(tmp64), 0);
-			if (copied == sizeof(tmp64))
-				res = 0;
-		}
-		if (res < 0)
-			pt_error_return(regs, -res);
-		else
-			pt_os_succ_return(regs, tmp64, (void __user *) data);
-		goto out_tsk;
-	}
-
-	case PTRACE_POKETEXT: /* write the word at location addr. */
-	case PTRACE_POKEDATA: {
-		unsigned long tmp64;
-		unsigned int tmp32;
-		int copied, res = -EIO;
-
-		if (test_thread_flag(TIF_32BIT)) {
-			tmp32 = data;
-			copied = access_process_vm(child, addr,
-						   &tmp32, sizeof(tmp32), 1);
-			if (copied == sizeof(tmp32))
-				res = 0;
-		} else {
-			tmp64 = data;
-			copied = access_process_vm(child, addr,
-						   &tmp64, sizeof(tmp64), 1);
-			if (copied == sizeof(tmp64))
-				res = 0;
-		}
-		if (res < 0)
-			pt_error_return(regs, -res);
-		else
-			pt_succ_return(regs, res);
-		goto out_tsk;
-	}
-
-	case PTRACE_GETREGS: {
-		struct pt_regs32 __user *pregs =
-			(struct pt_regs32 __user *) addr;
-		struct pt_regs *cregs = task_pt_regs(child);
-		int rval;
-
-		if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
-		    __put_user(cregs->tpc, (&pregs->pc)) ||
-		    __put_user(cregs->tnpc, (&pregs->npc)) ||
-		    __put_user(cregs->y, (&pregs->y))) {
-			pt_error_return(regs, EFAULT);
-			goto out_tsk;
-		}
-		for (rval = 1; rval < 16; rval++)
-			if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
-				pt_error_return(regs, EFAULT);
-				goto out_tsk;
-			}
-		pt_succ_return(regs, 0);
-#ifdef DEBUG_PTRACE
-		printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
-#endif
-		goto out_tsk;
-	}
-
-	case PTRACE_GETREGS64: {
-		struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
-		struct pt_regs *cregs = task_pt_regs(child);
-		unsigned long tpc = cregs->tpc;
-		int rval;
-
-		if ((task_thread_info(child)->flags & _TIF_32BIT) != 0)
-			tpc &= 0xffffffff;
-		if (__put_user(cregs->tstate, (&pregs->tstate)) ||
-		    __put_user(tpc, (&pregs->tpc)) ||
-		    __put_user(cregs->tnpc, (&pregs->tnpc)) ||
-		    __put_user(cregs->y, (&pregs->y))) {
-			pt_error_return(regs, EFAULT);
-			goto out_tsk;
-		}
-		for (rval = 1; rval < 16; rval++)
-			if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
-				pt_error_return(regs, EFAULT);
-				goto out_tsk;
-			}
-		pt_succ_return(regs, 0);
-#ifdef DEBUG_PTRACE
-		printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
-#endif
-		goto out_tsk;
-	}
-
-	case PTRACE_SETREGS: {
-		struct pt_regs32 __user *pregs =
-			(struct pt_regs32 __user *) addr;
-		struct pt_regs *cregs = task_pt_regs(child);
-		unsigned int psr, pc, npc, y;
-		int i;
-
-		/* Must be careful, tracing process can only set certain
-		 * bits in the psr.
-		 */
-		if (__get_user(psr, (&pregs->psr)) ||
-		    __get_user(pc, (&pregs->pc)) ||
-		    __get_user(npc, (&pregs->npc)) ||
-		    __get_user(y, (&pregs->y))) {
-			pt_error_return(regs, EFAULT);
-			goto out_tsk;
-		}
-		cregs->tstate &= ~(TSTATE_ICC);
-		cregs->tstate |= psr_to_tstate_icc(psr);
-               	if (!((pc | npc) & 3)) {
-			cregs->tpc = pc;
-			cregs->tnpc = npc;
-		}
-		cregs->y = y;
-		for (i = 1; i < 16; i++) {
-			if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
-				pt_error_return(regs, EFAULT);
-				goto out_tsk;
-			}
-		}
-		pt_succ_return(regs, 0);
-		goto out_tsk;
-	}
-
-	case PTRACE_SETREGS64: {
-		struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
-		struct pt_regs *cregs = task_pt_regs(child);
-		unsigned long tstate, tpc, tnpc, y;
-		int i;
-
-		/* Must be careful, tracing process can only set certain
-		 * bits in the psr.
-		 */
-		if (__get_user(tstate, (&pregs->tstate)) ||
-		    __get_user(tpc, (&pregs->tpc)) ||
-		    __get_user(tnpc, (&pregs->tnpc)) ||
-		    __get_user(y, (&pregs->y))) {
-			pt_error_return(regs, EFAULT);
-			goto out_tsk;
-		}
-		if ((task_thread_info(child)->flags & _TIF_32BIT) != 0) {
-			tpc &= 0xffffffff;
-			tnpc &= 0xffffffff;
-		}
-		tstate &= (TSTATE_ICC | TSTATE_XCC);
-		cregs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
-		cregs->tstate |= tstate;
-		if (!((tpc | tnpc) & 3)) {
-			cregs->tpc = tpc;
-			cregs->tnpc = tnpc;
-		}
-		cregs->y = y;
-		for (i = 1; i < 16; i++) {
-			if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
-				pt_error_return(regs, EFAULT);
-				goto out_tsk;
-			}
-		}
-		pt_succ_return(regs, 0);
-		goto out_tsk;
-	}
-
-	case PTRACE_GETFPREGS: {
-		struct fps {
-			unsigned int regs[32];
-			unsigned int fsr;
-			unsigned int flags;
-			unsigned int extra;
-			unsigned int fpqd;
-			struct fq {
-				unsigned int insnaddr;
-				unsigned int insn;
-			} fpq[16];
-		};
-		struct fps __user *fps = (struct fps __user *) addr;
-		unsigned long *fpregs = task_thread_info(child)->fpregs;
-
-		if (copy_to_user(&fps->regs[0], fpregs,
-				 (32 * sizeof(unsigned int))) ||
-		    __put_user(task_thread_info(child)->xfsr[0], (&fps->fsr)) ||
-		    __put_user(0, (&fps->fpqd)) ||
-		    __put_user(0, (&fps->flags)) ||
-		    __put_user(0, (&fps->extra)) ||
-		    clear_user(&fps->fpq[0], 32 * sizeof(unsigned int))) {
-			pt_error_return(regs, EFAULT);
-			goto out_tsk;
-		}
-		pt_succ_return(regs, 0);
-		goto out_tsk;
-	}
-
-	case PTRACE_GETFPREGS64: {
-		struct fps {
-			unsigned int regs[64];
-			unsigned long fsr;
-		};
-		struct fps __user *fps = (struct fps __user *) addr;
-		unsigned long *fpregs = task_thread_info(child)->fpregs;
-
-		if (copy_to_user(&fps->regs[0], fpregs,
-				 (64 * sizeof(unsigned int))) ||
-		    __put_user(task_thread_info(child)->xfsr[0], (&fps->fsr))) {
-			pt_error_return(regs, EFAULT);
-			goto out_tsk;
-		}
-		pt_succ_return(regs, 0);
-		goto out_tsk;
-	}
-
-	case PTRACE_SETFPREGS: {
-		struct fps {
-			unsigned int regs[32];
-			unsigned int fsr;
-			unsigned int flags;
-			unsigned int extra;
-			unsigned int fpqd;
-			struct fq {
-				unsigned int insnaddr;
-				unsigned int insn;
-			} fpq[16];
-		};
-		struct fps __user *fps = (struct fps __user *) addr;
-		unsigned long *fpregs = task_thread_info(child)->fpregs;
-		unsigned fsr;
-
-		if (copy_from_user(fpregs, &fps->regs[0],
-				   (32 * sizeof(unsigned int))) ||
-		    __get_user(fsr, (&fps->fsr))) {
-			pt_error_return(regs, EFAULT);
-			goto out_tsk;
-		}
-		task_thread_info(child)->xfsr[0] &= 0xffffffff00000000UL;
-		task_thread_info(child)->xfsr[0] |= fsr;
-		if (!(task_thread_info(child)->fpsaved[0] & FPRS_FEF))
-			task_thread_info(child)->gsr[0] = 0;
-		task_thread_info(child)->fpsaved[0] |= (FPRS_FEF | FPRS_DL);
-		pt_succ_return(regs, 0);
-		goto out_tsk;
-	}
-
-	case PTRACE_SETFPREGS64: {
-		struct fps {
-			unsigned int regs[64];
-			unsigned long fsr;
-		};
-		struct fps __user *fps = (struct fps __user *) addr;
-		unsigned long *fpregs = task_thread_info(child)->fpregs;
-
-		if (copy_from_user(fpregs, &fps->regs[0],
-				   (64 * sizeof(unsigned int))) ||
-		    __get_user(task_thread_info(child)->xfsr[0], (&fps->fsr))) {
-			pt_error_return(regs, EFAULT);
-			goto out_tsk;
-		}
-		if (!(task_thread_info(child)->fpsaved[0] & FPRS_FEF))
-			task_thread_info(child)->gsr[0] = 0;
-		task_thread_info(child)->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
-		pt_succ_return(regs, 0);
-		goto out_tsk;
-	}
+#ifdef CONFIG_PTRACE
+static const struct ptrace_layout_segment sparc64_getregs_layout[] = {
+	{ 0, offsetof(struct pt_regs, u_regs[15]), 0, sizeof(long) },
+	{ offsetof(struct pt_regs, u_regs[15]),
+	  offsetof(struct pt_regs, tstate),
+	  -1, 0 },
+	{ offsetof(struct pt_regs, tstate), offsetof(struct pt_regs, y),
+	  0, 32 * sizeof(long) },
+	{0, 0, -1, 0}
+};
 
-	case PTRACE_READTEXT:
-	case PTRACE_READDATA: {
-		int res = ptrace_readdata(child, addr,
-					  (char __user *)addr2, data);
-		if (res == data) {
-			pt_succ_return(regs, 0);
-			goto out_tsk;
-		}
-		if (res >= 0)
-			res = -EIO;
-		pt_error_return(regs, -res);
-		goto out_tsk;
-	}
+int arch_ptrace(long *request, struct task_struct *child,
+		struct utrace_attached_engine *engine,
+		unsigned long addr, unsigned long data,
+		long *retval)
+{
+	void __user *uaddr = (void __user *) addr;
+	struct pt_regs __user *uregs = uaddr;
+	int err = -ENOSYS;
+
+	switch (*request) {
+	case PTRACE_GETREGS64:
+		err = ptrace_layout_access(child, engine,
+					   &utrace_sparc64_native_view,
+					   sparc64_getregs_layout,
+					   0, offsetof(struct pt_regs, y),
+					   uaddr, NULL, 0);
+		if (!err &&
+		    (put_user(task_pt_regs(child)->y, &uregs->y) ||
+		     put_user(task_pt_regs(child)->fprs, &uregs->fprs)))
+			err = -EFAULT;
+		break;
 
-	case PTRACE_WRITETEXT:
-	case PTRACE_WRITEDATA: {
-		int res = ptrace_writedata(child, (char __user *) addr2,
-					   addr, data);
-		if (res == data) {
-			pt_succ_return(regs, 0);
-			goto out_tsk;
-		}
-		if (res >= 0)
-			res = -EIO;
-		pt_error_return(regs, -res);
-		goto out_tsk;
-	}
-	case PTRACE_SYSCALL: /* continue and stop at (return from) syscall */
-		addr = 1;
-
-	case PTRACE_CONT: { /* restart after signal. */
-		if (!valid_signal(data)) {
-			pt_error_return(regs, EIO);
-			goto out_tsk;
-		}
+	case PTRACE_SETREGS64:
+		err = ptrace_layout_access(child, engine,
+					   &utrace_sparc64_native_view,
+					   sparc64_getregs_layout,
+					   0, offsetof(struct pt_regs, y),
+					   uaddr, NULL, 1);
+		if (!err &&
+		    (get_user(task_pt_regs(child)->y, &uregs->y) ||
+		     get_user(task_pt_regs(child)->fprs, &uregs->fprs)))
+			err = -EFAULT;
+		break;
 
-		if (request == PTRACE_SYSCALL) {
-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		} else {
-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		}
+	case PTRACE_GETFPREGS64:
+	case PTRACE_SETFPREGS64:
+		err = ptrace_regset_access(child, engine,
+					   utrace_native_view(current),
+					   2, 0, 34 * sizeof(long), uaddr,
+					   (*request == PTRACE_SETFPREGS64));
+		break;
 
-		child->exit_code = data;
-#ifdef DEBUG_PTRACE
-		printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm,
-			child->pid, child->exit_code,
-			task_pt_regs(child)->tpc,
-			task_pt_regs(child)->tnpc);
+	case PTRACE_SUNDETACH:
+		*request = PTRACE_DETACH;
+		break;
 		       
-#endif
-		wake_up_process(child);
-		pt_succ_return(regs, 0);
-		goto out_tsk;
-	}
+	default:
+		break;
+	}
+	return err;
+}
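
For PTRACE_GETREGS64/SETREGS64 the addr argument carries the pt_regs image, as it did under the old do_ptrace(). Tracer-side usage, sketched (header arrangement varies between libcs; mixing sys/ptrace.h and asm/ptrace.h is assumed to work here):

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <asm/ptrace.h>	/* struct pt_regs, PTRACE_GETREGS64 (sparc) */

    static int get_regs64(pid_t pid, struct pt_regs *out)
    {
    	/* addr = the buffer to fill; data is unused. */
    	return ptrace((enum __ptrace_request) PTRACE_GETREGS64,
    		      pid, out, 0) == -1 ? -1 : 0;
    }
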
 
-/*
- * make the child exit.  Best I can do is send it a sigkill. 
- * perhaps it should be put in the status that it wants to 
- * exit.
- */
-	case PTRACE_KILL: {
-		if (child->exit_state == EXIT_ZOMBIE) {	/* already dead */
-			pt_succ_return(regs, 0);
-			goto out_tsk;
-		}
-		child->exit_code = SIGKILL;
-		wake_up_process(child);
-		pt_succ_return(regs, 0);
-		goto out_tsk;
-	}
+#ifdef CONFIG_COMPAT
+static const struct ptrace_layout_segment sparc32_getregs_layout[] = {
+	{ 0, offsetof(struct pt_regs32, u_regs[0]),
+	  0, GENREG32_PSR * sizeof(u32) },
+	{ offsetof(struct pt_regs32, u_regs[0]),
+	  offsetof(struct pt_regs32, u_regs[15]),
+	  0, 1 * sizeof(u32) },
+	{ offsetof(struct pt_regs32, u_regs[15]), sizeof(struct pt_regs32),
+	  -1, 0 },
+	{0, 0, -1, 0}
+};
 
-	case PTRACE_SUNDETACH: { /* detach a process that was attached. */
-		int error = ptrace_detach(child, data);
-		if (error) {
-			pt_error_return(regs, EIO);
-			goto out_tsk;
-		}
-		pt_succ_return(regs, 0);
-		goto out_tsk;
-	}
+int arch_compat_ptrace(compat_long_t *request, struct task_struct *child,
+		       struct utrace_attached_engine *engine,
+		       compat_ulong_t addr, compat_ulong_t data,
+		       compat_long_t *retval)
+{
+	void __user *uaddr = (void __user *) (unsigned long) addr;
+	int err = -ENOSYS;
 
-	/* PTRACE_DUMPCORE unsupported... */
+	switch (*request) {
+	case PTRACE_GETREGS:
+	case PTRACE_SETREGS:
+		err = ptrace_layout_access(child, engine,
+					   &utrace_sparc32_view,
+					   sparc32_getregs_layout,
+					   0, sizeof(struct pt_regs32),
+					   uaddr, NULL,
+					   (*request ==
+					    PTRACE_SETREGS));
+		break;
 
-	case PTRACE_GETEVENTMSG: {
-		int err;
+	case PTRACE_GETFPREGS:
+	case PTRACE_SETFPREGS:
+		err = ptrace_whole_regset(child, engine, addr, 1,
+					  (*request == PTRACE_SETFPREGS));
+		break;
 
-		if (test_thread_flag(TIF_32BIT))
-			err = put_user(child->ptrace_message,
-				       (unsigned int __user *) data);
-		else
-			err = put_user(child->ptrace_message,
-				       (unsigned long __user *) data);
-		if (err)
-			pt_error_return(regs, -err);
-		else
-			pt_succ_return(regs, 0);
+	case PTRACE_SUNDETACH:
+		*request = PTRACE_DETACH;
 		break;
-	}
 
-	default: {
-		int err = ptrace_request(child, request, addr, data);
-		if (err)
-			pt_error_return(regs, -err);
-		else
-			pt_succ_return(regs, 0);
-		goto out_tsk;
-	}
-	}
-out_tsk:
-	if (child)
-		put_task_struct(child);
-out:
-	unlock_kernel();
+	default:
+		break;
+	}
+	return err;
 }
+#endif	/* CONFIG_COMPAT */
+#endif /* CONFIG_PTRACE */
 
 asmlinkage void syscall_trace(struct pt_regs *regs, int syscall_exit_p)
 {
 	/* do the secure computing check first */
-	secure_computing(regs->u_regs[UREG_G1]);
+	if (!syscall_exit_p)
+		secure_computing(regs->u_regs[UREG_G1]);
 
 	if (unlikely(current->audit_context) && syscall_exit_p) {
 		unsigned long tstate = regs->tstate;
@@ -656,26 +790,9 @@ asmlinkage void syscall_trace(struct pt_
 		audit_syscall_exit(result, regs->u_regs[UREG_I0]);
 	}
 
-	if (!(current->ptrace & PT_PTRACED))
-		goto out;
-
-	if (!test_thread_flag(TIF_SYSCALL_TRACE))
-		goto out;
-
-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
-				 ? 0x80 : 0));
-
-	/*
-	 * this isn't the same as continuing with a signal, but it will do
-	 * for normal use.  strace only continues with a signal if the
-	 * stopping signal is not SIGTRAP.  -brl
-	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
-	}
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(regs, syscall_exit_p);
 
-out:
 	if (unlikely(current->audit_context) && !syscall_exit_p)
 		audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
 				     AUDIT_ARCH_SPARC :
--- linux-2.6/arch/sparc64/kernel/Makefile.utrace-ptrace-compat
+++ linux-2.6/arch/sparc64/kernel/Makefile
@@ -5,6 +5,8 @@
 EXTRA_AFLAGS := -ansi
 EXTRA_CFLAGS := -Werror
 
+CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
+
 extra-y		:= head.o init_task.o vmlinux.lds
 
 obj-y		:= process.o setup.o cpu.o idprom.o \
--- linux-2.6/arch/sparc64/kernel/entry.S.utrace-ptrace-compat
+++ linux-2.6/arch/sparc64/kernel/entry.S
@@ -1409,7 +1409,6 @@ execve_merge:
 
 	.globl	sys_pipe, sys_sigpause, sys_nis_syscall
 	.globl	sys_rt_sigreturn
-	.globl	sys_ptrace
 	.globl	sys_sigaltstack
 	.align	32
 sys_pipe:	ba,pt		%xcc, sparc_pipe
@@ -1452,11 +1451,6 @@ sys32_rt_sigreturn:
 		 add		%o7, 1f-.-4, %o7
 		nop
 #endif
-sys_ptrace:	add		%sp, PTREGS_OFF, %o0
-		call		do_ptrace
-		 add		%o7, 1f-.-4, %o7
-		nop
-		.align		32
 1:		ldx		[%curptr + TI_FLAGS], %l5
 		andcc		%l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
 		be,pt		%icc, rtrap
--- linux-2.6/arch/sparc64/kernel/binfmt_aout32.c.utrace-ptrace-compat
+++ linux-2.6/arch/sparc64/kernel/binfmt_aout32.c
@@ -335,8 +335,6 @@ beyond_if:
 	tsb_context_switch(current->mm);
 
 	start_thread32(regs, ex.a_entry, current->mm->start_stack);
-	if (current->ptrace & PT_PTRACED)
-		send_sig(SIGTRAP, current, 0);
 	return 0;
 }
 
--- linux-2.6/arch/arm/kernel/ptrace.c.utrace-ptrace-compat
+++ linux-2.6/arch/arm/kernel/ptrace.c
@@ -812,34 +812,18 @@ asmlinkage int syscall_trace(int why, st
 {
 	unsigned long ip;
 
-	if (!test_thread_flag(TIF_SYSCALL_TRACE))
-		return scno;
-	if (!(current->ptrace & PT_PTRACED))
-		return scno;
+	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+		/*
+		 * Save IP.  IP is used to denote syscall entry/exit:
+		 *  IP = 0 -> entry, = 1 -> exit
+		 */
+		ip = regs->ARM_ip;
+		regs->ARM_ip = why;
 
-	/*
-	 * Save IP.  IP is used to denote syscall entry/exit:
-	 *  IP = 0 -> entry, = 1 -> exit
-	 */
-	ip = regs->ARM_ip;
-	regs->ARM_ip = why;
+		tracehook_report_syscall(regs, why);
 
-	current->ptrace_message = scno;
-
-	/* the 0x80 provides a way for the tracing parent to distinguish
-	   between a syscall stop and SIGTRAP delivery */
-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
-				 ? 0x80 : 0));
-	/*
-	 * this isn't the same as continuing with a signal, but it will do
-	 * for normal use.  strace only continues with a signal if the
-	 * stopping signal is not SIGTRAP.  -brl
-	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
+		regs->ARM_ip = ip;
 	}
-	regs->ARM_ip = ip;
 
-	return current->ptrace_message;
+	return scno;
 }
--- linux-2.6/arch/ia64/kernel/mca.c.utrace-ptrace-compat
+++ linux-2.6/arch/ia64/kernel/mca.c
@@ -1515,7 +1515,7 @@ format_mca_init_stack(void *mca_data, un
 	p->state = TASK_UNINTERRUPTIBLE;
 	cpu_set(cpu, p->cpus_allowed);
 	INIT_LIST_HEAD(&p->tasks);
-	p->parent = p->real_parent = p->group_leader = p;
+	p->parent = p->group_leader = p;
 	INIT_LIST_HEAD(&p->children);
 	INIT_LIST_HEAD(&p->sibling);
 	strncpy(p->comm, type, sizeof(p->comm)-1);
--- linux-2.6/arch/ia64/kernel/signal.c.utrace-ptrace-compat
+++ linux-2.6/arch/ia64/kernel/signal.c
@@ -10,7 +10,7 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/smp.h>
@@ -471,6 +471,8 @@ handle_signal (unsigned long sig, struct
 		sigaddset(&current->blocked, sig);
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
+
+	tracehook_report_handle_signal(sig, ka, oldset, &scr->pt);
 	return 1;
 }
 
--- linux-2.6/arch/ia64/kernel/fsys.S.utrace-ptrace-compat
+++ linux-2.6/arch/ia64/kernel/fsys.S
@@ -83,29 +83,29 @@ ENTRY(fsys_getppid)
 	;;
 
 	ld4 r9=[r9]
-	add r17=IA64_TASK_REAL_PARENT_OFFSET,r17 // r17 = &current->group_leader->real_parent
+	add r17=IA64_TASK_PARENT_OFFSET,r17 // r17 = &current->group_leader->parent
 	;;
 	and r9=TIF_ALLWORK_MASK,r9
 
-1:	ld8 r18=[r17]				// r18 = current->group_leader->real_parent
+1:	ld8 r18=[r17]				// r18 = current->group_leader->parent
 	;;
 	cmp.ne p8,p0=0,r9
-	add r8=IA64_TASK_TGID_OFFSET,r18	// r8 = &current->group_leader->real_parent->tgid
+	add r8=IA64_TASK_TGID_OFFSET,r18	// r8 = &current->group_leader->parent->tgid
 	;;
 
 	/*
 	 * The .acq is needed to ensure that the read of tgid has returned its data before
-	 * we re-check "real_parent".
+	 * we re-check "parent".
 	 */
-	ld4.acq r8=[r8]				// r8 = current->group_leader->real_parent->tgid
+	ld4.acq r8=[r8]				// r8 = current->group_leader->parent->tgid
 #ifdef CONFIG_SMP
 	/*
-	 * Re-read current->group_leader->real_parent.
+	 * Re-read current->group_leader->parent.
 	 */
-	ld8 r19=[r17]				// r19 = current->group_leader->real_parent
+	ld8 r19=[r17]				// r19 = current->group_leader->parent
 (p8)	br.spnt.many fsys_fallback_syscall
 	;;
-	cmp.ne p6,p0=r18,r19			// did real_parent change?
+	cmp.ne p6,p0=r18,r19			// did parent change?
 	mov r19=0			// i must not leak kernel bits...
 (p6)	br.cond.spnt.few 1b			// yes -> redo the read of tgid and the check
 	;;
--- linux-2.6/arch/ia64/kernel/ptrace.c.utrace-ptrace-compat
+++ linux-2.6/arch/ia64/kernel/ptrace.c
@@ -3,6 +3,9 @@
  *
  * Copyright (C) 1999-2005 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2006 Intel Co
+ *  2006-08-12	- IA64 Native Utrace implementation support added by
+ *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  *
  * Derived from the x86 and Alpha versions.
  */
@@ -12,18 +15,22 @@
 #include <linux/mm.h>
 #include <linux/errno.h>
 #include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/smp_lock.h>
 #include <linux/user.h>
 #include <linux/security.h>
 #include <linux/audit.h>
 #include <linux/signal.h>
+#include <linux/module.h>
 
+#include <asm/tracehook.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/ptrace_offsets.h>
 #include <asm/rse.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
+#include <asm/elf.h>
 #include <asm/unwind.h>
 #ifdef CONFIG_PERFMON
 #include <asm/perfmon.h>
@@ -547,79 +554,6 @@ ia64_sync_user_rbs (struct task_struct *
 	return 0;
 }
 
-static inline int
-thread_matches (struct task_struct *thread, unsigned long addr)
-{
-	unsigned long thread_rbs_end;
-	struct pt_regs *thread_regs;
-
-	if (ptrace_check_attach(thread, 0) < 0)
-		/*
-		 * If the thread is not in an attachable state, we'll
-		 * ignore it.  The net effect is that if ADDR happens
-		 * to overlap with the portion of the thread's
-		 * register backing store that is currently residing
-		 * on the thread's kernel stack, then ptrace() may end
-		 * up accessing a stale value.  But if the thread
-		 * isn't stopped, that's a problem anyhow, so we're
-		 * doing as well as we can...
-		 */
-		return 0;
-
-	thread_regs = task_pt_regs(thread);
-	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
-	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
-		return 0;
-
-	return 1;	/* looks like we've got a winner */
-}
-
-/*
- * GDB apparently wants to be able to read the register-backing store
- * of any thread when attached to a given process.  If we are peeking
- * or poking an address that happens to reside in the kernel-backing
- * store of another thread, we need to attach to that thread, because
- * otherwise we end up accessing stale data.
- *
- * task_list_lock must be read-locked before calling this routine!
- */
-static struct task_struct *
-find_thread_for_addr (struct task_struct *child, unsigned long addr)
-{
-	struct task_struct *p;
-	struct mm_struct *mm;
-	struct list_head *this, *next;
-	int mm_users;
-
-	if (!(mm = get_task_mm(child)))
-		return child;
-
-	/* -1 because of our get_task_mm(): */
-	mm_users = atomic_read(&mm->mm_users) - 1;
-	if (mm_users <= 1)
-		goto out;		/* not multi-threaded */
-
-	/*
-	 * Traverse the current process' children list.  Every task that
-	 * one attaches to becomes a child.  And it is only attached children
-	 * of the debugger that are of interest (ptrace_check_attach checks
-	 * for this).
-	 */
- 	list_for_each_safe(this, next, &current->children) {
-		p = list_entry(this, struct task_struct, sibling);
-		if (p->mm != mm)
-			continue;
-		if (thread_matches(p, addr)) {
-			child = p;
-			goto out;
-		}
-	}
-
-  out:
-	mmput(mm);
-	return child;
-}
-
 /*
  * Write f32-f127 back to task->thread.fph if it has been modified.
  */
@@ -663,6 +597,7 @@ ia64_sync_fph (struct task_struct *task)
 	psr->dfh = 1;
 }
 
+#if 0
 static int
 access_fr (struct unw_frame_info *info, int regnum, int hi,
 	   unsigned long *data, int write_access)
@@ -681,6 +616,7 @@ access_fr (struct unw_frame_info *info, 
 		*data = fpval.u.bits[hi];
 	return ret;
 }
+#endif /* access_fr() */
 
 /*
  * Change the machine-state of CHILD such that it will return via the normal
@@ -781,321 +717,121 @@ access_nat_bits (struct task_struct *chi
 	return 0;
 }
 
-static int
-access_uarea (struct task_struct *child, unsigned long addr,
-	      unsigned long *data, int write_access)
+
+/* "asmlinkage" so the input arguments are preserved... */
+
+asmlinkage void
+syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
+		     long arg4, long arg5, long arg6, long arg7,
+		     struct pt_regs regs)
 {
-	unsigned long *ptr, regnum, urbs_end, rnat_addr, cfm;
-	struct switch_stack *sw;
-	struct pt_regs *pt;
-#	define pt_reg_addr(pt, reg)	((void *)			    \
-					 ((unsigned long) (pt)		    \
-					  + offsetof(struct pt_regs, reg)))
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(&regs, 0);
 
+	if (unlikely(current->audit_context)) {
+		long syscall;
+		int arch;
 
-	pt = task_pt_regs(child);
-	sw = (struct switch_stack *) (child->thread.ksp + 16);
+		if (IS_IA32_PROCESS(&regs)) {
+			syscall = regs.r1;
+			arch = AUDIT_ARCH_I386;
+		} else {
+			syscall = regs.r15;
+			arch = AUDIT_ARCH_IA64;
+		}
 
-	if ((addr & 0x7) != 0) {
-		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
-		return -1;
+		audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
 	}
 
-	if (addr < PT_F127 + 16) {
-		/* accessing fph */
-		if (write_access)
-			ia64_sync_fph(child);
-		else
-			ia64_flush_fph(child);
-		ptr = (unsigned long *)
-			((unsigned long) &child->thread.fph + addr);
-	} else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) {
-		/* scratch registers untouched by kernel (saved in pt_regs) */
-		ptr = pt_reg_addr(pt, f10) + (addr - PT_F10);
-	} else if (addr >= PT_F12 && addr < PT_F15 + 16) {
-		/*
-		 * Scratch registers untouched by kernel (saved in
-		 * switch_stack).
-		 */
-		ptr = (unsigned long *) ((long) sw
-					 + (addr - PT_NAT_BITS - 32));
-	} else if (addr < PT_AR_LC + 8) {
-		/* preserved state: */
-		struct unw_frame_info info;
-		char nat = 0;
-		int ret;
+}
 
-		unw_init_from_blocked_task(&info, child);
-		if (unw_unwind_to_user(&info) < 0)
-			return -1;
+/* "asmlinkage" so the input arguments are preserved... */
 
-		switch (addr) {
-		      case PT_NAT_BITS:
-			return access_nat_bits(child, pt, &info,
-					       data, write_access);
+asmlinkage void
+syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
+		     long arg4, long arg5, long arg6, long arg7,
+		     struct pt_regs regs)
+{
+	if (unlikely(current->audit_context)) {
+		int success = AUDITSC_RESULT(regs.r10);
+		long result = regs.r8;
 
-		      case PT_R4: case PT_R5: case PT_R6: case PT_R7:
-			if (write_access) {
-				/* read NaT bit first: */
-				unsigned long dummy;
+		if (success != AUDITSC_SUCCESS)
+			result = -result;
+		audit_syscall_exit(success, result);
+	}
 
-				ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4,
-						 &dummy, &nat);
-				if (ret < 0)
-					return ret;
-			}
-			return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data,
-					     &nat, write_access);
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(&regs, 1);
+}
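+
+/*
+ * For reference, a minimal sketch of the ia64 syscall return convention
+ * the sign fix-up above relies on (the helper name is illustrative, not
+ * part of this patch): r10 is 0 on success and -1 on failure, with r8
+ * holding the result or the positive errno.
+ *
+ *	static inline long ia64_syscall_result(struct pt_regs *regs)
+ *	{
+ *		return regs->r10 ? -regs->r8 : regs->r8;
+ *	}
+ */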
 
-		      case PT_B1: case PT_B2: case PT_B3:
-		      case PT_B4: case PT_B5:
-			return unw_access_br(&info, (addr - PT_B1)/8 + 1, data,
-					     write_access);
-
-		      case PT_AR_EC:
-			return unw_access_ar(&info, UNW_AR_EC, data,
-					     write_access);
-
-		      case PT_AR_LC:
-			return unw_access_ar(&info, UNW_AR_LC, data,
-					     write_access);
-
-		      default:
-			if (addr >= PT_F2 && addr < PT_F5 + 16)
-				return access_fr(&info, (addr - PT_F2)/16 + 2,
-						 (addr & 8) != 0, data,
-						 write_access);
-			else if (addr >= PT_F16 && addr < PT_F31 + 16)
-				return access_fr(&info,
-						 (addr - PT_F16)/16 + 16,
-						 (addr & 8) != 0,
-						 data, write_access);
-			else {
-				dprintk("ptrace: rejecting access to register "
-					"address 0x%lx\n", addr);
-				return -1;
-			}
-		}
-	} else if (addr < PT_F9+16) {
-		/* scratch state */
-		switch (addr) {
-		      case PT_AR_BSP:
-			/*
-			 * By convention, we use PT_AR_BSP to refer to
-			 * the end of the user-level backing store.
-			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
-			 * to get the real value of ar.bsp at the time
-			 * the kernel was entered.
-			 *
-			 * Furthermore, when changing the contents of
-			 * PT_AR_BSP (or PT_CFM) we MUST copy any
-			 * users-level stacked registers that are
-			 * stored on the kernel stack back to
-			 * user-space because otherwise, we might end
-			 * up clobbering kernel stacked registers.
-			 * Also, if this happens while the task is
-			 * blocked in a system call, which convert the
-			 * state such that the non-system-call exit
-			 * path is used.  This ensures that the proper
-			 * state will be picked up when resuming
-			 * execution.  However, it *also* means that
-			 * once we write PT_AR_BSP/PT_CFM, it won't be
-			 * possible to modify the syscall arguments of
-			 * the pending system call any longer.  This
-			 * shouldn't be an issue because modifying
-			 * PT_AR_BSP/PT_CFM generally implies that
-			 * we're either abandoning the pending system
-			 * call or that we defer it's re-execution
-			 * (e.g., due to GDB doing an inferior
-			 * function call).
-			 */
-			urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
-			if (write_access) {
-				if (*data != urbs_end) {
-					if (ia64_sync_user_rbs(child, sw,
-							       pt->ar_bspstore,
-							       urbs_end) < 0)
-						return -1;
-					if (in_syscall(pt))
-						convert_to_non_syscall(child,
-								       pt,
-								       cfm);
-					/*
-					 * Simulate user-level write
-					 * of ar.bsp:
-					 */
-					pt->loadrs = 0;
-					pt->ar_bspstore = *data;
-				}
-			} else
-				*data = urbs_end;
-			return 0;
 
-		      case PT_CFM:
-			urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
-			if (write_access) {
-				if (((cfm ^ *data) & PFM_MASK) != 0) {
-					if (ia64_sync_user_rbs(child, sw,
-							       pt->ar_bspstore,
-							       urbs_end) < 0)
-						return -1;
-					if (in_syscall(pt))
-						convert_to_non_syscall(child,
-								       pt,
-								       cfm);
-					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
-						      | (*data & PFM_MASK));
-				}
-			} else
-				*data = cfm;
-			return 0;
+#ifdef CONFIG_UTRACE
 
-		      case PT_CR_IPSR:
-			if (write_access)
-				pt->cr_ipsr = ((*data & IPSR_MASK)
-					       | (pt->cr_ipsr & ~IPSR_MASK));
-			else
-				*data = (pt->cr_ipsr & IPSR_MASK);
-			return 0;
+/* Utrace implementation starts here */
 
-		      case PT_AR_RSC:
-			if (write_access)
-				pt->ar_rsc = *data | (3 << 2); /* force PL3 */
-			else
-				*data = pt->ar_rsc;
-			return 0;
+typedef struct utrace_get {
+	void *kbuf;
+	void __user *ubuf;
+} utrace_get_t;
+
+typedef struct utrace_set {
+	const void *kbuf;
+	const void __user *ubuf;
+} utrace_set_t;
+
+typedef struct utrace_getset {
+	struct task_struct *target;
+	const struct utrace_regset *regset;
+	union {
+		utrace_get_t get;
+		utrace_set_t set;
+	} u;
+	unsigned int pos;
+	unsigned int count;
+	int ret;
+} utrace_getset_t;
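+
+/*
+ * A minimal sketch of how this cursor is filled for a kernel-buffer
+ * read (field values illustrative; do_regset_call() below does this
+ * for real):
+ *
+ *	utrace_getset_t info = {
+ *		.target = task,
+ *		.regset = regset,
+ *		.u.get	= { .kbuf = buf, .ubuf = NULL },
+ *		.pos	= 0,		// byte offset into the regset
+ *		.count	= sizeof(buf),	// bytes still to transfer
+ *		.ret	= 0,
+ *	};
+ */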
 
-		      case PT_AR_RNAT:
-			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
-			rnat_addr = (long) ia64_rse_rnat_addr((long *)
-							      urbs_end);
-			if (write_access)
-				return ia64_poke(child, sw, urbs_end,
-						 rnat_addr, *data);
-			else
-				return ia64_peek(child, sw, urbs_end,
-						 rnat_addr, data);
+static int
+access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
+		unsigned long addr, unsigned long *data, int write_access)
+{
+	struct pt_regs *pt;
+	unsigned long *ptr = NULL;
+	int ret;
+	char nat = 0;
 
-		      case PT_R1:
-			ptr = pt_reg_addr(pt, r1);
-			break;
-		      case PT_R2:  case PT_R3:
-			ptr = pt_reg_addr(pt, r2) + (addr - PT_R2);
-			break;
-		      case PT_R8:  case PT_R9:  case PT_R10: case PT_R11:
-			ptr = pt_reg_addr(pt, r8) + (addr - PT_R8);
-			break;
-		      case PT_R12: case PT_R13:
-			ptr = pt_reg_addr(pt, r12) + (addr - PT_R12);
-			break;
-		      case PT_R14:
-			ptr = pt_reg_addr(pt, r14);
-			break;
-		      case PT_R15:
-			ptr = pt_reg_addr(pt, r15);
-			break;
-		      case PT_R16: case PT_R17: case PT_R18: case PT_R19:
-		      case PT_R20: case PT_R21: case PT_R22: case PT_R23:
-		      case PT_R24: case PT_R25: case PT_R26: case PT_R27:
-		      case PT_R28: case PT_R29: case PT_R30: case PT_R31:
-			ptr = pt_reg_addr(pt, r16) + (addr - PT_R16);
-			break;
-		      case PT_B0:
-			ptr = pt_reg_addr(pt, b0);
-			break;
-		      case PT_B6:
-			ptr = pt_reg_addr(pt, b6);
-			break;
-		      case PT_B7:
-			ptr = pt_reg_addr(pt, b7);
-			break;
-		      case PT_F6:  case PT_F6+8: case PT_F7: case PT_F7+8:
-		      case PT_F8:  case PT_F8+8: case PT_F9: case PT_F9+8:
-			ptr = pt_reg_addr(pt, f6) + (addr - PT_F6);
+	pt = task_pt_regs(target);
+	switch (addr) {
+		case ELF_GR_OFFSET(1):
+			ptr = &pt->r1;
 			break;
-		      case PT_AR_BSPSTORE:
-			ptr = pt_reg_addr(pt, ar_bspstore);
+		case ELF_GR_OFFSET(2):
+		case ELF_GR_OFFSET(3):
+			ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
 			break;
-		      case PT_AR_UNAT:
-			ptr = pt_reg_addr(pt, ar_unat);
-			break;
-		      case PT_AR_PFS:
-			ptr = pt_reg_addr(pt, ar_pfs);
-			break;
-		      case PT_AR_CCV:
-			ptr = pt_reg_addr(pt, ar_ccv);
-			break;
-		      case PT_AR_FPSR:
-			ptr = pt_reg_addr(pt, ar_fpsr);
+		case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
+			if (write_access) {
+				/* read NaT bit first: */
+				unsigned long dummy;
+
+				ret = unw_get_gr(info, addr/8, &dummy, &nat);
+				if (ret < 0)
+					return ret;
+			}
+			return unw_access_gr(info, addr/8, data, &nat, write_access);
+		case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
+			ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
 			break;
-		      case PT_CR_IIP:
-			ptr = pt_reg_addr(pt, cr_iip);
+		case ELF_GR_OFFSET(12):
+		case ELF_GR_OFFSET(13):
+			ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
 			break;
-		      case PT_PR:
-			ptr = pt_reg_addr(pt, pr);
+		case ELF_GR_OFFSET(14):
+			ptr = &pt->r14;
 			break;
-			/* scratch register */
-
-		      default:
-			/* disallow accessing anything else... */
-			dprintk("ptrace: rejecting access to register "
-				"address 0x%lx\n", addr);
-			return -1;
-		}
-	} else if (addr <= PT_AR_SSD) {
-		ptr = pt_reg_addr(pt, ar_csd) + (addr - PT_AR_CSD);
-	} else {
-		/* access debug registers */
-
-		if (addr >= PT_IBR) {
-			regnum = (addr - PT_IBR) >> 3;
-			ptr = &child->thread.ibr[0];
-		} else {
-			regnum = (addr - PT_DBR) >> 3;
-			ptr = &child->thread.dbr[0];
-		}
-
-		if (regnum >= 8) {
-			dprintk("ptrace: rejecting access to register "
-				"address 0x%lx\n", addr);
-			return -1;
-		}
-#ifdef CONFIG_PERFMON
-		/*
-		 * Check if debug registers are used by perfmon. This
-		 * test must be done once we know that we can do the
-		 * operation, i.e. the arguments are all valid, but
-		 * before we start modifying the state.
-		 *
-		 * Perfmon needs to keep a count of how many processes
-		 * are trying to modify the debug registers for system
-		 * wide monitoring sessions.
-		 *
-		 * We also include read access here, because they may
-		 * cause the PMU-installed debug register state
-		 * (dbr[], ibr[]) to be reset. The two arrays are also
-		 * used by perfmon, but we do not use
-		 * IA64_THREAD_DBG_VALID. The registers are restored
-		 * by the PMU context switch code.
-		 */
-		if (pfm_use_debug_registers(child)) return -1;
-#endif
-
-		if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
-			child->thread.flags |= IA64_THREAD_DBG_VALID;
-			memset(child->thread.dbr, 0,
-			       sizeof(child->thread.dbr));
-			memset(child->thread.ibr, 0,
-			       sizeof(child->thread.ibr));
-		}
-
-		ptr += regnum;
-
-		if ((regnum & 1) && write_access) {
-			/* don't let the user set kernel-level breakpoints: */
-			*ptr = *data & ~(7UL << 56);
-			return 0;
-		}
+		case ELF_GR_OFFSET(15):
+			ptr = &pt->r15;
 	}
 	if (write_access)
 		*ptr = *data;
@@ -1104,567 +840,823 @@ access_uarea (struct task_struct *child,
 	return 0;
 }
 
-static long
-ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
+static int
+access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
+		unsigned long addr, unsigned long *data, int write_access)
 {
-	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
-	struct unw_frame_info info;
-	struct ia64_fpreg fpval;
-	struct switch_stack *sw;
 	struct pt_regs *pt;
-	long ret, retval = 0;
-	char nat = 0;
-	int i;
+	unsigned long *ptr = NULL;
 
-	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
-		return -EIO;
-
-	pt = task_pt_regs(child);
-	sw = (struct switch_stack *) (child->thread.ksp + 16);
-	unw_init_from_blocked_task(&info, child);
-	if (unw_unwind_to_user(&info) < 0) {
-		return -EIO;
-	}
-
-	if (((unsigned long) ppr & 0x7) != 0) {
-		dprintk("ptrace:unaligned register address %p\n", ppr);
-		return -EIO;
+	pt = task_pt_regs(target);
+	switch (addr) {
+		case ELF_BR_OFFSET(0):
+			ptr = &pt->b0;
+			break;
+		case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
+			return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
+					data, write_access);
+		case ELF_BR_OFFSET(6):
+			ptr = &pt->b6;
+			break;
+		case ELF_BR_OFFSET(7):
+			ptr = &pt->b7;
 	}
+	if (write_access)
+		*ptr = *data;
+	else
+		*data = *ptr;
+	return 0;
+}
 
-	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
-	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
-	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
-	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
-	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
-	    || access_uarea(child, PT_CFM, &cfm, 0)
-	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
-		return -EIO;
+static int
+access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
+		unsigned long addr, unsigned long *data, int write_access)
+{
+	struct pt_regs *pt;
+	unsigned long cfm, urbs_end, rnat_addr;
+	unsigned long *ptr = NULL;
 
-	/* control regs */
+	pt = task_pt_regs(target);
+	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
+		switch (addr) {
+			case ELF_AR_RSC_OFFSET:
+				/* force PL3 */
+				if (write_access)
+					pt->ar_rsc = *data | (3 << 2);
+				else
+					*data = pt->ar_rsc;
+				return 0;
+			case ELF_AR_BSP_OFFSET:
+				/*
+				 * By convention, we use PT_AR_BSP to refer to
+				 * the end of the user-level backing store.
+				 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
+				 * to get the real value of ar.bsp at the time
+				 * the kernel was entered.
+				 *
+				 * Furthermore, when changing the contents of
+				 * PT_AR_BSP (or PT_CFM) we MUST copy any
+				 * user-level stacked registers that are
+				 * stored on the kernel stack back to
+				 * user-space because otherwise, we might end
+				 * up clobbering kernel stacked registers.
+				 * Also, if this happens while the task is
+				 * blocked in a system call, we convert the
+				 * state so that the non-system-call exit
+				 * path is used.  This ensures that the proper
+				 * state will be picked up when resuming
+				 * execution.  However, it *also* means that
+				 * once we write PT_AR_BSP/PT_CFM, it won't be
+				 * possible to modify the syscall arguments of
+				 * the pending system call any longer.  This
+				 * shouldn't be an issue because modifying
+				 * PT_AR_BSP/PT_CFM generally implies that
+				 * we're either abandoning the pending system
+				 * call or that we defer its re-execution
+				 * (e.g., due to GDB doing an inferior
+				 * function call).
+				 */
+				urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
+				if (write_access) {
+					if (*data != urbs_end) {
+						if (ia64_sync_user_rbs(target, info->sw,
+									pt->ar_bspstore,
+									urbs_end) < 0)
+							return -1;
+						if (in_syscall(pt))
+							convert_to_non_syscall(target,
+									pt,
+									cfm);
+						/*
+						 * Simulate user-level write
+						 * of ar.bsp:
+						 */
+						pt->loadrs = 0;
+						pt->ar_bspstore = *data;
+					}
+				} else
+					*data = urbs_end;
+				return 0;
+			case ELF_AR_BSPSTORE_OFFSET: // ar.bspstore
+				ptr = &pt->ar_bspstore;
+				break;
+			case ELF_AR_RNAT_OFFSET:  // ar_rnat
+				urbs_end = ia64_get_user_rbs_end(target, pt, NULL);
+				rnat_addr = (long) ia64_rse_rnat_addr((long *)
+						urbs_end);
+				if (write_access)
+					return ia64_poke(target, info->sw, urbs_end,
+							rnat_addr, *data);
+				else
+					return ia64_peek(target, info->sw, urbs_end,
+							rnat_addr, data);
+			case ELF_AR_CCV_OFFSET:   // ar_ccv
+				ptr = &pt->ar_ccv;
+				break;
+			case ELF_AR_UNAT_OFFSET:	// ar_unat
+				ptr = &pt->ar_unat;
+				break;
+			case ELF_AR_FPSR_OFFSET:  // ar_fpsr
+				ptr = &pt->ar_fpsr;
+				break;
+			case ELF_AR_PFS_OFFSET:  // ar_pfs
+				ptr = &pt->ar_pfs;
+				break;
+			case ELF_AR_LC_OFFSET:   // ar_lc
+				return unw_access_ar(info, UNW_AR_LC, data,
+						write_access);
+			case ELF_AR_EC_OFFSET:    // ar_ec
+				return unw_access_ar(info, UNW_AR_EC, data,
+						write_access);
+			case ELF_AR_CSD_OFFSET:   // ar_csd
+				ptr = &pt->ar_csd;
+				break;
+			case ELF_AR_SSD_OFFSET:   // ar_ssd
+				ptr = &pt->ar_ssd;
+		}
+	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
+		switch (addr) {
+			case ELF_CR_IIP_OFFSET:
+				ptr = &pt->cr_iip;
+				break;
+			case ELF_CFM_OFFSET:
+				urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
+				if (write_access) {
+					if (((cfm ^ *data) & PFM_MASK) != 0) {
+						if (ia64_sync_user_rbs(target, info->sw,
+									pt->ar_bspstore,
+									urbs_end) < 0)
+							return -1;
+						if (in_syscall(pt))
+							convert_to_non_syscall(target,
+									pt,
+									cfm);
+						pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
+								| (*data & PFM_MASK));
+					}
+				} else
+					*data = cfm;
+				return 0;
+			case ELF_CR_IPSR_OFFSET:
+				if (write_access)
+					pt->cr_ipsr = ((*data & IPSR_MASK)
+							| (pt->cr_ipsr & ~IPSR_MASK));
+				else
+					*data = (pt->cr_ipsr & IPSR_MASK);
+				return 0;
+		}
+	} else if (addr == ELF_NAT_OFFSET)
+		return access_nat_bits(target, pt, info,
+				       data, write_access);
+	else if (addr == ELF_PR_OFFSET)
+		ptr = &pt->pr;
+	else
+		return -1;
 
-	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
-	retval |= __put_user(psr, &ppr->cr_ipsr);
+	if (write_access)
+		*ptr = *data;
+	else
+		*data = *ptr;
 
-	/* app regs */
+	return 0;
+}
 
-	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
-	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
-	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
-	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
-	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
-	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
+static int
+access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
+		unsigned long addr, unsigned long *data, int write_access)
+{
+	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
+		return access_elf_gpreg(target, info, addr, data, write_access);
+	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
+		return access_elf_breg(target, info, addr, data, write_access);
+	else
+		return access_elf_areg(target, info, addr, data, write_access);
+}
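+
+/*
+ * Usage sketch (hedged; "sp" is illustrative): reading one register
+ * through the dispatcher, given an already-unwound frame:
+ *
+ *	unsigned long sp;	// gr12 is the user stack pointer
+ *	if (access_elf_reg(target, info, ELF_GR_OFFSET(12), &sp, 0) < 0)
+ *		return -EIO;
+ */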
 
-	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
-	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
-	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
-	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
-	retval |= __put_user(cfm, &ppr->cfm);
+void do_gpregs_get(struct unw_frame_info *info, void *arg)
+{
+	struct pt_regs *pt;
+	utrace_getset_t *dst = arg;
+	elf_greg_t tmp[16];
+	unsigned int i, index, min_copy;
 
-	/* gr1-gr3 */
+	if (unw_unwind_to_user(info) < 0)
+		return;
 
-	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
-	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
+	/*
+	 * coredump format:
+	 *      r0-r31
+	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
+	 *      predicate registers (p0-p63)
+	 *      b0-b7
+	 *      ip cfm user-mask
+	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
+	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
+	 */
 
-	/* gr4-gr7 */
 
-	for (i = 4; i < 8; i++) {
-		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
-			return -EIO;
-		retval |= __put_user(val, &ppr->gr[i]);
+	/* Skip r0 */
+	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
+		dst->ret = utrace_regset_copyout_zero(&dst->pos, &dst->count,
+						      &dst->u.get.kbuf,
+						      &dst->u.get.ubuf,
+						      0, ELF_GR_OFFSET(1));
+		if (dst->ret || dst->count == 0)
+			return;
 	}
 
-	/* gr8-gr11 */
-
-	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
-
-	/* gr12-gr15 */
-
-	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
-	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
-	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
-
-	/* gr16-gr31 */
-
-	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
-
-	/* b0 */
-
-	retval |= __put_user(pt->b0, &ppr->br[0]);
-
-	/* b1-b5 */
-
-	for (i = 1; i < 6; i++) {
-		if (unw_access_br(&info, i, &val, 0) < 0)
-			return -EIO;
-		__put_user(val, &ppr->br[i]);
+	/* gr1 - gr15 */
+	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
+		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
+		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
+			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
+		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), index++)
+			if (access_elf_reg(dst->target, info, i,
+						&tmp[index], 0) < 0) {
+				dst->ret = -EIO;
+				return;
+			}
+		dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
+				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
+		if (dst->ret || dst->count == 0)
+			return;
 	}
 
-	/* b6-b7 */
-
-	retval |= __put_user(pt->b6, &ppr->br[6]);
-	retval |= __put_user(pt->b7, &ppr->br[7]);
-
-	/* fr2-fr5 */
-
-	for (i = 2; i < 6; i++) {
-		if (unw_get_fr(&info, i, &fpval) < 0)
-			return -EIO;
-		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
+	/* r16-r31 */
+	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
+		pt = task_pt_regs(dst->target);
+		dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
+				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
+		if (dst->ret || dst->count == 0)
+			return;
 	}
 
-	/* fr6-fr11 */
-
-	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
-				 sizeof(struct ia64_fpreg) * 6);
-
-	/* fp scratch regs(12-15) */
-
-	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
-				 sizeof(struct ia64_fpreg) * 4);
-
-	/* fr16-fr31 */
-
-	for (i = 16; i < 32; i++) {
-		if (unw_get_fr(&info, i, &fpval) < 0)
-			return -EIO;
-		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
+	/* nat, pr, b0 - b7 */
+	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
+		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
+		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
+			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
+		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), index++)
+			if (access_elf_reg(dst->target, info, i,
+						&tmp[index], 0) < 0) {
+				dst->ret = -EIO;
+				return;
+			}
+		dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
+				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
+		if (dst->ret || dst->count == 0)
+			return;
 	}
 
-	/* fph */
-
-	ia64_flush_fph(child);
-	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
-				 sizeof(ppr->fr[32]) * 96);
-
-	/*  preds */
-
-	retval |= __put_user(pt->pr, &ppr->pr);
-
-	/* nat bits */
-
-	retval |= __put_user(nat_bits, &ppr->nat);
-
-	ret = retval ? -EIO : 0;
-	return ret;
+	/*
+	 * ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
+	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
+	 */
+	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
+		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
+		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
+			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
+		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), index++)
+			if (access_elf_reg(dst->target, info, i,
+						&tmp[index], 0) < 0) {
+				dst->ret = -EIO;
+				return;
+			}
+		dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
+				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
+	}
 }
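+
+/*
+ * For orientation (hedged; the ELF_*_OFFSET macros are defined in a
+ * part of the file not shown in this hunk), the byte map walked by the
+ * blocks above appears to be:
+ *
+ *	[ELF_GR_OFFSET(0)  .. ELF_GR_OFFSET(16))   r0-r15 (r0 reads as 0)
+ *	[ELF_GR_OFFSET(16) .. ELF_NAT_OFFSET)      r16-r31, straight from pt_regs
+ *	[ELF_NAT_OFFSET    .. ELF_CR_IIP_OFFSET)   NaT bits, pr, b0-b7
+ *	[ELF_CR_IIP_OFFSET .. ELF_AR_END_OFFSET)   ip, cfm, psr and the ar.* set
+ */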
 
-static long
-ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
+void do_gpregs_set(struct unw_frame_info *info, void *arg)
 {
-	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
-	struct unw_frame_info info;
-	struct switch_stack *sw;
-	struct ia64_fpreg fpval;
 	struct pt_regs *pt;
-	long ret, retval = 0;
-	int i;
+	utrace_getset_t *dst = arg;
+	elf_greg_t tmp[16];
+	unsigned int i, index;
 
-	memset(&fpval, 0, sizeof(fpval));
-
-	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
-		return -EIO;
+	if (unw_unwind_to_user(info) < 0)
+		return;
 
-	pt = task_pt_regs(child);
-	sw = (struct switch_stack *) (child->thread.ksp + 16);
-	unw_init_from_blocked_task(&info, child);
-	if (unw_unwind_to_user(&info) < 0) {
-		return -EIO;
+	/* Skip r0 */
+	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
+		dst->ret = utrace_regset_copyin_ignore(&dst->pos, &dst->count,
+						       &dst->u.set.kbuf,
+						       &dst->u.set.ubuf,
+						       0, ELF_GR_OFFSET(1));
+		if (dst->ret || dst->count == 0)
+			return;
 	}
 
-	if (((unsigned long) ppr & 0x7) != 0) {
-		dprintk("ptrace:unaligned register address %p\n", ppr);
-		return -EIO;
+	/* gr1-gr15 */
+	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
+		i = dst->pos;
+		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
+		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
+				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
+		if (dst->ret)
+			return;
+		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
+			if (access_elf_reg(dst->target, info, i,
+						&tmp[index], 1) < 0) {
+				dst->ret = -EIO;
+				return;
+			}
+		if (dst->count == 0)
+			return;
 	}
 
-	/* control regs */
-
-	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
-	retval |= __get_user(psr, &ppr->cr_ipsr);
-
-	/* app regs */
-
-	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
-	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
-	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
-	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
-	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
-	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
-
-	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
-	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
-	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
-	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
-	retval |= __get_user(cfm, &ppr->cfm);
-
-	/* gr1-gr3 */
-
-	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
-	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
-
-	/* gr4-gr7 */
-
-	for (i = 4; i < 8; i++) {
-		retval |= __get_user(val, &ppr->gr[i]);
-		/* NaT bit will be set via PT_NAT_BITS: */
-		if (unw_set_gr(&info, i, val, 0) < 0)
-			return -EIO;
+	/* gr16-gr31 */
+	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
+		pt = task_pt_regs(dst->target);
+		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
+				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
+		if (dst->ret || dst->count == 0)
+			return;
 	}
 
-	/* gr8-gr11 */
-
-	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
-
-	/* gr12-gr15 */
-
-	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
-	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
-	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
-
-	/* gr16-gr31 */
+	/* nat, pr, b0 - b7 */
+	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
+		i = dst->pos;
+		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
+		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
+				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
+		if (dst->ret)
+			return;
+		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
+			if (access_elf_reg(dst->target, info, i,
+						&tmp[index], 1) < 0) {
+				dst->ret = -EIO;
+				return;
+			}
+		if (dst->count == 0)
+			return;
+	}
 
-	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
+	/*
+	 * ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
+	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
+	 */
+	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
+		i = dst->pos;
+		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
+		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
+				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
+		if (dst->ret)
+			return;
+		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
+			if (access_elf_reg(dst->target, info, i,
+						&tmp[index], 1) < 0) {
+				dst->ret = -EIO;
+				return;
+			}
+	}
+}
 
-	/* b0 */
+#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))
 
-	retval |= __get_user(pt->b0, &ppr->br[0]);
+void do_fpregs_get(struct unw_frame_info *info, void *arg)
+{
+	utrace_getset_t *dst = arg;
+	struct task_struct *task = dst->target;
+	elf_fpreg_t tmp[30];
+	int index, min_copy, i;
 
-	/* b1-b5 */
+	if (unw_unwind_to_user(info) < 0)
+		return;
 
-	for (i = 1; i < 6; i++) {
-		retval |= __get_user(val, &ppr->br[i]);
-		unw_set_br(&info, i, val);
+	/* Skip pos 0 and 1 */
+	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
+		dst->ret = utrace_regset_copyout_zero(&dst->pos, &dst->count,
+						      &dst->u.get.kbuf,
+						      &dst->u.get.ubuf,
+						      0, ELF_FP_OFFSET(2));
+		if (dst->count == 0 || dst->ret)
+			return;
 	}
 
-	/* b6-b7 */
-
-	retval |= __get_user(pt->b6, &ppr->br[6]);
-	retval |= __get_user(pt->b7, &ppr->br[7]);
-
-	/* fr2-fr5 */
-
-	for (i = 2; i < 6; i++) {
-		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
-		if (unw_set_fr(&info, i, fpval) < 0)
-			return -EIO;
+	/* fr2-fr31 */
+	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
+		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);
+		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
+				dst->pos + dst->count);
+		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t), index++)
+			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
+					 &tmp[index])) {
+				dst->ret = -EIO;
+				return;
+			}
+		dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
+				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
+		if (dst->count == 0 || dst->ret)
+			return;
 	}
 
-	/* fr6-fr11 */
+	/* fph */
+	if (dst->count > 0) {
+		ia64_flush_fph(dst->target);
+		if (task->thread.flags & IA64_THREAD_FPH_VALID)
+			dst->ret = utrace_regset_copyout(
+				&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf,
+				&dst->target->thread.fph,
+				ELF_FP_OFFSET(32), -1);
+		else
+			/* Zero fill instead.  */
+			dst->ret = utrace_regset_copyout_zero(
+				&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf,
+				ELF_FP_OFFSET(32), -1);
+	}
+}
 
-	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
-				   sizeof(ppr->fr[6]) * 6);
+void do_fpregs_set(struct unw_frame_info *info, void *arg)
+{
+	utrace_getset_t *dst = arg;
+	elf_fpreg_t fpreg, tmp[30];
+	int index, start, end;
 
-	/* fp scratch regs(12-15) */
+	if (unw_unwind_to_user(info) < 0)
+		return;
 
-	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
-				   sizeof(ppr->fr[12]) * 4);
+	/* Skip pos 0 and 1 */
+	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
+		dst->ret = utrace_regset_copyin_ignore(&dst->pos, &dst->count,
+						       &dst->u.set.kbuf,
+						       &dst->u.set.ubuf,
+						       0, ELF_FP_OFFSET(2));
+		if (dst->count == 0 || dst->ret)
+			return;
+	}
 
-	/* fr16-fr31 */
+	/* fr2-fr31 */
+	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
+		start = dst->pos;
+		end = min(((unsigned int)ELF_FP_OFFSET(32)),
+			 dst->pos + dst->count);
+		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
+				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
+		if (dst->ret)
+			return;
 
-	for (i = 16; i < 32; i++) {
-		retval |= __copy_from_user(&fpval, &ppr->fr[i],
-					   sizeof(fpval));
-		if (unw_set_fr(&info, i, fpval) < 0)
-			return -EIO;
+		if (start & 0xF) { /* unaligned start: keep current low half */
+			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
+					 &fpreg)) {
+				dst->ret = -EIO;
+				return;
+			}
+			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
+				= fpreg.u.bits[0];
+			start &= ~0xFUL;
+		}
+		if (end & 0xF) { /* unaligned end: keep current high half */
+			if (unw_get_fr(info, end / sizeof(elf_fpreg_t), &fpreg)) {
+				dst->ret = -EIO;
+				return;
+			}
+			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
+				= fpreg.u.bits[1];
+			end = (end + 0xF) & ~0xFUL;
+		}
+
+		for (; start < end; start += sizeof(elf_fpreg_t)) {
+			index = start / sizeof(elf_fpreg_t);
+			if (unw_set_fr(info, index, tmp[index - 2])) {
+				dst->ret = -EIO;
+				return;
+			}
+		}
+		if (dst->ret || dst->count == 0)
+			return;
 	}
 
 	/* fph */
+	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
+		ia64_sync_fph(dst->target);
+		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
+						&dst->u.set.kbuf,
+						&dst->u.set.ubuf,
+						&dst->target->thread.fph,
+						ELF_FP_OFFSET(32), -1);
+	}
+}
 
-	ia64_sync_fph(child);
-	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
-				   sizeof(ppr->fr[32]) * 96);
+static int
+do_regset_call(void (*call)(struct unw_frame_info *, void *),
+	       struct task_struct *target,
+	       const struct utrace_regset *regset,
+	       unsigned int pos, unsigned int count,
+	       const void *kbuf, const void __user *ubuf)
+{
+	utrace_getset_t info = { .target = target, .regset = regset,
+				 .pos = pos, .count = count,
+				 .u.set = { .kbuf = kbuf, .ubuf = ubuf },
+				 .ret = 0 };
 
-	/* preds */
+	if (target == current)
+		unw_init_running(call, &info);
+	else {
+		struct unw_frame_info ufi;
+		memset(&ufi, 0, sizeof(ufi));
+		unw_init_from_blocked_task(&ufi, target);
+		(*call)(&ufi, &info);
+	}
 
-	retval |= __get_user(pt->pr, &ppr->pr);
+	return info.ret;
+}
 
-	/* nat bits */
+static int
+gpregs_get(struct task_struct *target,
+	   const struct utrace_regset *regset,
+	   unsigned int pos, unsigned int count,
+	   void *kbuf, void __user *ubuf)
+{
+	return do_regset_call(do_gpregs_get, target, regset, pos, count, kbuf, ubuf);
+}
 
-	retval |= __get_user(nat_bits, &ppr->nat);
+static int gpregs_set(struct task_struct *target,
+		const struct utrace_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	return do_regset_call(do_gpregs_set, target, regset, pos, count, kbuf, ubuf);
+}
 
-	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
-	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
-	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
-	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
-	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
-	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
-	retval |= access_uarea(child, PT_CFM, &cfm, 1);
-	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
+static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
+{
+	struct pt_regs *pt;
+	utrace_getset_t *dst = arg;
+	unsigned long urbs_end;
 
-	ret = retval ? -EIO : 0;
-	return ret;
+	if (unw_unwind_to_user(info) < 0)
+		return;
+	pt = task_pt_regs(dst->target);
+	urbs_end = ia64_get_user_rbs_end(dst->target, pt, NULL);
+	dst->ret = ia64_sync_user_rbs(dst->target, info->sw, pt->ar_bspstore, urbs_end);
 }
-
 /*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure the single step bit is not set.
+ * This is called to write back the register backing store.
+ * ptrace does this before it stops, so that a tracer reading the user
+ * memory after the thread stops will get the current register data.
  */
-void
-ptrace_disable (struct task_struct *child)
+static int
+gpregs_writeback(struct task_struct *target,
+		 const struct utrace_regset *regset,
+		 int now)
 {
-	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
-
-	/* make sure the single step/taken-branch trap bits are not set: */
-	child_psr->ss = 0;
-	child_psr->tb = 0;
+	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0, NULL, NULL);
 }
 
-asmlinkage long
-sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
+static int
+fpregs_active(struct task_struct *target, const struct utrace_regset *regset)
 {
-	struct pt_regs *pt;
-	unsigned long urbs_end, peek_or_poke;
-	struct task_struct *child;
-	struct switch_stack *sw;
-	long ret;
-
-	lock_kernel();
-	ret = -EPERM;
-	if (request == PTRACE_TRACEME) {
-		ret = ptrace_traceme();
-		goto out;
-	}
-
-	peek_or_poke = (request == PTRACE_PEEKTEXT
-			|| request == PTRACE_PEEKDATA
-			|| request == PTRACE_POKETEXT
-			|| request == PTRACE_POKEDATA);
-	ret = -ESRCH;
-	read_lock(&tasklist_lock);
-	{
-		child = find_task_by_pid(pid);
-		if (child) {
-			if (peek_or_poke)
-				child = find_thread_for_addr(child, addr);
-			get_task_struct(child);
-		}
-	}
-	read_unlock(&tasklist_lock);
-	if (!child)
-		goto out;
-	ret = -EPERM;
-	if (pid == 1)		/* no messing around with init! */
-		goto out_tsk;
-
-	if (request == PTRACE_ATTACH) {
-		ret = ptrace_attach(child);
-		goto out_tsk;
-	}
-
-	ret = ptrace_check_attach(child, request == PTRACE_KILL);
-	if (ret < 0)
-		goto out_tsk;
-
-	pt = task_pt_regs(child);
-	sw = (struct switch_stack *) (child->thread.ksp + 16);
-
-	switch (request) {
-	      case PTRACE_PEEKTEXT:
-	      case PTRACE_PEEKDATA:
-		/* read word at location addr */
-		urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
-		ret = ia64_peek(child, sw, urbs_end, addr, &data);
-		if (ret == 0) {
-			ret = data;
-			/* ensure "ret" is not mistaken as an error code: */
-			force_successful_syscall_return();
-		}
-		goto out_tsk;
-
-	      case PTRACE_POKETEXT:
-	      case PTRACE_POKEDATA:
-		/* write the word at location addr */
-		urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
-		ret = ia64_poke(child, sw, urbs_end, addr, data);
-		goto out_tsk;
-
-	      case PTRACE_PEEKUSR:
-		/* read the word at addr in the USER area */
-		if (access_uarea(child, addr, &data, 0) < 0) {
-			ret = -EIO;
-			goto out_tsk;
-		}
-		ret = data;
-		/* ensure "ret" is not mistaken as an error code */
-		force_successful_syscall_return();
-		goto out_tsk;
-
-	      case PTRACE_POKEUSR:
-		/* write the word at addr in the USER area */
-		if (access_uarea(child, addr, &data, 1) < 0) {
-			ret = -EIO;
-			goto out_tsk;
-		}
-		ret = 0;
-		goto out_tsk;
-
-	      case PTRACE_OLD_GETSIGINFO:
-		/* for backwards-compatibility */
-		ret = ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
-		goto out_tsk;
-
-	      case PTRACE_OLD_SETSIGINFO:
-		/* for backwards-compatibility */
-		ret = ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
-		goto out_tsk;
-
-	      case PTRACE_SYSCALL:
-		/* continue and stop at next (return from) syscall */
-	      case PTRACE_CONT:
-		/* restart after signal. */
-		ret = -EIO;
-		if (!valid_signal(data))
-			goto out_tsk;
-		if (request == PTRACE_SYSCALL)
-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		else
-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		child->exit_code = data;
-
-		/*
-		 * Make sure the single step/taken-branch trap bits
-		 * are not set:
-		 */
-		ia64_psr(pt)->ss = 0;
-		ia64_psr(pt)->tb = 0;
-
-		wake_up_process(child);
-		ret = 0;
-		goto out_tsk;
-
-	      case PTRACE_KILL:
-		/*
-		 * Make the child exit.  Best I can do is send it a
-		 * sigkill.  Perhaps it should be put in the status
-		 * that it wants to exit.
-		 */
-		if (child->exit_state == EXIT_ZOMBIE)
-			/* already dead */
-			goto out_tsk;
-		child->exit_code = SIGKILL;
-
-		ptrace_disable(child);
-		wake_up_process(child);
-		ret = 0;
-		goto out_tsk;
-
-	      case PTRACE_SINGLESTEP:
-		/* let child execute for one instruction */
-	      case PTRACE_SINGLEBLOCK:
-		ret = -EIO;
-		if (!valid_signal(data))
-			goto out_tsk;
-
-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		if (request == PTRACE_SINGLESTEP) {
-			ia64_psr(pt)->ss = 1;
-		} else {
-			ia64_psr(pt)->tb = 1;
-		}
-		child->exit_code = data;
+	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
+}
 
-		/* give it a chance to run. */
-		wake_up_process(child);
-		ret = 0;
-		goto out_tsk;
-
-	      case PTRACE_DETACH:
-		/* detach a process that was attached. */
-		ret = ptrace_detach(child, data);
-		goto out_tsk;
-
-	      case PTRACE_GETREGS:
-		ret = ptrace_getregs(child,
-				     (struct pt_all_user_regs __user *) data);
-		goto out_tsk;
-
-	      case PTRACE_SETREGS:
-		ret = ptrace_setregs(child,
-				     (struct pt_all_user_regs __user *) data);
-		goto out_tsk;
-
-	      default:
-		ret = ptrace_request(child, request, addr, data);
-		goto out_tsk;
-	}
-  out_tsk:
-	put_task_struct(child);
-  out:
-	unlock_kernel();
-	return ret;
+static int fpregs_get(struct task_struct *target,
+		const struct utrace_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	return do_regset_call(do_fpregs_get, target, regset, pos, count, kbuf, ubuf);
 }
 
+static int fpregs_set(struct task_struct *target,
+		const struct utrace_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	return do_regset_call(do_fpregs_set, target, regset, pos, count, kbuf, ubuf);
+}
 
-void
-syscall_trace (void)
+static int dbregs_get(struct task_struct *target,
+		const struct utrace_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
 {
-	if (!test_thread_flag(TIF_SYSCALL_TRACE))
-		return;
-	if (!(current->ptrace & PT_PTRACED))
-		return;
-	/*
-	 * The 0x80 provides a way for the tracing parent to
-	 * distinguish between a syscall stop and SIGTRAP delivery.
-	 */
-	ptrace_notify(SIGTRAP
-		      | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
+	int ret;
 
+#ifdef CONFIG_PERFMON
 	/*
-	 * This isn't the same as continuing with a signal, but it
-	 * will do for normal use.  strace only continues with a
-	 * signal if the stopping signal is not SIGTRAP.  -brl
+	 * Check if debug registers are used by perfmon. This
+	 * test must be done once we know that we can do the
+	 * operation, i.e. the arguments are all valid, but
+	 * before we start modifying the state.
+	 *
+	 * Perfmon needs to keep a count of how many processes
+	 * are trying to modify the debug registers for system
+	 * wide monitoring sessions.
+	 *
+	 * We also include read access here, because reads may
+	 * cause the PMU-installed debug register state
+	 * (dbr[], ibr[]) to be reset. The two arrays are also
+	 * used by perfmon, but we do not use
+	 * IA64_THREAD_DBG_VALID. The registers are restored
+	 * by the PMU context switch code.
 	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
+	if (pfm_use_debug_registers(target))
+		return -EIO;
+#endif
+
+	if (!(target->thread.flags & IA64_THREAD_DBG_VALID))
+		ret = utrace_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+						 0, -1);
+	else {
+		preempt_disable();
+		if (target == current)
+			ia64_load_debug_regs(&target->thread.dbr[0]);
+		preempt_enable_no_resched();
+		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					    &target->thread.dbr, 0, -1);
 	}
-}
 
-/* "asmlinkage" so the input arguments are preserved... */
+	return ret;
+}
 
-asmlinkage void
-syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
-		     long arg4, long arg5, long arg6, long arg7,
-		     struct pt_regs regs)
+static int dbregs_set(struct task_struct *target,
+		const struct utrace_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
 {
-	if (test_thread_flag(TIF_SYSCALL_TRACE) 
-	    && (current->ptrace & PT_PTRACED))
-		syscall_trace();
-
-	if (unlikely(current->audit_context)) {
-		long syscall;
-		int arch;
+	int i, ret;
 
-		if (IS_IA32_PROCESS(&regs)) {
-			syscall = regs.r1;
-			arch = AUDIT_ARCH_I386;
-		} else {
-			syscall = regs.r15;
-			arch = AUDIT_ARCH_IA64;
-		}
+#ifdef CONFIG_PERFMON
+	if (pfm_use_debug_registers(target))
+		return -EIO;
+#endif
 
-		audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
+	ret = 0;
+	if (!(target->thread.flags & IA64_THREAD_DBG_VALID)) {
+		target->thread.flags |= IA64_THREAD_DBG_VALID;
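+		/* dbr[] and ibr[] are adjacent in thread_struct: clear both */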
+		memset(target->thread.dbr, 0, 2 * sizeof(target->thread.dbr));
+	} else if (target == current) {
+		preempt_disable();
+		ia64_save_debug_regs(&target->thread.dbr[0]);
+		preempt_enable_no_resched();
 	}
 
-}
+	ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				   &target->thread.dbr, 0, -1);
 
-/* "asmlinkage" so the input arguments are preserved... */
+	for (i = 1; i < IA64_NUM_DBG_REGS; i += 2) {
+		target->thread.dbr[i] &= ~(7UL << 56);
+		target->thread.ibr[i] &= ~(7UL << 56);
+	}
 
-asmlinkage void
-syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
-		     long arg4, long arg5, long arg6, long arg7,
-		     struct pt_regs regs)
-{
-	if (unlikely(current->audit_context)) {
-		int success = AUDITSC_RESULT(regs.r10);
-		long result = regs.r8;
+	if (ret)
+		return ret;
 
-		if (success != AUDITSC_SUCCESS)
-			result = -result;
-		audit_syscall_exit(success, result);
+	if (target == current){
+		preempt_disable();
+		ia64_load_debug_regs(&target->thread.dbr[0]);
+		preempt_enable_no_resched();
 	}
+	return 0;
+}
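+
+/*
+ * A note on the mask above (hedged): bits 56-58 of each odd dbr/ibr
+ * word are the PL0-PL2 enable bits of the privilege-level mask, so
+ * clearing them leaves only the PL3 (user-level) enable intact and
+ * keeps a traced process from planting kernel-level breakpoints,
+ * matching the policy the old access_uarea() code applied per write.
+ */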
 
-	if (test_thread_flag(TIF_SYSCALL_TRACE)
-	    && (current->ptrace & PT_PTRACED))
-		syscall_trace();
+static const struct utrace_regset native_regsets[] = {
+	{
+		.n = ELF_NGREG,
+		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
+		.get = gpregs_get, .set = gpregs_set,
+		.writeback = gpregs_writeback
+	},
+	{
+		.n = ELF_NFPREG,
+		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
+		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
+	},
+	{
+		.n = 2 * IA64_NUM_DBG_REGS, .size = sizeof(long),
+		.align = sizeof(long),
+		.get = dbregs_get, .set = dbregs_set
+	}
+};
+
+const struct utrace_regset_view utrace_ia64_native = {
+	.name = "ia64",
+	.e_machine = EM_IA_64,
+	.regsets = native_regsets,
+	.n = sizeof native_regsets / sizeof native_regsets[0],
+};
+EXPORT_SYMBOL_GPL(utrace_ia64_native);
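+
+/*
+ * A hedged sketch of how a tracing engine might read the general
+ * registers through this view (error handling elided; assumes the
+ * utrace regset API introduced elsewhere in this patch):
+ *
+ *	const struct utrace_regset_view *view = utrace_native_view(target);
+ *	const struct utrace_regset *rs = &view->regsets[0]; // gpregs
+ *	elf_gregset_t gregs;
+ *
+ *	if ((*rs->get)(target, rs, 0, sizeof(gregs), gregs, NULL))
+ *		return -EIO;
+ */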
+
+#endif	/* CONFIG_UTRACE */
+
+
+#ifdef CONFIG_PTRACE
+
+#define WORD(member, num) \
+	offsetof(struct pt_all_user_regs, member), \
+	offsetof(struct pt_all_user_regs, member) + num * sizeof(long)
+static const struct ptrace_layout_segment pt_all_user_regs_layout[] = {
+	{WORD(nat, 1),			0,	ELF_NAT_OFFSET},
+	{WORD(cr_iip, 1),		0,	ELF_CR_IIP_OFFSET},
+	{WORD(cfm, 1),			0,	ELF_CFM_OFFSET},
+	{WORD(cr_ipsr, 1),		0,	ELF_CR_IPSR_OFFSET},
+	{WORD(pr, 1),			0,	ELF_PR_OFFSET},
+	{WORD(gr[0], 32),		0,	ELF_GR_OFFSET(0)},
+	{WORD(br[0], 8),		0, 	ELF_BR_OFFSET(0)},
+	{WORD(ar[0], 16),		-1,	0},
+	{WORD(ar[PT_AUR_RSC], 4),	0,	ELF_AR_RSC_OFFSET},
+	{WORD(ar[PT_AUR_RNAT+1], 12),	-1,	0},
+	{WORD(ar[PT_AUR_CCV], 1),	0,	ELF_AR_CCV_OFFSET},
+	{WORD(ar[PT_AUR_CCV+1], 3),	-1,	0},
+	{WORD(ar[PT_AUR_UNAT], 1), 	0,	ELF_AR_UNAT_OFFSET},
+	{WORD(ar[PT_AUR_UNAT+1], 3),	-1,	0},
+	{WORD(ar[PT_AUR_FPSR], 1), 	0,	ELF_AR_FPSR_OFFSET},
+	{WORD(ar[PT_AUR_FPSR+1], 24), 	-1,	0},
+	{WORD(ar[PT_AUR_PFS], 3),  	0,	ELF_AR_PFS_OFFSET},
+	{WORD(ar[PT_AUR_EC+1], 62),	-1,	0},
+	{offsetof(struct pt_all_user_regs, fr[0]),
+	 offsetof(struct pt_all_user_regs, fr[128]),
+	 1, 0},
+	{0, 0, -1, 0}
+};
+#undef WORD
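+
+/*
+ * Reading the table above (hedged): each segment appears to map a byte
+ * range of struct pt_all_user_regs onto (regset index, byte offset
+ * within that regset), with a regset of -1 marking a hole that reads
+ * back as zero and ignores writes.  For example:
+ *
+ *	{WORD(pr, 1), 0, ELF_PR_OFFSET}
+ *
+ * maps the 8-byte 'pr' word onto regset 0 (the general registers) at
+ * byte offset ELF_PR_OFFSET.
+ */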
+
+#define NEXT(addr, sum)	(addr + sum * sizeof(long))
+static const struct ptrace_layout_segment pt_uarea_layout[] = {
+	{PT_F32,	PT_NAT_BITS,		1,	ELF_FP_OFFSET(32)},
+	{PT_NAT_BITS,	NEXT(PT_NAT_BITS, 1),	0,	ELF_NAT_OFFSET},
+	{PT_F2, 	PT_F10,			1,	ELF_FP_OFFSET(2)},
+	{PT_F10, 	PT_R4, 			1,	ELF_FP_OFFSET(10)},
+	{PT_R4, 	PT_B1, 			0,	ELF_GR_OFFSET(4)},
+	{PT_B1, 	PT_AR_EC, 		0,	ELF_BR_OFFSET(1)},
+	{PT_AR_EC, 	PT_AR_LC,	 	0,	ELF_AR_EC_OFFSET},
+	{PT_AR_LC, 	NEXT(PT_AR_LC, 1), 	0,	ELF_AR_LC_OFFSET},
+	{PT_CR_IPSR,	PT_CR_IIP,		0,	ELF_CR_IPSR_OFFSET},
+	{PT_CR_IIP,	PT_AR_UNAT,		0,	ELF_CR_IIP_OFFSET},
+	{PT_AR_UNAT,	PT_AR_PFS,		0, 	ELF_AR_UNAT_OFFSET},
+	{PT_AR_PFS,	PT_AR_RSC,		0,	ELF_AR_PFS_OFFSET},
+	{PT_AR_RSC,	PT_AR_RNAT,		0,	ELF_AR_RSC_OFFSET},
+	{PT_AR_RNAT,	PT_AR_BSPSTORE,		0,	ELF_AR_RNAT_OFFSET},
+	{PT_AR_BSPSTORE,PT_PR,			0,	ELF_AR_BSPSTORE_OFFSET},
+	{PT_PR,		PT_B6,			0,	ELF_PR_OFFSET},
+	{PT_B6,		PT_AR_BSP,		0,	ELF_BR_OFFSET(6)},
+	{PT_AR_BSP,	PT_R1,			0,	ELF_AR_BSP_OFFSET},
+	{PT_R1,		PT_R12,			0,	ELF_GR_OFFSET(1)},
+	{PT_R12,	PT_R8,			0,	ELF_GR_OFFSET(12)},
+	{PT_R8,		PT_R16,			0,	ELF_GR_OFFSET(8)},
+	{PT_R16,	PT_AR_CCV,		0,	ELF_GR_OFFSET(16)},
+	{PT_AR_CCV,	PT_AR_FPSR,		0,	ELF_AR_CCV_OFFSET},
+	{PT_AR_FPSR,	PT_B0,			0,	ELF_AR_FPSR_OFFSET},
+	{PT_B0,		PT_B7,			0,	ELF_BR_OFFSET(0)},
+	{PT_B7,		PT_F6,			0,	ELF_BR_OFFSET(7)},
+	{PT_F6,		PT_AR_CSD,		1,	ELF_FP_OFFSET(6)},
+	{PT_AR_CSD,	NEXT(PT_AR_CSD, 2),	0,	ELF_AR_CSD_OFFSET},
+	{PT_DBR,	NEXT(PT_DBR, 8), 	2,	0},
+	{PT_IBR,	NEXT(PT_IBR, 8),	2,	8 * sizeof(long)},
+	{0, 0, -1, 0}
+};
+#undef NEXT
+
+fastcall int arch_ptrace(long *request, struct task_struct *child,
+			 struct utrace_attached_engine *engine,
+			 unsigned long addr, unsigned long data, long *val)
+{
+	int ret = -ENOSYS;
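+	/*
+	 * Returning -ENOSYS asks the generic ptrace code to handle the
+	 * (possibly rewritten) request itself; only the cases below that
+	 * assign ret or return directly are handled entirely here.
+	 */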
+	switch (*request) {
+	case PTRACE_OLD_GETSIGINFO:
+		*request = PTRACE_GETSIGINFO;
+		break;
+	case PTRACE_OLD_SETSIGINFO:
+		*request = PTRACE_SETSIGINFO;
+		break;
+
+	case PTRACE_PEEKTEXT: /* read word at location addr. */
+	case PTRACE_PEEKDATA:
+		ret = access_process_vm(child, addr, val, sizeof(*val), 0);
+		ret = ret == sizeof(*val) ? 0 : -EIO;
+		break;
+
+	case PTRACE_PEEKUSR:
+		return ptrace_layout_access(child, engine,
+					    utrace_native_view(current),
+					    pt_uarea_layout,
+					    addr, sizeof(long),
+					    NULL, val, 0);
+	case PTRACE_POKEUSR:
+		return ptrace_pokeusr(child, engine,
+				      pt_uarea_layout, addr, data);
+
+	case PTRACE_GETREGS:
+	case PTRACE_SETREGS:
+		return ptrace_layout_access(child, engine,
+					    utrace_native_view(current),
+					    pt_all_user_regs_layout,
+					    0, sizeof(struct pt_all_user_regs),
+					    (void __user *) data, NULL,
+					    *request == PTRACE_SETREGS);
+	}
+	return ret;
 }
+
+#endif	/* CONFIG_PTRACE */
--- linux-2.6/arch/ia64/kernel/asm-offsets.c.utrace-ptrace-compat
+++ linux-2.6/arch/ia64/kernel/asm-offsets.c
@@ -44,7 +44,7 @@ void foo(void)
 	DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
 	DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending));
 	DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid));
-	DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent));
+	DEFINE(IA64_TASK_PARENT_OFFSET, offsetof (struct task_struct, parent));
 	DEFINE(IA64_TASK_SIGHAND_OFFSET,offsetof (struct task_struct, sighand));
 	DEFINE(IA64_TASK_SIGNAL_OFFSET,offsetof (struct task_struct, signal));
 	DEFINE(IA64_TASK_TGID_OFFSET, offsetof (struct task_struct, tgid));
--- linux-2.6/arch/ia64/ia32/ia32_entry.S.utrace-ptrace-compat
+++ linux-2.6/arch/ia64/ia32/ia32_entry.S
@@ -236,7 +236,7 @@ ia32_syscall_table:
 	data8 sys_setuid	/* 16-bit version */
 	data8 sys_getuid	/* 16-bit version */
 	data8 compat_sys_stime    /* 25 */
-	data8 sys32_ptrace
+	data8 compat_sys_ptrace
 	data8 sys32_alarm
 	data8 sys_ni_syscall
 	data8 sys32_pause
--- linux-2.6/arch/ia64/ia32/sys_ia32.c.utrace-ptrace-compat
+++ linux-2.6/arch/ia64/ia32/sys_ia32.c
@@ -44,6 +44,7 @@
 #include <linux/eventpoll.h>
 #include <linux/personality.h>
 #include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/stat.h>
 #include <linux/ipc.h>
 #include <linux/capability.h>
@@ -1419,25 +1420,6 @@ sys32_waitpid (int pid, unsigned int *st
 	return compat_sys_wait4(pid, stat_addr, options, NULL);
 }
 
-static unsigned int
-ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val)
-{
-	size_t copied;
-	unsigned int ret;
-
-	copied = access_process_vm(child, addr, val, sizeof(*val), 0);
-	return (copied != sizeof(ret)) ? -EIO : 0;
-}
-
-static unsigned int
-ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val)
-{
-
-	if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
-		return -EIO;
-	return 0;
-}
-
 /*
  *  The order in which registers are stored in the ptrace regs structure
  */
@@ -1735,6 +1717,7 @@ restore_ia32_fpxstate (struct task_struc
 	return 0;
 }
 
+#if 0				/* XXX */
 asmlinkage long
 sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data)
 {
@@ -1842,9 +1825,11 @@ sys32_ptrace (int request, pid_t pid, un
 					    compat_ptr(data));
 		break;
 
+#if 0				/* XXX */
 	      case PTRACE_GETEVENTMSG:   
 		ret = put_user(child->ptrace_message, (unsigned int __user *) compat_ptr(data));
 		break;
+#endif
 
 	      case PTRACE_SYSCALL:	/* continue, stop after next syscall */
 	      case PTRACE_CONT:		/* restart after signal. */
@@ -1865,6 +1850,520 @@ sys32_ptrace (int request, pid_t pid, un
 	unlock_kernel();
 	return ret;
 }
+#endif
+
+#ifdef CONFIG_UTRACE
+typedef struct utrace_get {
+	void *kbuf;
+	void __user *ubuf;
+} utrace_get_t;
+
+typedef struct utrace_set {
+	const void *kbuf;
+	const void __user *ubuf;
+} utrace_set_t;
+
+typedef struct utrace_getset {
+	struct task_struct *target;
+	const struct utrace_regset *regset;
+	union {
+		utrace_get_t get;
+		utrace_set_t set;
+	} u;
+	unsigned int pos;
+	unsigned int count;
+	int ret;
+} utrace_getset_t;
+
+static void getfpreg(struct task_struct *task, int regno, int *val)
+{
+	switch (regno / sizeof(int)) {
+		case 0: *val = task->thread.fcr & 0xffff; break;
+		case 1: *val = task->thread.fsr & 0xffff; break;
+		case 2: *val = (task->thread.fsr>>16) & 0xffff; break;
+		case 3: *val = task->thread.fir; break;
+		case 4: *val = (task->thread.fir>>32) & 0xffff; break;
+		case 5: *val = task->thread.fdr; break;
+		case 6: *val = (task->thread.fdr >> 32) & 0xffff; break;
+	}
+}
+
+static void setfpreg(struct task_struct *task, int regno, int val)
+{
+	switch (regno / sizeof(int)) {
+		case 0:
+			task->thread.fcr = (task->thread.fcr & (~0x1f3f))
+				| (val & 0x1f3f);
+			break;
+		case 1:
+			task->thread.fsr = (task->thread.fsr & (~0xffff)) | val;
+			break;
+		case 2:
+			task->thread.fsr = (task->thread.fsr & (~0xffff0000))
+				| (val << 16);
+			break;
+		case 3:
+			task->thread.fir = (task->thread.fir & (~0xffffffff)) | val;
+			break;
+		case 5:
+			task->thread.fdr = (task->thread.fdr & (~0xffffffff)) | val;
+			break;
+	}
+}
+
+static void access_fpreg_ia32(int regno, void *reg,
+		struct pt_regs *pt, struct switch_stack *sw,
+		int tos, int write)
+{
+	void *f;
+
+	if ((regno += tos) >= 8)
+		regno -= 8;
+	if (regno < 4)
+		f = &pt->f8 + regno;	/* f8-f11 live in pt_regs */
+	else if (regno <= 7)
+		f = &sw->f12 + (regno - 4);	/* f12-f15 live in switch_stack */
+	else {
+		printk(KERN_ERR "%s: regno must be less than 8\n", __FUNCTION__);
+		return;
+	}
+
+	if (write)
+		memcpy(f, reg, sizeof(struct _fpreg_ia32));
+	else
+		memcpy(reg, f, sizeof(struct _fpreg_ia32));
+}
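+
+/*
+ * A worked example of the rotation above (hedged): with tos == 2,
+ * ia32 st(0) is physical x87 register 2, so it lives at &pt->f10
+ * (pt_regs holds f8-f11); st(3) is physical 5, i.e. &sw->f13
+ * (switch_stack holds f12-f15); st(6) is physical 8, which wraps
+ * back to 0, i.e. &pt->f8.
+ */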
+
+static void do_fpregs_get(struct unw_frame_info *info, void *arg)
+{
+	utrace_getset_t *dst = arg;
+	struct task_struct *task = dst->target;
+	struct pt_regs *pt;
+	int start, end, tos;
+	char buf[80];
+
+	if (dst->count == 0 || unw_unwind_to_user(info) < 0)
+		return;
+	if (dst->pos < 7 * sizeof(int)) {
+		end = min((dst->pos + dst->count), (unsigned int)(7 * sizeof(int)));
+		for (start = dst->pos; start < end; start += sizeof(int))
+			getfpreg(task, start, (int *)(buf + start));
+		dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, buf,
+				0, 7 * sizeof(int));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+	if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
+		pt = task_pt_regs(task);
+		tos = (task->thread.fsr >> 11) & 7;
+		end = min(dst->pos + dst->count, 
+				(unsigned int)(sizeof(struct ia32_user_i387_struct)));
+		start = (dst->pos - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
+		end = (end - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
+		for (; start < end; start++)
+			access_fpreg_ia32(start, (struct _fpreg_ia32 *)buf + start,
+					pt, info->sw, tos, 0);
+		dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf,
+				buf, 7 * sizeof(int),
+				sizeof(struct ia32_user_i387_struct));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+}
+
+static void do_fpregs_set(struct unw_frame_info *info, void *arg)
+{
+	utrace_getset_t *dst = arg;
+	struct task_struct *task = dst->target;
+	struct pt_regs *pt;
+	char buf[80];
+	int end, start, tos;
+
+	if (dst->count == 0 || unw_unwind_to_user(info) < 0)
+		return;
+
+	if (dst->pos < 7 * sizeof(int)) {
+		start = dst->pos;
+		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf, buf,
+				0, 7 * sizeof(int));
+		if (dst->ret)
+			return;
+		for (; start < dst->pos; start += sizeof(int))
+			setfpreg(task, start, *((int*)(buf + start)));
+		if (dst->count == 0)
+			return;
+	}
+	if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
+		start = (dst->pos - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
+		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf,
+				buf, 7 * sizeof(int),
+				sizeof(struct ia32_user_i387_struct));
+		if (dst->ret)
+			return;
+		pt = task_pt_regs(task);
+		tos = (task->thread.fsr >> 11) & 7;
+		end = (dst->pos - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
+		for (; start < end; start++)
+			access_fpreg_ia32(start, (struct _fpreg_ia32 *)buf + start,
+					pt, info->sw, tos, 1);
+		if (dst->count == 0)
+			return;
+	}
+}
+
+#define OFFSET(member) ((int)(offsetof(struct ia32_user_fxsr_struct, member)))
+static void getfpxreg(struct task_struct *task, int start, int end, char *buf)
+{
+	int min_val;
+
+	min_val = min(end, OFFSET(fop));
+	while (start < min_val) {
+		if (start == OFFSET(cwd))
+			*((short *)buf) = task->thread.fcr & 0xffff;
+		else if (start == OFFSET(swd))
+			*((short *)buf) = task->thread.fsr & 0xffff;
+		else if (start == OFFSET(twd))
+			*((short *)buf) = (task->thread.fsr>>16) & 0xffff;
+		buf += 2;
+		start += 2;
+	}
+	/* skip fop element */
+	if (start == OFFSET(fop)) {
+		start += 2;
+		buf += 2;
+	}
+	while (start < end) {
+		if (start == OFFSET(fip))
+			*((int *)buf) = task->thread.fir;
+		else if (start == OFFSET(fcs))
+			*((int *)buf) = (task->thread.fir>>32) & 0xffff;
+		else if (start == OFFSET(foo))
+			*((int *)buf) = task->thread.fdr;
+		else if (start == OFFSET(fos))
+			*((int *)buf) = (task->thread.fdr>>32) & 0xffff;
+		else if (start == OFFSET(mxcsr))
+			*((int *)buf) = ((task->thread.fcr>>32) & 0xff80)
+					 | ((task->thread.fsr>>32) & 0x3f);
+		buf += 4;
+		start += 4;
+	}
+}
+
+static void setfpxreg(struct task_struct *task, int start, int end, char *buf)
+{
+	int min_val, num32;
+	short num;
+	unsigned long num64;
+
+	min_val = min(end, OFFSET(fop));
+	while (start < min_val) {
+		num = *((short *)buf);
+		if (start == OFFSET(cwd)) {
+			task->thread.fcr = (task->thread.fcr & (~0x1f3f))
+						| (num & 0x1f3f);
+		} else if (start == OFFSET(swd)) {
+			task->thread.fsr = (task->thread.fsr & (~0xffff)) | num;
+		} else if (start == OFFSET(twd)) {
+			task->thread.fsr = (task->thread.fsr & (~0xffff0000)) | num;
+		}
+		buf += 2;
+		start += 2;
+	}
+	/* skip fop element */
+	if (start == OFFSET(fop)) {
+		start += 2;
+		buf += 2;
+	}
+	while (start < end) {
+		num32 = *((int *)buf);
+		if (start == OFFSET(fip))
+			task->thread.fir = (task->thread.fir & (~0xffffffff))
+						 | num32;
+		else if (start == OFFSET(foo))
+			task->thread.fdr = (task->thread.fdr & (~0xffffffff))
+						 | num32;
+		else if (start == OFFSET(mxcsr)) {
+			num64 = num32 & 0xff10;
+			task->thread.fcr = (task->thread.fcr & (~0xff1000000000UL))
+						 | (num64<<32);
+			num64 = num32 & 0x3f;
+			task->thread.fsr = (task->thread.fsr & (~0x3f00000000UL))
+						 | (num64<<32);
+		}
+		buf += 4;
+		start += 4;
+	}
+}
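+
+/*
+ * Note (added for clarity): the ia32 mxcsr word has no dedicated
+ * ia64 register.  As the helpers above show, its control bits land
+ * in bits 32 and up of thread.fcr (mask 0xff80 on reads, 0xff10 on
+ * writes) and its status bits (mask 0x3f) in bits 32 and up of
+ * thread.fsr, so one 32-bit image is scattered across two 64-bit
+ * registers.
+ */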
+
+static void do_fpxregs_get(struct unw_frame_info *info, void *arg)
+{
+	utrace_getset_t *dst = arg;
+	struct task_struct *task = dst->target;
+	struct pt_regs *pt;
+	char buf[128];
+	int start, end, tos;
+
+	if (dst->count == 0 || unw_unwind_to_user(info) < 0)
+		return;
+	if (dst->pos < OFFSET(st_space[0])) {
+		end = min(dst->pos + dst->count,
+			  (unsigned int)OFFSET(st_space[0]));
+		getfpxreg(task, dst->pos, end, buf + dst->pos);
+		dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf, buf,
+				0, OFFSET(st_space[0]));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+	if (dst->pos < OFFSET(xmm_space[0])) {
+		pt = task_pt_regs(task);
+		tos = (task->thread.fsr >> 11) & 7;
+		end = min(dst->pos + dst->count,
+			  (unsigned int)OFFSET(xmm_space[0]));
+		start = (dst->pos - OFFSET(st_space[0])) / 16;
+		end = (end - OFFSET(st_space[0])) / 16;
+		for (; start < end; start++)
+			access_fpreg_ia32(start, buf + 16 * start, pt,
+					  info->sw, tos, 0);
+		dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf,
+				buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
+		if (dst->ret || dst->count == 0)
+			return;
+	}
+	if (dst->pos < OFFSET(padding[0]))
+		dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
+				&dst->u.get.kbuf, &dst->u.get.ubuf,
+				&info->sw->f16, OFFSET(xmm_space[0]),
+				OFFSET(padding[0]));
+}
+
+static void do_fpxregs_set(struct unw_frame_info *info, void *arg)
+{
+	utrace_getset_t *dst = arg;
+	struct task_struct *task = dst->target;
+	char buf[128];
+	int start, end;
+
+	if (dst->count == 0 || unw_unwind_to_user(info) < 0)
+		return;
+
+	if (dst->pos < OFFSET(st_space[0])) {
+		start = dst->pos;
+		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf,
+				buf, 0, OFFSET(st_space[0]));
+		if (dst->ret)
+			return;
+		setfpxreg(task, start, dst->pos, buf + start);
+		if (dst->count == 0)
+			return;
+	}
+	if (dst->pos < OFFSET(xmm_space[0])) {
+		struct pt_regs *pt;
+		int tos;
+		pt = task_pt_regs(task);
+		tos = (task->thread.fsr >> 11) & 7;
+		start = (dst->pos - OFFSET(st_space[0])) / 16;
+		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf,
+				buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
+		if (dst->ret)
+			return;
+		end = (dst->pos - OFFSET(st_space[0])) / 16;
+		for (; start < end; start++)
+			access_fpreg_ia32(start, buf + 16 * start, pt, info->sw,
+						 tos, 1);
+		if (dst->count == 0)
+			return;
+	}
+	if (dst->pos < OFFSET(padding[0]))
+		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
+				&dst->u.set.kbuf, &dst->u.set.ubuf,
+				&info->sw->f16, OFFSET(xmm_space[0]),
+				 OFFSET(padding[0]));
+}
+#undef OFFSET
+
+static int do_regset_call(void (*call)(struct unw_frame_info *, void *),
+		struct task_struct *target,
+		const struct utrace_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	utrace_getset_t info = { .target = target, .regset = regset,
+		.pos = pos, .count = count,
+		.u.set = { .kbuf = kbuf, .ubuf = ubuf },
+		.ret = 0 };
+
+	if (target == current)
+		unw_init_running(call, &info);
+	else {
+		struct unw_frame_info ufi;
+		memset(&ufi, 0, sizeof(ufi));
+		unw_init_from_blocked_task(&ufi, target);
+		(*call)(&ufi, &info);
+	}
+
+	return info.ret;
+}
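+
+/*
+ * Note (added for clarity): the callbacks run inside an unwind walk
+ * because on ia64 part of the FP state lives in the switch_stack
+ * rather than in pt_regs alone, so the frame must be located first.
+ * The union inside utrace_getset_t is initialized through .u.set
+ * for both directions; the get case reads the same two pointers
+ * through .u.get, which presumably is laid out identically.
+ */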
+
+static int ia32_fpregs_get(struct task_struct *target,
+		const struct utrace_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	return do_regset_call(do_fpregs_get, target, regset, pos, count, kbuf, ubuf);
+}
+
+static int ia32_fpregs_set(struct task_struct *target,
+		const struct utrace_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	return do_regset_call(do_fpregs_set, target, regset, pos, count, kbuf, ubuf);
+}
+
+static int ia32_fpxregs_get(struct task_struct *target,
+		const struct utrace_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	return do_regset_call(do_fpxregs_get, target, regset, pos, count, kbuf, ubuf);
+}
+
+static int ia32_fpxregs_set(struct task_struct *target,
+		const struct utrace_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	return do_regset_call(do_fpxregs_set, target, regset, pos, count, kbuf, ubuf);
+}
+
+static int ia32_genregs_get(struct task_struct *target,
+		const struct utrace_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	if (kbuf) {
+		u32 *kp = kbuf;
+		while (count > 0) {
+			*kp++ = getreg(target, pos);
+			pos += 4;
+			count -= 4;
+		}
+	} else {
+		u32 __user *up = ubuf;
+		while (count > 0) {
+			if (__put_user(getreg(target, pos), up++))
+				return -EFAULT;
+			pos += 4;
+			count -= 4;
+		}
+	}
+	return 0;
+}
+
+static int ia32_genregs_set(struct task_struct *target,
+		const struct utrace_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	int ret = 0;
+
+	if (kbuf) {
+		const u32 *kp = kbuf;
+		while (!ret && count > 0) {
+			putreg(target, pos, *kp++);
+			pos += 4;
+			count -= 4;
+		}
+	} else {
+		const u32 __user *up = ubuf;
+		u32 val;
+		while (!ret && count > 0) {
+			ret = __get_user(val, up++);
+			if (!ret)
+				putreg(target, pos, val);
+			pos += 4;
+			count -= 4;
+		}
+	}
+	return ret;
+}
+
+/*
+ * This should match arch/i386/kernel/ptrace.c:native_regsets.
+ * XXX ioperm? vm86?
+ */
+static const struct utrace_regset ia32_regsets[] = {
+	{
+		.n = sizeof(struct user_regs_struct32)/4,
+		.size = 4, .align = 4,
+		.get = ia32_genregs_get, .set = ia32_genregs_set
+	},
+	{
+		.n = sizeof(struct ia32_user_i387_struct) / 4,
+		.size = 4, .align = 4,
+		.get = ia32_fpregs_get, .set = ia32_fpregs_set
+	},
+	{
+		.n = sizeof(struct ia32_user_fxsr_struct) / 4,
+		.size = 4, .align = 4,
+		.get = ia32_fpxregs_get, .set = ia32_fpxregs_set
+	},
+};
+
+const struct utrace_regset_view utrace_ia32_view = {
+	.name = "i386", .e_machine = EM_386,
+	.regsets = ia32_regsets,
+	.n = sizeof ia32_regsets / sizeof ia32_regsets[0],
+};
+EXPORT_SYMBOL_GPL(utrace_ia32_view);
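+
+/*
+ * Usage sketch (added for clarity; hypothetical caller): a tracing
+ * engine could fetch the compat general registers of a task with
+ *
+ *	const struct utrace_regset *rs = &utrace_ia32_view.regsets[0];
+ *	u32 regs[sizeof(struct user_regs_struct32) / 4];
+ *	int err = (*rs->get)(task, rs, 0, sizeof(regs), regs, NULL);
+ *
+ * i.e. a kernel buffer plus a NULL user pointer, matching the
+ * kbuf/ubuf convention used by the helpers above.
+ */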
+#endif
+
+#ifdef CONFIG_PTRACE
+/*
+ * This matches the arch/i386/kernel/ptrace.c definitions.
+ */
+
+static const struct ptrace_layout_segment ia32_uarea[] = {
+	{0, sizeof(struct user_regs_struct32), 0, 0},
+	{0, 0, -1, 0}
+};
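+
+/*
+ * Note (added for clarity; field meaning inferred from usage here
+ * and in the s390 tables later in this patch): each segment appears
+ * to map a range of user-area offsets [start, end) onto a regset
+ * index and an offset within it, with a regset of -1 marking a gap
+ * and the {0, 0, -1, 0} entry terminating the table.
+ */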
+
+fastcall int arch_compat_ptrace(compat_long_t *request,
+		struct task_struct *child,
+		struct utrace_attached_engine *engine,
+		compat_ulong_t addr, compat_ulong_t data,
+		compat_long_t *retval)
+{
+	switch (*request) {
+	case PTRACE_PEEKUSR:
+		return ptrace_compat_peekusr(child, engine, ia32_uarea,
+					     addr, data);
+	case PTRACE_POKEUSR:
+		return ptrace_compat_pokeusr(child, engine, ia32_uarea,
+					     addr, data);
+	case IA32_PTRACE_GETREGS:
+		return ptrace_whole_regset(child, engine, data, 0, 0);
+	case IA32_PTRACE_SETREGS:
+		return ptrace_whole_regset(child, engine, data, 0, 1);
+	case IA32_PTRACE_GETFPREGS:
+		return ptrace_whole_regset(child, engine, data, 1, 0);
+	case IA32_PTRACE_SETFPREGS:
+		return ptrace_whole_regset(child, engine, data, 1, 1);
+	case IA32_PTRACE_GETFPXREGS:
+		return ptrace_whole_regset(child, engine, data, 2, 0);
+	case IA32_PTRACE_SETFPXREGS:
+		return ptrace_whole_regset(child, engine, data, 2, 1);
+	}
+	return -ENOSYS;
+}
+#endif
 
 typedef struct {
 	unsigned int	ss_sp;
--- linux-2.6/arch/ppc/kernel/asm-offsets.c.utrace-ptrace-compat
+++ linux-2.6/arch/ppc/kernel/asm-offsets.c
@@ -37,7 +37,6 @@ main(void)
 	DEFINE(THREAD, offsetof(struct task_struct, thread));
 	DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
 	DEFINE(MM, offsetof(struct task_struct, mm));
-	DEFINE(PTRACE, offsetof(struct task_struct, ptrace));
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
 	DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
@@ -47,7 +46,6 @@ main(void)
 	DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 	DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
-	DEFINE(PT_PTRACED, PT_PTRACED);
 #endif
 #ifdef CONFIG_ALTIVEC
 	DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
--- linux-2.6/arch/s390/kernel/signal.c.utrace-ptrace-compat
+++ linux-2.6/arch/s390/kernel/signal.c
@@ -25,6 +25,7 @@
 #include <linux/tty.h>
 #include <linux/personality.h>
 #include <linux/binfmts.h>
+#include <linux/tracehook.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 #include <asm/lowcore.h>
@@ -403,6 +404,8 @@ handle_signal(unsigned long sig, struct 
 			sigaddset(&current->blocked,sig);
 		recalc_sigpending();
 		spin_unlock_irq(&current->sighand->siglock);
+
+		tracehook_report_handle_signal(sig, ka, oldset, regs);
 	}
 
 	return ret;
--- linux-2.6/arch/s390/kernel/compat_signal.c.utrace-ptrace-compat
+++ linux-2.6/arch/s390/kernel/compat_signal.c
@@ -28,6 +28,7 @@
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 #include <asm/lowcore.h>
+#include <linux/tracehook.h>
 #include "compat_linux.h"
 #include "compat_ptrace.h"
 
@@ -579,7 +580,9 @@ handle_signal32(unsigned long sig, struc
 			sigaddset(&current->blocked,sig);
 		recalc_sigpending();
 		spin_unlock_irq(&current->sighand->siglock);
+
+		tracehook_report_handle_signal(sig, ka, oldset, regs);
 	}
+
 	return ret;
 }
-
--- linux-2.6/arch/s390/kernel/process.c.utrace-ptrace-compat
+++ linux-2.6/arch/s390/kernel/process.c
@@ -331,9 +331,6 @@ asmlinkage long sys_execve(struct pt_reg
         error = do_execve(filename, (char __user * __user *) regs.gprs[3],
 			  (char __user * __user *) regs.gprs[4], &regs);
 	if (error == 0) {
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
 		current->thread.fp_regs.fpc = 0;
 		if (MACHINE_HAS_IEEE)
 			asm volatile("sfpc %0,%0" : : "d" (0));
--- linux-2.6/arch/s390/kernel/compat_linux.c.utrace-ptrace-compat
+++ linux-2.6/arch/s390/kernel/compat_linux.c
@@ -540,9 +540,6 @@ sys32_execve(struct pt_regs regs)
 				 compat_ptr(regs.gprs[4]), &regs);
 	if (error == 0)
 	{
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
 		current->thread.fp_regs.fpc=0;
 		__asm__ __volatile__
 		        ("sr  0,0\n\t"
--- linux-2.6/arch/s390/kernel/traps.c.utrace-ptrace-compat
+++ linux-2.6/arch/s390/kernel/traps.c
@@ -18,7 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/errno.h>
-#include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/timer.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
@@ -338,7 +338,7 @@ static inline void __user *get_check_add
 
 void do_single_step(struct pt_regs *regs)
 {
-	if ((current->ptrace & PT_PTRACED) != 0)
+	if (tracehook_consider_fatal_signal(current, SIGTRAP))
 		force_sig(SIGTRAP, current);
 }
 
@@ -439,7 +439,7 @@ asmlinkage void illegal_op(struct pt_reg
 	if (regs->psw.mask & PSW_MASK_PSTATE) {
 		get_user(*((__u16 *) opcode), (__u16 __user *) location);
 		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
-			if (current->ptrace & PT_PTRACED)
+			if (tracehook_consider_fatal_signal(current, SIGTRAP))
 				force_sig(SIGTRAP, current);
 			else
 				signal = SIGILL;
--- linux-2.6/arch/s390/kernel/ptrace.c.utrace-ptrace-compat
+++ linux-2.6/arch/s390/kernel/ptrace.c
@@ -29,6 +29,8 @@
 #include <linux/smp_lock.h>
 #include <linux/errno.h>
 #include <linux/ptrace.h>
+#include <linux/tracehook.h>
+#include <linux/module.h>
 #include <linux/user.h>
 #include <linux/security.h>
 #include <linux/audit.h>
@@ -41,6 +43,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
+#include <asm/elf.h>
 
 #ifdef CONFIG_COMPAT
 #include "compat_ptrace.h"
@@ -84,651 +87,598 @@ FixPerRegisters(struct task_struct *task
 		per_info->control_regs.bits.storage_alt_space_ctl = 1;
 	else
 		per_info->control_regs.bits.storage_alt_space_ctl = 0;
+
+	if (task == current)
+		/*
+		 * These registers are loaded in __switch_to on
+		 * context switch.  We must load them now if
+		 * touching the current thread.
+		 */
+		__ctl_load(per_info->control_regs.words.cr, 9, 11);
 }
 
 void
-set_single_step(struct task_struct *task)
+tracehook_enable_single_step(struct task_struct *task)
 {
 	task->thread.per_info.single_step = 1;
 	FixPerRegisters(task);
 }
 
 void
-clear_single_step(struct task_struct *task)
+tracehook_disable_single_step(struct task_struct *task)
 {
 	task->thread.per_info.single_step = 0;
 	FixPerRegisters(task);
+	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
 }
 
-/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure single step bits etc are not set.
- */
-void
-ptrace_disable(struct task_struct *child)
+int
+tracehook_single_step_enabled(struct task_struct *task)
 {
-	/* make sure the single step bit is not set. */
-	clear_single_step(child);
+	return task->thread.per_info.single_step;
 }
 
-#ifndef CONFIG_64BIT
-# define __ADDR_MASK 3
-#else
-# define __ADDR_MASK 7
-#endif
 
-/*
- * Read the word at offset addr from the user area of a process. The
- * trouble here is that the information is littered over different
- * locations. The process registers are found on the kernel stack,
- * the floating point stuff and the trace settings are stored in
- * the task structure. In addition the different structures in
- * struct user contain pad bytes that should be read as zeroes.
- * Lovely...
- */
 static int
-peek_user(struct task_struct *child, addr_t addr, addr_t data)
+genregs_get(struct task_struct *target,
+	    const struct utrace_regset *regset,
+	    unsigned int pos, unsigned int count,
+	    void *kbuf, void __user *ubuf)
 {
-	struct user *dummy = NULL;
-	addr_t offset, tmp, mask;
-
-	/*
-	 * Stupid gdb peeks/pokes the access registers in 64 bit with
-	 * an alignment of 4. Programmers from hell...
-	 */
-	mask = __ADDR_MASK;
-#ifdef CONFIG_64BIT
-	if (addr >= (addr_t) &dummy->regs.acrs &&
-	    addr < (addr_t) &dummy->regs.orig_gpr2)
-		mask = 3;
-#endif
-	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
-		return -EIO;
-
-	if (addr < (addr_t) &dummy->regs.acrs) {
-		/*
-		 * psw and gprs are stored on the stack
-		 */
-		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
-		if (addr == (addr_t) &dummy->regs.psw.mask)
-			/* Remove per bit from user psw. */
-			tmp &= ~PSW_MASK_PER;
-
-	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
-		/*
-		 * access registers are stored in the thread structure
-		 */
-		offset = addr - (addr_t) &dummy->regs.acrs;
-#ifdef CONFIG_64BIT
-		/*
-		 * Very special case: old & broken 64 bit gdb reading
-		 * from acrs[15]. Result is a 64 bit value. Read the
-		 * 32 bit acrs[15] value and shift it by 32. Sick...
-		 */
-		if (addr == (addr_t) &dummy->regs.acrs[15])
-			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
-		else
-#endif
-		tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
-
-	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
-		/*
-		 * orig_gpr2 is stored on the kernel stack
-		 */
-		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
-
-	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
-		/* 
-		 * floating point regs. are stored in the thread structure
-		 */
-		offset = addr - (addr_t) &dummy->regs.fp_regs;
-		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
-		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
-			tmp &= (unsigned long) FPC_VALID_MASK
-				<< (BITS_PER_LONG - 32);
+	struct pt_regs *regs = task_pt_regs(target);
+	unsigned long pswmask;
+	int ret;
 
-	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
-		/*
-		 * per_info is found in the thread structure
-		 */
-		offset = addr - (addr_t) &dummy->regs.per_info;
-		tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);
+	/* Remove per bit from user psw. */
+	pswmask = regs->psw.mask & ~PSW_MASK_PER;
+	ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				    &pswmask, PT_PSWMASK, PT_PSWADDR);
+
+	/* The rest of the PSW and the GPRs are directly on the stack. */
+	if (ret == 0)
+		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					    &regs->psw.addr, PT_PSWADDR,
+					    PT_ACR0);
+
+	/* The ACRs are kept in the thread_struct.  */
+	if (ret == 0 && count > 0 && pos < PT_ORIGGPR2) {
+		if (target == current)
+			save_access_regs(target->thread.acrs);
+
+		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					    target->thread.acrs,
+					    PT_ACR0, PT_ORIGGPR2);
+	}
 
-	} else
-		tmp = 0;
+	/* Finally, the ORIG_GPR2 value.  */
+	if (ret == 0)
+		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					    &regs->orig_gpr2, PT_ORIGGPR2, -1);
 
-	return put_user(tmp, (addr_t __user *) data);
+	return ret;
 }
 
-/*
- * Write a word to the user area of a process at location addr. This
- * operation does have an additional problem compared to peek_user.
- * Stores to the program status word and on the floating point
- * control register needs to get checked for validity.
- */
 static int
-poke_user(struct task_struct *child, addr_t addr, addr_t data)
-{
-	struct user *dummy = NULL;
-	addr_t offset, mask;
-
-	/*
-	 * Stupid gdb peeks/pokes the access registers in 64 bit with
-	 * an alignment of 4. Programmers from hell indeed...
-	 */
-	mask = __ADDR_MASK;
-#ifdef CONFIG_64BIT
-	if (addr >= (addr_t) &dummy->regs.acrs &&
-	    addr < (addr_t) &dummy->regs.orig_gpr2)
-		mask = 3;
-#endif
-	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
-		return -EIO;
-
-	if (addr < (addr_t) &dummy->regs.acrs) {
-		/*
-		 * psw and gprs are stored on the stack
-		 */
-		if (addr == (addr_t) &dummy->regs.psw.mask &&
+genregs_set(struct task_struct *target,
+	    const struct utrace_regset *regset,
+	    unsigned int pos, unsigned int count,
+	    const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+	int ret = 0;
+
+	/* Check for an invalid PSW mask.  */
+	if (count > 0 && pos == PT_PSWMASK) {
+		unsigned long pswmask = regs->psw.mask;
+		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					   &pswmask, PT_PSWMASK, PT_PSWADDR);
+		if (pswmask != PSW_MASK_MERGE(PSW_USER_BITS, pswmask)
 #ifdef CONFIG_COMPAT
-		    data != PSW_MASK_MERGE(PSW_USER32_BITS, data) &&
+		    && pswmask != PSW_MASK_MERGE(PSW_USER32_BITS, pswmask)
 #endif
-		    data != PSW_MASK_MERGE(PSW_USER_BITS, data))
+			)
 			/* Invalid psw mask. */
 			return -EINVAL;
+		regs->psw.mask = pswmask;
+		FixPerRegisters(target);
+	}
+
+	/* The rest of the PSW and the GPRs are directly on the stack. */
+	if (ret == 0) {
+		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					   &regs->psw.addr, PT_PSWADDR,
+					   PT_ACR0);
 #ifndef CONFIG_64BIT
-		if (addr == (addr_t) &dummy->regs.psw.addr)
-			/* I'd like to reject addresses without the
-			   high order bit but older gdb's rely on it */
-			data |= PSW_ADDR_AMODE;
+		/* I'd like to reject addresses without the
+		   high order bit but older gdb's rely on it */
+		regs->psw.addr |= PSW_ADDR_AMODE;
 #endif
-		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
+	}
 
-	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
-		/*
-		 * access registers are stored in the thread structure
-		 */
-		offset = addr - (addr_t) &dummy->regs.acrs;
-#ifdef CONFIG_64BIT
-		/*
-		 * Very special case: old & broken 64 bit gdb writing
-		 * to acrs[15] with a 64 bit value. Ignore the lower
-		 * half of the value and write the upper 32 bit to
-		 * acrs[15]. Sick...
-		 */
-		if (addr == (addr_t) &dummy->regs.acrs[15])
-			child->thread.acrs[15] = (unsigned int) (data >> 32);
-		else
-#endif
-		*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
+	/* The ACRs are kept in the thread_struct.  */
+	if (ret == 0 && count > 0 && pos < PT_ORIGGPR2) {
+		if (target == current
+		    && (pos != PT_ACR0 || count < sizeof(target->thread.acrs)))
+			save_access_regs(target->thread.acrs);
+
+		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					   target->thread.acrs,
+					   PT_ACR0, PT_ORIGGPR2);
+		if (ret == 0 && target == current)
+			restore_access_regs(target->thread.acrs);
+	}
 
-	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
-		/*
-		 * orig_gpr2 is stored on the kernel stack
-		 */
-		task_pt_regs(child)->orig_gpr2 = data;
+	/* Finally, the ORIG_GPR2 value.  */
+	if (ret == 0)
+		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					   &regs->orig_gpr2, PT_ORIGGPR2, -1);
 
-	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
-		/*
-		 * floating point regs. are stored in the thread structure
-		 */
-		if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
-		    (data & ~((unsigned long) FPC_VALID_MASK
-			      << (BITS_PER_LONG - 32))) != 0)
-			return -EINVAL;
-		offset = addr - (addr_t) &dummy->regs.fp_regs;
-		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
+	return ret;
+}
 
-	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
-		/*
-		 * per_info is found in the thread structure 
-		 */
-		offset = addr - (addr_t) &dummy->regs.per_info;
-		*(addr_t *)((addr_t) &child->thread.per_info + offset) = data;
+static int
+fpregs_get(struct task_struct *target,
+	   const struct utrace_regset *regset,
+	   unsigned int pos, unsigned int count,
+	   void *kbuf, void __user *ubuf)
+{
+	if (target == current)
+		save_fp_regs(&target->thread.fp_regs);
+
+	return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				     &target->thread.fp_regs, 0, -1);
+}
+
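+/*
+ * Note (added for clarity): a partial write to the current task
+ * first snapshots the live FP registers so that bytes outside the
+ * written range keep their present values; once the copy-in has
+ * succeeded, the merged image is loaded back into the hardware.
+ */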
+static int
+fpregs_set(struct task_struct *target,
+	    const struct utrace_regset *regset,
+	    unsigned int pos, unsigned int count,
+	    const void *kbuf, const void __user *ubuf)
+{
+	int ret = 0;
+
+	if (target == current && (pos != 0 || count != sizeof(s390_fp_regs)))
+		save_fp_regs(&target->thread.fp_regs);
+
+	/* If setting FPC, must validate it first. */
+	if (count > 0 && pos == 0) {
+		unsigned long fpc;
+		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					   &fpc, 0, sizeof(fpc));
+		if (ret)
+			return ret;
+
+		if ((fpc & ~((unsigned long) FPC_VALID_MASK
+			     << (BITS_PER_LONG - 32))) != 0)
+			return -EINVAL;
 
+		memcpy(&target->thread.fp_regs, &fpc, sizeof(fpc));
 	}
 
-	FixPerRegisters(child);
-	return 0;
+	if (ret == 0)
+		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					   &target->thread.fp_regs, 0, -1);
+
+	if (ret == 0 && target == current)
+		restore_fp_regs(&target->thread.fp_regs);
+
+	return ret;
 }
 
 static int
-do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
+per_info_get(struct task_struct *target,
+	     const struct utrace_regset *regset,
+	     unsigned int pos, unsigned int count,
+	     void *kbuf, void __user *ubuf)
 {
-	unsigned long tmp;
-	ptrace_area parea; 
-	int copied, ret;
-
-	switch (request) {
-	case PTRACE_PEEKTEXT:
-	case PTRACE_PEEKDATA:
-		/* Remove high order bit from address (only for 31 bit). */
-		addr &= PSW_ADDR_INSN;
-		/* read word at location addr. */
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		if (copied != sizeof(tmp))
-			return -EIO;
-		return put_user(tmp, (unsigned long __user *) data);
-
-	case PTRACE_PEEKUSR:
-		/* read the word at location addr in the USER area. */
-		return peek_user(child, addr, data);
+	return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				     &target->thread.per_info, 0, -1);
+}
 
-	case PTRACE_POKETEXT:
-	case PTRACE_POKEDATA:
-		/* Remove high order bit from address (only for 31 bit). */
-		addr &= PSW_ADDR_INSN;
-		/* write the word at location addr. */
-		copied = access_process_vm(child, addr, &data, sizeof(data),1);
-		if (copied != sizeof(data))
-			return -EIO;
-		return 0;
+static int
+per_info_set(struct task_struct *target,
+	     const struct utrace_regset *regset,
+	     unsigned int pos, unsigned int count,
+	     const void *kbuf, const void __user *ubuf)
+{
+	int ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				       &target->thread.per_info, 0, -1);
 
-	case PTRACE_POKEUSR:
-		/* write the word at location addr in the USER area */
-		return poke_user(child, addr, data);
+	FixPerRegisters(target);
 
-	case PTRACE_PEEKUSR_AREA:
-	case PTRACE_POKEUSR_AREA:
-		if (copy_from_user(&parea, (void __user *) addr,
-							sizeof(parea)))
-			return -EFAULT;
-		addr = parea.kernel_addr;
-		data = parea.process_addr;
-		copied = 0;
-		while (copied < parea.len) {
-			if (request == PTRACE_PEEKUSR_AREA)
-				ret = peek_user(child, addr, data);
-			else {
-				addr_t tmp;
-				if (get_user (tmp, (addr_t __user *) data))
-					return -EFAULT;
-				ret = poke_user(child, addr, tmp);
-			}
-			if (ret)
-				return ret;
-			addr += sizeof(unsigned long);
-			data += sizeof(unsigned long);
-			copied += sizeof(unsigned long);
-		}
-		return 0;
-	}
-	return ptrace_request(child, request, addr, data);
+	return ret;
 }
 
-#ifdef CONFIG_COMPAT
-/*
- * Now the fun part starts... a 31 bit program running in the
- * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
- * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
- * to handle, the difference to the 64 bit versions of the requests
- * is that the access is done in multiples of 4 byte instead of
- * 8 bytes (sizeof(unsigned long) on 31/64 bit).
- * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
- * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
- * is a 31 bit program too, the content of struct user can be
- * emulated. A 31 bit program peeking into the struct user of
- * a 64 bit program is a no-no.
- */
 
 /*
- * Same as peek_user but for a 31 bit program.
+ * These are our native regset flavors.
  */
-static int
-peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
-{
-	struct user32 *dummy32 = NULL;
-	per_struct32 *dummy_per32 = NULL;
-	addr_t offset;
-	__u32 tmp;
-
-	if (!test_thread_flag(TIF_31BIT) ||
-	    (addr & 3) || addr > sizeof(struct user) - 3)
-		return -EIO;
+static const struct utrace_regset native_regsets[] = {
+	{
+		.size = sizeof(long), .align = sizeof(long),
+		.n = sizeof(s390_regs) / sizeof(long),
+		.get = genregs_get, .set = genregs_set
+	},
+	{
+		.size = sizeof(long), .align = sizeof(long),
+		.n = sizeof(s390_fp_regs) / sizeof(long),
+		.get = fpregs_get, .set = fpregs_set
+	},
+	{
+		.size = sizeof(long), .align = sizeof(long),
+		.n = sizeof(per_struct) / sizeof(long),
+		.get = per_info_get, .set = per_info_set
+	},
+};
+
+const struct utrace_regset_view utrace_s390_native_view = {
+	.name = UTS_MACHINE, .e_machine = ELF_ARCH,
+	.regsets = native_regsets,
+	.n = sizeof native_regsets / sizeof native_regsets[0],
+};
+EXPORT_SYMBOL_GPL(utrace_s390_native_view);
 
-	if (addr < (addr_t) &dummy32->regs.acrs) {
-		/*
-		 * psw and gprs are stored on the stack
-		 */
-		if (addr == (addr_t) &dummy32->regs.psw.mask) {
-			/* Fake a 31 bit psw mask. */
-			tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
-			tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp);
-		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
-			/* Fake a 31 bit psw address. */
-			tmp = (__u32) task_pt_regs(child)->psw.addr |
-				PSW32_ADDR_AMODE31;
-		} else {
-			/* gpr 0-15 */
-			tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw +
-					 addr*2 + 4);
-		}
-	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
-		/*
-		 * access registers are stored in the thread structure
-		 */
-		offset = addr - (addr_t) &dummy32->regs.acrs;
-		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
 
-	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
-		/*
-		 * orig_gpr2 is stored on the kernel stack
-		 */
-		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
+#ifdef CONFIG_COMPAT
+static int
+s390_genregs_get(struct task_struct *target,
+		 const struct utrace_regset *regset,
+		 unsigned int pos, unsigned int count,
+		 void *kbuf, void __user *ubuf)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+	int ret = 0;
+
+	/* Fake a 31 bit psw mask. */
+	if (count > 0 && pos == PT_PSWMASK / 2) {
+		u32 pswmask = PSW32_MASK_MERGE(PSW32_USER_BITS,
+					       (u32) (regs->psw.mask >> 32));
+		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					    &pswmask, PT_PSWMASK / 2,
+					    PT_PSWADDR / 2);
+	}
 
-	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
-		/*
-		 * floating point regs. are stored in the thread structure 
-		 */
-	        offset = addr - (addr_t) &dummy32->regs.fp_regs;
-		tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
+	/* Fake a 31 bit psw address. */
+	if (ret == 0 && count > 0 && pos == PT_PSWADDR / 2) {
+		u32 pswaddr = (u32) regs->psw.addr | PSW32_ADDR_AMODE31;
+		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					    &pswaddr, PT_PSWADDR / 2,
+					    PT_GPR0 / 2);
+	}
 
-	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
-		/*
-		 * per_info is found in the thread structure
-		 */
-		offset = addr - (addr_t) &dummy32->regs.per_info;
-		/* This is magic. See per_struct and per_struct32. */
-		if ((offset >= (addr_t) &dummy_per32->control_regs &&
-		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
-		    (offset >= (addr_t) &dummy_per32->starting_addr &&
-		     offset <= (addr_t) &dummy_per32->ending_addr) ||
-		    offset == (addr_t) &dummy_per32->lowcore.words.address)
-			offset = offset*2 + 4;
+	/* The GPRs are directly on the stack.  Just truncate them.  */
+	while (ret == 0 && count > 0 && pos < PT_ACR0 / 2) {
+		u32 value = regs->gprs[(pos - PT_GPR0 / 2) / sizeof(u32)];
+		if (kbuf) {
+			*(u32 *) kbuf = value;
+			kbuf += sizeof(u32);
+		}
+		else if (put_user(value, (u32 __user *) ubuf))
+			ret = -EFAULT;
 		else
-			offset = offset*2;
-		tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);
+			ubuf += sizeof(u32);
+		pos += sizeof(u32);
+		count -= sizeof(u32);
+	}
 
-	} else
-		tmp = 0;
+	/* The ACRs are kept in the thread_struct.  */
+	if (ret == 0 && count > 0 && pos < PT_ACR0 / 2 + NUM_ACRS * ACR_SIZE) {
+		if (target == current)
+			save_access_regs(target->thread.acrs);
+
+		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					    target->thread.acrs,
+					    PT_ACR0 / 2,
+					    PT_ACR0 / 2 + NUM_ACRS * ACR_SIZE);
+	}
 
-	return put_user(tmp, (__u32 __user *) data);
+	/* Finally, the ORIG_GPR2 value.  */
+	if (ret == 0 && count > 0) {
+		if (kbuf)
+			*(u32 *) kbuf = regs->orig_gpr2;
+		else if (put_user((u32) regs->orig_gpr2,
+				  (u32 __user *) ubuf))
+			return -EFAULT;
+	}
+
+	return ret;
 }
 
-/*
- * Same as poke_user but for a 31 bit program.
- */
 static int
-poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
-{
-	struct user32 *dummy32 = NULL;
-	per_struct32 *dummy_per32 = NULL;
-	addr_t offset;
-	__u32 tmp;
+s390_genregs_set(struct task_struct *target,
+		 const struct utrace_regset *regset,
+		 unsigned int pos, unsigned int count,
+		 const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+	int ret = 0;
+
+	/* Check for an invalid PSW mask.  */
+	if (count > 0 && pos == PT_PSWMASK / 2) {
+		u32 pswmask;
+		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					   &pswmask, PT_PSWMASK / 2,
+					   PT_PSWADDR / 2);
+		if (ret)
+			return ret;
 
-	if (!test_thread_flag(TIF_31BIT) ||
-	    (addr & 3) || addr > sizeof(struct user32) - 3)
-		return -EIO;
+		if (pswmask != PSW_MASK_MERGE(PSW_USER32_BITS, pswmask))
+			/* Invalid psw mask. */
+			return -EINVAL;
 
-	tmp = (__u32) data;
+		/* Build a 64 bit psw mask from 31 bit mask. */
+		regs->psw.mask = PSW_MASK_MERGE(PSW_USER32_BITS,
+						(u64) pswmask << 32);
+		FixPerRegisters(target);
+	}
 
-	if (addr < (addr_t) &dummy32->regs.acrs) {
-		/*
-		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
-		 */
-		if (addr == (addr_t) &dummy32->regs.psw.mask) {
+	/* Build a 64 bit psw address from 31 bit address. */
+	if (count > 0 && pos == PT_PSWADDR / 2) {
+		u32 pswaddr;
+		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					   &pswaddr, PT_PSWADDR / 2,
+					   PT_GPR0 / 2);
+		if (ret == 0)
 			/* Build a 64 bit psw mask from 31 bit mask. */
-			if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp))
-				/* Invalid psw mask. */
-				return -EINVAL;
-			task_pt_regs(child)->psw.mask =
-				PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32);
-		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
-			/* Build a 64 bit psw address from 31 bit address. */
-			task_pt_regs(child)->psw.addr =
-				(__u64) tmp & PSW32_ADDR_INSN;
-		} else {
-			/* gpr 0-15 */
-			*(__u32*)((addr_t) &task_pt_regs(child)->psw
-				  + addr*2 + 4) = tmp;
+			regs->psw.addr = pswaddr & PSW32_ADDR_INSN;
+	}
+
+	/* The GPRs are directly on the stack. */
+	while (ret == 0 && count > 0 && pos < PT_ACR0 / 2) {
+		u32 value;
+
+		if (kbuf) {
+			value = *(const u32 *) kbuf;
+			kbuf += sizeof(u32);
 		}
-	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
-		/*
-		 * access registers are stored in the thread structure
-		 */
-		offset = addr - (addr_t) &dummy32->regs.acrs;
-		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
+		else if (get_user(value, (const u32 __user *) ubuf))
+			return -EFAULT;
+		else
+			ubuf += sizeof(u32);
+		regs->gprs[(pos - PT_GPR0 / 2) / sizeof(u32)] = value;
+		pos += sizeof(u32);
+		count -= sizeof(u32);
 
-	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
-		/*
-		 * orig_gpr2 is stored on the kernel stack
-		 */
-		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
+	}
 
-	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
-		/*
-		 * floating point regs. are stored in the thread structure 
-		 */
-		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
-		    (tmp & ~FPC_VALID_MASK) != 0)
-			/* Invalid floating point control. */
-			return -EINVAL;
-	        offset = addr - (addr_t) &dummy32->regs.fp_regs;
-		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
+	/* The ACRs are kept in the thread_struct.  */
+	if (ret == 0 && count > 0 && pos < PT_ORIGGPR2 / 2) {
+		if (target == current
+		    && (pos != PT_ACR0 / 2
+			|| count < sizeof(target->thread.acrs)))
+			save_access_regs(target->thread.acrs);
+
+		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					   target->thread.acrs,
+					   PT_ACR0 / 2,
+					   PT_ACR0 / 2 + NUM_ACRS * ACR_SIZE);
 
-	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
-		/*
-		 * per_info is found in the thread structure.
-		 */
-		offset = addr - (addr_t) &dummy32->regs.per_info;
-		/*
-		 * This is magic. See per_struct and per_struct32.
-		 * By incident the offsets in per_struct are exactly
-		 * twice the offsets in per_struct32 for all fields.
-		 * The 8 byte fields need special handling though,
-		 * because the second half (bytes 4-7) is needed and
-		 * not the first half.
-		 */
-		if ((offset >= (addr_t) &dummy_per32->control_regs &&
-		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
-		    (offset >= (addr_t) &dummy_per32->starting_addr &&
-		     offset <= (addr_t) &dummy_per32->ending_addr) ||
-		    offset == (addr_t) &dummy_per32->lowcore.words.address)
-			offset = offset*2 + 4;
-		else
-			offset = offset*2;
-		*(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;
+		if (ret == 0 && target == current)
+			restore_access_regs(target->thread.acrs);
+	}
 
+	/* Finally, the ORIG_GPR2 value.  */
+	if (ret == 0 && count > 0) {
+		u32 value;
+		if (kbuf)
+			value = *(const u32 *) kbuf;
+		else if (get_user(value, (const u32 __user *) ubuf))
+			return -EFAULT;
+		regs->orig_gpr2 = value;
 	}
 
-	FixPerRegisters(child);
+	return ret;
+}
+
+
+/*
+ * This is magic. See per_struct and per_struct32.
+ * By coincidence the offsets in per_struct are exactly
+ * twice the offsets in per_struct32 for all fields.
+ * The 8 byte fields need special handling though,
+ * because the second half (bytes 4-7) is needed and
+ * not the first half.
+ */
+static unsigned int
+offset_from_per32(unsigned int offset)
+{
+	BUILD_BUG_ON(offsetof(per_struct32, control_regs) != 0);
+	if (offset - offsetof(per_struct32, control_regs) < 3*sizeof(u32)
+	    || (offset >= offsetof(per_struct32, starting_addr) &&
+		offset <= offsetof(per_struct32, ending_addr))
+	    || offset == offsetof(per_struct32, lowcore.words.address))
+		offset = offset*2 + 4;
+	else
+		offset = offset*2;
+	return offset;
+}
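+
+/*
+ * Worked example (added for clarity; offsets illustrative): a u32
+ * field at 31-bit offset 0x10 whose 64-bit counterpart is an 8-byte
+ * field maps to 0x10 * 2 + 4 = 0x24, selecting bytes 4-7; a plain
+ * 4-byte field at the same offset would map to 0x10 * 2 = 0x20.
+ */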
+
+static int
+s390_per_info_get(struct task_struct *target,
+		  const struct utrace_regset *regset,
+		  unsigned int pos, unsigned int count,
+		  void *kbuf, void __user *ubuf)
+{
+	while (count > 0) {
+		u32 val = *(u32 *) ((char *) &target->thread.per_info
+				    + offset_from_per32(pos));
+		if (kbuf) {
+			*(u32 *) kbuf = val;
+			kbuf += sizeof(u32);
+		}
+		else if (put_user(val, (u32 __user *) ubuf))
+			return -EFAULT;
+		else
+			ubuf += sizeof(u32);
+		pos += sizeof(u32);
+		count -= sizeof(u32);
+	}
 	return 0;
 }
 
 static int
-do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
+s390_per_info_set(struct task_struct *target,
+		  const struct utrace_regset *regset,
+		  unsigned int pos, unsigned int count,
+		  const void *kbuf, const void __user *ubuf)
+{
+	while (count > 0) {
+		u32 val;
+
+		if (kbuf) {
+			val = *(const u32 *) kbuf;
+			kbuf += sizeof(u32);
+		}
+		else if (get_user(val, (const u32 __user *) ubuf))
+			return -EFAULT;
+		else
+			ubuf += sizeof(u32);
+
+		*(u32 *) ((char *) &target->thread.per_info
+			  + offset_from_per32(pos)) = val;
+		pos += sizeof(u32);
+		count -= sizeof(u32);
+	}
+	FixPerRegisters(target);
+	return 0;
+}
+
+
+static const struct utrace_regset s390_compat_regsets[] = {
+	{
+		.size = sizeof(u32), .align = sizeof(u32),
+		.n = sizeof(s390_regs) / sizeof(long),
+		.get = s390_genregs_get, .set = s390_genregs_set
+	},
+	{
+		.size = sizeof(u32), .align = sizeof(u32),
+		.n = sizeof(s390_fp_regs) / sizeof(u32),
+		.get = fpregs_get, .set = fpregs_set
+	},
+	{
+		.size = sizeof(u32), .align = sizeof(u32),
+		.n = sizeof(per_struct) / sizeof(u32),
+		.get = s390_per_info_get, .set = s390_per_info_set
+	},
+};
+
+const struct utrace_regset_view utrace_s390_compat_view = {
+	.name = "s390", .e_machine = EM_S390,
+	.regsets = s390_compat_regsets,
+	.n = sizeof s390_compat_regsets / sizeof s390_compat_regsets[0],
+};
+EXPORT_SYMBOL_GPL(utrace_s390_compat_view);
+#endif	/* CONFIG_COMPAT */
+
+
+#ifdef CONFIG_PTRACE
+static const struct ptrace_layout_segment s390_uarea[] = {
+	{PT_PSWMASK, PT_FPC, 0, 0},
+	{PT_FPC, PT_CR_9, 1, 0},
+	{PT_CR_9, PT_IEEE_IP, 2, 0},
+	{PT_IEEE_IP, sizeof(struct user), -1, -1},
+	{0, 0, -1, 0}
+};
+
+fastcall int arch_ptrace(long *request, struct task_struct *child,
+			 struct utrace_attached_engine *engine,
+			 unsigned long addr, unsigned long data, long *val)
 {
-	unsigned int tmp;  /* 4 bytes !! */
-	ptrace_area_emu31 parea; 
-	int copied, ret;
+	ptrace_area parea;
+	unsigned long tmp;
+	int copied;
+
+	switch (*request) {
+	case PTRACE_PEEKUSR:
+		return ptrace_peekusr(child, engine, s390_uarea, addr, data);
+	case PTRACE_POKEUSR:
+		return ptrace_pokeusr(child, engine, s390_uarea, addr, data);
+
+	case PTRACE_PEEKUSR_AREA:
+	case PTRACE_POKEUSR_AREA:
+		if (copy_from_user(&parea, (ptrace_area __user *) addr,
+				   sizeof(parea)))
+			return -EFAULT;
+		if ((parea.kernel_addr | parea.len) & (sizeof(data) - 1))
+			return -EIO;
+		return ptrace_layout_access(child, engine,
+					    utrace_native_view(current),
+					    s390_uarea,
+					    parea.kernel_addr, parea.len,
+					    (void __user *) parea.process_addr,
+					    NULL,
+					    *request == PTRACE_POKEUSR_AREA);
 
-	switch (request) {
 	case PTRACE_PEEKTEXT:
 	case PTRACE_PEEKDATA:
+		/* Remove high order bit from address (only for 31 bit). */
+		addr &= PSW_ADDR_INSN;
 		/* read word at location addr. */
 		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
 		if (copied != sizeof(tmp))
 			return -EIO;
-		return put_user(tmp, (unsigned int __user *) data);
-
-	case PTRACE_PEEKUSR:
-		/* read the word at location addr in the USER area. */
-		return peek_user_emu31(child, addr, data);
+		return put_user(tmp, (unsigned long __user *) data);
 
 	case PTRACE_POKETEXT:
 	case PTRACE_POKEDATA:
+		/* Remove high order bit from address (only for 31 bit). */
+		addr &= PSW_ADDR_INSN;
 		/* write the word at location addr. */
-		tmp = data;
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1);
-		if (copied != sizeof(tmp))
+		copied = access_process_vm(child, addr, &data, sizeof(data),1);
+		if (copied != sizeof(data))
 			return -EIO;
 		return 0;
-
-	case PTRACE_POKEUSR:
-		/* write the word at location addr in the USER area */
-		return poke_user_emu31(child, addr, data);
-
-	case PTRACE_PEEKUSR_AREA:
-	case PTRACE_POKEUSR_AREA:
-		if (copy_from_user(&parea, (void __user *) addr,
-							sizeof(parea)))
-			return -EFAULT;
-		addr = parea.kernel_addr;
-		data = parea.process_addr;
-		copied = 0;
-		while (copied < parea.len) {
-			if (request == PTRACE_PEEKUSR_AREA)
-				ret = peek_user_emu31(child, addr, data);
-			else {
-				__u32 tmp;
-				if (get_user (tmp, (__u32 __user *) data))
-					return -EFAULT;
-				ret = poke_user_emu31(child, addr, tmp);
-			}
-			if (ret)
-				return ret;
-			addr += sizeof(unsigned int);
-			data += sizeof(unsigned int);
-			copied += sizeof(unsigned int);
-		}
-		return 0;
-	case PTRACE_GETEVENTMSG:
-		return put_user((__u32) child->ptrace_message,
-				(unsigned int __user *) data);
-	case PTRACE_GETSIGINFO:
-		if (child->last_siginfo == NULL)
-			return -EINVAL;
-		return copy_siginfo_to_user32((compat_siginfo_t __user *) data,
-					      child->last_siginfo);
-	case PTRACE_SETSIGINFO:
-		if (child->last_siginfo == NULL)
-			return -EINVAL;
-		return copy_siginfo_from_user32(child->last_siginfo,
-						(compat_siginfo_t __user *) data);
 	}
-	return ptrace_request(child, request, addr, data);
-}
-#endif
-
-#define PT32_IEEE_IP 0x13c
-
-static int
-do_ptrace(struct task_struct *child, long request, long addr, long data)
-{
-	int ret;
 
-	if (request == PTRACE_ATTACH)
-		return ptrace_attach(child);
+	return -ENOSYS;
+}
 
-	/*
-	 * Special cases to get/store the ieee instructions pointer.
-	 */
-	if (child == current) {
-		if (request == PTRACE_PEEKUSR && addr == PT_IEEE_IP)
-			return peek_user(child, addr, data);
-		if (request == PTRACE_POKEUSR && addr == PT_IEEE_IP)
-			return poke_user(child, addr, data);
 #ifdef CONFIG_COMPAT
-		if (request == PTRACE_PEEKUSR &&
-		    addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
-			return peek_user_emu31(child, addr, data);
-		if (request == PTRACE_POKEUSR &&
-		    addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
-			return poke_user_emu31(child, addr, data);
-#endif
-	}
-
-	ret = ptrace_check_attach(child, request == PTRACE_KILL);
-	if (ret < 0)
-		return ret;
-
-	switch (request) {
-	case PTRACE_SYSCALL:
-		/* continue and stop at next (return from) syscall */
-	case PTRACE_CONT:
-		/* restart after signal. */
-		if (!valid_signal(data))
-			return -EIO;
-		if (request == PTRACE_SYSCALL)
-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		else
-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		child->exit_code = data;
-		/* make sure the single step bit is not set. */
-		clear_single_step(child);
-		wake_up_process(child);
-		return 0;
-
-	case PTRACE_KILL:
-		/*
-		 * make the child exit.  Best I can do is send it a sigkill. 
-		 * perhaps it should be put in the status that it wants to 
-		 * exit.
-		 */
-		if (child->exit_state == EXIT_ZOMBIE) /* already dead */
-			return 0;
-		child->exit_code = SIGKILL;
-		/* make sure the single step bit is not set. */
-		clear_single_step(child);
-		wake_up_process(child);
-		return 0;
+static const struct ptrace_layout_segment s390_compat_uarea[] = {
+	{PT_PSWMASK / 2, PT_FPC / 2, 0, 0},
+	{PT_FPC / 2, PT_CR_9 / 2, 1, 0},
+	{PT_CR_9 / 2, PT_IEEE_IP / 2, 2, 0},
+	{PT_IEEE_IP / 2, sizeof(struct user32), -1, -1},
+	{0, 0, -1, 0}
+};
+
+fastcall int arch_compat_ptrace(compat_long_t *request,
+				struct task_struct *child,
+				struct utrace_attached_engine *engine,
+				compat_ulong_t addr, compat_ulong_t data,
+				compat_long_t *val)
+{
+	ptrace_area_emu31 parea;
 
-	case PTRACE_SINGLESTEP:
-		/* set the trap flag. */
-		if (!valid_signal(data))
+	switch (*request) {
+	case PTRACE_PEEKUSR:
+		return ptrace_compat_peekusr(child, engine, s390_compat_uarea,
+					     addr, data);
+	case PTRACE_POKEUSR:
+		return ptrace_compat_pokeusr(child, engine, s390_compat_uarea,
+					     addr, data);
+	case PTRACE_PEEKUSR_AREA:
+	case PTRACE_POKEUSR_AREA:
+		if (copy_from_user(&parea, ((ptrace_area_emu31 __user *)
+					    (unsigned long) addr),
+				   sizeof(parea)))
+			return -EFAULT;
+		if ((parea.kernel_addr | parea.len) & (sizeof(data) - 1))
 			return -EIO;
-		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		child->exit_code = data;
-		if (data)
-			set_tsk_thread_flag(child, TIF_SINGLE_STEP);
-		else
-			set_single_step(child);
-		/* give it a chance to run. */
-		wake_up_process(child);
-		return 0;
-
-	case PTRACE_DETACH:
-		/* detach a process that was attached. */
-		return ptrace_detach(child, data);
-
-
-	/* Do requests that differ for 31/64 bit */
-	default:
-#ifdef CONFIG_COMPAT
-		if (test_thread_flag(TIF_31BIT))
-			return do_ptrace_emu31(child, request, addr, data);
-#endif
-		return do_ptrace_normal(child, request, addr, data);
+		return ptrace_layout_access(child, engine,
+					    utrace_native_view(current),
+					    s390_compat_uarea,
+					    parea.kernel_addr, parea.len,
+					    (void __user *)
+					    (unsigned long) parea.process_addr,
+					    NULL,
+					    *request == PTRACE_POKEUSR_AREA);
 	}
-	/* Not reached.  */
-	return -EIO;
-}
-
-asmlinkage long
-sys_ptrace(long request, long pid, long addr, long data)
-{
-	struct task_struct *child;
-	int ret;
 
-	lock_kernel();
-	if (request == PTRACE_TRACEME) {
-		 ret = ptrace_traceme();
-		 goto out;
-	}
-
-	child = ptrace_get_task_struct(pid);
-	if (IS_ERR(child)) {
-		ret = PTR_ERR(child);
-		goto out;
-	}
-
-	ret = do_ptrace(child, request, addr, data);
-	put_task_struct(child);
-out:
-	unlock_kernel();
-	return ret;
+	return -ENOSYS;
 }
+#endif	/* CONFIG_COMPAT */
+#endif	/* CONFIG_PTRACE */
+
 
 asmlinkage void
 syscall_trace(struct pt_regs *regs, int entryexit)
@@ -736,30 +686,17 @@ syscall_trace(struct pt_regs *regs, int 
 	if (unlikely(current->audit_context) && entryexit)
 		audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), regs->gprs[2]);
 
-	if (!test_thread_flag(TIF_SYSCALL_TRACE))
-		goto out;
-	if (!(current->ptrace & PT_PTRACED))
-		goto out;
-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
-				 ? 0x80 : 0));
+	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+		tracehook_report_syscall(regs, entryexit);
 
-	/*
-	 * If the debuffer has set an invalid system call number,
-	 * we prepare to skip the system call restart handling.
-	 */
-	if (!entryexit && regs->gprs[2] >= NR_syscalls)
-		regs->trap = -1;
-
-	/*
-	 * this isn't the same as continuing with a signal, but it will do
-	 * for normal use.  strace only continues with a signal if the
-	 * stopping signal is not SIGTRAP.  -brl
-	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
+		/*
+		 * If the debugger has set an invalid system call number,
+		 * we prepare to skip the system call restart handling.
+		 */
+		if (!entryexit && regs->gprs[2] >= NR_syscalls)
+			regs->trap = -1;
 	}
- out:
+
 	if (unlikely(current->audit_context) && !entryexit)
 		audit_syscall_entry(test_thread_flag(TIF_31BIT)?AUDIT_ARCH_S390:AUDIT_ARCH_S390X,
 				    regs->gprs[2], regs->orig_gpr2, regs->gprs[3],
--- linux-2.6/arch/s390/kernel/Makefile.utrace-ptrace-compat
+++ linux-2.6/arch/s390/kernel/Makefile
@@ -34,3 +34,5 @@ obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS
 # This is just to get the dependencies...
 #
 binfmt_elf32.o:	$(TOPDIR)/fs/binfmt_elf.c
+
+CFLAGS_ptrace.o		+= -DUTS_MACHINE='"$(UTS_MACHINE)"'
--- linux-2.6/arch/x86_64/kernel/signal.c.utrace-ptrace-compat
+++ linux-2.6/arch/x86_64/kernel/signal.c
@@ -17,7 +17,7 @@
 #include <linux/signal.h>
 #include <linux/errno.h>
 #include <linux/wait.h>
-#include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/unistd.h>
 #include <linux/stddef.h>
 #include <linux/personality.h>
@@ -333,9 +333,6 @@ static int setup_rt_frame(int sig, struc
 	   see include/asm-x86_64/uaccess.h for details. */
 	set_fs(USER_DS);
 
-	regs->eflags &= ~TF_MASK;
-	if (test_thread_flag(TIF_SINGLESTEP))
-		ptrace_notify(SIGTRAP);
 #ifdef DEBUG_SIG
 	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
 		current->comm, current->pid, frame, regs->rip, frame->pretcode);
@@ -387,16 +384,12 @@ handle_signal(unsigned long sig, siginfo
 	}
 
 	/*
-	 * If TF is set due to a debugger (PT_DTRACE), clear the TF
-	 * flag so that register information in the sigcontext is
-	 * correct.
+	 * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF flag so
+	 * that register information in the sigcontext is correct.
 	 */
-	if (unlikely(regs->eflags & TF_MASK)) {
-		if (likely(current->ptrace & PT_DTRACE)) {
-			current->ptrace &= ~PT_DTRACE;
-			regs->eflags &= ~TF_MASK;
-		}
-	}
+	if (unlikely(regs->eflags & TF_MASK)
+	    && likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
+		regs->eflags &= ~TF_MASK;
 
 #ifdef CONFIG_IA32_EMULATION
 	if (test_thread_flag(TIF_IA32)) {
@@ -415,6 +408,15 @@ handle_signal(unsigned long sig, siginfo
 			sigaddset(&current->blocked,sig);
 		recalc_sigpending();
 		spin_unlock_irq(&current->sighand->siglock);
+
+		/*
+		 * Clear TF when entering the signal handler, but
+		 * notify any tracer that was single-stepping it.
+		 * The tracer may want to single-step inside the
+		 * handler too.
+		 */
+		regs->eflags &= ~TF_MASK;
+		tracehook_report_handle_signal(sig, ka, oldset, regs);
 	}
 
 	return ret;
--- linux-2.6/arch/x86_64/kernel/process.c.utrace-ptrace-compat
+++ linux-2.6/arch/x86_64/kernel/process.c
@@ -635,11 +635,6 @@ long sys_execve(char __user *name, char 
 	if (IS_ERR(filename)) 
 		return error;
 	error = do_execve(filename, argv, envp, &regs); 
-	if (error == 0) {
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
-	}
 	putname(filename);
 	return error;
 }
--- linux-2.6/arch/x86_64/kernel/traps.c.utrace-ptrace-compat
+++ linux-2.6/arch/x86_64/kernel/traps.c
@@ -870,14 +870,6 @@ asmlinkage void __kprobes do_debug(struc
 		 */
                 if (!user_mode(regs))
                        goto clear_TF_reenable;
-		/*
-		 * Was the TF flag set by a debugger? If so, clear it now,
-		 * so that register information is correct.
-		 */
-		if (tsk->ptrace & PT_DTRACE) {
-			regs->eflags &= ~TF_MASK;
-			tsk->ptrace &= ~PT_DTRACE;
-		}
 	}
 
 	/* Ok, finally something we can handle */
--- linux-2.6/arch/x86_64/kernel/ptrace.c.utrace-ptrace-compat
+++ linux-2.6/arch/x86_64/kernel/ptrace.c
@@ -13,12 +13,14 @@
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/errno.h>
+#include <linux/tracehook.h>
 #include <linux/ptrace.h>
 #include <linux/user.h>
 #include <linux/security.h>
 #include <linux/audit.h>
 #include <linux/seccomp.h>
 #include <linux/signal.h>
+#include <linux/module.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -30,6 +32,7 @@
 #include <asm/desc.h>
 #include <asm/proto.h>
 #include <asm/ia32.h>
+#include <asm/prctl.h>
 
 /*
  * does not yet catch signals sent when the child dies.
@@ -162,7 +165,7 @@ static int is_at_popf(struct task_struct
 	return 0;
 }
 
-static void set_singlestep(struct task_struct *child)
+void tracehook_enable_single_step(struct task_struct *child)
 {
 	struct pt_regs *regs = task_pt_regs(child);
 
@@ -192,19 +195,18 @@ static void set_singlestep(struct task_s
 	if (is_at_popf(child, regs))
 		return;
 
-	child->ptrace |= PT_DTRACE;
+	set_tsk_thread_flag(child, TIF_FORCED_TF);
 }
 
-static void clear_singlestep(struct task_struct *child)
+void tracehook_disable_single_step(struct task_struct *child)
 {
 	/* Always clear TIF_SINGLESTEP... */
 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 
 	/* But touch TF only if it was set by us.. */
-	if (child->ptrace & PT_DTRACE) {
+	if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF)) {
 		struct pt_regs *regs = task_pt_regs(child);
 		regs->eflags &= ~TRAP_FLAG;
-		child->ptrace &= ~PT_DTRACE;
 	}
 }
 
@@ -215,7 +217,7 @@ static void clear_singlestep(struct task
  */
 void ptrace_disable(struct task_struct *child)
 { 
-	clear_singlestep(child);
+	tracehook_disable_single_step(child);
 }
 
 static int putreg(struct task_struct *child,
@@ -268,6 +270,7 @@ static int putreg(struct task_struct *ch
 			tmp = get_stack_long(child, EFL_OFFSET); 
 			tmp &= ~FLAG_MASK; 
 			value |= tmp;
+			clear_tsk_thread_flag(child, TIF_FORCED_TF);
 			break;
 		case offsetof(struct user_regs_struct,cs): 
 			if ((value & 3) != 3)
@@ -300,303 +303,431 @@ static unsigned long getreg(struct task_
 			val = get_stack_long(child, regno);
 			if (test_tsk_thread_flag(child, TIF_IA32))
 				val &= 0xffffffff;
+			if (regno == (offsetof(struct user_regs_struct, eflags)
+				      - sizeof(struct pt_regs))
+			    && test_tsk_thread_flag(child, TIF_FORCED_TF))
+				val &= ~X86_EFLAGS_TF;
 			return val;
 	}
 
 }
 
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
-{
-	long i, ret;
-	unsigned ui;
+static int
+genregs_get(struct task_struct *target,
+	    const struct utrace_regset *regset,
+	    unsigned int pos, unsigned int count,
+	    void *kbuf, void __user *ubuf)
+{
+	if (kbuf) {
+		unsigned long *kp = kbuf;
+		while (count > 0) {
+			*kp++ = getreg(target, pos);
+			pos += sizeof(long);
+			count -= sizeof(long);
+		}
+	}
+	else {
+		unsigned long __user *up = ubuf;
+		while (count > 0) {
+			if (__put_user(getreg(target, pos), up++))
+				return -EFAULT;
+			pos += sizeof(long);
+			count -= sizeof(long);
+		}
+	}
 
-	switch (request) {
-	/* when I and D space are separate, these will need to be fixed. */
-	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
-	case PTRACE_PEEKDATA: {
-		unsigned long tmp;
-		int copied;
-
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		ret = -EIO;
-		if (copied != sizeof(tmp))
-			break;
-		ret = put_user(tmp,(unsigned long __user *) data);
-		break;
+	return 0;
+}
+
+static int
+genregs_set(struct task_struct *target,
+	    const struct utrace_regset *regset,
+	    unsigned int pos, unsigned int count,
+	    const void *kbuf, const void __user *ubuf)
+{
+	int ret = 0;
+
+	if (kbuf) {
+		const unsigned long *kp = kbuf;
+		while (!ret && count > 0) {
+			ret = putreg(target, pos, *kp++);
+			pos += sizeof(long);
+			count -= sizeof(long);
+		}
+	}
+	else {
+		const unsigned long __user *up = ubuf;
+		while (!ret && count > 0) {
+			unsigned long val;
+			ret = __get_user(val, up++);
+			if (!ret)
+				ret = putreg(target, pos, val);
+			pos += sizeof(long);
+			count -= sizeof(long);
+		}
 	}
 
-	/* read the word at location addr in the USER area. */
-	case PTRACE_PEEKUSR: {
-		unsigned long tmp;
+	return ret;
+}
 
-		ret = -EIO;
-		if ((addr & 7) ||
-		    addr > sizeof(struct user) - 7)
-			break;
 
-		switch (addr) { 
-		case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
-			tmp = getreg(child, addr);
-			break;
-		case offsetof(struct user, u_debugreg[0]):
-			tmp = child->thread.debugreg0;
-			break;
-		case offsetof(struct user, u_debugreg[1]):
-			tmp = child->thread.debugreg1;
-			break;
-		case offsetof(struct user, u_debugreg[2]):
-			tmp = child->thread.debugreg2;
-			break;
-		case offsetof(struct user, u_debugreg[3]):
-			tmp = child->thread.debugreg3;
-			break;
-		case offsetof(struct user, u_debugreg[6]):
-			tmp = child->thread.debugreg6;
-			break;
-		case offsetof(struct user, u_debugreg[7]):
-			tmp = child->thread.debugreg7;
-			break;
-		default:
-			tmp = 0;
-			break;
+static int
+dbregs_active(struct task_struct *tsk, const struct utrace_regset *regset)
+{
+	if (tsk->thread.debugreg6 | tsk->thread.debugreg7)
+		return 8;
+	return 0;
+}
+
+static int
+dbregs_get(struct task_struct *target,
+	   const struct utrace_regset *regset,
+	   unsigned int pos, unsigned int count,
+	   void *kbuf, void __user *ubuf)
+{
+	for (pos >>= 3, count >>= 3; count > 0; --count, ++pos) {
+		unsigned long val;
+
+		/*
+		 * The hardware updates the status register on a debug trap,
+		 * but do_debug (traps.c) saves it for us when that happens.
+		 * So whether the target is current or not, debugregN is good.
+		 */
+		val = 0;
+		switch (pos) {
+		case 0:	val = target->thread.debugreg0; break;
+		case 1:	val = target->thread.debugreg1; break;
+		case 2:	val = target->thread.debugreg2; break;
+		case 3:	val = target->thread.debugreg3; break;
+		case 6:	val = target->thread.debugreg6; break;
+		case 7:	val = target->thread.debugreg7; break;
+		}
+
+		if (kbuf) {
+			*(unsigned long *) kbuf = val;
+			kbuf += sizeof(unsigned long);
+		}
+		else {
+			if (__put_user(val, (unsigned long __user *) ubuf))
+				return -EFAULT;
+			ubuf += sizeof(unsigned long);
 		}
-		ret = put_user(tmp,(unsigned long __user *) data);
-		break;
 	}
 
-	/* when I and D space are separate, this will have to be fixed. */
-	case PTRACE_POKETEXT: /* write the word at location addr. */
-	case PTRACE_POKEDATA:
-		ret = 0;
-		if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
-			break;
-		ret = -EIO;
-		break;
+	return 0;
+}
 
-	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
-	{
-		int dsize = test_tsk_thread_flag(child, TIF_IA32) ? 3 : 7;
-		ret = -EIO;
-		if ((addr & 7) ||
-		    addr > sizeof(struct user) - 7)
-			break;
+static int
+dbregs_set(struct task_struct *target,
+	   const struct utrace_regset *regset,
+	   unsigned int pos, unsigned int count,
+	   const void *kbuf, const void __user *ubuf)
+{
+	unsigned long maxaddr = TASK_SIZE_OF(target);
+	maxaddr -= test_tsk_thread_flag(target, TIF_IA32) ? 3 : 7;
+
+	for (pos >>= 3, count >>= 3; count > 0; --count, ++pos) {
+		unsigned long val;
+		unsigned int i;
+
+		if (kbuf) {
+			val = *(const unsigned long *) kbuf;
+			kbuf += sizeof(unsigned long);
+		}
+		else {
+			if (__get_user(val, (unsigned long __user *) ubuf))
+				return -EFAULT;
+			ubuf += sizeof(unsigned long);
+		}
+
+		switch (pos) {
+#define SET_DBREG(n)							\
+			target->thread.debugreg##n = val;		\
+			if (target == current)				\
+				set_debugreg(target->thread.debugreg##n, n)
 
-		switch (addr) { 
-		case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
-			ret = putreg(child, addr, data);
+		case 0:
+			if (val >= maxaddr)
+				return -EIO;
+			SET_DBREG(0);
 			break;
-		/* Disallows to set a breakpoint into the vsyscall */
-		case offsetof(struct user, u_debugreg[0]):
-			if (data >= TASK_SIZE_OF(child) - dsize) break;
-			child->thread.debugreg0 = data;
-			ret = 0;
+		case 1:
+			if (val >= maxaddr)
+				return -EIO;
+			SET_DBREG(1);
 			break;
-		case offsetof(struct user, u_debugreg[1]):
-			if (data >= TASK_SIZE_OF(child) - dsize) break;
-			child->thread.debugreg1 = data;
-			ret = 0;
+		case 2:
+			if (val >= maxaddr)
+				return -EIO;
+			SET_DBREG(2);
 			break;
-		case offsetof(struct user, u_debugreg[2]):
-			if (data >= TASK_SIZE_OF(child) - dsize) break;
-			child->thread.debugreg2 = data;
-			ret = 0;
+		case 3:
+			if (val >= maxaddr)
+				return -EIO;
+			SET_DBREG(3);
 			break;
-		case offsetof(struct user, u_debugreg[3]):
-			if (data >= TASK_SIZE_OF(child) - dsize) break;
-			child->thread.debugreg3 = data;
-			ret = 0;
+		case 4:
+		case 5:
+			if (val != 0)
+				return -EIO;
+			break;
+		case 6:
+			if (val >> 32)
+				return -EIO;
+			SET_DBREG(6);
 			break;
-		case offsetof(struct user, u_debugreg[6]):
-				  if (data >> 32)
-				break; 
-			child->thread.debugreg6 = data;
-			ret = 0;
+		case 7:
+			/*
+			 * See arch/i386/kernel/ptrace.c for an explanation
+			 * of this awkward check.
+			 */
+			val &= ~DR_CONTROL_RESERVED;
+			for (i = 0; i < 4; i++)
+				if ((0x5554 >> ((val >> (16 + 4*i)) & 0xf))
+				    & 1)
+					return -EIO;
+			SET_DBREG(7);
 			break;
-		case offsetof(struct user, u_debugreg[7]):
-			/* See arch/i386/kernel/ptrace.c for an explanation of
-			 * this awkward check.*/
-			data &= ~DR_CONTROL_RESERVED;
-			for(i=0; i<4; i++)
-				if ((0x5554 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
-					break;
-			if (i == 4) {
-				child->thread.debugreg7 = data;
-			  ret = 0;
-		  }
-		  break;
+#undef	SET_DBREG
 		}
-		break;
 	}
-	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
-	case PTRACE_CONT:    /* restart after signal. */
 
-		ret = -EIO;
-		if (!valid_signal(data))
-			break;
-		if (request == PTRACE_SYSCALL)
-			set_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
-		else
-			clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
-		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-		child->exit_code = data;
-		/* make sure the single step bit is not set. */
-		clear_singlestep(child);
-		wake_up_process(child);
-		ret = 0;
-		break;
+	return 0;
+}
 
-#ifdef CONFIG_IA32_EMULATION
-		/* This makes only sense with 32bit programs. Allow a
-		   64bit debugger to fully examine them too. Better
-		   don't use it against 64bit processes, use
-		   PTRACE_ARCH_PRCTL instead. */
-	case PTRACE_SET_THREAD_AREA: {
-		struct user_desc __user *p;
-		int old; 
-		p = (struct user_desc __user *)data;
-		get_user(old,  &p->entry_number); 
-		put_user(addr, &p->entry_number);
-		ret = do_set_thread_area(&child->thread, p);
-		put_user(old,  &p->entry_number); 
-		break;
-	case PTRACE_GET_THREAD_AREA:
-		p = (struct user_desc __user *)data;
-		get_user(old,  &p->entry_number); 
-		put_user(addr, &p->entry_number);
-		ret = do_get_thread_area(&child->thread, p);
-		put_user(old,  &p->entry_number); 
-		break;
-	} 
-#endif
-		/* normal 64bit interface to access TLS data. 
-		   Works just like arch_prctl, except that the arguments
-		   are reversed. */
-	case PTRACE_ARCH_PRCTL: 
-		ret = do_arch_prctl(child, data, addr);
-		break;
 
-/*
- * make the child exit.  Best I can do is send it a sigkill. 
- * perhaps it should be put in the status that it wants to 
- * exit.
- */
-	case PTRACE_KILL:
-		ret = 0;
-		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
-			break;
-		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-		child->exit_code = SIGKILL;
-		/* make sure the single step bit is not set. */
-		clear_singlestep(child);
-		wake_up_process(child);
-		break;
-
-	case PTRACE_SINGLESTEP:    /* set the trap flag. */
-		ret = -EIO;
-		if (!valid_signal(data))
-			break;
-		clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
-		set_singlestep(child);
-		child->exit_code = data;
-		/* give it a chance to run. */
-		wake_up_process(child);
-		ret = 0;
-		break;
-
-	case PTRACE_DETACH:
-		/* detach a process that was attached. */
-		ret = ptrace_detach(child, data);
-		break;
-
-	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
-	  	if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
-			       sizeof(struct user_regs_struct))) {
-			ret = -EIO;
-			break;
-		}
-		ret = 0;
-		for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
-			ret |= __put_user(getreg(child, ui),(unsigned long __user *) data);
-			data += sizeof(long);
-		}
-		break;
+static int
+fpregs_active(struct task_struct *target, const struct utrace_regset *regset)
+{
+	return tsk_used_math(target) ? regset->n : 0;
+}
+
+static int
+fpregs_get(struct task_struct *target,
+	   const struct utrace_regset *regset,
+	   unsigned int pos, unsigned int count,
+	   void *kbuf, void __user *ubuf)
+{
+	if (tsk_used_math(target)) {
+		if (target == current)
+			unlazy_fpu(target);
 	}
+	else
+		init_fpu(target);
 
-	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
-		unsigned long tmp;
-	  	if (!access_ok(VERIFY_READ, (unsigned __user *)data,
-			       sizeof(struct user_regs_struct))) {
-			ret = -EIO;
-			break;
-		}
-		ret = 0;
-		for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
-			ret |= __get_user(tmp, (unsigned long __user *) data);
-			putreg(child, ui, tmp);
-			data += sizeof(long);
-		}
-		break;
+	return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				     &target->thread.i387.fxsave, 0, -1);
+}
+
+static int
+fpregs_set(struct task_struct *target,
+	   const struct utrace_regset *regset,
+	   unsigned int pos, unsigned int count,
+	   const void *kbuf, const void __user *ubuf)
+{
+	int ret;
+
+	if (tsk_used_math(target)) {
+		if (target == current)
+			unlazy_fpu(target);
 	}
+	else if (pos == 0 && count == sizeof(struct user_i387_struct))
+		set_stopped_child_used_math(target);
+	else
+		init_fpu(target);
 
-	case PTRACE_GETFPREGS: { /* Get the child extended FPU state. */
-		if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
-			       sizeof(struct user_i387_struct))) {
-			ret = -EIO;
-			break;
+	ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				   &target->thread.i387.fxsave, 0, -1);
+
+	target->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
+
+	return ret;
+}
+
+static int
+fsgs_active(struct task_struct *tsk, const struct utrace_regset *regset)
+{
+	if (tsk->thread.gsindex == GS_TLS_SEL || tsk->thread.gs)
+		return 2;
+	if (tsk->thread.fsindex == FS_TLS_SEL || tsk->thread.fs)
+		return 1;
+	return 0;
+}
+
+static inline u32 read_32bit_tls(struct task_struct *t, int tls)
+{
+	struct desc_struct *desc = (void *)t->thread.tls_array;
+	desc += tls;
+	return desc->base0 |
+		(((u32)desc->base1) << 16) |
+		(((u32)desc->base2) << 24);
+}
+
+static int
+fsgs_get(struct task_struct *target,
+	 const struct utrace_regset *regset,
+	 unsigned int pos, unsigned int count,
+	 void *kbuf, void __user *ubuf)
+{
+	unsigned long *kaddr = kbuf;
+	unsigned long __user *uaddr = ubuf;
+	unsigned long addr;
+
+	/*
+	 * XXX why the MSR reads here?
+	 * Can anything change the MSRs without changing thread.fs first?
+	 */
+	if (pos == 0) {		/* FS */
+		if (target->thread.fsindex == FS_TLS_SEL)
+			addr = read_32bit_tls(target, FS_TLS);
+		else if (target == current) {
+			rdmsrl(MSR_FS_BASE, addr);
 		}
-		ret = get_fpregs((struct user_i387_struct __user *)data, child);
-		break;
+		else
+			addr = target->thread.fs;
+		if (kaddr)
+			*kaddr++ = addr;
+		else if (__put_user(addr, uaddr++))
+			return -EFAULT;
 	}
 
-	case PTRACE_SETFPREGS: { /* Set the child extended FPU state. */
-		if (!access_ok(VERIFY_READ, (unsigned __user *)data,
-			       sizeof(struct user_i387_struct))) {
-			ret = -EIO;
-			break;
+	if (pos + count > sizeof(unsigned long)) { /* GS */
+		if (target->thread.gsindex == GS_TLS_SEL)
+			addr = read_32bit_tls(target, GS_TLS);
+		else if (target == current) {
+			rdmsrl(MSR_GS_BASE, addr);
 		}
-		set_stopped_child_used_math(child);
-		ret = set_fpregs(child, (struct user_i387_struct __user *)data);
-		break;
+		else
+			addr = target->thread.gs;
+		if (kaddr)
+			*kaddr = addr;
+		else if (__put_user(addr, uaddr))
+			return -EFAULT;
 	}
 
-	default:
-		ret = ptrace_request(child, request, addr, data);
-		break;
+	return 0;
+}
+
+static int
+fsgs_set(struct task_struct *target,
+	 const struct utrace_regset *regset,
+	 unsigned int pos, unsigned int count,
+	 const void *kbuf, const void __user *ubuf)
+{
+	const unsigned long *kaddr = kbuf;
+	const unsigned long __user *uaddr = ubuf;
+	unsigned long addr;
+	int ret = 0;
+
+	if (pos == 0) {		/* FS */
+		if (kaddr)
+			addr = *kaddr++;
+		else if (__get_user(addr, uaddr++))
+			return -EFAULT;
+		ret = do_arch_prctl(target, ARCH_SET_FS, addr);
+	}
+
+	if (!ret && pos + count > sizeof(unsigned long)) { /* GS */
+		if (kaddr)
+			addr = *kaddr;
+		else if (__get_user(addr, uaddr))
+			return -EFAULT;
+		ret = do_arch_prctl(target, ARCH_SET_GS, addr);
 	}
+
 	return ret;
 }
 
-static void syscall_trace(struct pt_regs *regs)
-{
 
-#if 0
-	printk("trace %s rip %lx rsp %lx rax %d origrax %d caller %lx tiflags %x ptrace %x\n",
-	       current->comm,
-	       regs->rip, regs->rsp, regs->rax, regs->orig_rax, __builtin_return_address(0),
-	       current_thread_info()->flags, current->ptrace); 
+/*
+ * These are our native regset flavors.
+ * XXX ioperm? vm86?
+ */
+static const struct utrace_regset native_regsets[] = {
+	{
+		.n = sizeof(struct user_regs_struct)/8, .size = 8, .align = 8,
+		.get = genregs_get, .set = genregs_set
+	},
+	{
+		.n = sizeof(struct user_i387_struct) / sizeof(long),
+		.size = sizeof(long), .align = sizeof(long),
+		.active = fpregs_active,
+		.get = fpregs_get, .set = fpregs_set
+	},
+	{
+		.n = 2, .size = sizeof(long), .align = sizeof(long),
+		.active = fsgs_active,
+		.get = fsgs_get, .set = fsgs_set
+	},
+	{
+		.n = 8, .size = sizeof(long), .align = sizeof(long),
+		.active = dbregs_active,
+		.get = dbregs_get, .set = dbregs_set
+	},
+};
+
+const struct utrace_regset_view utrace_x86_64_native = {
+	.name = "x86-64", .e_machine = EM_X86_64,
+	.regsets = native_regsets,
+	.n = sizeof native_regsets / sizeof native_regsets[0],
+};
+EXPORT_SYMBOL_GPL(utrace_x86_64_native);
+
+
+#ifdef CONFIG_PTRACE
+static const struct ptrace_layout_segment x86_64_uarea[] = {
+	{0, sizeof(struct user_regs_struct), 0, 0},
+	{offsetof(struct user, u_debugreg[0]),
+	 offsetof(struct user, u_debugreg[4]), 3, 0},
+	{offsetof(struct user, u_debugreg[6]),
+	 offsetof(struct user, u_debugreg[8]), 3, 6 * sizeof(long)},
+	{0, 0, -1, 0}
+};
+
+fastcall int arch_ptrace(long *req, struct task_struct *child,
+			 struct utrace_attached_engine *engine,
+			 unsigned long addr, unsigned long data, long *val)
+{
+	switch (*req) {
+	case PTRACE_PEEKUSR:
+		return ptrace_peekusr(child, engine, x86_64_uarea, addr, data);
+	case PTRACE_POKEUSR:
+		return ptrace_pokeusr(child, engine, x86_64_uarea, addr, data);
+	case PTRACE_GETREGS:
+		return ptrace_whole_regset(child, engine, data, 0, 0);
+	case PTRACE_SETREGS:
+		return ptrace_whole_regset(child, engine, data, 0, 1);
+	case PTRACE_GETFPREGS:
+		return ptrace_whole_regset(child, engine, data, 1, 0);
+	case PTRACE_SETFPREGS:
+		return ptrace_whole_regset(child, engine, data, 1, 1);
+#ifdef CONFIG_IA32_EMULATION
+	case PTRACE_GET_THREAD_AREA:
+	case PTRACE_SET_THREAD_AREA:
+		return ptrace_onereg_access(child, engine,
+					    &utrace_ia32_view, 3,
+					    addr, (void __user *)data,
+					    *req == PTRACE_SET_THREAD_AREA);
 #endif
-
-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
-				? 0x80 : 0));
-	/*
-	 * this isn't the same as continuing with a signal, but it will do
-	 * for normal use.  strace only continues with a signal if the
-	 * stopping signal is not SIGTRAP.  -brl
-	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
+		/* normal 64bit interface to access TLS data.
+		   Works just like arch_prctl, except that the arguments
+		   are reversed. */
+	case PTRACE_ARCH_PRCTL:
+		return do_arch_prctl(child, data, addr);
 	}
+	return -ENOSYS;
 }
+#endif	/* CONFIG_PTRACE */
+
 
 asmlinkage void syscall_trace_enter(struct pt_regs *regs)
 {
 	/* do the secure computing check first */
 	secure_computing(regs->orig_rax);
 
-	if (test_thread_flag(TIF_SYSCALL_TRACE)
-	    && (current->ptrace & PT_PTRACED))
-		syscall_trace(regs);
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(regs, 0);
 
 	if (unlikely(current->audit_context)) {
 		if (test_thread_flag(TIF_IA32)) {
@@ -618,8 +749,11 @@ asmlinkage void syscall_trace_leave(stru
 	if (unlikely(current->audit_context))
 		audit_syscall_exit(AUDITSC_RESULT(regs->rax), regs->rax);
 
-	if ((test_thread_flag(TIF_SYSCALL_TRACE)
-	     || test_thread_flag(TIF_SINGLESTEP))
-	    && (current->ptrace & PT_PTRACED))
-		syscall_trace(regs);
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(regs, 1);
+
+	if (test_thread_flag(TIF_SINGLESTEP)) {
+		force_sig(SIGTRAP, current); /* XXX */
+		tracehook_report_syscall_step(regs);
+	}
 }
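
A note on the regset conversion above: the table-driven get/set pairs
replace the word-at-a-time PTRACE_PEEKUSR and open-coded GETREGS loops.
As a hedged illustration (not part of the patch; the helper name is
ours), kernel code holding a stopped, quiescent task could read its
whole GP register block through the first native regset like this,
assuming the utrace_regset interface this series introduces:

	#include <linux/tracehook.h>	/* assumed home of utrace_regset */

	/* Fill *regs from a stopped task via the genregs regset (index 0). */
	static int example_get_gpregs(struct task_struct *task,
				      struct user_regs_struct *regs)
	{
		const struct utrace_regset *rs =
			&utrace_x86_64_native.regsets[0];

		/* pos 0, whole block, kernel buffer only (no user copy) */
		return (*rs->get)(task, rs, 0, sizeof(*regs), regs, NULL);
	}
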
--- linux-2.6/arch/x86_64/ia32/ptrace32.c.utrace-ptrace-compat
+++ linux-2.6/arch/x86_64/ia32/ptrace32.c
@@ -16,7 +16,11 @@
 #include <linux/unistd.h>
 #include <linux/mm.h>
 #include <linux/ptrace.h>
+#include <linux/tracehook.h>
+#include <linux/module.h>
+#include <linux/elf.h>
 #include <asm/ptrace.h>
+#include <asm/tracehook.h>
 #include <asm/compat.h>
 #include <asm/uaccess.h>
 #include <asm/user32.h>
@@ -25,7 +29,8 @@
 #include <asm/debugreg.h>
 #include <asm/i387.h>
 #include <asm/fpu32.h>
-#include <asm/ia32.h>
+#include <asm/ldt.h>
+#include <asm/desc.h>
 
 /*
  * Determines which flags the user has access to [1 = access, 0 = no access].
@@ -35,34 +40,33 @@
 #define FLAG_MASK 0x54dd5UL
 
 #define R32(l,q) \
-	case offsetof(struct user32, regs.l): stack[offsetof(struct pt_regs, q)/8] = val; break
+	case offsetof(struct user_regs_struct32, l): stack[offsetof(struct pt_regs, q)/8] = val; break
 
 static int putreg32(struct task_struct *child, unsigned regno, u32 val)
 {
-	int i;
 	__u64 *stack = (__u64 *)task_pt_regs(child);
 
 	switch (regno) {
-	case offsetof(struct user32, regs.fs):
+	case offsetof(struct user_regs_struct32, fs):
 		if (val && (val & 3) != 3) return -EIO; 
 		child->thread.fsindex = val & 0xffff;
 		break;
-	case offsetof(struct user32, regs.gs):
+	case offsetof(struct user_regs_struct32, gs):
 		if (val && (val & 3) != 3) return -EIO; 
 		child->thread.gsindex = val & 0xffff;
 		break;
-	case offsetof(struct user32, regs.ds):
+	case offsetof(struct user_regs_struct32, ds):
 		if (val && (val & 3) != 3) return -EIO; 
 		child->thread.ds = val & 0xffff;
 		break;
-	case offsetof(struct user32, regs.es):
+	case offsetof(struct user_regs_struct32, es):
 		child->thread.es = val & 0xffff;
 		break;
-	case offsetof(struct user32, regs.ss): 
+	case offsetof(struct user_regs_struct32, ss):
 		if ((val & 3) != 3) return -EIO;
         	stack[offsetof(struct pt_regs, ss)/8] = val & 0xffff;
 		break;
-	case offsetof(struct user32, regs.cs): 
+	case offsetof(struct user_regs_struct32, cs):
 		if ((val & 3) != 3) return -EIO;
 		stack[offsetof(struct pt_regs, cs)/8] = val & 0xffff;
 		break;
@@ -78,53 +82,16 @@ static int putreg32(struct task_struct *
 	R32(eip, rip);
 	R32(esp, rsp);
 
-	case offsetof(struct user32, regs.eflags): {
+	case offsetof(struct user_regs_struct32, eflags): {
 		__u64 *flags = &stack[offsetof(struct pt_regs, eflags)/8];
 		val &= FLAG_MASK;
 		*flags = val | (*flags & ~FLAG_MASK);
+		clear_tsk_thread_flag(child, TIF_FORCED_TF);
 		break;
 	}
 
-	case offsetof(struct user32, u_debugreg[4]): 
-	case offsetof(struct user32, u_debugreg[5]):
-		return -EIO;
-
-	case offsetof(struct user32, u_debugreg[0]):
-		child->thread.debugreg0 = val;
-		break;
-
-	case offsetof(struct user32, u_debugreg[1]):
-		child->thread.debugreg1 = val;
-		break;
-
-	case offsetof(struct user32, u_debugreg[2]):
-		child->thread.debugreg2 = val;
-		break;
-
-	case offsetof(struct user32, u_debugreg[3]):
-		child->thread.debugreg3 = val;
-		break;
-
-	case offsetof(struct user32, u_debugreg[6]):
-		child->thread.debugreg6 = val;
-		break; 
-
-	case offsetof(struct user32, u_debugreg[7]):
-		val &= ~DR_CONTROL_RESERVED;
-		/* See arch/i386/kernel/ptrace.c for an explanation of
-		 * this awkward check.*/
-		for(i=0; i<4; i++)
-			if ((0x5454 >> ((val >> (16 + 4*i)) & 0xf)) & 1)
-			       return -EIO;
-		child->thread.debugreg7 = val; 
-		break; 
-		    
 	default:
-		if (regno > sizeof(struct user32) || (regno & 3))
-			return -EIO;
-	       
-		/* Other dummy fields in the virtual user structure are ignored */ 
-		break; 		
+		BUG();
 	}
 	return 0;
 }
@@ -132,24 +99,25 @@ static int putreg32(struct task_struct *
 #undef R32
 
 #define R32(l,q) \
-	case offsetof(struct user32, regs.l): *val = stack[offsetof(struct pt_regs, q)/8]; break
+	case offsetof(struct user_regs_struct32, l): val = stack[offsetof(struct pt_regs, q)/8]; break
 
-static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
+static int getreg32(struct task_struct *child, unsigned regno)
 {
 	__u64 *stack = (__u64 *)task_pt_regs(child);
+	u32 val;
 
 	switch (regno) {
-	case offsetof(struct user32, regs.fs):
-	        *val = child->thread.fsindex;
+	case offsetof(struct user_regs_struct32, fs):
+	        val = child->thread.fsindex;
 		break;
-	case offsetof(struct user32, regs.gs):
-		*val = child->thread.gsindex;
+	case offsetof(struct user_regs_struct32, gs):
+		val = child->thread.gsindex;
 		break;
-	case offsetof(struct user32, regs.ds):
-		*val = child->thread.ds;
+	case offsetof(struct user_regs_struct32, ds):
+		val = child->thread.ds;
 		break;
-	case offsetof(struct user32, regs.es):
-		*val = child->thread.es;
+	case offsetof(struct user_regs_struct32, es):
+		val = child->thread.es;
 		break;
 
 	R32(cs, cs);
@@ -163,232 +131,503 @@ static int getreg32(struct task_struct *
 	R32(eax, rax);
 	R32(orig_eax, orig_rax);
 	R32(eip, rip);
-	R32(eflags, eflags);
 	R32(esp, rsp);
 
-	case offsetof(struct user32, u_debugreg[0]): 
-		*val = child->thread.debugreg0; 
-		break; 
-	case offsetof(struct user32, u_debugreg[1]): 
-		*val = child->thread.debugreg1; 
-		break; 
-	case offsetof(struct user32, u_debugreg[2]): 
-		*val = child->thread.debugreg2; 
-		break; 
-	case offsetof(struct user32, u_debugreg[3]): 
-		*val = child->thread.debugreg3; 
-		break; 
-	case offsetof(struct user32, u_debugreg[6]): 
-		*val = child->thread.debugreg6; 
-		break; 
-	case offsetof(struct user32, u_debugreg[7]): 
-		*val = child->thread.debugreg7; 
+	case offsetof(struct user_regs_struct32, eflags):
+		val = stack[offsetof(struct pt_regs, eflags) / 8];
+		if (test_tsk_thread_flag(child, TIF_FORCED_TF))
+			val &= ~X86_EFLAGS_TF;
 		break; 
 		    
 	default:
-		if (regno > sizeof(struct user32) || (regno & 3))
-			return -EIO;
-
-		/* Other dummy fields in the virtual user structure are ignored */ 
-		*val = 0;
+		BUG();
+		val = -1;
 		break; 		
 	}
-	return 0;
+
+	return val;
 }
 
 #undef R32
 
-static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
+static int
+ia32_genregs_get(struct task_struct *target,
+		 const struct utrace_regset *regset,
+		 unsigned int pos, unsigned int count,
+		 void *kbuf, void __user *ubuf)
 {
+	if (kbuf) {
+		u32 *kp = kbuf;
+		while (count > 0) {
+			*kp++ = getreg32(target, pos);
+			pos += 4;
+			count -= 4;
+		}
+	}
+	else {
+		u32 __user *up = ubuf;
+		while (count > 0) {
+			if (__put_user(getreg32(target, pos), up++))
+				return -EFAULT;
+			pos += 4;
+			count -= 4;
+		}
+	}
+
+	return 0;
+}
+
+static int
+ia32_genregs_set(struct task_struct *target,
+		 const struct utrace_regset *regset,
+		 unsigned int pos, unsigned int count,
+		 const void *kbuf, const void __user *ubuf)
+{
+	int ret = 0;
+
+	if (kbuf) {
+		const u32 *kp = kbuf;
+		while (!ret && count > 0) {
+			ret = putreg32(target, pos, *kp++);
+			pos += 4;
+			count -= 4;
+		}
+	}
+	else {
+		const u32 __user *up = ubuf;
+		while (!ret && count > 0) {
+			u32 val;
+			ret = __get_user(val, up++);
+			if (!ret)
+				ret = putreg32(target, pos, val);
+			pos += 4;
+			count -= 4;
+		}
+	}
+
+	return ret;
+}
+
+static int
+ia32_fpregs_active(struct task_struct *target,
+		   const struct utrace_regset *regset)
+{
+	return tsk_used_math(target) ? regset->n : 0;
+}
+
+static int
+ia32_fpregs_get(struct task_struct *target,
+		const struct utrace_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	struct user_i387_ia32_struct fp;
 	int ret;
-	compat_siginfo_t *si32 = (compat_siginfo_t *)compat_ptr(data);
-	siginfo_t ssi; 
-	siginfo_t *si = compat_alloc_user_space(sizeof(siginfo_t));
-	if (request == PTRACE_SETSIGINFO) {
-		memset(&ssi, 0, sizeof(siginfo_t));
-		ret = copy_siginfo_from_user32(&ssi, si32);
+
+	if (tsk_used_math(target)) {
+		if (target == current)
+			unlazy_fpu(target);
+	}
+	else
+		init_fpu(target);
+
+	ret = get_fpregs32(&fp, target);
+	if (ret == 0)
+		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					    &fp, 0, -1);
+
+	return ret;
+}
+
+static int
+ia32_fpregs_set(struct task_struct *target,
+		const struct utrace_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	struct user_i387_ia32_struct fp;
+	int ret;
+
+	if (tsk_used_math(target)) {
+		if (target == current)
+			unlazy_fpu(target);
+	}
+	else if (pos == 0 && count == sizeof(fp))
+		set_stopped_child_used_math(target);
+	else
+		init_fpu(target);
+
+	if (pos > 0 || count < sizeof(fp)) {
+		ret = get_fpregs32(&fp, target);
+		if (ret == 0)
+			ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+						   &fp, 0, -1);
 		if (ret)
 			return ret;
-		if (copy_to_user(si, &ssi, sizeof(siginfo_t)))
-			return -EFAULT;
+		kbuf = &fp;
 	}
-	ret = sys_ptrace(request, pid, addr, (unsigned long)si);
-	if (ret)
-		return ret;
-	if (request == PTRACE_GETSIGINFO) {
-		if (copy_from_user(&ssi, si, sizeof(siginfo_t)))
+	else if (kbuf == NULL) {
+		if (__copy_from_user(&fp, ubuf, sizeof(fp)))
 			return -EFAULT;
-		ret = copy_siginfo_to_user32(si32, &ssi);
+		kbuf = &fp;
 	}
-	return ret;
+
+	return set_fpregs32(target, kbuf);
 }
 
-asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
+static int
+ia32_fpxregs_active(struct task_struct *target,
+		    const struct utrace_regset *regset)
 {
-	struct task_struct *child;
-	struct pt_regs *childregs; 
-	void __user *datap = compat_ptr(data);
-	int ret;
-	__u32 val;
+	return tsk_used_math(target) ? regset->n : 0;
+}
 
-	switch (request) { 
-	case PTRACE_TRACEME:
-	case PTRACE_ATTACH:
-	case PTRACE_KILL:
-	case PTRACE_CONT:
-	case PTRACE_SINGLESTEP:
-	case PTRACE_DETACH:
-	case PTRACE_SYSCALL:
-	case PTRACE_SETOPTIONS:
-		return sys_ptrace(request, pid, addr, data); 
+static int
+ia32_fpxregs_get(struct task_struct *target,
+		 const struct utrace_regset *regset,
+		 unsigned int pos, unsigned int count,
+		 void *kbuf, void __user *ubuf)
+{
+	if (tsk_used_math(target)) {
+		if (target == current)
+			unlazy_fpu(target);
+	}
+	else
+		init_fpu(target);
 
-	default:
-		return -EINVAL;
+	return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				     &target->thread.i387.fxsave, 0, -1);
+}
 
-	case PTRACE_PEEKTEXT:
-	case PTRACE_PEEKDATA:
-	case PTRACE_POKEDATA:
-	case PTRACE_POKETEXT:
-	case PTRACE_POKEUSR:       
-	case PTRACE_PEEKUSR:
-	case PTRACE_GETREGS:
-	case PTRACE_SETREGS:
-	case PTRACE_SETFPREGS:
-	case PTRACE_GETFPREGS:
-	case PTRACE_SETFPXREGS:
-	case PTRACE_GETFPXREGS:
-	case PTRACE_GETEVENTMSG:
-		break;
+static int
+ia32_fpxregs_set(struct task_struct *target,
+		 const struct utrace_regset *regset,
+		 unsigned int pos, unsigned int count,
+		 const void *kbuf, const void __user *ubuf)
 
-	case PTRACE_SETSIGINFO:
-	case PTRACE_GETSIGINFO:
-		return ptrace32_siginfo(request, pid, addr, data);
-	}
+{
+	int ret;
 
-	child = ptrace_get_task_struct(pid);
-	if (IS_ERR(child))
-		return PTR_ERR(child);
+	if (tsk_used_math(target)) {
+		if (target == current)
+			unlazy_fpu(target);
+	}
+	else if (pos == 0 && count == sizeof(struct i387_fxsave_struct))
+		set_stopped_child_used_math(target);
+	else
+		init_fpu(target);
 
-	ret = ptrace_check_attach(child, request == PTRACE_KILL);
-	if (ret < 0)
-		goto out;
+	ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				   &target->thread.i387.fxsave, 0, -1);
 
-	childregs = task_pt_regs(child);
+	target->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
 
-	switch (request) {
-	case PTRACE_PEEKDATA:
-	case PTRACE_PEEKTEXT:
-		ret = 0;
-		if (access_process_vm(child, addr, &val, sizeof(u32), 0)!=sizeof(u32))
-			ret = -EIO;
-		else
-			ret = put_user(val, (unsigned int __user *)datap); 
-		break; 
+	return ret;
+}
 
-	case PTRACE_POKEDATA:
-	case PTRACE_POKETEXT:
-		ret = 0;
-		if (access_process_vm(child, addr, &data, sizeof(u32), 1)!=sizeof(u32))
-			ret = -EIO; 
-		break;
+static int
+ia32_dbregs_active(struct task_struct *tsk, const struct utrace_regset *regset)
+{
+	if (tsk->thread.debugreg6 | tsk->thread.debugreg7)
+		return 8;
+	return 0;
+}
 
-	case PTRACE_PEEKUSR:
-		ret = getreg32(child, addr, &val);
-		if (ret == 0)
-			ret = put_user(val, (__u32 __user *)datap);
-		break;
+static int
+ia32_dbregs_get(struct task_struct *target,
+		const struct utrace_regset *regset,
+		unsigned int pos, unsigned int count,
+		void *kbuf, void __user *ubuf)
+{
+	for (pos >>= 2, count >>= 2; count > 0; --count, ++pos) {
+		u32 val;
 
-	case PTRACE_POKEUSR:
-		ret = putreg32(child, addr, data);
-		break;
+		/*
+		 * The hardware updates the status register on a debug trap,
+		 * but do_debug (traps.c) saves it for us when that happens.
+		 * So whether the target is current or not, debugregN is good.
+		 */
+		val = 0;
+		switch (pos) {
+		case 0:	val = target->thread.debugreg0; break;
+		case 1:	val = target->thread.debugreg1; break;
+		case 2:	val = target->thread.debugreg2; break;
+		case 3:	val = target->thread.debugreg3; break;
+		case 6:	val = target->thread.debugreg6; break;
+		case 7:	val = target->thread.debugreg7; break;
+		}
 
-	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
-		int i;
-	  	if (!access_ok(VERIFY_WRITE, datap, 16*4)) {
-			ret = -EIO;
-			break;
+		if (kbuf) {
+			*(u32 *) kbuf = val;
+			kbuf += sizeof(u32);
 		}
-		ret = 0;
-		for ( i = 0; i <= 16*4 ; i += sizeof(__u32) ) {
-			getreg32(child, i, &val);
-			ret |= __put_user(val,(u32 __user *)datap);
-			datap += sizeof(u32);
+		else {
+			if (__put_user(val, (u32 __user *) ubuf))
+				return -EFAULT;
+			ubuf += sizeof(u32);
 		}
-		break;
 	}
 
-	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
-		unsigned long tmp;
-		int i;
-	  	if (!access_ok(VERIFY_READ, datap, 16*4)) {
-			ret = -EIO;
-			break;
+	return 0;
+}
+
+static int
+ia32_dbregs_set(struct task_struct *target,
+		const struct utrace_regset *regset,
+		unsigned int pos, unsigned int count,
+		const void *kbuf, const void __user *ubuf)
+{
+	/*
+	 * We'll just hijack the native setter to do the real work for us.
+	 */
+	const struct utrace_regset *dbregset = &utrace_x86_64_native.regsets[3];
+
+	int ret = 0;
+
+	for (pos >>= 2, count >>= 2; count > 0; --count, ++pos) {
+		unsigned long val;
+
+		if (kbuf) {
+			val = *(const u32 *) kbuf;
+			kbuf += sizeof(u32);
 		}
-		ret = 0; 
-		for ( i = 0; i <= 16*4; i += sizeof(u32) ) {
-			ret |= __get_user(tmp, (u32 __user *)datap);
-			putreg32(child, i, tmp);
-			datap += sizeof(u32);
+		else {
+			if (__get_user(val, (u32 __user *) ubuf))
+				return -EFAULT;
+			ubuf += sizeof(u32);
 		}
-		break;
-	}
 
-	case PTRACE_GETFPREGS:
-		ret = -EIO; 
-		if (!access_ok(VERIFY_READ, compat_ptr(data), 
-			       sizeof(struct user_i387_struct)))
-			break;
-		save_i387_ia32(child, datap, childregs, 1);
-		ret = 0; 
+		ret = (*dbregset->set)(target, dbregset, pos * sizeof(long),
+				       sizeof(val), &val, NULL);
+		if (ret)
 			break;
+	}
 
-	case PTRACE_SETFPREGS:
-		ret = -EIO;
-		if (!access_ok(VERIFY_WRITE, datap, 
-			       sizeof(struct user_i387_struct)))
-			break;
-		ret = 0;
-		/* don't check EFAULT to be bug-to-bug compatible to i386 */
-		restore_i387_ia32(child, datap, 1);
-		break;
+	return ret;
+}
 
-	case PTRACE_GETFPXREGS: { 
-		struct user32_fxsr_struct __user *u = datap;
-		init_fpu(child); 
-		ret = -EIO;
-		if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
-			break;
-			ret = -EFAULT;
-		if (__copy_to_user(u, &child->thread.i387.fxsave, sizeof(*u)))
-			break;
-		ret = __put_user(childregs->cs, &u->fcs);
-		ret |= __put_user(child->thread.ds, &u->fos); 
-		break; 
-	} 
-	case PTRACE_SETFPXREGS: { 
-		struct user32_fxsr_struct __user *u = datap;
-		unlazy_fpu(child);
-		ret = -EIO;
-		if (!access_ok(VERIFY_READ, u, sizeof(*u)))
-			break;
-		/* no checking to be bug-to-bug compatible with i386 */
-		__copy_from_user(&child->thread.i387.fxsave, u, sizeof(*u));
-		set_stopped_child_used_math(child);
-		child->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
-		ret = 0; 
-		break;
+
+/*
+ * Perform get_thread_area on behalf of the traced child.
+ */
+static int
+ia32_tls_get(struct task_struct *target,
+	     const struct utrace_regset *regset,
+	     unsigned int pos, unsigned int count,
+	     void *kbuf,  void __user *ubuf)
+{
+	struct user_desc info, *ip;
+	const struct n_desc_struct *desc;
+	const struct n_desc_struct *tls;
+
+/*
+ * Get the current Thread-Local Storage area:
+ */
+
+#define GET_BASE(desc) ( \
+	(((desc)->a >> 16) & 0x0000ffff) | \
+	(((desc)->b << 16) & 0x00ff0000) | \
+	( (desc)->b        & 0xff000000)   )
+
+#define GET_LIMIT(desc) ( \
+	((desc)->a & 0x0ffff) | \
+	 ((desc)->b & 0xf0000) )
+
+#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
+#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
+#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
+#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
+#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
+#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
+
+	tls = (struct n_desc_struct *) target->thread.tls_array;
+	desc = &tls[pos / sizeof(struct user_desc)];
+	ip = kbuf ?: &info;
+	for (; count > 0; count -= sizeof(struct user_desc), ++desc) {
+		memset(ip, 0, sizeof *ip);
+		ip->entry_number = desc - tls + GDT_ENTRY_TLS_MIN;
+		ip->base_addr = GET_BASE(desc);
+		ip->limit = GET_LIMIT(desc);
+		ip->seg_32bit = GET_32BIT(desc);
+		ip->contents = GET_CONTENTS(desc);
+		ip->read_exec_only = !GET_WRITABLE(desc);
+		ip->limit_in_pages = GET_LIMIT_PAGES(desc);
+		ip->seg_not_present = !GET_PRESENT(desc);
+		ip->useable = GET_USEABLE(desc);
+
+		if (kbuf)
+			++ip;
+		else {
+			if (__copy_to_user(ubuf, &info, sizeof(info)))
+				return -EFAULT;
+			ubuf += sizeof(info);
+		}
 	}
 
-	case PTRACE_GETEVENTMSG:
-		ret = put_user(child->ptrace_message,(unsigned int __user *)compat_ptr(data));
-		break;
+	return 0;
+}
 
-	default:
-		BUG();
+/*
+ * Perform set_thread_area on behalf of the traced child.
+ */
+static int
+ia32_tls_set(struct task_struct *target,
+	     const struct utrace_regset *regset,
+	     unsigned int pos, unsigned int count,
+	     const void *kbuf, const void __user *ubuf)
+{
+	struct user_desc info;
+	struct n_desc_struct *desc;
+	struct n_desc_struct newtls[GDT_ENTRY_TLS_ENTRIES];
+	unsigned int i;
+	int cpu;
+
+	pos /= sizeof(struct user_desc);
+	count /= sizeof(struct user_desc);
+
+	desc = &newtls[pos];
+	for (i = 0; i < count; ++i, ++desc) {
+		const struct user_desc *ip;
+		if (kbuf) {
+			ip = kbuf;
+			kbuf += sizeof(struct user_desc);
+		}
+		else {
+			ip = &info;
+			if (__copy_from_user(&info, ubuf, sizeof(info)))
+				return -EFAULT;
+			ubuf += sizeof(struct user_desc);
+		}
+
+		if (LDT_empty(ip)) {
+			desc->a = 0;
+			desc->b = 0;
+		} else {
+			desc->a = LDT_entry_a(ip);
+			desc->b = LDT_entry_b(ip);
+		}
 	}
 
- out:
-	put_task_struct(child);
-	return ret;
+	/*
+	 * We must not get preempted while modifying the TLS.
+	 */
+	cpu = get_cpu();
+	memcpy(&target->thread.tls_array[pos], &newtls[pos],
+	       count * sizeof(newtls[0]));
+	if (target == current)
+		load_TLS(&target->thread, cpu);
+	put_cpu();
+
+	return 0;
+}
+
+/*
+ * Determine how many TLS slots are in use.
+ */
+static int
+ia32_tls_active(struct task_struct *target, const struct utrace_regset *regset)
+{
+	int i;
+	for (i = GDT_ENTRY_TLS_ENTRIES; i > 0; --i) {
+		struct n_desc_struct *desc = (struct n_desc_struct *)
+			&target->thread.tls_array[i - 1];
+		if ((desc->a | desc->b) != 0)
+			break;
+	}
+	return i;
 }
 
+
+/*
+ * This should match arch/i386/kernel/ptrace.c:native_regsets.
+ * XXX ioperm? vm86?
+ */
+static const struct utrace_regset ia32_regsets[] = {
+	{
+		.n = sizeof(struct user_regs_struct32)/4,
+		.size = 4, .align = 4,
+		.get = ia32_genregs_get, .set = ia32_genregs_set
+	},
+	{
+		.n = sizeof(struct user_i387_ia32_struct) / 4,
+		.size = 4, .align = 4,
+		.active = ia32_fpregs_active,
+		.get = ia32_fpregs_get, .set = ia32_fpregs_set
+	},
+	{
+		.n = sizeof(struct user32_fxsr_struct) / 4,
+		.size = 4, .align = 4,
+		.active = ia32_fpxregs_active,
+		.get = ia32_fpxregs_get, .set = ia32_fpxregs_set
+	},
+	{
+		.n = GDT_ENTRY_TLS_ENTRIES,
+		.bias = GDT_ENTRY_TLS_MIN,
+		.size = sizeof(struct user_desc),
+		.align = sizeof(struct user_desc),
+		.active = ia32_tls_active,
+		.get = ia32_tls_get, .set = ia32_tls_set
+	},
+	{
+		.n = 8, .size = 4, .align = 4,
+		.active = ia32_dbregs_active,
+		.get = ia32_dbregs_get, .set = ia32_dbregs_set
+	},
+};
+
+const struct utrace_regset_view utrace_ia32_view = {
+	.name = "i386", .e_machine = EM_386,
+	.regsets = ia32_regsets,
+	.n = sizeof ia32_regsets / sizeof ia32_regsets[0],
+};
+EXPORT_SYMBOL_GPL(utrace_ia32_view);
+
+
+#ifdef CONFIG_PTRACE
+/*
+ * This matches the arch/i386/kernel/ptrace.c definitions.
+ */
+
+static const struct ptrace_layout_segment ia32_uarea[] = {
+	{0, sizeof(struct user_regs_struct32), 0, 0},
+	{offsetof(struct user32, u_debugreg[0]),
+	 offsetof(struct user32, u_debugreg[8]), 4, 0},
+	{0, 0, -1, 0}
+};
+
+fastcall int arch_compat_ptrace(compat_long_t *req, struct task_struct *child,
+				struct utrace_attached_engine *engine,
+				compat_ulong_t addr, compat_ulong_t data,
+				compat_long_t *val)
+{
+	switch (*req) {
+	case PTRACE_PEEKUSR:
+		return ptrace_compat_peekusr(child, engine, ia32_uarea,
+					     addr, data);
+	case PTRACE_POKEUSR:
+		return ptrace_compat_pokeusr(child, engine, ia32_uarea,
+					     addr, data);
+	case PTRACE_GETREGS:
+		return ptrace_whole_regset(child, engine, data, 0, 0);
+	case PTRACE_SETREGS:
+		return ptrace_whole_regset(child, engine, data, 0, 1);
+	case PTRACE_GETFPREGS:
+		return ptrace_whole_regset(child, engine, data, 1, 0);
+	case PTRACE_SETFPREGS:
+		return ptrace_whole_regset(child, engine, data, 1, 1);
+	case PTRACE_GETFPXREGS:
+		return ptrace_whole_regset(child, engine, data, 2, 0);
+	case PTRACE_SETFPXREGS:
+		return ptrace_whole_regset(child, engine, data, 2, 1);
+	case PTRACE_GET_THREAD_AREA:
+	case PTRACE_SET_THREAD_AREA:
+		return ptrace_onereg_access(child, engine,
+					    &utrace_ia32_view, 3,
+					    addr,
+					    (void __user *)(unsigned long)data,
+					    *req == PTRACE_SET_THREAD_AREA);
+	}
+	return -ENOSYS;
+}
+#endif	/* CONFIG_PTRACE */
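
For reference, the GET_BASE()/GET_LIMIT() extraction that ia32_tls_get()
performs above can be written long-hand.  This is a hedged standalone
sketch of the descriptor layout only (the helper name is ours, not the
patch's): a GDT descriptor scatters the 32-bit segment base across bits
31:16 of the low word and bits 7:0 and 31:24 of the high word.

	/* Reassemble the segment base from a two-word GDT descriptor. */
	static unsigned long example_desc_base(const struct n_desc_struct *d)
	{
		return ((d->a >> 16) & 0x0000ffff) |	/* base 15:0  */
		       ((d->b << 16) & 0x00ff0000) |	/* base 23:16 */
		       ( d->b        & 0xff000000);	/* base 31:24 */
	}
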
--- linux-2.6/arch/x86_64/ia32/ia32_signal.c.utrace-ptrace-compat
+++ linux-2.6/arch/x86_64/ia32/ia32_signal.c
@@ -497,11 +497,7 @@ int ia32_setup_frame(int sig, struct k_s
 
 	regs->cs = __USER32_CS; 
 	regs->ss = __USER32_DS; 
-
 	set_fs(USER_DS);
-    regs->eflags &= ~TF_MASK;
-    if (test_thread_flag(TIF_SINGLESTEP))
-        ptrace_notify(SIGTRAP);
 
 #if DEBUG_SIG
 	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
@@ -593,11 +589,7 @@ int ia32_setup_rt_frame(int sig, struct 
 	
 	regs->cs = __USER32_CS; 
 	regs->ss = __USER32_DS; 
-
 	set_fs(USER_DS);
-    regs->eflags &= ~TF_MASK;
-    if (test_thread_flag(TIF_SINGLESTEP))
-        ptrace_notify(SIGTRAP);
 
 #if DEBUG_SIG
 	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
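
The trap-flag fiddling deleted above is subsumed by the TIF_FORCED_TF
bookkeeping from earlier in this patch: the tracing core sets TF itself,
and the register accessors (getreg/getreg32) mask it back out of the
EFLAGS value a debugger sees.  A minimal sketch of that masking rule,
assuming the flag definitions this series adds; the helper name is ours:

	/* EFLAGS as a debugger should see it: hide a kernel-forced TF. */
	static unsigned long example_visible_eflags(struct task_struct *task,
						    unsigned long eflags)
	{
		if (test_tsk_thread_flag(task, TIF_FORCED_TF))
			eflags &= ~X86_EFLAGS_TF;
		return eflags;
	}
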
--- linux-2.6/arch/x86_64/ia32/sys_ia32.c.utrace-ptrace-compat
+++ linux-2.6/arch/x86_64/ia32/sys_ia32.c
@@ -855,11 +855,6 @@ asmlinkage long sys32_execve(char __user
 	if (IS_ERR(filename))
 		return error;
 	error = compat_do_execve(filename, argv, envp, regs);
-	if (error == 0) {
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
-	}
 	putname(filename);
 	return error;
 }
--- linux-2.6/arch/x86_64/ia32/ia32_aout.c.utrace-ptrace-compat
+++ linux-2.6/arch/x86_64/ia32/ia32_aout.c
@@ -421,12 +421,6 @@ beyond_if:
 	(regs)->cs = __USER32_CS;
 	(regs)->ss = __USER32_DS;
 	set_fs(USER_DS);
-	if (unlikely(current->ptrace & PT_PTRACED)) {
-		if (current->ptrace & PT_TRACE_EXEC)
-			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
-		else
-			send_sig(SIGTRAP, current, 0);
-	}
 	return 0;
 }
 
--- linux-2.6/arch/x86_64/ia32/fpu32.c.utrace-ptrace-compat
+++ linux-2.6/arch/x86_64/ia32/fpu32.c
@@ -9,6 +9,7 @@
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/i387.h>
+#include <asm/user32.h>
 
 static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
 {
@@ -24,7 +25,8 @@ static inline unsigned short twd_i387_to
         return tmp;
 }
 
-static inline unsigned long twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
+static inline unsigned long
+twd_fxsr_to_i387(const struct i387_fxsave_struct *fxsave)
 {
 	struct _fpxreg *st = NULL;
 	unsigned long tos = (fxsave->swd >> 11) & 7;
@@ -71,16 +73,11 @@ static inline unsigned long twd_fxsr_to_
 }
 
 
-static inline int convert_fxsr_from_user(struct i387_fxsave_struct *fxsave,
-					 struct _fpstate_ia32 __user *buf)
+static inline void
+convert_fxsr_env_from_i387(struct i387_fxsave_struct *fxsave, const u32 env[7])
 {
-	struct _fpxreg *to;
-	struct _fpreg __user *from;
-	int i;
 	u32 v;
-	int err = 0;
-
-#define G(num,val) err |= __get_user(val, num + (u32 __user *)buf)
+#define G(num,val) val = env[num]
 	G(0, fxsave->cwd);
 	G(1, fxsave->swd);
 	G(2, fxsave->twd);
@@ -91,9 +88,21 @@ static inline int convert_fxsr_from_user
 	G(5, fxsave->rdp);
 	/* 6: ds ignored */
 #undef G
-	if (err) 
+}
+
+static inline int convert_fxsr_from_user(struct i387_fxsave_struct *fxsave,
+					 struct _fpstate_ia32 __user *buf)
+{
+	u32 env[7];
+	struct _fpxreg *to;
+	struct _fpreg __user *from;
+	int i;
+
+	if (__copy_from_user(env, buf, sizeof(env)))
 		return -1; 
 
+	convert_fxsr_env_from_i387(fxsave, env);
+
 	to = (struct _fpxreg *)&fxsave->st_space[0];
 	from = &buf->_st[0];
 	for (i = 0 ; i < 8 ; i++, to++, from++) {
@@ -104,16 +113,11 @@ static inline int convert_fxsr_from_user
 }
 
 
-static inline int convert_fxsr_to_user(struct _fpstate_ia32 __user *buf,
-				       struct i387_fxsave_struct *fxsave,
-				       struct pt_regs *regs,
-				       struct task_struct *tsk)
+static inline void
+convert_fxsr_env_to_i387(struct task_struct *tsk, struct pt_regs *regs,
+			 u32 env[7], const struct i387_fxsave_struct *fxsave)
 {
-	struct _fpreg __user *to;
-	struct _fpxreg *from;
-	int i;
 	u16 cs,ds; 
-	int err = 0; 
 
 	if (tsk == current) {
 		/* should be actually ds/cs at fpu exception time,
@@ -125,7 +129,7 @@ static inline int convert_fxsr_to_user(s
 		cs = regs->cs;
 	} 
 
-#define P(num,val) err |= __put_user(val, num + (u32 __user *)buf)
+#define P(num,val) env[num] = val
 	P(0, (u32)fxsave->cwd | 0xffff0000);
 	P(1, (u32)fxsave->swd | 0xffff0000);
 	P(2, twd_fxsr_to_i387(fxsave));
@@ -134,8 +138,21 @@ static inline int convert_fxsr_to_user(s
 	P(5, fxsave->rdp);
 	P(6, 0xffff0000 | ds);
 #undef P
+}
+
+
+static inline int convert_fxsr_to_user(struct _fpstate_ia32 __user *buf,
+				       struct i387_fxsave_struct *fxsave,
+				       struct pt_regs *regs,
+				       struct task_struct *tsk)
+{
+	struct _fpreg __user *to;
+	struct _fpxreg *from;
+	int i;
+	u32 env[7];
 
-	if (err) 
+	convert_fxsr_env_to_i387(tsk, regs, env, fxsave);
+	if (__copy_to_user(buf, env, sizeof(env)))
 		return -1; 
 
 	to = &buf->_st[0];
@@ -181,3 +198,38 @@ int save_i387_ia32(struct task_struct *t
 			      sizeof(struct i387_fxsave_struct));
 	return err ? -1 : 1;
 }
+
+int get_fpregs32(struct user_i387_ia32_struct *buf, struct task_struct *tsk)
+{
+	struct pt_regs *regs = ((struct pt_regs *)tsk->thread.rsp0) - 1;
+	struct _fpreg *to;
+	const struct _fpxreg *from;
+	unsigned int i;
+
+	convert_fxsr_env_to_i387(tsk, regs,
+				 (u32 *) buf, &tsk->thread.i387.fxsave);
+
+	to = (struct _fpreg *) buf->st_space;
+	from = (const struct _fpxreg *) &tsk->thread.i387.fxsave.st_space[0];
+	for (i = 0; i < 8; i++, to++, from++)
+		*to = *(const struct _fpreg *) from;
+
+	return 0;
+}
+
+int
+set_fpregs32(struct task_struct *tsk, const struct user_i387_ia32_struct *buf)
+{
+	struct _fpxreg *to;
+	const struct _fpreg *from;
+	unsigned int i;
+
+	convert_fxsr_env_from_i387(&tsk->thread.i387.fxsave, (u32 *) buf);
+
+	to = (struct _fpxreg *) &tsk->thread.i387.fxsave.st_space[0];
+	from = (const struct _fpreg *) buf->st_space;
+	for (i = 0; i < 8; i++, to++, from++)
+		*(struct _fpreg *) to = *from;
+
+	return 0;
+}
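
The get_fpregs32()/set_fpregs32() helpers just added give regset code a
kernel-buffer path to the legacy ia32 FP image.  A hedged usage sketch
(illustration only; quiescence of both tasks is assumed and the helper
name is ours): copying one stopped task's ia32-format FP state to
another.

	static int example_copy_ia32_fpstate(struct task_struct *dst,
					     struct task_struct *src)
	{
		struct user_i387_ia32_struct fp;
		int ret;

		ret = get_fpregs32(&fp, src);	/* fxsave -> legacy image */
		if (ret == 0)
			ret = set_fpregs32(dst, &fp);	/* legacy -> fxsave */
		return ret;
	}
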
--- linux-2.6/arch/x86_64/ia32/ia32entry.S.utrace-ptrace-compat
+++ linux-2.6/arch/x86_64/ia32/ia32entry.S
@@ -417,7 +417,7 @@ ia32_sys_call_table:
 	.quad sys_setuid16
 	.quad sys_getuid16
 	.quad compat_sys_stime	/* stime */		/* 25 */
-	.quad sys32_ptrace	/* ptrace */
+	.quad compat_sys_ptrace	/* ptrace */
 	.quad sys_alarm
 	.quad sys_fstat	/* (old)fstat */
 	.quad sys_pause
--- linux-2.6/arch/x86_64/mm/fault.c.utrace-ptrace-compat
+++ linux-2.6/arch/x86_64/mm/fault.c
@@ -11,7 +11,7 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
@@ -252,7 +252,7 @@ int unhandled_signal(struct task_struct 
 {
 	if (tsk->pid == 1)
 		return 1;
-	if (tsk->ptrace & PT_PTRACED)
+	if (tracehook_consider_fatal_signal(tsk, sig))
 		return 0;
 	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
 		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
--- linux-2.6/arch/frv/kernel/ptrace.c.utrace-ptrace-compat
+++ linux-2.6/arch/frv/kernel/ptrace.c
@@ -700,24 +700,11 @@ asmlinkage void do_syscall_trace(int lea
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
 		return;
 
-	if (!(current->ptrace & PT_PTRACED))
-		return;
-
 	/* we need to indicate entry or exit to strace */
 	if (leaving)
 		__frame->__status |= REG__STATUS_SYSC_EXIT;
 	else
 		__frame->__status |= REG__STATUS_SYSC_ENTRY;
 
-	ptrace_notify(SIGTRAP);
-
-	/*
-	 * this isn't the same as continuing with a signal, but it will do
-	 * for normal use.  strace only continues with a signal if the
-	 * stopping signal is not SIGTRAP.  -brl
-	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
-	}
+	tracehook_report_syscall(__frame, leaving);
 }
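
The frv hunk above follows the conversion pattern applied throughout
this patch: drop the explicit PT_PTRACED check and the ptrace_notify()
plus exit_code dance, and report through the tracehook layer whenever
TIF_SYSCALL_TRACE is set.  A hedged sketch of the resulting arch-side
shape under the tracehook API this series introduces (function name
ours):

	#include <linux/tracehook.h>

	asmlinkage void example_do_syscall_trace(struct pt_regs *regs,
						 int leaving)
	{
		if (test_thread_flag(TIF_SYSCALL_TRACE))
			tracehook_report_syscall(regs, leaving);
	}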