/**
* @file backtrace.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon
* @author David Smith
*/

#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/highmem.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
#include <asm/stacktrace.h>
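
/*
 * Callbacks for the generic stack walker (dump_trace()), used when the
 * sample was taken in kernel mode.  backtrace_stack() is invoked once
 * per stack encountered; returning 0 tells dump_trace() to keep
 * walking, so every stack (process, IRQ, exception) is traversed.
 */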
static int backtrace_stack(void *data, char *name)
{
	/* Yes, we want all stacks */
	return 0;
}
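
/*
 * Called by the walker for every text address it finds; record each
 * address with oprofile until the requested depth is exhausted.
 */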
static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	unsigned int *depth = data;

	if ((*depth)--)
		oprofile_add_trace(addr);
}
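
/*
 * Glue the callbacks together; print_context_stack() follows saved
 * frame pointers and feeds each address to backtrace_address().
 */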
static struct stacktrace_ops backtrace_ops = {
	.stack		= backtrace_stack,
	.address	= backtrace_address,
	.walk_stack	= print_context_stack,
};

/*
 * Best-effort, GUP-based copy_from_user() that assumes IRQ or NMI
 * context, i.e. it may neither fault nor sleep (adapted from
 * arch/x86/kernel/cpu/perf_event.c).
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

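	/*
	 * Copy one page at a time: pin the page with the IRQ-safe
	 * __get_user_pages_fast() (no mmap_sem, no faulting), map it
	 * with kmap_atomic(), memcpy the chunk, then drop the mapping
	 * and the page reference.  Stop early if a page cannot be
	 * pinned, returning however many bytes were copied.
	 */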
	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page);
		memcpy(to, map + offset, size);
		kunmap_atomic(map);
		put_page(page);

		len += size;
		to += size;
		addr += size;
	} while (len < n);

	return len;
}

#ifdef CONFIG_COMPAT
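/*
 * Unwind one frame of a 32-bit (compat) user stack: copy the saved
 * frame pointer and return address from user space, report the return
 * address to oprofile, and hand back the next frame pointer (or NULL
 * if the copy failed or the chain does not move up the stack).
 */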
static struct stack_frame_ia32 *
dump_user_backtrace_32(struct stack_frame_ia32 *head)
{
	/* Also check accessibility of one struct stack_frame_ia32 beyond: */
	struct stack_frame_ia32 bufhead[2];
	struct stack_frame_ia32 *fp;
	unsigned long bytes;

	bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
	if (bytes != sizeof(bufhead))
		return NULL;

	fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);

	oprofile_add_trace(bufhead[0].return_address);

	/* frame pointers should strictly progress back up the stack
	 * (towards higher addresses) */
	if (head >= fp)
		return NULL;

	return fp;
}
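
/*
 * Walk a 32-bit user stack.  Returns 1 if the current task is a compat
 * task and the walk was attempted, 0 if the caller should fall through
 * to the native unwinder.
 */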
static inline int
x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
{
	struct stack_frame_ia32 *head;

	/* User process is 32-bit */
	if (!current || !test_thread_flag(TIF_IA32))
		return 0;

	head = (struct stack_frame_ia32 *) regs->bp;
	while (depth-- && head)
		head = dump_user_backtrace_32(head);

	return 1;
}
#else
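/* Without CONFIG_COMPAT there are no 32-bit compat tasks to unwind. */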
static inline int
x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
{
	return 0;
}
#endif /* CONFIG_COMPAT */
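
/*
 * Native counterpart of dump_user_backtrace_32(): unwind one
 * user-space frame and return the next frame pointer, or NULL when the
 * chain ends or cannot be read.
 */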
static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
{
	/* Also check accessibility of one struct stack_frame beyond: */
	struct stack_frame bufhead[2];
	unsigned long bytes;

	bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
	if (bytes != sizeof(bufhead))
		return NULL;

	oprofile_add_trace(bufhead[0].return_address);

	/* frame pointers should strictly progress back up the stack
	 * (towards higher addresses) */
	if (head >= bufhead[0].next_frame)
		return NULL;

	return bufhead[0].next_frame;
}
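
/*
 * Entry point, registered as oprofile's backtrace callback (presumably
 * from oprofile_arch_init() in arch/x86/oprofile/init.c).  Kernel-mode
 * samples are walked with dump_trace(); user-mode samples are unwound
 * by following saved frame pointers, via the compat path for 32-bit
 * tasks.
 */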
void
x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);

	if (!user_mode_vm(regs)) {
		unsigned long stack = kernel_stack_pointer(regs);

		if (depth)
			dump_trace(NULL, regs, (unsigned long *)stack, 0,
				   &backtrace_ops, &depth);
		return;
	}

	if (x86_backtrace_32(regs, depth))
		return;

	while (depth-- && head)
		head = dump_user_backtrace(head);
}