// SPDX-License-Identifier: GPL-2.0
/*
  Generic support for BUG()

  This respects the following config options:

  CONFIG_BUG - emit BUG traps.  Nothing happens without this.
  CONFIG_GENERIC_BUG - enable this code.
  CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
	the containing struct bug_entry for bug_addr and file.
  CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG

  CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
  (though they're generally always on).

  CONFIG_GENERIC_BUG is set by each architecture using this code.

  To use this, your architecture must:

  1. Set up the config options:
     - Enable CONFIG_GENERIC_BUG if CONFIG_BUG

  2. Implement BUG (and optionally BUG_ON, WARN, WARN_ON)
     - Define HAVE_ARCH_BUG
     - Implement BUG() to generate a faulting instruction
     - NOTE: struct bug_entry does not have "file" or "line" entries
       when CONFIG_DEBUG_BUGVERBOSE is not enabled, so you must generate
       the values accordingly.

  3. Implement the trap
     - In the illegal instruction trap handler (typically), verify
       that the fault was in kernel mode, and call report_bug()
     - report_bug() will return whether it was a false alarm, a warning,
       or an actual bug.
     - You must implement the is_valid_bugaddr(bugaddr) callback which
       returns true if the eip is a real kernel address, and it points
       to the expected BUG trap instruction.

    Jeremy Fitzhardinge <jeremy@goop.org> 2006
 */

#define pr_fmt(fmt) fmt

#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/sched.h>
#include <linux/rculist.h>

extern struct bug_entry __start___bug_table[], __stop___bug_table[];


/*
 * Decode the address of the trapping BUG instruction recorded in a
 * bug_entry.  With relative pointers the table stores a 32-bit
 * displacement from the entry itself; otherwise an absolute address.
 */
static inline unsigned long bug_addr(const struct bug_entry *bug)
{
#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
	return (unsigned long)bug + bug->bug_addr_disp;
#else
	return bug->bug_addr;
#endif
}

#ifdef CONFIG_MODULES

/* Updates are protected by module mutex */
static LIST_HEAD(module_bug_list);

static struct bug_entry *module_find_bug(unsigned long bugaddr)
67 68
{
	struct module *mod;
69
	struct bug_entry *bug = NULL;
70

71
	rcu_read_lock_sched();
72
	list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
73 74
		unsigned i;

75
		bug = mod->bug_table;
76
		for (i = 0; i < mod->num_bugs; ++i, ++bug)
77
			if (bugaddr == bug_addr(bug))
78
				goto out;
79
	}
80 81
	bug = NULL;
out:
82
	rcu_read_unlock_sched();
83 84

	return bug;
85 86
}

void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
			 struct module *mod)
89 90 91 92
{
	char *secstrings;
	unsigned int i;

93 94
	lockdep_assert_held(&module_mutex);

95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111
	mod->bug_table = NULL;
	mod->num_bugs = 0;

	/* Find the __bug_table section, if present */
	secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	for (i = 1; i < hdr->e_shnum; i++) {
		if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
			continue;
		mod->bug_table = (void *) sechdrs[i].sh_addr;
		mod->num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
		break;
	}

	/*
	 * Strictly speaking this should have a spinlock to protect against
	 * traversals, but since we only traverse on BUG()s, a spinlock
	 * could potentially lead to deadlock and thus be counter-productive.
112 113
	 * Thus, this uses RCU to safely manipulate the bug list, since BUG
	 * must run in non-interruptive state.
114
	 */
115
	list_add_rcu(&mod->bug_list, &module_bug_list);
116 117 118 119
}

/*
 * Called during module unload with module_mutex held: unhook the
 * module's bug table from the bug list.  list_del_rcu() keeps the
 * removal safe for concurrent readers, which walk the list under
 * rcu_read_lock_sched() (see module_find_bug()).
 */
void module_bug_cleanup(struct module *mod)
{
	lockdep_assert_held(&module_mutex);
	list_del_rcu(&mod->bug_list);
}

#else

/* No module support: only the kernel's own bug table can ever match. */
static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
{
	return NULL;
}
#endif

struct bug_entry *find_bug(unsigned long bugaddr)
133
{
134
	struct bug_entry *bug;
135 136

	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
137
		if (bugaddr == bug_addr(bug))
138 139 140 141 142
			return bug;

	return module_find_bug(bugaddr);
}

enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
144
{
145
	struct bug_entry *bug;
146
	const char *file;
147
	unsigned line, warning, once, done;
148 149 150 151 152

	if (!is_valid_bugaddr(bugaddr))
		return BUG_TRAP_TYPE_NONE;

	bug = find_bug(bugaddr);
153 154
	if (!bug)
		return BUG_TRAP_TYPE_NONE;
155 156 157 158 159 160 161

	file = NULL;
	line = 0;
	warning = 0;

	if (bug) {
#ifdef CONFIG_DEBUG_BUGVERBOSE
162
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
163
		file = bug->file;
164 165 166
#else
		file = (const char *)bug + bug->file_disp;
#endif
167 168 169
		line = bug->line;
#endif
		warning = (bug->flags & BUGFLAG_WARNING) != 0;
170 171 172 173 174 175 176 177 178 179 180 181
		once = (bug->flags & BUGFLAG_ONCE) != 0;
		done = (bug->flags & BUGFLAG_DONE) != 0;

		if (warning && once) {
			if (done)
				return BUG_TRAP_TYPE_WARN;

			/*
			 * Since this is the only store, concurrency is not an issue.
			 */
			bug->flags |= BUGFLAG_DONE;
		}
182 183 184 185
	}

	if (warning) {
		/* this is a WARN_ON rather than BUG/BUG_ON */
186 187
		__warn(file, line, (void *)bugaddr, BUG_GET_TAINT(bug), regs,
		       NULL);
188 189 190
		return BUG_TRAP_TYPE_WARN;
	}

191
	printk(KERN_DEFAULT CUT_HERE);
192

193
	if (file)
194
		pr_crit("kernel BUG at %s:%u!\n", file, line);
195
	else
196
		pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
197
			(void *)bugaddr);
198 199 200

	return BUG_TRAP_TYPE_BUG;
}

/*
 * Clear the BUGFLAG_DONE bit on every entry in [start, end) so that
 * "once"-style warn sites in that table will fire again.
 */
static void clear_once_table(struct bug_entry *start, struct bug_entry *end)
{
	struct bug_entry *entry = start;

	while (entry < end) {
		entry->flags &= ~BUGFLAG_DONE;
		entry++;
	}
}

/*
 * Re-arm every "once"-style warning in the system by clearing
 * BUGFLAG_DONE in the core kernel's bug table and, when modules are
 * enabled, in each loaded module's table.  Module tables are walked
 * under rcu_read_lock_sched(), the same discipline module_find_bug()
 * uses for this list.
 */
void generic_bug_clear_once(void)
{
#ifdef CONFIG_MODULES
	struct module *m;

	rcu_read_lock_sched();
	list_for_each_entry_rcu(m, &module_bug_list, bug_list)
		clear_once_table(m->bug_table, m->bug_table + m->num_bugs);
	rcu_read_unlock_sched();
#endif

	clear_once_table(__start___bug_table, __stop___bug_table);
}