// SPDX-License-Identifier: GPL-2.0-only
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>
#include <asm/crash.h>
#include <asm/cmdline.h>

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * This is used to VMCLEAR all VMCSs loaded on the processor.
 * The callback function pointer is assigned when the kvm_intel
 * module is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);

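/*
 * Call the registered vmclear callback, if any, to VMCLEAR all VMCSs
 * loaded on this CPU. The RCU read-side critical section pairs with
 * the assignment of the callback pointer by kvm_intel.
 */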
static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}

/*
 * When the crashkernel option is specified, only use the low
 * 1M for the real mode trampoline.
 */
void __init crash_reserve_low_1M(void)
{
	if (cmdline_find_option(boot_command_line, "crashkernel", NULL, 0) < 0)
		return;

	memblock_reserve(0, 1<<20);
	pr_info("Reserving the low 1M of memory for crashkernel\n");
}

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

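/*
 * NMI callback run on each remote CPU during the kdump shootdown:
 * save that CPU's register state for the vmcore, then quiesce
 * virtualization, Intel PT and the local APIC before the CPU halts.
 */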
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/*
	 * Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

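/*
 * Stop all other CPUs with an NMI, then disable the local APIC on the
 * crashing CPU as well.
 */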
void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no CPUs to shoot down */
}
#endif

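/*
 * The x86 crash_shutdown handler, reached via machine_crash_shutdown()
 * on the panicking CPU before the kdump kernel is entered.
 */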
void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/*
	 * Booting the kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
	restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE

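/* walk_system_ram_res() callback: count the System RAM ranges. */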
static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static struct crash_mem *fill_up_crash_elf_data(void)
{
	unsigned int nr_ranges = 0;
	struct crash_mem *cmem;

	walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback);
	if (!nr_ranges)
		return NULL;

	/*
	 * Exclusion of the crash region and/or crashk_low_res may cause
	 * another range split. So add two extra slots here.
	 */
	nr_ranges += 2;
	cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
	if (!cmem)
		return NULL;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;

	return cmem;
}

/*
 * Look for any unwanted ranges between mstart and mend and remove them.
 * This may lead to range splits; the resulting split ranges are stored
 * in the cmem->ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
	int ret = 0;

	/* Exclude the low 1M because it is always reserved */
	ret = crash_exclude_mem_range(cmem, 0, (1<<20)-1);
	if (ret)
		return ret;

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end)
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);

	return ret;
}

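/* walk_system_ram_res() callback: record one RAM range in cmem->ranges[]. */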
static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
	struct crash_mem *cmem = arg;

	cmem->ranges[cmem->nr_ranges].start = res->start;
	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
					unsigned long *sz)
{
	struct crash_mem *cmem;
	int ret;

	cmem = fill_up_crash_elf_data();
	if (!cmem)
		return -ENOMEM;

	ret = walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(cmem);
	if (ret)
		goto out;

	/* By default, prepare 64-bit headers */
	ret = crash_prepare_elf64_headers(cmem, IS_ENABLED(CONFIG_X86_64), addr, sz);

out:
	vfree(cmem);
	return ret;
}

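/*
 * Append @entry to the e820 table handed to the crash kernel; fail
 * once the zeropage table is full.
 */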
static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry, sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}

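/*
 * walk_iomem_res_desc() callback: convert @res into an e820 entry of
 * the type requested in crash_memmap_data and add it to boot_params.
 */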
static int memmap_entry_callback(struct resource *res, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = res->start;
	ei.size = resource_size(res);
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

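/*
 * Carve the ELF header region out of [mstart, mend] so that only the
 * remaining pieces of the range are passed on to the crash kernel as RAM.
 */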
static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return crash_exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add the low 1M */
	cmd.type = E820_TYPE_RAM;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_NONE, flags, 0, (1<<20)-1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add e820 reserved ranges */
	cmd.type = E820_TYPE_RESERVED;
	flags = IORESOURCE_MEM;
	walk_iomem_res_desc(IORES_DESC_RESERVED, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = resource_size(&crashk_low_res);
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add the rest to the memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start, crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}

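/*
 * Prepare the ELF core headers describing old memory and load them
 * into the crash kernel's reserved region as an additional kexec segment.
 */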
int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.memsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */