// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2018
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program", VCPU_STAT(deliver_program) },
	{ "deliver_io", VCPU_STAT(deliver_io) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "inject_ckc", VCPU_STAT(inject_ckc) },
	{ "inject_cputm", VCPU_STAT(inject_cputm) },
	{ "inject_external_call", VCPU_STAT(inject_external_call) },
	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
	{ "inject_io", VM_STAT(inject_io) },
	{ "inject_mchk", VCPU_STAT(inject_mchk) },
	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
	{ "inject_program", VCPU_STAT(inject_program) },
	{ "inject_restart", VCPU_STAT(inject_restart) },
	{ "inject_service_signal", VM_STAT(inject_service_signal) },
	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
	{ "inject_virtio", VM_STAT(inject_virtio) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

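/*
 * Compensate a change of the host TOD clock by @delta in the guest view of
 * a single SIE control block: the epoch (and, with the multiple-epoch
 * facility, the epoch index) is adjusted so the guest TOD stays unchanged.
 */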
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

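/* query availability of a PERFORM LOCKED OPERATION function code ("test bit" form) */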
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}

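/*
 * Walk all guest pages of a memory slot and transfer the dirty state from
 * the host page tables to KVM's dirty bitmap.
 */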
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

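/* request an operation exception intercept on all VCPUs */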
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_crypto_setup(vcpu);

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

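/* issue the given synchronous request on every VCPU of the VM */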
static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

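/* set the full guest TOD clock (epoch index and TOD base) from userspace */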
static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
					   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
		if (gtod->tod < htod.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
					 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;