Commit a52fb43a authored by Linus Torvalds

Merge branch 'x86-cache-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cache control updates from Borislav Petkov:

 - The generalization of the RDT code to accommodate the addition of
   AMD's very similar implementation of the cache monitoring feature.

   This entails a subsystem move into a separate and generic
   arch/x86/kernel/cpu/resctrl/ directory along with adding
   vendor-specific initialization and feature detection helpers.

   On top of that is the unification of user-visible strings, both in the
   resctrl filesystem error handling and Kconfig.

   Provided by Babu Moger and Sherry Hurwitz.

 - Code simplifications and error handling improvements by Reinette
   Chatre.

* 'x86-cache-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/resctrl: Fix rdt_find_domain() return value and checks
  x86/resctrl: Remove unnecessary check for cbm_validate()
  x86/resctrl: Use rdt_last_cmd_puts() where possible
  MAINTAINERS: Update resctrl filename patterns
  Documentation: Rename and update intel_rdt_ui.txt to resctrl_ui.txt
  x86/resctrl: Introduce AMD QOS feature
  x86/resctrl: Fixup the user-visible strings
  x86/resctrl: Add AMD's X86_FEATURE_MBA to the scattered CPUID features
  x86/resctrl: Rename the config option INTEL_RDT to RESCTRL
  x86/resctrl: Add vendor check for the MBA software controller
  x86/resctrl: Bring cbm_validate() into the resource structure
  x86/resctrl: Initialize the vendor-specific resource functions
  x86/resctrl: Move all the macros to resctrl/internal.h
  x86/resctrl: Re-arrange the RDT init code
  x86/resctrl: Rename the RDT functions and definitions
  x86/resctrl: Rename and move rdt files to a separate directory
parents 42b00f12 52eb7433
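
The user-visible surface these patches generalize is the resctrl filesystem; the point of the series is that the same interface now works on AMD as well as Intel. As a hedged illustration (not part of this merge: it assumes a kernel built with CONFIG_RESCTRL=y, an L3 CAT-capable CPU, and root privileges; the group name "demo" and the bitmask "f" are arbitrary examples), a userspace consumer drives it like this:

/* Hedged sketch: drive the resctrl filesystem from userspace.
 * Assumes CONFIG_RESCTRL=y, CAP_SYS_ADMIN, and L3 CAT support;
 * the group name "demo" and mask "f" are illustrative only.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/mount.h>
#include <sys/stat.h>

int main(void)
{
	/* Mount the resctrl filesystem (tolerate an existing mount). */
	if (mount("resctrl", "/sys/fs/resctrl", "resctrl", 0, NULL) &&
	    errno != EBUSY) {
		perror("mount resctrl");
		return 1;
	}

	/* Create a control group; the kernel populates its files. */
	if (mkdir("/sys/fs/resctrl/demo", 0755) && errno != EEXIST) {
		perror("mkdir demo");
		return 1;
	}

	/* Restrict the group to four L3 ways on cache domain 0. */
	FILE *f = fopen("/sys/fs/resctrl/demo/schemata", "w");
	if (!f) {
		perror("schemata");
		return 1;
	}
	fprintf(f, "L3:0=f\n");
	return fclose(f) ? 1 : 0;
}

Compile with any C compiler and run as root; on a kernel without CONFIG_RESCTRL the mount simply fails with ENODEV.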
--- a/Documentation/x86/intel_rdt_ui.txt
+++ b/Documentation/x86/resctrl_ui.txt
@@ -1,4 +1,7 @@
-User Interface for Resource Allocation in Intel Resource Director Technology
+User Interface for Resource Control feature
+
+Intel refers to this feature as Intel Resource Director Technology(Intel(R) RDT).
+AMD refers to this feature as AMD Platform Quality of Service(AMD QoS).
 
 Copyright (C) 2016 Intel Corporation
 
@@ -6,8 +9,8 @@ Fenghua Yu <fenghua.yu@intel.com>
 Tony Luck <tony.luck@intel.com>
 Vikas Shivappa <vikas.shivappa@intel.com>
 
-This feature is enabled by the CONFIG_INTEL_RDT Kconfig and the
-X86 /proc/cpuinfo flag bits:
+This feature is enabled by the CONFIG_RESCTRL and the X86 /proc/cpuinfo
+flag bits:
 RDT (Resource Director Technology) Allocation - "rdt_a"
 CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
 CDP (Code and Data Prioritization ) - "cdp_l3", "cdp_l2"
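
To check the flag bits listed in the hunk above on a live system, here is a minimal sketch (again illustrative only, not from this series; the flag names are taken verbatim from the documentation, and the naive substring match is not robust feature detection):

/* Hedged sketch: scan /proc/cpuinfo for the resctrl-related flag bits
 * named in the documentation above.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static int cpu_has_flag(const char *flag)
{
	char line[4096];
	FILE *f = fopen("/proc/cpuinfo", "r");
	int found = 0;

	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "flags", 5) && strstr(line, flag)) {
			found = 1;
			break;
		}
	}
	fclose(f);
	return found;
}

int main(void)
{
	const char *flags[] = { "rdt_a", "cat_l3", "cat_l2", "cdp_l3", "cdp_l2" };

	for (size_t i = 0; i < sizeof(flags) / sizeof(flags[0]); i++)
		printf("%-7s %s\n", flags[i], cpu_has_flag(flags[i]) ? "yes" : "no");
	return 0;
}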
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12717,9 +12717,9 @@ M:	Fenghua Yu <fenghua.yu@intel.com>
 M:	Reinette Chatre <reinette.chatre@intel.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
-F:	arch/x86/kernel/cpu/intel_rdt*
-F:	arch/x86/include/asm/intel_rdt_sched.h
-F:	Documentation/x86/intel_rdt*
+F:	arch/x86/kernel/cpu/resctrl/
+F:	arch/x86/include/asm/resctrl_sched.h
+F:	Documentation/x86/resctrl*
 
 READ-COPY UPDATE (RCU)
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -444,15 +444,23 @@ config RETPOLINE
 	  branches. Requires a compiler with -mindirect-branch=thunk-extern
 	  support for full protection. The kernel may run slower.
 
-config INTEL_RDT
-	bool "Intel Resource Director Technology support"
-	depends on X86 && CPU_SUP_INTEL
+config RESCTRL
+	bool "Resource Control support"
+	depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
 	select KERNFS
 	help
-	  Select to enable resource allocation and monitoring which are
-	  sub-features of Intel Resource Director Technology(RDT). More
-	  information about RDT can be found in the Intel x86
-	  Architecture Software Developer Manual.
+	  Enable Resource Control support.
+
+	  Provide support for the allocation and monitoring of system resources
+	  usage by the CPU.
+
+	  Intel calls this Intel Resource Director Technology
+	  (Intel(R) RDT). More information about RDT can be found in the
+	  Intel x86 Architecture Software Developer Manual.
+
+	  AMD calls this AMD Platform Quality of Service (AMD QoS).
+	  More information about AMD QoS can be found in the AMD64 Technology
+	  Platform Quality of Service Extensions manual.
 
 	  Say N if unsure.
--- a/arch/x86/include/asm/intel_rdt_sched.h
+++ b/arch/x86/include/asm/resctrl_sched.h
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_INTEL_RDT_SCHED_H
-#define _ASM_X86_INTEL_RDT_SCHED_H
+#ifndef _ASM_X86_RESCTRL_SCHED_H
+#define _ASM_X86_RESCTRL_SCHED_H
 
-#ifdef CONFIG_INTEL_RDT
+#ifdef CONFIG_RESCTRL
 
 #include <linux/sched.h>
 #include <linux/jump_label.h>
@@ -10,7 +10,7 @@
 #define IA32_PQR_ASSOC	0x0c8f
 
 /**
- * struct intel_pqr_state - State cache for the PQR MSR
+ * struct resctrl_pqr_state - State cache for the PQR MSR
  * @cur_rmid:		The cached Resource Monitoring ID
  * @cur_closid:	The cached Class Of Service ID
  * @default_rmid:	The user assigned Resource Monitoring ID
@@ -24,21 +24,21 @@
  * The cache also helps to avoid pointless updates if the value does
  * not change.
  */
-struct intel_pqr_state {
+struct resctrl_pqr_state {
 	u32			cur_rmid;
 	u32			cur_closid;
 	u32			default_rmid;
 	u32			default_closid;
 };
 
-DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
+DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);
 
 DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
 DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
 
 /*
- * __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
+ * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
  *
  * Following considerations are made so that this has minimal impact
  * on scheduler hot path:
@@ -51,9 +51,9 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
  *   simple as possible.
  * Must be called with preemption disabled.
  */
-static void __intel_rdt_sched_in(void)
+static void __resctrl_sched_in(void)
 {
-	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
 	u32 closid = state->default_closid;
 	u32 rmid = state->default_rmid;
 
@@ -78,16 +78,16 @@ static void __intel_rdt_sched_in(void)
 	}
 }
 
-static inline void intel_rdt_sched_in(void)
+static inline void resctrl_sched_in(void)
 {
 	if (static_branch_likely(&rdt_enable_key))
-		__intel_rdt_sched_in();
+		__resctrl_sched_in();
 }
 
 #else
 
-static inline void intel_rdt_sched_in(void) {}
+static inline void resctrl_sched_in(void) {}
 
-#endif /* CONFIG_INTEL_RDT */
+#endif /* CONFIG_RESCTRL */
 
-#endif /* _ASM_X86_INTEL_RDT_SCHED_H */
+#endif /* _ASM_X86_RESCTRL_SCHED_H */
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -36,13 +36,10 @@ obj-$(CONFIG_CPU_SUP_CENTAUR)		+= centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
 obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o
 
-obj-$(CONFIG_INTEL_RDT)	+= intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o
-obj-$(CONFIG_INTEL_RDT)	+= intel_rdt_ctrlmondata.o intel_rdt_pseudo_lock.o
-CFLAGS_intel_rdt_pseudo_lock.o = -I$(src)
-
 obj-$(CONFIG_X86_MCE)			+= mcheck/
 obj-$(CONFIG_MTRR)			+= mtrr/
 obj-$(CONFIG_MICROCODE)			+= microcode/
+obj-$(CONFIG_RESCTRL)			+= resctrl/
 
 obj-$(CONFIG_X86_LOCAL_APIC)		+= perfctr-watchdog.o
--- /dev/null
+++ b/arch/x86/kernel/cpu/resctrl/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_RESCTRL)	+= core.o rdtgroup.o monitor.o
+obj-$(CONFIG_RESCTRL)	+= ctrlmondata.o pseudo_lock.o
+CFLAGS_pseudo_lock.o = -I$(src)
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -22,7 +22,7 @@
  * Software Developer Manual June 2016, volume 3, section 17.17.
  */
 
-#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt)	"resctrl: " fmt
 
 #include <linux/slab.h>
 #include <linux/err.h>
@@ -30,22 +30,19 @@
 #include <linux/cpuhotplug.h>
 
 #include <asm/intel-family.h>
-#include <asm/intel_rdt_sched.h>
-#include "intel_rdt.h"
-
-#define MBA_IS_LINEAR		0x4
-#define MBA_MAX_MBPS		U32_MAX
+#include <asm/resctrl_sched.h>
+#include "internal.h"
 
 /* Mutex to protect rdtgroup access. */
 DEFINE_MUTEX(rdtgroup_mutex);
 
 /*
- * The cached intel_pqr_state is strictly per CPU and can never be
+ * The cached resctrl_pqr_state is strictly per CPU and can never be
  * updated from a remote CPU. Functions which modify the state
  * are called with interrupts disabled and no preemption, which
  * is sufficient for the protection.
  */
-DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
+DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);
 
 /*
  * Used to store the max resource name width and max resource data width
@@ -60,9 +57,13 @@ int max_name_width, max_data_width;
 bool rdt_alloc_capable;
 
 static void
-mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
+mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
+		struct rdt_resource *r);
 static void
 cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
+static void
+mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
+	      struct rdt_resource *r);
 
 #define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
@@ -72,7 +73,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid		= RDT_RESOURCE_L3,
 		.name		= "L3",
 		.domains	= domain_init(RDT_RESOURCE_L3),
-		.msr_base	= IA32_L3_CBM_BASE,
+		.msr_base	= MSR_IA32_L3_CBM_BASE,
 		.msr_update	= cat_wrmsr,
 		.cache_level	= 3,
 		.cache = {
@@ -89,7 +90,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid		= RDT_RESOURCE_L3DATA,
 		.name		= "L3DATA",
 		.domains	= domain_init(RDT_RESOURCE_L3DATA),
-		.msr_base	= IA32_L3_CBM_BASE,
+		.msr_base	= MSR_IA32_L3_CBM_BASE,
 		.msr_update	= cat_wrmsr,
 		.cache_level	= 3,
 		.cache = {
@@ -106,7 +107,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid		= RDT_RESOURCE_L3CODE,
 		.name		= "L3CODE",
 		.domains	= domain_init(RDT_RESOURCE_L3CODE),
-		.msr_base	= IA32_L3_CBM_BASE,
+		.msr_base	= MSR_IA32_L3_CBM_BASE,
 		.msr_update	= cat_wrmsr,
 		.cache_level	= 3,
 		.cache = {
@@ -123,7 +124,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid		= RDT_RESOURCE_L2,
 		.name		= "L2",
 		.domains	= domain_init(RDT_RESOURCE_L2),
-		.msr_base	= IA32_L2_CBM_BASE,
+		.msr_base	= MSR_IA32_L2_CBM_BASE,
 		.msr_update	= cat_wrmsr,
 		.cache_level	= 2,
 		.cache = {
@@ -140,7 +141,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid		= RDT_RESOURCE_L2DATA,
 		.name		= "L2DATA",
 		.domains	= domain_init(RDT_RESOURCE_L2DATA),
-		.msr_base	= IA32_L2_CBM_BASE,
+		.msr_base	= MSR_IA32_L2_CBM_BASE,
 		.msr_update	= cat_wrmsr,
 		.cache_level	= 2,
 		.cache = {
@@ -157,7 +158,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid		= RDT_RESOURCE_L2CODE,
 		.name		= "L2CODE",
 		.domains	= domain_init(RDT_RESOURCE_L2CODE),
-		.msr_base	= IA32_L2_CBM_BASE,
+		.msr_base	= MSR_IA32_L2_CBM_BASE,
 		.msr_update	= cat_wrmsr,
 		.cache_level	= 2,
 		.cache = {
@@ -174,10 +175,7 @@ struct rdt_resource rdt_resources_all[] = {
 		.rid		= RDT_RESOURCE_MBA,
 		.name		= "MB",
 		.domains	= domain_init(RDT_RESOURCE_MBA),
-		.msr_base	= IA32_MBA_THRTL_BASE,
-		.msr_update	= mba_wrmsr,
 		.cache_level	= 3,
-		.parse_ctrlval	= parse_bw,
 		.format_str	= "%d=%*u",
 		.fflags		= RFTYPE_RES_MB,
 	},
@@ -211,9 +209,10 @@ static inline void cache_alloc_hsw_probe(void)
 	struct rdt_resource *r  = &rdt_resources_all[RDT_RESOURCE_L3];
 	u32 l, h, max_cbm = BIT_MASK(20) - 1;
 
-	if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
+	if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
 		return;
-	rdmsr(IA32_L3_CBM_BASE, l, h);
+
+	rdmsr(MSR_IA32_L3_CBM_BASE, l, h);
 
 	/* If all the bits were set in MSR, return success */
 	if (l != max_cbm)
@@ -259,7 +258,7 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r)
 	return false;
 }
 
-static bool rdt_get_mem_config(struct rdt_resource *r)
+static bool __get_mem_config_intel(struct rdt_resource *r)
 {
 	union cpuid_0x10_3_eax eax;
 	union cpuid_0x10_x_edx edx;
@@ -285,6 +284,30 @@ static bool rdt_get_mem_config(struct rdt_resource *r)
 	return true;
 }
 
+static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
+{
+	union cpuid_0x10_3_eax eax;
+	union cpuid_0x10_x_edx edx;
+	u32 ebx, ecx;
+
+	cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
+	r->num_closid = edx.split.cos_max + 1;
+	r->default_ctrl = MAX_MBA_BW_AMD;
+
+	/* AMD does not use delay */
+	r->membw.delay_linear = false;
+
+	r->membw.min_bw = 0;
+	r->membw.bw_gran = 1;
+	/* Max value is 2048, Data width should be 4 in decimal */
+	r->data_width = 4;
+
+	r->alloc_capable = true;
+	r->alloc_enabled = true;
+
+	return true;
+}
+
 static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
 {
 	union cpuid_0x10_1_eax eax;
@@ -344,6 +367,15 @@ static int get_cache_id(int cpu, int level)
 	return -1;
 }
 
+static void
+mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+{
+	unsigned int i;
+
+	for (i = m->low; i < m->high; i++)
+		wrmsrl(r->msr_base + i, d->ctrl_val[i]);
+}
+
 /*
  * Map the memory b/w percentage value to delay values
  * that can be written to QOS_MSRs.
@@ -359,7 +391,8 @@ u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
 }
 
 static void
-mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
+		struct rdt_resource *r)
 {
 	unsigned int i;
 
@@ -421,7 +454,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
 	struct list_head *l;
 
 	if (id < 0)
-		return ERR_PTR(id);
+		return ERR_PTR(-ENODEV);
 
 	list_for_each(l, &r->domains) {
 		d = list_entry(l, struct rdt_domain, list);
@@ -639,7 +672,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 
 static void clear_closid_rmid(int cpu)
 {
-	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
 
 	state->default_closid = 0;
 	state->default_rmid = 0;
@@ -648,7 +681,7 @@ static void clear_closid_rmid(int cpu)
 	wrmsr(IA32_PQR_ASSOC, 0, 0);
 }
 
-static int intel_rdt_online_cpu(unsigned int cpu)
+static int resctrl_online_cpu(unsigned int cpu)
 {
 	struct rdt_resource *r;
 
@@ -674,7 +707,7 @@ static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
 	}
 }
 
-static int intel_rdt_offline_cpu(unsigned int cpu)
+static int resctrl_offline_cpu(unsigned int cpu)
 {
 	struct rdtgroup *rdtgrp;
 	struct rdt_resource *r;
@@ -794,6 +827,19 @@ static bool __init rdt_cpu_has(int flag)
 	return ret;
 }
 
+static __init bool get_mem_config(void)
+{
+	if (!rdt_cpu_has(X86_FEATURE_MBA))
+		return false;
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		return __get_mem_config_intel(&rdt_resources_all[RDT_RESOURCE_MBA]);
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return __rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]);
+
+	return false;
+}
+
 static __init bool get_rdt_alloc_resources(void)
 {
 	bool ret = false;
@@ -818,10 +864,9 @@ static __init bool get_rdt_alloc_resources(void)
 		ret = true;
 	}
 
-	if (rdt_cpu_has(X86_FEATURE_MBA)) {
-		if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]))
-			ret = true;
-	}
+	if (get_mem_config())
+		ret = true;
+
 	return ret;
 }
@@ -840,7 +885,7 @@ static __init bool get_rdt_mon_resources(void)
 	return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
 }
 
-static __init void rdt_quirks(void)
+static __init void __check_quirks_intel(void)
 {
 	switch (boot_cpu_data.x86_model) {
 	case INTEL_FAM6_HASWELL_X:
@@ -855,30 +900,91 @@
 	}
 }
 
+static __init void check_quirks(void)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		__check_quirks_intel();
+}
+
 static __init bool get_rdt_resources(void)
 {
-	rdt_quirks();
 	rdt_alloc_capable = get_rdt_alloc_resources();
 	rdt_mon_capable = get_rdt_mon_resources();
 
 	return (rdt_mon_capable || rdt_alloc_capable);
 }
 
+static __init void rdt_init_res_defs_intel(void)
+{
+	struct rdt_resource *r;
+
+	for_each_rdt_resource(r) {
+		if (r->rid == RDT_RESOURCE_L3 ||
+		    r->rid == RDT_RESOURCE_L3DATA ||
+		    r->rid == RDT_RESOURCE_L3CODE ||
+		    r->rid == RDT_RESOURCE_L2 ||
+		    r->rid == RDT_RESOURCE_L2DATA ||
+		    r->rid == RDT_RESOURCE_L2CODE)
+			r->cbm_validate = cbm_validate_intel;
+		else if (r->rid == RDT_RESOURCE_MBA) {
+			r->msr_base = MSR_IA32_MBA_THRTL_BASE;
+			r->msr_update = mba_wrmsr_intel;
+			r->parse_ctrlval = parse_bw_intel;
+		}
+	}
+}
+
+static __init void rdt_init_res_defs_amd(void)
+{
+	struct rdt_resource *r;
+
+	for_each_rdt_resource(r) {
+		if (r->rid == RDT_RESOURCE_L3 ||
+		    r->rid == RDT_RESOURCE_L3DATA ||
+		    r->rid == RDT_RESOURCE_L3CODE ||
+		    r->rid == RDT_RESOURCE_L2 ||
+		    r->rid == RDT_RESOURCE_L2DATA ||
+		    r->rid == RDT_RESOURCE_L2CODE)
+			r->cbm_validate = cbm_validate_amd;
+		else if (r->rid == RDT_RESOURCE_MBA) {
+			r->msr_base = MSR_IA32_MBA_BW_BASE;
+			r->msr_update = mba_wrmsr_amd;
+			r->parse_ctrlval = parse_bw_amd;
+		}
+	}
+}
+
+static __init void rdt_init_res_defs(void)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		rdt_init_res_defs_intel();
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		rdt_init_res_defs_amd();
+}
+
 static enum cpuhp_state rdt_online;
 
-static int __init intel_rdt_late_init(void)
+static int __init resctrl_late_init(void)
 {
 	struct rdt_resource *r;
 	int state, ret;
 
+	/*
+	 * Initialize functions(or definitions) that are different
+	 * between vendors here.
+	 */
+	rdt_init_res_defs();
+
+	check_quirks();
+
 	if (!get_rdt_resources())
 		return -ENODEV;
 
 	rdt_init_padding();
 
 	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
-				  "x86/rdt/cat:online:",
-				  intel_rdt_online_cpu, intel_rdt_offline_cpu);
+				  "x86/resctrl/cat:online:",
+				  resctrl_online_cpu, resctrl_offline_cpu);
 	if (state < 0)
 		return state;
 
@@ -890,20 +996,20 @@ static int __init intel_rdt_late_init(void)
 	rdt_online = state;
 
 	for_each_alloc_capable_rdt_resource(r)
-		pr_info("Intel RDT %s allocation detected\n", r->name);
+		pr_info("%s allocation detected\n", r->name);
 
 	for_each_mon_capable_rdt_resource(r)
-		pr_info("Intel RDT %s monitoring detected\n", r->name);
+		pr_info("%s monitoring detected\n", r->name);
 
 	return 0;
 }
 
-late_initcall(intel_rdt_late_init);
+late_initcall(resctrl_late_init);
 
-static void __exit intel_rdt_exit(void)
+static void __exit resctrl_exit(void)
 {
 	cpuhp_remove_state(rdt_online);
 	rdtgroup_exit();
 }
 
-__exitcall(intel_rdt_exit);
+__exitcall(resctrl_exit);
--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -27,7 +27,54 @@
 #include <linux/kernfs.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
-#include "intel_rdt.h"
+#include "internal.h"
+
+/*
+ * Check whether MBA bandwidth percentage value is correct. The value is
+ * checked against the minimum and maximum bandwidth values specified by
+ * the hardware. The allocated bandwidth percentage is rounded to the next
+ * control step available on the hardware.
+ */
+static bool bw_validate_amd(char *buf, unsigned long *data,
+			    struct rdt_resource *r)
+{
+	unsigned long bw;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &bw);
+	if (ret) {
+		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
+		return false;
+	}
+
+	if (bw < r->membw.min_bw || bw > r->default_ctrl) {
+		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
+				    r->membw.min_bw, r->default_ctrl);
+		return false;
+	}
+
+	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
+	return true;
+}
+
+int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
+		 struct rdt_domain *d)
+{
+	unsigned long bw_val;
+
+	if (d->have_new_ctrl) {
+		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
+		return -EINVAL;
+	}
+
+	if (!bw_validate_amd(data->buf, &bw_val, r))
+		return -EINVAL;
+
+	d->new_ctrl = bw_val;
+	d->have_new_ctrl = true;
+
+	return 0;
+}
 
 /*
  * Check whether MBA bandwidth percentage value is correct. The value is
@@ -65,13 +112,13 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
 	return true;
 }
 
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
-	     struct rdt_domain *d)
+int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
+		   struct rdt_domain *d)
 {
 	unsigned long bw_val;
 
 	if (d->have_new_ctrl) {
-		rdt_last_cmd_printf("duplicate domain %d\n", d->id);
+		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
 		return -EINVAL;
 	}
 
...